2025-12-09

This commit is contained in:
2026-03-17 15:03:35 -06:00
parent 4b82b57113
commit aae574f8dc
137 changed files with 17355 additions and 4067 deletions
+10 -10
View File
@@ -10,13 +10,13 @@ D:\Work\9 iClone\Amazon\
D:\Amazon\00_external-files\
N:\1. CHARACTERS\remapping\
[Recent]
D:\Work\9 iClone\Demon Hunters\Blender\
P:\251031_Tatt2Away_Idol\Assets\Blends\Char\
A:\1 Amazon_Active_Projects\251121_Gold-L6_redo\Assets\Blends\
P:\250827_FestivalTurf\Renders\06 Infill And Powerbrooming\Visual 4B\
F:\jobs\2025-11-25-155447.841674-Scenario 3 Scene 1 - Feedback A\
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\
F:\jobs\2025-11-25-114624.321965-Visual 4B\
F:\jobs\2025-11-24-172754.802929-Visual 3A\
F:\jobs\2025-11-24-171247.279407-Visual 2B\
P:\251031_Tatt2Away_Idol\Blends\animations\02 Dressing Room\
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\
T:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\blendcache_Visual_7_phone_insert\
C:\Users\Nathan\AppData\Local\Temp\
P:\250827_FestivalTurf\Assets\Mocap\07 final\
P:\250827_FestivalTurf\Assets\Blends\
T:\251031_Tatt2Away_Idol\Assets\Blends\Char\
D:\Work\9 iClone\Demon Hunters\Blender_v2\
T:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\
D:\Work\9 iClone\Demon Hunters\Blender_v2\imports\Zoey\
F:\jobs\2025-12-04-163406.132815-Visual_2_push_broom\blendcache_Visual_2_push_broom.flamenco\
+30 -30
View File
@@ -1,30 +1,30 @@
A:\1 Amazon_Active_Projects\251121_Gold-L6_redo\Assets\Blends\Aaron_vest-textured.blend
P:\251031_Tatt2Away_Idol\Assets\Blends\Char\Rumi v2.0.blend
A:\1 Amazon_Active_Projects\251121_Gold-L6_redo\Assets\Blends\Aaron.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 2C.blend
A:\1 Amazon_Active_Projects\251121_Gold-L6_redo\Assets\Blends\amazon_warehouse_CYCLES.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 4B.blend
C:\Users\Nathan\Downloads\Scenario 3 Scene 1 - Feedback A.blend
F:\jobs\2025-11-25-155447.841674-Scenario 3 Scene 1 - Feedback A\Scenario 3 Scene 1 - Feedback A.flamenco.blend
A:\1 Amazon_Active_Projects\251121_Gold-L6_redo\Blends\animations\Scenario 3 Scene 1 - Feedback A.blend
C:\Users\Nathan\Downloads\Visual 5B.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 5B.blend
F:\jobs\2025-11-25-114624.321965-Visual 4B\Visual 4B.flamenco.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 5.blend
C:\Users\Nathan\Downloads\Visual 3B.blend
C:\Users\Nathan\Downloads\Visual 4B\Blends\animations\06 Infill And Powerbrooming\Visual 4B.blend
F:\jobs\2025-11-24-172754.802929-Visual 3A\Visual 3A.flamenco.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 3A.blend
C:\Users\Nathan\Downloads\Visual 3A\Blends\animations\06 Infill And Powerbrooming\Visual 3A.blend
C:\Users\Nathan\Downloads\Visual 2C.blend
F:\jobs\2025-11-24-171247.279407-Visual 2B\Visual 2B.flamenco.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 2B.blend
C:\Users\Nathan\Downloads\Visual 2D.blend
C:\Users\Nathan\Downloads\Visual 2A.blend
C:\Users\Nathan\Downloads\01_opening.blend
C:\Users\Nathan\Downloads\09_ending.blend
P:\250827_FestivalTurf\Blends\animations\04 Securing Your Seam\09_ending.blend
P:\251031_Tatt2Away_Idol\Blends\animations\02 Dressing Room\2B.blend
P:\251031_Tatt2Away_Idol\Blends\animations\02 Dressing Room\2D.blend
P:\251031_Tatt2Away_Idol\Blends\animations\02 Dressing Room\2C.blend
P:\251031_Tatt2Away_Idol\Blends\animations\02 Dressing Room\2A.blend
A:\1 Amazon_Active_Projects\251203_ADTA_December_2025\Blends\animations\Bag2PackScan_animation 1a.blend
T:\251203_ADTA_December_2025\Blends\animations\Pack2BagScan_animation 6d.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_8.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_5.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_4_leaf_blower_insert.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_2_broom.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_2_push_broom.blend
F:\jobs\2025-12-08-164101.711842-Visual_8\Visual_8.flamenco.blend
P:\250827_FestivalTurf\Blends\animations\04 Securing Your Seam\08_smooth blend.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_7_phone_insert.blend
T:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_7_phone_insert.blend
C:\Users\Nathan\AppData\Local\Temp\2025-11-08_17-11_Visual_7_phone_insert.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_7.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\0template.blend
C:\Users\Nathan\Downloads\Visual_7.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 3B.blend
P:\250827_FestivalTurf\Blends\animations\06 Infill And Powerbrooming\Visual 4A.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_3_PE_spread.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_4_leaf_blower.blend
P:\250827_FestivalTurf\Blends\animations\07 Final Touches And Maintenance\Visual_6.blend
C:\Users\Nathan\Downloads\Visual_4_leaf_blower.blend
C:\Users\Nathan\Downloads\Visual_4_leaf_blower_insert.blend
F:\jobs\2025-12-08-125318.280619-Bag2PackScan_animation 1b\Bag2PackScan_animation 1b.flamenco.blend
F:\jobs\2025-12-08-121420.823326-Bag2PackScan_animation 1a\Bag2PackScan_animation 1a.flamenco.blend
F:\jobs\2025-12-08-111004.205815-Bag2PackScan_animation 2b\Bag2PackScan_animation 2b.flamenco.blend
F:\jobs\2025-12-08-104805.832937-Bag2PackScan_animation 2a\Bag2PackScan_animation 2a.flamenco.blend
P:\250827_FestivalTurf\Assets\Blends\Spa Pitbull.blend
T:\251031_Tatt2Away_Idol\Assets\Blends\Char\Rumi_v1.blend
D:\Work\9 iClone\Demon Hunters\Blender_v2\Rumi_v1.blend
F:\jobs\2025-12-08-095931.062624-Bag2PackScan_animation 4b\Bag2PackScan_animation 4b.flamenco.blend
Binary file not shown.
BIN
View File
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -10,7 +10,6 @@ module_names = (
"op_pie_wrappers",
"op_copy_to_selected",
"bs_utils",
"hotkeys",
"prefs",
"sidebar",
"tweak_builtin_pies",
@@ -83,10 +82,4 @@ def register():
bpy.app.timers.register(delayed_register, first_interval=0.5, persistent=True)
def unregister():
# save add-on prefs to file before unregistering.
from .bs_utils.prefs import get_addon_prefs, update_prefs_on_file
addon_prefs = get_addon_prefs()
if addon_prefs:
if bpy.context.preferences.use_preferences_save:
update_prefs_on_file()
register_unregister_modules(reversed(modules), False)
register_unregister_modules(reversed(modules), False)
@@ -1,7 +1,7 @@
schema_version = "1.0.0"
id = "viewport_pie_menus"
name = "3D Viewport Pie Menus"
version = "1.7.0"
version = "1.7.1"
tagline = "Various pie menus to speed up your workflow"
maintainer = "Community"
type = "add-on"
@@ -7,24 +7,8 @@
import bpy
from bpy.types import KeyMap, KeyMapItem, UILayout
ADDON_KEYMAPS = []
class HotkeyDrawMixin:
"""Expose these functions as a mix-in class so that add-ons can more easily override functionality as needed.
Add-ons should simply inherit this class in their AddonPreferences class.
"""
@staticmethod
def draw_hotkey_list(context, layout, compact=False, debug=False, sort_mode='BY_KEYMAP', ignore_missing=False):
draw_hotkey_list(context, layout, compact, debug, sort_mode, ignore_missing)
@staticmethod
def get_user_kmis_of_addon(context) -> list[tuple[KeyMap, KeyMapItem]]:
return get_user_kmis_of_addon(context)
@staticmethod
def draw_kmi(km: KeyMap, kmi: KeyMapItem, layout: UILayout, compact=False, debug=False):
draw_kmi(km, kmi, layout, compact=compact, debug=debug)
if "ADDON_KEYMAPS" not in locals():
ADDON_KEYMAPS = []
KEYMAP_ICONS = {
'Object Mode': 'OBJECT_DATAMODE',
@@ -57,7 +41,6 @@ def register_hotkey(
hotkey_kwargs={'type': "SPACE", 'value': "PRESS"},
keymap_name='Window'
):
global ADDON_KEYMAPS
wm = bpy.context.window_manager
@@ -80,8 +63,8 @@ def register_hotkey(
# it is SUPPOSED TO stick around for ever.
# This allows Blender to store the associated user keymap, meaning the user's modifications
# will be stored and restored as expected, whenever the add-on is enabled again.
if (addon_km, existing_kmi) not in ADDON_KEYMAPS:
ADDON_KEYMAPS.append((addon_km, existing_kmi))
# if (addon_km, existing_kmi) not in ADDON_KEYMAPS:
# ADDON_KEYMAPS.append((addon_km, existing_kmi))
return
addon_kmi = addon_km.keymap_items.new(bl_idname, **hotkey_kwargs)
for key in op_kwargs:
@@ -204,8 +187,6 @@ def find_kmi_in_km_by_data(km: KeyMap, hotkey_kwargs: dict, op_idname: str, op_k
def is_kmi_matching(kmi: KeyMapItem, hotkey_kwargs: dict, op_idname: str, op_kwargs: dict) -> bool:
if kmi.idname != op_idname:
return False
if kmi.properties == None:
return False
combined_hotkey = KMI_DEFAULTS.copy()
combined_hotkey.update(hotkey_kwargs)
@@ -213,11 +194,17 @@ def find_kmi_in_km_by_data(km: KeyMap, hotkey_kwargs: dict, op_idname: str, op_k
if value != getattr(kmi, key):
return False
for key, value in op_kwargs.items():
if key not in kmi.properties:
return False
if value != kmi.properties[key]:
want_to_crash = False
if want_to_crash:
# These checks cause https://projects.blender.org/Mets/CloudRig/issues/201
# They don't seem necessary.
if kmi.properties == None:
return False
for key, value in op_kwargs.items():
if key not in kmi.properties:
return False
if value != kmi.properties[key]:
return False
return True
@@ -342,18 +329,27 @@ def restore_deleted_keymap_items_global(context) -> int:
keyconfigs = context.window_manager.keyconfigs
user_kc = keyconfigs.user
total_restored = 0
for user_km in user_kc.keymaps:
total_restored += restore_deleted_keymap_items(context, user_km)
keymap_names = [km.name for km in user_kc.keymaps]
for km_name in keymap_names:
num_restored = restore_deleted_keymap_items(context, km_name)
user_km = user_kc.keymaps[km_name]
if num_restored != 0:
user_km = user_kc.keymaps[km_name]
print(f"{user_km.name}: Restored {num_restored}")
total_restored += num_restored
return total_restored
def restore_deleted_keymap_items(context, user_km) -> int:
def restore_deleted_keymap_items(context, user_km_name) -> int:
keyconfigs = context.window_manager.keyconfigs
user_kc = keyconfigs.user
default_kc = keyconfigs.default
addon_kc = keyconfigs.addon
user_km = user_kc.keymaps[user_km_name]
# Step 1: Store modified and added KeyMapItems in a temp keymap.
temp_km = user_kc.keymaps.new("temp_"+user_km.name)
temp_km_name = "temp_"+user_km_name
temp_km = user_kc.keymaps.new(temp_km_name)
kmis_user_modified = []
kmis_user_defined = []
for user_kmi in user_km.keymap_items:
@@ -374,6 +370,10 @@ def restore_deleted_keymap_items(context, user_km) -> int:
# Step 2: Restore User KeyMap to default.
num_kmis = len(user_km.keymap_items)
user_km.restore_to_default()
# XXX: restore_to_default() will shuffle the memory addresses, so we need to re-reference user_km.
# I don't think this was the case pre-Blender 5.0!!
user_km = user_kc.keymaps[user_km_name]
temp_km = user_kc.keymaps[temp_km_name]
# Step 3: Restore modified and added KeyMapItems.
for temp_def_kmi in kmis_user_defined:
@@ -1,6 +1,6 @@
from pathlib import Path
import bpy, json
import bpy, json, os
from bpy.types import PropertyGroup
from rna_prop_ui import IDPropertyGroup
from bpy.types import AddonPreferences
@@ -40,12 +40,14 @@ class PrefsFileSaveLoadMixin:
# This could still fail if Blender loads too slowly, so it could be better.
# Ideally, Blender would simply save add-on preferences to disk, and none of this should be needed.
def timer_func(_scene=None):
prefs = None
try:
prefs = get_addon_prefs()
except KeyError:
# Add-on got un-registered in the meantime.
return
prefs.load_and_apply_prefs_from_file()
if prefs:
prefs.load_and_apply_prefs_from_file()
bpy.app.timers.register(timer_func, first_interval=delay)
def apply_prefs_from_dict_recursive(self, propgroup: PropertyGroup, data: dict):
@@ -71,6 +73,8 @@ class PrefsFileSaveLoadMixin:
def save_prefs_to_file(self, _context=None):
filepath = get_prefs_filepath()
os.makedirs(os.path.dirname(filepath), exist_ok=True)
with open(filepath, "w") as f:
json.dump(self.to_dict(), f, indent=4)
@@ -4,18 +4,19 @@
from bpy.types import UILayout
def aligned_label(layout: UILayout, text: str, icon=None, alert=False, alignment='LEFT', **kwargs):
def aligned_label(layout: UILayout, *, alert=False, alignment='LEFT', **kwargs):
"""Draw some text in the single-column-layout style, ie. offset by 60%."""
row = layout.split(factor=0.4)
row.separator()
row.alert = alert
row.alignment = alignment
row.label(text=text, icon=icon, **kwargs)
row.label(**kwargs)
def label_split(layout: UILayout, text: str, icon=None, alert=False, **kwargs) -> UILayout:
def label_split(layout: UILayout, *, alert=False, **kwargs) -> UILayout:
"""Return an empty UILayout with a text label to its left in the single-column-layout style."""
split = layout.split(factor=0.4, align=True)
split.alert = alert
row = split.row(align=True)
row.alignment = 'RIGHT'
row.label(text=text)
row.label(**kwargs)
return split
@@ -1,52 +0,0 @@
# SPDX-FileCopyrightText: 2016-2024 Blender Foundation
#
# SPDX-License-Identifier: GPL-3.0-or-later
import bpy
class WM_OT_toggle_keymap_item_on_drag(bpy.types.Operator):
"When Drag is enabled, this pie menu will only appear when the mouse is dragged while the assigned key combo is held down"
bl_idname = "wm.toggle_keymap_item_property"
bl_label = "Toggle On Drag"
bl_options = {'REGISTER', 'INTERNAL'}
km_name: bpy.props.StringProperty(options={'SKIP_SAVE'})
kmi_idname: bpy.props.StringProperty(options={'SKIP_SAVE'})
pie_name: bpy.props.StringProperty(options={'SKIP_SAVE'})
prop_name: bpy.props.StringProperty(options={'SKIP_SAVE'})
def execute(self, context):
# Another sign of the fragility of Blender's keymap API.
# The reason for the existence of this property wrapper operator is that
# when we draw the `on_drag` property in the UI directly, Blender's keymap
# system (for some reason??) doesn't realize that a keymap entry has changed,
# and fails to refresh caches, which has disasterous results.
# This operator fires a refreshing of internal keymap data via
# `user_kmi.type = user_kmi.type`
user_kc = context.window_manager.keyconfigs.user
user_km = user_kc.keymaps.get(self.km_name)
if not user_km:
# This really shouldn't happen.
self.report({'ERROR'}, f"Couldn't find KeyMap: {self.km_name}")
return {'CANCELLED'}
for user_kmi in user_km.keymap_items:
if user_kmi.idname == self.kmi_idname and user_kmi.properties and user_kmi.properties.name == self.pie_name:
if hasattr(user_kmi.properties, self.prop_name):
setattr(
user_kmi.properties,
self.prop_name,
not getattr(user_kmi.properties, self.prop_name),
)
# This is the magic line that causes internal keymap data to be kept up to date and not break.
user_kmi.type = user_kmi.type
else:
self.report({'ERROR'}, "Property not in keymap: " + self.prop_name)
return {'CANCELLED'}
return {'FINISHED'}
registry = [
WM_OT_toggle_keymap_item_on_drag,
]
@@ -29,12 +29,19 @@ class WM_OT_call_menu_pie_drag_only(Operator):
bl_label = "Pie Menu on Drag"
bl_options = {'REGISTER', 'INTERNAL'}
def update_kmi(self, context):
if not hasattr(context, 'keymapitem'):
return
kmi = context.keymapitem # Set via UILayout.context_pointer_set().
kmi.type = kmi.type
name: StringProperty(options={'SKIP_SAVE'})
on_drag: BoolProperty(
name="On Drag",
default=True,
description="Only show this pie menu on mouse drag, otherwise execute a default operator",
options={'SKIP_SAVE'},
update=update_kmi,
)
fallback_operator: StringProperty(options={'SKIP_SAVE'})
fallback_op_kwargs: StringProperty(default="{}", options={'SKIP_SAVE'})
@@ -102,6 +109,7 @@ class WM_OT_call_menu_pie_drag_only(Operator):
if km:
for kmi in km.keymap_items:
for i, condition in enumerate([
kmi.idname != 'wm.call_menu_pie_drag_only',
kmi.type == hotkey_kwargs.get('type', ""),
kmi.value == hotkey_kwargs.get('value', "PRESS"),
kmi.ctrl == hotkey_kwargs.get('ctrl', False),
@@ -61,8 +61,6 @@ class OUTLINER_MT_relationship_pie(Menu):
remap = pie.operator(
'outliner.remap_users_ui', icon='FILE_REFRESH', text="Remap Users"
)
remap.id_type = id.id_type
remap.id_name_source = id.name
if id.library:
remap.library_path_source = id.library.filepath
else:
@@ -290,7 +288,7 @@ class RemapTarget(bpy.types.PropertyGroup):
class OUTLINER_OT_remap_users_ui(bpy.types.Operator):
"""Remap users of a selected ID to any other ID of the same type"""
"""Remap users of selected IDs to any other ID of the same type"""
bl_idname = "outliner.remap_users_ui"
bl_label = "Remap Users"
@@ -300,9 +298,9 @@ class OUTLINER_OT_remap_users_ui(bpy.types.Operator):
# Prepare the ID selector.
remap_targets = context.scene.remap_targets
remap_targets.clear()
source_id = get_id(self.id_name_source, self.id_type, self.library_path_source)
for id in get_id_storage_by_type_str(self.id_type)[0]:
if id == source_id:
source_ids = get_selected_ids_of_active_type(context)
for id in get_id_storage_by_type_str(source_ids[0].id_type)[0]:
if id in source_ids:
continue
if (self.library_path == 'Local Data' and not id.library) or (
id.library and (self.library_path == id.library.filepath)
@@ -315,44 +313,50 @@ class OUTLINER_OT_remap_users_ui(bpy.types.Operator):
description="Library path, if we want to remap to a linked ID",
update=update_library_path,
)
id_type: StringProperty(description="ID type, eg. 'OBJECT' or 'MESH'")
library_path_source: StringProperty()
id_name_source: StringProperty(
name="Source ID Name", description="Name of the ID we're remapping the users of"
)
id_name_target: StringProperty(
name="Target ID Name", description="Name of the ID we're remapping users to"
)
@classmethod
def poll(cls, context):
source_ids = get_selected_ids_of_active_type(context)
if not source_ids:
cls.poll_message_set("No selected IDs.")
return False
return True
def invoke(self, context, _event):
# Populate the remap_targets string list with possible options based on
# what was passed to the operator.
assert (
self.id_type and self.id_name_source
), "Error: UI must provide ID and ID type to this operator."
# selection context.
# Prepare the library selector.
remap_target_libraries = context.scene.remap_target_libraries
remap_target_libraries.clear()
local = remap_target_libraries.add()
local.name = "Local Data"
source_id = get_id(self.id_name_source, self.id_type, self.library_path_source)
source_ids = get_selected_ids_of_active_type(context)
for lib in bpy.data.libraries:
for id in lib.users_id:
if type(id) == type(source_id):
if type(id) == type(source_ids[0]):
lib_entry = remap_target_libraries.add()
lib_entry.name = lib.filepath
break
container = get_id_storage_by_type_str(source_ids[0].id_type)[0]
self.library_path = "Local Data"
if source_id.name[-4] == ".":
storage = get_id_storage_by_type_str(self.id_type)[0]
suggestion = storage.get(source_id.name[:-4])
if suggestion:
self.id_name_target = suggestion.name
if suggestion.library:
self.library_path = suggestion.library.filepath
suffixed_id = next((id for id in source_ids if id.name[-4] == "."), None)
if suffixed_id:
default_target = container.get(suffixed_id.name[:-4])
if default_target:
self.id_name_target = default_target.name
if default_target.library:
self.library_path = default_target.library.filepath
else:
self.id_name_target = ""
self.library_path = 'Local Data'
return context.window_manager.invoke_props_dialog(self, width=600)
@@ -362,14 +366,18 @@ class OUTLINER_OT_remap_users_ui(bpy.types.Operator):
layout.use_property_decorate = False
scene = context.scene
row = layout.row()
id = get_id(self.id_name_source, self.id_type, self.library_path_source)
id_icon = get_datablock_icon(id)
split = row.split()
split.row().label(text="Anything that was referencing this:")
row = split.row()
row.prop(self, 'id_name_source', text="", icon=id_icon)
row.enabled = False
source_ids = get_selected_ids_of_active_type(context)
id_icon = get_datablock_icon(source_ids[0])
for i, source_id in enumerate(source_ids):
row = layout.row()
split = row.split()
if i==0:
split.row().label(text="Anything that was referencing these:")
else:
split.row()
row = split.row()
row.prop(source_id, 'name', text="", icon=id_icon)
row.enabled = False
layout.separator()
col = layout.column()
@@ -392,11 +400,12 @@ class OUTLINER_OT_remap_users_ui(bpy.types.Operator):
)
def execute(self, context):
source_id = get_id(self.id_name_source, self.id_type, self.library_path_source)
target_id = get_id(self.id_name_target, self.id_type, self.library_path)
assert source_id and target_id, "Error: Failed to find source or target."
source_ids = get_selected_ids_of_active_type(context)
target_id = get_id(self.id_name_target, source_ids[0].id_type, self.library_path)
assert source_ids and target_id, "Error: Failed to find source or target."
source_id.user_remap(target_id)
for source_id in source_ids:
source_id.user_remap(target_id)
return {'FINISHED'}
@@ -409,9 +418,7 @@ class OBJECT_OT_instancer_empty_to_collection(Operator):
@classmethod
def poll(cls, context):
obj = context.active_object
if context.area.ui_type == 'OUTLINER':
obj = context.id
obj = get_active_id(context)
if not (
obj
@@ -429,9 +436,7 @@ class OBJECT_OT_instancer_empty_to_collection(Operator):
return True
def execute(self, context):
obj = context.active_object
if context.area.ui_type == 'OUTLINER':
obj = context.id
obj = get_active_id(context)
coll = obj.instance_collection
bpy.data.objects.remove(obj)
@@ -559,6 +564,13 @@ def get_fundamental_id_type(datablock: ID) -> tuple[Any, str]:
)
def get_selected_ids_of_active_type(context):
active_id = get_active_id(context)
return [
id for id in context.selected_ids
if type(id) == type(active_id)
]
def get_id(id_name: str, id_type: str, lib_path="") -> ID:
container = get_id_storage_by_type_str(id_type)[0]
if lib_path and lib_path != 'Local Data':
@@ -6,16 +6,14 @@ import platform, struct, urllib
import bpy
import addon_utils
from bpy.types import AddonPreferences, Operator, KeyMap, KeyMapItem
from bpy.types import AddonPreferences, KeyMap, KeyMapItem
from bpy.props import BoolProperty
from bl_ui.space_userpref import USERPREF_PT_interface_menus_pie
from .bs_utils.prefs import PrefsFileSaveLoadMixin, update_prefs_on_file, get_addon_prefs
from .bs_utils.hotkeys import HotkeyDrawMixin, get_sidebar, draw_hotkey_list
from .bs_utils.prefs import get_addon_prefs
from .bs_utils.hotkeys import get_sidebar, draw_hotkey_list
class ExtraPies_AddonPrefs(
PrefsFileSaveLoadMixin,
HotkeyDrawMixin,
AddonPreferences,
USERPREF_PT_interface_menus_pie, # We use this class's `draw_centered` function to draw built-in pie settings.
):
@@ -83,16 +81,9 @@ def button_draw_func(layout, km: KeyMap, kmi: KeyMapItem, compact=False):
sub = split.row(align=True)
sub.enabled = kmi.active
op = sub.operator(
'wm.toggle_keymap_item_property',
text=text,
icon='MOUSE_MOVE',
depress=kmi.properties.on_drag,
)
op.km_name = km.name
op.kmi_idname = kmi.idname
op.pie_name = kmi.properties.name
op.prop_name = 'on_drag'
sub.context_pointer_set("keymapitem", kmi)
sub.use_property_split=False
sub.prop(kmi.properties, 'on_drag', icon='MOUSE_MOVE', text=text)
def get_bug_report_url():
op_sys = "%s %d Bits\n" % (
@@ -123,46 +114,4 @@ def get_bug_report_url():
+ urllib.parse.quote(op_sys)
)
class WINDOW_OT_extra_pies_prefs_save(Operator):
"""Save Extra Pies add-on preferences"""
bl_idname = "window.extra_pies_prefs_save"
bl_label = "Save Pie Hotkeys"
bl_options = {'REGISTER'}
def execute(self, context):
filepath, data = update_prefs_on_file(context)
self.report({'INFO'}, f"Saved Pie Prefs to {filepath}.")
return {'FINISHED'}
class WINDOW_OT_extra_pies_prefs_load(Operator):
"""Load Extra Pies add-on preferences"""
bl_idname = "window.extra_pies_prefs_load"
bl_label = "Load Pie Hotkeys"
bl_options = {'REGISTER'}
def execute(self, context):
prefs = get_addon_prefs(context)
filepath = prefs.get_prefs_filepath()
success = prefs.load_and_apply_prefs_from_file()
if success:
self.report({'INFO'}, f"Loaded pie preferences from {filepath}.")
else:
self.report({'ERROR'}, "Failed to load Pie preferences.")
return {'FINISHED'}
registry = [
ExtraPies_AddonPrefs,
WINDOW_OT_extra_pies_prefs_save,
WINDOW_OT_extra_pies_prefs_load,
]
def register():
ExtraPies_AddonPrefs.register_autoload_from_file()
registry = [ExtraPies_AddonPrefs]
@@ -7,13 +7,13 @@
"id": "basedplayblast",
"name": "BasedPlayblast",
"tagline": "Easily create playblasts from Blender and Flamenco",
"version": "2.3.1",
"version": "2.4.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "5.0.0",
"blender_version_min": "4.2.0",
"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
"permissions": {
"files": "Import/export files and data"
@@ -24,9 +24,34 @@
"Workflow",
"Video"
],
"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.3.1/BasedPlayblast.v2.3.1.zip",
"archive_size": 38295,
"archive_hash": "sha256:98f978a96fb8d15bae60987f305901ba0acd7a37ddb45627724326809e43622d"
"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.4.0/BasedPlayblast.v2.4.0.zip",
"archive_size": 40376,
"archive_hash": "sha256:544369c72024681cb45a4ee073ae684b56f08f2e0d8d9906a75fcbb11e0a2196"
},
{
"schema_version": "1.0.0",
"id": "rainclouds_bulk_scene_tools",
"name": "Raincloud's Bulk Scene Tools",
"tagline": "Bulk utilities for optimizing scene data",
"version": "0.11.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.5.0",
"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
"permissions": {
"files": "Read and write external resources referenced by scenes"
},
"tags": [
"Scene",
"Workflow",
"Materials"
],
"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.11.0/Rainys_Bulk_Scene_Tools.v0.11.0.zip",
"archive_size": 72969,
"archive_hash": "sha256:827451b11808488e8682f4bdd4bfff8b6f1f0fe2aa5bbc53845fbf9a13e15757"
}
]
}
@@ -0,0 +1,57 @@
{
"version": "v1",
"blocklist": [],
"data": [
{
"schema_version": "1.0.0",
"id": "basedplayblast",
"name": "BasedPlayblast",
"tagline": "Easily create playblasts from Blender and Flamenco",
"version": "2.3.1",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "5.0.0",
"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
"permissions": {
"files": "Import/export files and data"
},
"tags": [
"Animation",
"Render",
"Workflow",
"Video"
],
"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.3.1/BasedPlayblast.v2.3.1.zip",
"archive_size": 38295,
"archive_hash": "sha256:98f978a96fb8d15bae60987f305901ba0acd7a37ddb45627724326809e43622d"
},
{
"schema_version": "1.0.0",
"id": "rainclouds_bulk_scene_tools",
"name": "Raincloud's Bulk Scene Tools",
"tagline": "Bulk utilities for optimizing scene data",
"version": "0.11.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.5.0",
"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
"permissions": {
"files": "Read and write external resources referenced by scenes"
},
"tags": [
"Scene",
"Workflow",
"Materials"
],
"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.11.0/Rainys_Bulk_Scene_Tools.v0.11.0.zip",
"archive_size": 72969,
"archive_hash": "sha256:827451b11808488e8682f4bdd4bfff8b6f1f0fe2aa5bbc53845fbf9a13e15757"
}
]
}
@@ -0,0 +1,59 @@
# BasedPlayblast
**Easily create playblasts from Blender**
BasedPlayblast is a Blender addon that streamlines the process of creating video playblasts for animation review. It provides optimized render settings for fast preview generation while maintaining visual quality suitable for review purposes.
## Features
- **Fast Playblast Creation**: Optimized render settings for different preview modes (Solid, Material, Rendered)
- **Multiple Display Modes**: Support for Wireframe, Solid, Material Preview, and Rendered modes
- **Flexible Resolution**: Scene, preset, or custom resolution options
- **Video Format Support**: MP4, MOV, AVI, MKV with various codecs (H.264, H.265, AV1, etc.)
- **Metadata Integration**: Automatic inclusion of frame numbers, camera info, and custom notes
- **Settings Management**: Apply and restore render settings without losing your project configuration
- **Flamenco Support**: Custom Flamenco Job Script with a simple, non-destructive workflow
## Installation
### Via BlenderKit's Extension Repository (Recommended)
1. Open Blender (5.0+)
2. Install BlenderKit via https://www.blenderkit.com/get-blenderkit/
3. Open Preferences (Ctrl + ,)
4. Go to **Edit > Preferences > Get Extensions**
5. Search for "BasedPlayblast"
6. Click **Install**
7. Enjoy automatic updating!
### Manual Installation
1. Download the latest release, or the release that supports your intended Blender version
2. In Blender, go to **Edit > Preferences > Add-ons**
3. Click **Install from Disk** and select the downloaded file
4. Enable the addon in the list
## Usage
1. **Locate the Panel**: Go to **Properties > Output > BasedPlayblast**
2. **Configure Settings**: Set your output path, resolution, and display mode
3. **Create Playblast**: Click the **PLAYBLAST** button
4. **View Result**: Click **VIEW** to open the generated video
- **Apply Blast Settings**: Use this button to apply optimized render settings without rendering
- Intended particularly for Flamenco. Apply, check the resultant render settings to ensure they're correct, then send to Flamenco using the BasedPlayblast custom Job type.
- **Restore Original Settings**: Return to your original render configuration
- **Display Modes**:
- **Wireframe/Solid**
- Fast workbench viewport rendering. Recommended for short and/or locally-blasted projects.
- **Material**
- **Rendered**
## Requirements
- Blender 5.0.0 or higher
- Python 3.x (included with Blender)
## Support
- **Documentation**: [GitHub Repository](https://github.com/RaincloudTheDragon/BasedPlayblast)
- **Issues**: Report bugs or request features on GitHub
- **License**: GPL-3.0-or-later
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,28 @@
schema_version = "1.0.0"
id = "basedplayblast"
name = "BasedPlayblast"
tagline = "Easily create playblasts from Blender and Flamenco"
version = "2.4.0"
type = "add-on"
maintainer = "RaincloudTheDragon <raincloudthedragon@gmail.com>"
license = ["GPL-3.0-or-later"]
blender_version_min = "4.2.0"
website = "https://github.com/RaincloudTheDragon/BasedPlayblast"
tags = ["Animation", "Render", "Workflow", "Video"]
[permissions]
files = "Import/export files and data"
[build]
paths_exclude_pattern = [
"__pycache__/",
"*.pyc",
".git/",
".github/",
"addon_updater*",
"basedplayblast_updater/"
]
@@ -0,0 +1,164 @@
import bpy # type: ignore
RAINYS_EXTENSIONS_REPO_NAME = "Rainy's Extensions"
RAINYS_EXTENSIONS_REPO_URL = (
"https://raw.githubusercontent.com/RaincloudTheDragon/rainys-blender-extensions/refs/heads/main/index.json"
)
_BOOTSTRAP_DONE = False
def _log(message: str) -> None:
print(f"RainysExtensionsCheck: {message}")
def ensure_rainys_extensions_repo(_deferred: bool = False) -> None:
    """
    Ensure the Rainy's Extensions repository is registered in Blender.

    Safe to import and call from multiple add-ons; the helper guards against doing the
    work more than once per Blender session.

    Args:
        _deferred: True when re-invoked from a timer after the context was
            restricted on the first attempt; prevents endless rescheduling.
    """
    global _BOOTSTRAP_DONE
    if _BOOTSTRAP_DONE:
        return
    _log("starting repository verification")
    # During add-on registration bpy.context can be a _RestrictContext that
    # forbids preference access; defer once via a timer and retry.
    context_class_name = type(bpy.context).__name__
    if context_class_name == "_RestrictContext":
        if _deferred:
            _log("context still restricted after deferral; aborting repo check")
            return
        _log("context restricted; scheduling repo check retry")

        def _retry():
            ensure_rainys_extensions_repo(_deferred=True)
            return None  # returning None unregisters the timer

        bpy.app.timers.register(_retry, first_interval=0.5)
        return
    prefs = getattr(bpy.context, "preferences", None)
    if prefs is None:
        _log("no preferences available on context; skipping")
        return
    preferences_changed = False
    addon_prefs = None
    addon_entry = None
    # Look up this add-on's preferences entry; the addons collection API has
    # varied across Blender versions, so probe .get() first, then indexing.
    if hasattr(getattr(prefs, "addons", None), "get"):
        addon_entry = prefs.addons.get(__name__)
    elif hasattr(prefs, "addons"):
        try:
            addon_entry = prefs.addons[__name__]
        except Exception:
            addon_entry = None
    if addon_entry:
        addon_prefs = getattr(addon_entry, "preferences", None)
    # Older builds gate the extension platform behind an experimental flag.
    experimental = getattr(prefs, "experimental", None)
    if experimental and hasattr(experimental, "use_extension_platform"):
        if not experimental.use_extension_platform:
            experimental.use_extension_platform = True
            preferences_changed = True
            _log("enabled experimental extension platform")
    # The repository collection has lived under several attribute names across
    # Blender versions; try each known location in turn.
    repositories = None
    extensions_obj = getattr(prefs, "extensions", None)
    if extensions_obj:
        if hasattr(extensions_obj, "repos"):
            repositories = extensions_obj.repos
        elif hasattr(extensions_obj, "repositories"):
            repositories = extensions_obj.repositories
    if repositories is None:
        filepaths = getattr(prefs, "filepaths", None)
        repositories = getattr(filepaths, "extension_repos", None) if filepaths else None
    if repositories is None:
        _log("extension repositories collection missing; skipping")
        return

    def _repo_matches(repo) -> bool:
        # A repo counts as ours when either URL attribute matches our index URL.
        return getattr(repo, "remote_url", "") == RAINYS_EXTENSIONS_REPO_URL or getattr(
            repo, "url", ""
        ) == RAINYS_EXTENSIONS_REPO_URL

    matching_indices = [idx for idx, repo in enumerate(repositories) if _repo_matches(repo)]
    target_repo = None
    if matching_indices:
        # Keep the first matching entry; remove duplicates in reverse order so
        # the remaining indices stay valid during removal.
        target_repo = repositories[matching_indices[0]]
        if len(matching_indices) > 1 and hasattr(repositories, "remove"):
            for dup_idx in reversed(matching_indices[1:]):
                try:
                    repositories.remove(dup_idx)
                    _log(f"removed duplicate repository entry at index {dup_idx}")
                except Exception as exc:
                    _log(f"could not remove duplicate repository at index {dup_idx}: {exc}")
    else:
        # Fall back to matching by display name in case the URL was edited.
        target_repo = next(
            (
                repo
                for repo in repositories
                if getattr(repo, "name", "") == RAINYS_EXTENSIONS_REPO_NAME
            ),
            None,
        )
    if target_repo is None:
        _log("repo missing; creating new entry")
        if hasattr(repositories, "new"):
            target_repo = repositories.new()
        elif hasattr(repositories, "add"):
            target_repo = repositories.add()
        else:
            _log("repository collection does not support creation; aborting")
            return
    else:
        _log("repo entry already present; validating fields")
    changed = preferences_changed

    def _ensure_attr(obj, attr, value) -> bool:
        # Set obj.attr to value when present and different; report whether a
        # write happened so the caller knows preferences need saving.
        # NOTE: explicit `return False` fall-through — the original could fall
        # off the end and return None, breaking `changed |= ...`.
        if hasattr(obj, attr) and getattr(obj, attr) != value:
            setattr(obj, attr, value)
            return True
        if not hasattr(obj, attr):
            _log(f"repository entry missing attribute '{attr}', skipping field")
        return False

    changed |= _ensure_attr(target_repo, "name", RAINYS_EXTENSIONS_REPO_NAME)
    changed |= _ensure_attr(target_repo, "module", "rainys_extensions")
    changed |= _ensure_attr(target_repo, "use_remote_url", True)
    changed |= _ensure_attr(target_repo, "remote_url", RAINYS_EXTENSIONS_REPO_URL)
    changed |= _ensure_attr(target_repo, "use_sync_on_startup", True)
    changed |= _ensure_attr(target_repo, "use_cache", True)
    changed |= _ensure_attr(target_repo, "use_access_token", False)
    # Remember in the add-on's own preferences that bootstrap completed once.
    if addon_prefs and hasattr(addon_prefs, "repo_initialized") and not addon_prefs.repo_initialized:
        addon_prefs.repo_initialized = True
        changed = True
    if not changed:
        _log("repository already configured; skipping preference save")
        _BOOTSTRAP_DONE = True
        return
    # Persist the updated preferences so the repo survives restarts.
    if hasattr(bpy.ops, "wm") and hasattr(bpy.ops.wm, "save_userpref"):
        try:
            bpy.ops.wm.save_userpref()
            _log("preferences updated and saved")
        except Exception as exc:  # pragma: no cover
            print(f"RainysExtensionsCheck: could not save preferences after repo update -> {exc}")
    else:
        _log("preferences API unavailable; changes not persisted")
    _BOOTSTRAP_DONE = True
@@ -0,0 +1,133 @@
import bpy # type: ignore
from bpy.types import AddonPreferences, Panel # type: ignore
from bpy.props import BoolProperty # type: ignore
from .panels import bulk_viewport_display
from .panels import bulk_data_remap
from .panels import bulk_path_management
from .panels import bulk_scene_general
from .ops.AutoMatExtractor import AutoMatExtractor, AUTOMAT_OT_summary_dialog
from .ops.Rename_images_by_mat import Rename_images_by_mat, RENAME_OT_summary_dialog
from .ops.FreeGPU import BST_FreeGPU
from .ops import ghost_buster
# Add-on preferences class for AutoMat Extractor settings
class BST_AddonPreferences(AddonPreferences):
    """Preferences for Bulk Scene Tools, shown in the Add-ons panel."""
    bl_idname = __package__

    # AutoMat Extractor settings
    automat_common_outside_blend: BoolProperty(
        name="Place 'common' folder outside 'blend' folder",
        description="If enabled, the 'common' folder for shared textures will be placed directly in 'textures/'. If disabled, it will be placed inside 'textures/<blend_name>/'",
        default=False,
    )

    def draw(self, context):
        """Draw the preferences UI."""
        settings_box = self.layout.box()
        settings_box.label(text="AutoMat Extractor Settings")
        settings_box.row().prop(self, "automat_common_outside_blend")
# Main panel for Bulk Scene Tools
class VIEW3D_PT_BulkSceneTools(Panel):
    """Bulk Scene Tools Panel"""
    bl_label = "Bulk Scene Tools"
    bl_idname = "VIEW3D_PT_bulk_scene_tools"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    # Appears under the "Edit" tab of the 3D viewport sidebar (N-panel).
    bl_category = 'Edit'

    def draw(self, context):
        # Header-only panel: a short description label; the actual tool UIs
        # live in the bulk_* panel modules registered in register().
        layout = self.layout
        layout.label(text="Tools for bulk operations on scene data")
# List of all classes in this module
classes = (  # registered in this order; unregister() walks it in reverse
    VIEW3D_PT_BulkSceneTools,
    BST_AddonPreferences,
    AutoMatExtractor,
    AUTOMAT_OT_summary_dialog,
    Rename_images_by_mat,
    RENAME_OT_summary_dialog,
    BST_FreeGPU,
)
def register():
    """Register all Bulk Scene Tools classes, sub-modules and keybinds."""
    # Classes first, so the add-on preferences exist before sub-modules run.
    for cls in classes:
        bpy.utils.register_class(cls)

    # Debug: confirm the preferences entry is reachable after registration.
    try:
        addon_entry = bpy.context.preferences.addons.get(__package__)
        if addon_entry:
            print(f"Addon preferences registered successfully: {addon_entry}")
        else:
            print("WARNING: Addon preferences not found after registration!")
            print(f"Available addons: {', '.join(bpy.context.preferences.addons.keys())}")
    except Exception as exc:
        print(f"Error accessing preferences: {str(exc)}")

    # Register the panel/operator sub-modules.
    bulk_scene_general.register()
    bulk_viewport_display.register()
    bulk_data_remap.register()
    bulk_path_management.register()
    ghost_buster.register()

    # Global Ctrl+Alt+Shift+M shortcut for Free GPU; the 'Screen' keymap makes
    # it available in every editor, not just the 3D viewport.
    window_manager = bpy.context.window_manager
    addon_keyconfig = window_manager.keyconfigs.addon
    if addon_keyconfig:
        keymap = addon_keyconfig.keymaps.new(name='Screen', space_type='EMPTY')
        keymap_item = keymap.keymap_items.new(
            'bst.free_gpu', 'M', 'PRESS', ctrl=True, alt=True, shift=True
        )
        # Stash the keymap on bpy.types.Scene so unregister() can find it later.
        stored_keymaps = getattr(bpy.types.Scene, '_bst_keymaps', [])
        stored_keymaps.append((keymap, keymap_item))
        bpy.types.Scene._bst_keymaps = stored_keymaps
def unregister():
    """Remove keybinds, sub-modules and classes registered by register()."""
    # Remove keybinds stored on bpy.types.Scene by register().
    addon_keymaps = getattr(bpy.types.Scene, '_bst_keymaps', [])
    for km, kmi in addon_keymaps:
        try:
            km.keymap_items.remove(kmi)
        except Exception:
            # was a bare `except:` — never swallow SystemExit/KeyboardInterrupt
            pass
    addon_keymaps.clear()
    if hasattr(bpy.types.Scene, '_bst_keymaps'):
        delattr(bpy.types.Scene, '_bst_keymaps')

    # Unregister sub-modules in reverse registration order; best-effort so a
    # failure in one module does not block the rest of the teardown.
    for module in (
        ghost_buster,
        bulk_path_management,
        bulk_data_remap,
        bulk_viewport_display,
        bulk_scene_general,
    ):
        try:
            module.unregister()
        except Exception:
            pass

    # Unregister classes from this module (reverse of registration order).
    for cls in reversed(classes):
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            pass
# Allow running this file directly from Blender's text editor during development.
if __name__ == "__main__":
    register()
@@ -0,0 +1,29 @@
schema_version = "1.0.0"
id = "rainclouds_bulk_scene_tools"
name = "Raincloud's Bulk Scene Tools"
tagline = "Bulk utilities for optimizing scene data"
version = "0.11.0"
type = "add-on"
maintainer = "RaincloudTheDragon <raincloudthedragon@gmail.com>"
license = ["GPL-3.0-or-later"]
blender_version_min = "4.5.0"
website = "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools"
tags = ["Scene", "Workflow", "Materials"]
[permissions]
files = "Read and write external resources referenced by scenes"
[build]
paths_exclude_pattern = [
"__pycache__/",
"*.pyc",
".git/",
".github/",
"docs/",
"tests/",
]
@@ -6,7 +6,6 @@ A couple Blender tools to help me automate some tedious tasks in scene optimizat
- Bulk Data Remap
- Bulk Viewport Display
- Automatic update checking and one-click updates from GitHub releases
Officially supports Blender 4.4.1, but may still work on older versions.
@@ -37,15 +36,6 @@ Officially supports Blender 4.4.1, but may still work on older versions.
4. Bulk Path Management > Save All (If selected, will save selected, if none are selected, will save all images in file)
5. Remove pack
### Updating the addon
The addon will automatically check for updates when Blender starts. You can also:
1. Go to Edit > Preferences > Add-ons
2. Find "Raincloud's Bulk Scene Tools" in the list
3. In the addon preferences, click "Check Now" to check for updates
4. If an update is available, click "Install Update" to download and install it
## Author
- **RaincloudTheDragon**
@@ -0,0 +1,78 @@
{
"version": "v1",
"blocklist": [],
"data": [
{
"schema_version": "1.0.0",
"id": "basedplayblast",
"name": "BasedPlayblast",
"tagline": "Easily create playblasts from Blender and Flamenco",
"version": "2.6.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.2.0",
"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
"permissions": {
"files": "Import/export files and data"
},
"tags": [
"Animation",
"Render",
"Workflow",
"Video"
],
"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.0/BasedPlayblast.v2.6.0.zip",
"archive_size": 47989,
"archive_hash": "sha256:ba8307675a0ca0d24496c7151e84349608fee709cc088dc82acaacec56d1dc7f"
},
{
"schema_version": "1.0.0",
"id": "rainclouds_bulk_scene_tools",
"name": "Raincloud's Bulk Scene Tools",
"tagline": "Bulk utilities for optimizing scene data",
"version": "0.12.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.2.0",
"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
"permissions": {
"files": "Read and write external resources referenced by scenes"
},
"tags": [
"Scene",
"Workflow",
"Materials"
],
"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.12.0/Rainys_Bulk_Scene_Tools.v0.12.0.zip",
"archive_size": 75117,
"archive_hash": "sha256:0607fafbd9f74f792fdb96e5913f03d9e4cc13cff8b5e3225468174959ca5b18"
},
{
"schema_version": "1.0.0",
"id": "atomic_data_manager",
"name": "Atomic Data Manager",
"tagline": "Smart cleanup and inspection of Blender data-blocks",
"version": "2.0.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.2.0",
"tags": [
"utility",
"management",
"cleanup"
],
"archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.0.0/Atomic_Data_Manager.v2.0.0.zip",
"archive_size": 67447,
"archive_hash": "sha256:5adf9ff89d1d24eaa79012b2a6c86f962fc107abc09b16a065e8327fbe57fb10"
}
]
}
@@ -0,0 +1,78 @@
{
"version": "v1",
"blocklist": [],
"data": [
{
"schema_version": "1.0.0",
"id": "basedplayblast",
"name": "BasedPlayblast",
"tagline": "Easily create playblasts from Blender and Flamenco",
"version": "2.6.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.2.0",
"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
"permissions": {
"files": "Import/export files and data"
},
"tags": [
"Animation",
"Render",
"Workflow",
"Video"
],
"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.0/BasedPlayblast.v2.6.0.zip",
"archive_size": 47989,
"archive_hash": "sha256:ba8307675a0ca0d24496c7151e84349608fee709cc088dc82acaacec56d1dc7f"
},
{
"schema_version": "1.0.0",
"id": "rainclouds_bulk_scene_tools",
"name": "Raincloud's Bulk Scene Tools",
"tagline": "Bulk utilities for optimizing scene data",
"version": "0.12.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.2.0",
"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
"permissions": {
"files": "Read and write external resources referenced by scenes"
},
"tags": [
"Scene",
"Workflow",
"Materials"
],
"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.12.0/Rainys_Bulk_Scene_Tools.v0.12.0.zip",
"archive_size": 75117,
"archive_hash": "sha256:0607fafbd9f74f792fdb96e5913f03d9e4cc13cff8b5e3225468174959ca5b18"
},
{
"schema_version": "1.0.0",
"id": "atomic_data_manager",
"name": "Atomic Data Manager",
"tagline": "Smart cleanup and inspection of Blender data-blocks",
"version": "2.0.0",
"type": "add-on",
"maintainer": "RaincloudTheDragon",
"license": [
"GPL-3.0-or-later"
],
"blender_version_min": "4.2.0",
"tags": [
"utility",
"management",
"cleanup"
],
"archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.0.0/Atomic_Data_Manager.v2.0.0.zip",
"archive_size": 67447,
"archive_hash": "sha256:5adf9ff89d1d24eaa79012b2a6c86f962fc107abc09b16a065e8327fbe57fb10"
}
]
}
@@ -0,0 +1,20 @@
# Changelog
All notable changes to this project will be documented in this file.
## [v2.0.0] - Raincloud's first re-release
### Features
- Multi-version Blender support (4.2 LTS, 4.5 LTS, and 5.0)
- Version detection utilities in `utils/version.py`
- API compatibility layer in `utils/compat.py` for handling version differences
### Fixes
- Blender 5.0 compatibility: Fixed `AttributeError` when accessing scene compositor node tree (changed from `scene.node_tree` to `scene.compositing_node_tree`)
- Collections assigned to `rigidbody_world.collection` are now correctly detected as used
### Internal
- GitHub Actions release workflow
- Integrated `rainys_repo_bootstrap` into `__init__.py` so the Rainy's Extensions repository is registered on add-on enable and the bootstrap guard resets on disable.
- Removed "Support Remington Creative" popup and all related functionality
- Removed Support popup preferences
@@ -23,28 +23,13 @@ registration for all packages within the add-on.
"""
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager import ops
from atomic_data_manager import ui
from atomic_data_manager.ui import inspect_ui
from atomic_data_manager.updater import addon_updater_ops
bl_info = {
"name": "Atomic Data Manager",
"author": "Remington Creative",
"blender": (2, 80, 0),
"version": (1, 0, 3),
"location": "Properties > Scene",
"category": "Remington Creative",
"description": "An Intelligent Data Manager for Blender.",
"wiki_url":
"https://remington.pro/software/blender/atomic",
"tracker_url":
"https://github.com/grantwilk/atomic-data-manager/issues"
}
from . import ops
from . import ui
from .ui import inspect_ui
from . import rainys_repo_bootstrap
# Atomic Data Manager Properties
@@ -216,22 +201,21 @@ class ATOMIC_PG_main(bpy.types.PropertyGroup):
def register():
# add-on updater registration
addon_updater_ops.register(bl_info)
register_class(ATOMIC_PG_main)
bpy.types.Scene.atomic = bpy.props.PointerProperty(type=ATOMIC_PG_main)
# atomic package registration
ui.register()
ops.register()
# bootstrap Rainy's Extensions repository
rainys_repo_bootstrap.register()
def unregister():
# add-on updated unregistration
addon_updater_ops.unregister()
# bootstrap unregistration
rainys_repo_bootstrap.unregister()
# atomic package unregistration
ui.unregister()
ops.unregister()
@@ -0,0 +1,18 @@
schema_version = "1.0.0"
id = "atomic_data_manager"
name = "Atomic Data Manager"
version = "2.0.0"
type = "add-on"
author = "RaincloudTheDragon"
maintainer = "RaincloudTheDragon"
blender_version_min = "4.2.0"
license = ["GPL-3.0-or-later"]
description = "An Intelligent Data Manager for Blender."
homepage = "https://github.com/grantwilk/atomic-data-manager"
tagline = "Smart cleanup and inspection of Blender data-blocks"
tags = ["utility", "management", "cleanup"]
# Python modules to load for this add-on
modules = ["atomic_data_manager"]
@@ -30,7 +30,6 @@ Blender, not in here.
# visible atomic preferences
enable_missing_file_warning = True
enable_support_me_popup = True
include_fake_users = False
enable_pie_menu_ui = True
@@ -40,5 +39,4 @@ pie_menu_alt = False
pie_menu_any = False
pie_menu_ctrl = False
pie_menu_oskey = False
pie_menu_shift = False
last_popup_day = 0
pie_menu_shift = False
@@ -22,11 +22,10 @@ This file handles the registration of the atomic_data_manager.ops package
"""
from atomic_data_manager.ops import main_ops
from atomic_data_manager.ops import inspect_ops
from atomic_data_manager.ops import direct_use_ops
from atomic_data_manager.ops import missing_file_ops
from atomic_data_manager.ops import support_me_ops
from . import main_ops
from . import inspect_ops
from . import direct_use_ops
from . import missing_file_ops
def register():
@@ -34,12 +33,10 @@ def register():
inspect_ops.register()
direct_use_ops.register()
missing_file_ops.register()
support_me_ops.register()
def unregister():
main_ops.unregister()
inspect_ops.unregister()
direct_use_ops.unregister()
missing_file_ops.unregister()
support_me_ops.unregister()
missing_file_ops.unregister()
@@ -31,11 +31,11 @@ intefaces in Blender.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager import config
from atomic_data_manager.stats import unused
from atomic_data_manager.ops.utils import nuke
from atomic_data_manager.ops.utils import clean
from atomic_data_manager.ui.utils import ui_layouts
from .. import config
from ..stats import unused
from .utils import nuke
from .utils import clean
from ..ui.utils import ui_layouts
class ATOMIC_OT_invoke_pie_menu_ui(bpy.types.Operator):
@@ -27,8 +27,8 @@ operators.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager.ops.utils import delete
from atomic_data_manager.ops.utils import duplicate
from .utils import delete
from .utils import duplicate
# Atomic Data Manager Inspection Rename Operator
@@ -27,10 +27,10 @@ various selection operations.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager.stats import unused
from atomic_data_manager.ops.utils import clean
from atomic_data_manager.ops.utils import nuke
from atomic_data_manager.ui.utils import ui_layouts
from ..stats import unused
from .utils import clean
from .utils import nuke
from ..ui.utils import ui_layouts
# Atomic Data Manager Nuke Operator
@@ -32,8 +32,8 @@ attempting to reload missing project files.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager.stats import missing
from atomic_data_manager.ui.utils import ui_layouts
from ..stats import missing
from ..ui.utils import ui_layouts
# Atomic Data Manager Reload Missing Files Operator
@@ -23,7 +23,7 @@ This file contains functions for cleaning out specific data categories.
"""
import bpy
from atomic_data_manager.stats import unused
from ...stats import unused
def collections():
@@ -0,0 +1,174 @@
import bpy # type: ignore
RAINYS_EXTENSIONS_REPO_NAME = "Rainy's Extensions"
RAINYS_EXTENSIONS_REPO_URL = (
"https://raw.githubusercontent.com/RaincloudTheDragon/rainys-blender-extensions/refs/heads/main/index.json"
)
_BOOTSTRAP_DONE = False
def _log(message: str) -> None:
    # Prefixed print so bootstrap messages are identifiable in the console.
    print(f"RainysExtensionsCheck: {message}")
def ensure_rainys_extensions_repo(_deferred: bool = False) -> None:
    """
    Ensure the Rainy's Extensions repository is registered in Blender.

    Safe to import and call from multiple add-ons; the helper guards against doing the
    work more than once per Blender session.

    Args:
        _deferred: True when re-invoked from a timer after the context was
            restricted on the first attempt; prevents endless rescheduling.
    """
    global _BOOTSTRAP_DONE
    if _BOOTSTRAP_DONE:
        return
    _log("starting repository verification")
    # During add-on registration bpy.context can be a _RestrictContext that
    # forbids preference access; defer once via a timer and retry.
    context_class_name = type(bpy.context).__name__
    if context_class_name == "_RestrictContext":
        if _deferred:
            _log("context still restricted after deferral; aborting repo check")
            return
        _log("context restricted; scheduling repo check retry")

        def _retry():
            ensure_rainys_extensions_repo(_deferred=True)
            return None  # returning None unregisters the timer

        bpy.app.timers.register(_retry, first_interval=0.5)
        return
    prefs = getattr(bpy.context, "preferences", None)
    if prefs is None:
        _log("no preferences available on context; skipping")
        return
    preferences_changed = False
    addon_prefs = None
    addon_entry = None
    # Look up this add-on's preferences entry; the addons collection API has
    # varied across Blender versions, so probe .get() first, then indexing.
    if hasattr(getattr(prefs, "addons", None), "get"):
        addon_entry = prefs.addons.get(__name__)
    elif hasattr(prefs, "addons"):
        try:
            addon_entry = prefs.addons[__name__]
        except Exception:
            addon_entry = None
    if addon_entry:
        addon_prefs = getattr(addon_entry, "preferences", None)
    # Older builds gate the extension platform behind an experimental flag.
    experimental = getattr(prefs, "experimental", None)
    if experimental and hasattr(experimental, "use_extension_platform"):
        if not experimental.use_extension_platform:
            experimental.use_extension_platform = True
            preferences_changed = True
            _log("enabled experimental extension platform")
    # The repository collection has lived under several attribute names across
    # Blender versions; try each known location in turn.
    repositories = None
    extensions_obj = getattr(prefs, "extensions", None)
    if extensions_obj:
        if hasattr(extensions_obj, "repos"):
            repositories = extensions_obj.repos
        elif hasattr(extensions_obj, "repositories"):
            repositories = extensions_obj.repositories
    if repositories is None:
        filepaths = getattr(prefs, "filepaths", None)
        repositories = getattr(filepaths, "extension_repos", None) if filepaths else None
    if repositories is None:
        _log("extension repositories collection missing; skipping")
        return

    def _repo_matches(repo) -> bool:
        # A repo counts as ours when either URL attribute matches our index URL.
        return getattr(repo, "remote_url", "") == RAINYS_EXTENSIONS_REPO_URL or getattr(
            repo, "url", ""
        ) == RAINYS_EXTENSIONS_REPO_URL

    matching_indices = [idx for idx, repo in enumerate(repositories) if _repo_matches(repo)]
    target_repo = None
    if matching_indices:
        # Keep the first matching entry; remove duplicates in reverse order so
        # the remaining indices stay valid during removal.
        target_repo = repositories[matching_indices[0]]
        if len(matching_indices) > 1 and hasattr(repositories, "remove"):
            for dup_idx in reversed(matching_indices[1:]):
                try:
                    repositories.remove(dup_idx)
                    _log(f"removed duplicate repository entry at index {dup_idx}")
                except Exception as exc:
                    _log(f"could not remove duplicate repository at index {dup_idx}: {exc}")
    else:
        # Fall back to matching by display name in case the URL was edited.
        target_repo = next(
            (
                repo
                for repo in repositories
                if getattr(repo, "name", "") == RAINYS_EXTENSIONS_REPO_NAME
            ),
            None,
        )
    if target_repo is None:
        _log("repo missing; creating new entry")
        if hasattr(repositories, "new"):
            target_repo = repositories.new()
        elif hasattr(repositories, "add"):
            target_repo = repositories.add()
        else:
            _log("repository collection does not support creation; aborting")
            return
    else:
        _log("repo entry already present; validating fields")
    changed = preferences_changed

    def _ensure_attr(obj, attr, value) -> bool:
        # Set obj.attr to value when present and different; report whether a
        # write happened so the caller knows preferences need saving.
        # NOTE: explicit `return False` fall-through — the original could fall
        # off the end and return None, breaking `changed |= ...`.
        if hasattr(obj, attr) and getattr(obj, attr) != value:
            setattr(obj, attr, value)
            return True
        if not hasattr(obj, attr):
            _log(f"repository entry missing attribute '{attr}', skipping field")
        return False

    changed |= _ensure_attr(target_repo, "name", RAINYS_EXTENSIONS_REPO_NAME)
    changed |= _ensure_attr(target_repo, "module", "rainys_extensions")
    changed |= _ensure_attr(target_repo, "use_remote_url", True)
    changed |= _ensure_attr(target_repo, "remote_url", RAINYS_EXTENSIONS_REPO_URL)
    changed |= _ensure_attr(target_repo, "use_sync_on_startup", True)
    changed |= _ensure_attr(target_repo, "use_cache", True)
    changed |= _ensure_attr(target_repo, "use_access_token", False)
    # Remember in the add-on's own preferences that bootstrap completed once.
    if addon_prefs and hasattr(addon_prefs, "repo_initialized") and not addon_prefs.repo_initialized:
        addon_prefs.repo_initialized = True
        changed = True
    if not changed:
        _log("repository already configured; skipping preference save")
        _BOOTSTRAP_DONE = True
        return
    # Persist the updated preferences so the repo survives restarts.
    if hasattr(bpy.ops, "wm") and hasattr(bpy.ops.wm, "save_userpref"):
        try:
            bpy.ops.wm.save_userpref()
            _log("preferences updated and saved")
        except Exception as exc:  # pragma: no cover
            print(f"RainysExtensionsCheck: could not save preferences after repo update -> {exc}")
    else:
        _log("preferences API unavailable; changes not persisted")
    _BOOTSTRAP_DONE = True
def register() -> None:
    """Entry point for Blender add-on registration."""
    # Self-defers via a timer if the context is still restricted at this point.
    ensure_rainys_extensions_repo()
def unregister() -> None:
    """Reset bootstrap guard so next registration re-runs the checks."""
    # Module-level flag shared with ensure_rainys_extensions_repo().
    global _BOOTSTRAP_DONE
    _BOOTSTRAP_DONE = False
@@ -23,9 +23,9 @@ This file contains functions that count quantities of various sets of data.
"""
import bpy
from atomic_data_manager.stats import unused
from atomic_data_manager.stats import unnamed
from atomic_data_manager.stats import missing
from . import unused
from . import unnamed
from . import missing
def collections():
@@ -24,8 +24,8 @@ as determined by stats.users.py
"""
import bpy
from atomic_data_manager import config
from atomic_data_manager.stats import users
from .. import config
from . import users
def shallow(data):
@@ -184,6 +184,9 @@ def node_groups_shallow():
def particles_deep():
# returns a list of keys of unused particle systems
if not hasattr(bpy.data, 'particles'):
return []
unused = []
for particle in bpy.data.particles:
@@ -201,12 +204,15 @@ def particles_shallow():
# returns a list of keys of unused particle systems that may be
# incomplete, but is significantly faster than doing a deep search
return shallow(bpy.data.particles)
return shallow(bpy.data.particles) if hasattr(bpy.data, 'particles') else []
def textures_deep():
# returns a list of keys of unused textures
if not hasattr(bpy.data, 'textures'):
return []
unused = []
for texture in bpy.data.textures:
@@ -224,7 +230,7 @@ def textures_shallow():
# returns a list of keys of unused textures that may be
# incomplete, but is significantly faster than doing a deep search
return shallow(bpy.data.textures)
return shallow(bpy.data.textures) if hasattr(bpy.data, 'textures') else []
def worlds():
@@ -40,7 +40,9 @@ def collection_all(collection_key):
collection_children(collection_key) + \
collection_lights(collection_key) + \
collection_meshes(collection_key) + \
collection_others(collection_key)
collection_others(collection_key) + \
collection_rigidbody_world(collection_key) + \
collection_scenes(collection_key)
def collection_cameras(collection_key):
@@ -155,6 +157,52 @@ def collection_others(collection_key):
return distinct(users)
def collection_rigidbody_world(collection_key):
    # returns a list containing "RigidBodyWorld" if the collection is used
    # by any scene's rigidbody_world.collection
    users = []
    collection = bpy.data.collections[collection_key]
    # check all scenes for rigidbody_world usage
    for scene in bpy.data.scenes:
        # check if scene has rigidbody_world and if it uses our collection
        if hasattr(scene, 'rigidbody_world') and scene.rigidbody_world:
            if hasattr(scene.rigidbody_world, 'collection') and scene.rigidbody_world.collection:
                # compared by name, consistent with the rest of this module
                if scene.rigidbody_world.collection.name == collection.name:
                    users.append("RigidBodyWorld")
    # distinct() collapses the repeated "RigidBodyWorld" entries to one
    return distinct(users)
def collection_scenes(collection_key):
    # returns a list of scene names that include this collection anywhere in
    # their collection hierarchy
    users = []
    collection = bpy.data.collections[collection_key]
    for scene in bpy.data.scenes:
        # recursive search starting at the scene's root collection
        if _scene_collection_contains(scene.collection, collection):
            users.append(scene.name)
    return distinct(users)
def _scene_collection_contains(parent_collection, target_collection):
# helper that checks whether target_collection exists inside the
# parent_collection hierarchy
if parent_collection.name == target_collection.name:
return True
for child in parent_collection.children:
if _scene_collection_contains(child, target_collection):
return True
return False
def image_all(image_key):
# returns a list of keys of every data-block that uses this image
@@ -162,7 +210,8 @@ def image_all(image_key):
image_materials(image_key) + \
image_node_groups(image_key) + \
image_textures(image_key) + \
image_worlds(image_key)
image_worlds(image_key) + \
image_geometry_nodes(image_key)
def image_compositors(image_key):
@@ -175,26 +224,31 @@ def image_compositors(image_key):
# a list of node groups that use our image
node_group_users = image_node_groups(image_key)
# Import compat module for version-safe compositor access
from ..utils import compat
# if our compositor uses nodes and has a valid node tree
if bpy.context.scene.use_nodes and bpy.context.scene.node_tree:
scene = bpy.context.scene
if scene.use_nodes:
node_tree = compat.get_scene_compositor_node_tree(scene)
if node_tree:
# check each node in the compositor
for node in node_tree.nodes:
# check each node in the compositor
for node in bpy.context.scene.node_tree.nodes:
# if the node is an image node with a valid image
if hasattr(node, 'image') and node.image:
# if the node is an image node with a valid image
if hasattr(node, 'image') and node.image:
# if the node's image is our image
if node.image.name == image.name:
users.append("Compositor")
# if the node's image is our image
if node.image.name == image.name:
users.append("Compositor")
# if the node is a group node with a valid node tree
elif hasattr(node, 'node_tree') and node.node_tree:
# if the node is a group node with a valid node tree
elif hasattr(node, 'node_tree') and node.node_tree:
# if the node tree's name is in our list of node group
# users
if node.node_tree.name in node_group_users:
users.append("Compositor")
# if the node tree's name is in our list of node group
# users
if node.node_tree.name in node_group_users:
users.append("Compositor")
return distinct(users)
@@ -252,6 +306,9 @@ def image_node_groups(image_key):
def image_textures(image_key):
# returns a list of texture keys that use the image
if not hasattr(bpy.data, 'textures'):
return []
users = []
image = bpy.data.images[image_key]
@@ -290,6 +347,35 @@ def image_textures(image_key):
return distinct(users)
def image_geometry_nodes(image_key):
    # returns a list of object keys that use the image through Geometry Nodes
    users = []
    image = bpy.data.images[image_key]
    # list of node groups that use this image
    node_group_users = image_node_groups(image_key)
    # Import compat module for version-safe geometry nodes access
    from ..utils import compat
    for obj in bpy.data.objects:
        # check Geometry Nodes modifiers
        if hasattr(obj, 'modifiers'):
            for modifier in obj.modifiers:
                if compat.is_geometry_nodes_modifier(modifier):
                    ng = compat.get_geometry_nodes_modifier_node_group(modifier)
                    if ng:
                        # direct usage in the modifier's tree
                        if node_group_has_image(ng.name, image.name):
                            users.append(obj.name)
                        # usage via nested node groups
                        elif ng.name in node_group_users:
                            users.append(obj.name)
    # distinct() dedupes objects matched by more than one modifier
    return distinct(users)
def image_worlds(image_key):
# returns a list of world keys that use the image
@@ -340,8 +426,29 @@ def light_objects(light_key):
def material_all(material_key):
# returns a list of keys of every data-block that uses this material
return material_objects(material_key) + \
material_geometry_nodes(material_key)
return material_objects(material_key)
def material_geometry_nodes(material_key):
    # returns a list of object keys that use the material via Geometry Nodes
    users = []
    material = bpy.data.materials[material_key]
    # Import compat module for version-safe geometry nodes access
    from ..utils import compat
    for obj in bpy.data.objects:
        if hasattr(obj, 'modifiers'):
            for modifier in obj.modifiers:
                if compat.is_geometry_nodes_modifier(modifier):
                    ng = compat.get_geometry_nodes_modifier_node_group(modifier)
                    if ng:
                        # matched when the modifier's tree assigns the material
                        if node_group_has_material(ng.name, material.name):
                            users.append(obj.name)
    # distinct() dedupes objects matched by more than one modifier
    return distinct(users)
def material_objects(material_key):
@@ -373,7 +480,8 @@ def node_group_all(node_group_key):
node_group_materials(node_group_key) + \
node_group_node_groups(node_group_key) + \
node_group_textures(node_group_key) + \
node_group_worlds(node_group_key)
node_group_worlds(node_group_key) + \
node_group_objects(node_group_key)
def node_group_compositors(node_group_key):
@@ -386,22 +494,27 @@ def node_group_compositors(node_group_key):
# a list of node groups that use our node group
node_group_users = node_group_node_groups(node_group_key)
# Import compat module for version-safe compositor access
from ..utils import compat
# if our compositor uses nodes and has a valid node tree
if bpy.context.scene.use_nodes and bpy.context.scene.node_tree:
scene = bpy.context.scene
if scene.use_nodes:
node_tree = compat.get_scene_compositor_node_tree(scene)
if node_tree:
# check each node in the compositor
for node in node_tree.nodes:
# check each node in the compositor
for node in bpy.context.scene.node_tree.nodes:
# if the node is a group and has a valid node tree
if hasattr(node, 'node_tree') and node.node_tree:
# if the node is a group and has a valid node tree
if hasattr(node, 'node_tree') and node.node_tree:
# if the node group is our node group
if node.node_tree.name == node_group.name:
users.append("Compositor")
# if the node group is our node group
if node.node_tree.name == node_group.name:
users.append("Compositor")
# if the node group is in our list of node group users
if node.node_tree.name in node_group_users:
users.append("Compositor")
# if the node group is in our list of node group users
if node.node_tree.name in node_group_users:
users.append("Compositor")
return distinct(users)
@@ -458,6 +571,9 @@ def node_group_textures(node_group_key):
# returns a list of texture keys that use this node group in their
# node trees
if not hasattr(bpy.data, 'textures'):
return []
users = []
node_group = bpy.data.node_groups[node_group_key]
@@ -515,6 +631,30 @@ def node_group_worlds(node_group_key):
return distinct(users)
def node_group_objects(node_group_key):
    """Return object keys that use this node group via Geometry Nodes modifiers."""
    users = []
    node_group = bpy.data.node_groups[node_group_key]
    # node groups that (transitively) use this node group
    node_group_users = node_group_node_groups(node_group_key)
    # version-safe access to geometry-nodes modifiers
    from ..utils import compat
    for obj in bpy.data.objects:
        for modifier in getattr(obj, 'modifiers', []):
            if not compat.is_geometry_nodes_modifier(modifier):
                continue
            ng = compat.get_geometry_nodes_modifier_node_group(modifier)
            if ng and (ng.name == node_group.name
                       or ng.name in node_group_users):
                users.append(obj.name)
    return distinct(users)
def node_group_has_image(node_group_key, image_key):
# recursively returns true if the node group contains this image
# directly or if it contains a node group a node group that contains
@@ -587,6 +727,8 @@ def node_group_has_texture(node_group_key, texture_key):
# returns true if a node group contains this image
has_texture = False
if not hasattr(bpy.data, 'textures'):
return has_texture
node_group = bpy.data.node_groups[node_group_key]
texture = bpy.data.textures[texture_key]
@@ -614,6 +756,30 @@ def node_group_has_texture(node_group_key, texture_key):
return has_texture
def node_group_has_material(node_group_key, material_key):
    """Return True if the node group references the material, directly or nested."""
    node_group = bpy.data.node_groups[node_group_key]
    material = bpy.data.materials[material_key]
    for node in node_group.nodes:
        # base case: nodes with a material property (e.g. Set Material)
        if getattr(node, 'material', None):
            if node.material.name == material.name:
                return True
        # recurse case: nested node groups
        elif getattr(node, 'node_tree', None):
            if node_group_has_material(node.node_tree.name, material.name):
                return True
    return False
def particle_all(particle_key):
# returns a list of keys of every data-block that uses this particle
# system
@@ -624,6 +790,9 @@ def particle_all(particle_key):
def particle_objects(particle_key):
# returns a list of object keys that use the particle system
if not hasattr(bpy.data, 'particles'):
return []
users = []
particle_system = bpy.data.particles[particle_key]
@@ -653,6 +822,9 @@ def texture_all(texture_key):
def texture_brushes(texture_key):
# returns a list of brush keys that use the texture
if not hasattr(bpy.data, 'textures'):
return []
users = []
texture = bpy.data.textures[texture_key]
@@ -672,32 +844,40 @@ def texture_compositor(texture_key):
# returns a list containing "Compositor" if the texture is used in
# the scene's compositor
if not hasattr(bpy.data, 'textures'):
return []
users = []
texture = bpy.data.textures[texture_key]
# a list of node groups that use our image
node_group_users = texture_node_groups(texture_key)
# Import compat module for version-safe compositor access
from ..utils import compat
# if our compositor uses nodes and has a valid node tree
if bpy.context.scene.use_nodes and bpy.context.scene.node_tree:
scene = bpy.context.scene
if scene.use_nodes:
node_tree = compat.get_scene_compositor_node_tree(scene)
if node_tree:
# check each node in the compositor
for node in node_tree.nodes:
# check each node in the compositor
for node in bpy.context.scene.node_tree.nodes:
# if the node is an texture node with a valid texture
if hasattr(node, 'texture') and node.texture:
# if the node is an texture node with a valid texture
if hasattr(node, 'texture') and node.texture:
# if the node's texture is our texture
if node.texture.name == texture.name:
users.append("Compositor")
# if the node's texture is our texture
if node.texture.name == texture.name:
users.append("Compositor")
# if the node is a group node with a valid node tree
elif hasattr(node, 'node_tree') and node.node_tree:
# if the node is a group node with a valid node tree
elif hasattr(node, 'node_tree') and node.node_tree:
# if the node tree's name is in our list of node group
# users
if node.node_tree.name in node_group_users:
users.append("Compositor")
# if the node tree's name is in our list of node group
# users
if node.node_tree.name in node_group_users:
users.append("Compositor")
return distinct(users)
@@ -706,6 +886,9 @@ def texture_objects(texture_key):
# returns a list of object keys that use the texture in one of their
# modifiers
if not hasattr(bpy.data, 'textures'):
return []
users = []
texture = bpy.data.textures[texture_key]
@@ -744,6 +927,9 @@ def texture_objects(texture_key):
def texture_node_groups(texture_key):
# returns a list of keys of all node groups that use this texture
if not hasattr(bpy.data, 'textures'):
return []
users = []
texture = bpy.data.textures[texture_key]
@@ -762,6 +948,9 @@ def texture_particles(texture_key):
# returns a list of particle system keys that use the texture in
# their texture slots
if not hasattr(bpy.data, 'textures') or not hasattr(bpy.data, 'particles'):
return []
users = []
texture = bpy.data.textures[texture_key]
@@ -22,14 +22,13 @@ This file handles the registration of the atomic_data_manager.ui package
"""
from atomic_data_manager.ui import main_panel_ui
from atomic_data_manager.ui import stats_panel_ui
from atomic_data_manager.ui import inspect_ui
from atomic_data_manager.ui import missing_file_ui
from atomic_data_manager.ui import missing_file_ui
from atomic_data_manager.ui import pie_menu_ui
from atomic_data_manager.ui import preferences_ui
from atomic_data_manager.ui import support_me_ui
from . import main_panel_ui
from . import stats_panel_ui
from . import inspect_ui
from . import missing_file_ui
from . import missing_file_ui
from . import pie_menu_ui
from . import preferences_ui
def register():
@@ -42,7 +41,6 @@ def register():
inspect_ui.register()
missing_file_ui.register()
pie_menu_ui.register()
support_me_ui.register()
def unregister():
@@ -52,4 +50,3 @@ def unregister():
missing_file_ui.unregister()
pie_menu_ui.unregister()
preferences_ui.unregister()
support_me_ui.unregister()
@@ -25,8 +25,8 @@ This file contains the inspection user interface.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager.stats import users
from atomic_data_manager.ui.utils import ui_layouts
from ..stats import users
from .utils import ui_layouts
# bool that triggers an inspection update if it is True when the
@@ -29,8 +29,8 @@ category toggles and the category selection tools.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager.stats import count
from atomic_data_manager.ui.utils import ui_layouts
from ..stats import count
from .utils import ui_layouts
# Atomic Data Manager Main Panel
@@ -27,9 +27,9 @@ import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from bpy.app.handlers import persistent
from atomic_data_manager import config
from atomic_data_manager.stats import missing
from atomic_data_manager.ui.utils import ui_layouts
from .. import config
from ..stats import missing
from .utils import ui_layouts
# Atomic Data Manager Detect Missing Files Popup
@@ -26,35 +26,75 @@ some functions for syncing the preference properties with external factors.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager import config
from atomic_data_manager.updater import addon_updater_ops
from .. import config
# updater removed in Blender 4.5 extension format
def set_enable_support_me_popup(value):
# sets the value of the enable_support_me_popup boolean property
def _get_addon_prefs():
    # Robustly find our AddonPreferences instance regardless of the module
    # name the add-on is registered under (legacy "atomic_data_manager" vs.
    # the Blender 4.2+ extension-format "bl_ext.*" keys).
    # Returns the AddonPreferences instance, or None when not found.
    prefs = bpy.context.preferences
    for addon in prefs.addons.values():
        ap = getattr(addon, "preferences", None)
        # primary match: compare bl_idname against our preferences class
        if ap and hasattr(ap, "bl_idname") and ap.bl_idname == ATOMIC_PT_preferences_panel.bl_idname:
            return ap
        # fallback: match by a property unique to this add-on's preferences
        if ap and hasattr(ap, "enable_missing_file_warning"):
            return ap
    # no matching add-on preferences found
    return None
bpy.context.preferences.addons["atomic_data_manager"]\
.preferences.enable_support_me_popup = value
copy_prefs_to_config(None, None)
def _save_after_pref_change():
    """
    Persist user preferences after programmatic updates.

    Guarded because ``wm.save_userpref`` can raise when no window manager
    is available (e.g. background / --factory-startup sessions); in that
    case the in-memory change still applies and we only log the failure.
    """
    try:
        bpy.ops.wm.save_userpref()
    except Exception as e:
        print(f"Atomic: failed to save user preferences: {e}")
def set_last_popup_day(day):
    """
    Set the ``last_popup_day`` float preference.

    Fix: look the preferences up via ``_get_addon_prefs()`` instead of the
    hard-coded ``addons["atomic_data_manager"]`` key, which raises KeyError
    when the add-on is installed under a different module name (e.g. the
    Blender 4.2+ extension format) — consistent with the other setters in
    this file.
    """
    ap = _get_addon_prefs()
    if not ap:
        return
    ap.last_popup_day = day
def set_enable_missing_file_warning(value):
    """Programmatically toggle the missing file warning preference."""
    prefs = _get_addon_prefs()
    if prefs is None:
        # preferences not found; nothing to update
        return
    prefs.enable_missing_file_warning = value
    # mirror the change into config.py and persist it
    copy_prefs_to_config(None, None)
    _save_after_pref_change()
def set_include_fake_users(value):
    """Programmatically toggle inclusion of fake users."""
    prefs = _get_addon_prefs()
    if prefs is None:
        # preferences not found; nothing to update
        return
    prefs.include_fake_users = value
    # mirror the change into config.py and persist it
    copy_prefs_to_config(None, None)
    _save_after_pref_change()
def set_enable_pie_menu_ui(value):
    """Programmatically toggle the pie menu UI preference."""
    prefs = _get_addon_prefs()
    if prefs is None:
        # preferences not found; nothing to update
        return
    prefs.enable_pie_menu_ui = value
    # mirror the change into config.py and persist it
    copy_prefs_to_config(None, None)
    _save_after_pref_change()
def copy_prefs_to_config(self, context):
# copies the values of Atomic's preferences to the variables in
# config.py for global use
preferences = bpy.context.preferences
atomic_preferences = preferences.addons['atomic_data_manager']\
.preferences
atomic_preferences = _get_addon_prefs()
if not atomic_preferences:
return
# visible atomic preferences
config.enable_missing_file_warning = \
@@ -63,9 +103,6 @@ def copy_prefs_to_config(self, context):
config.enable_pie_menu_ui = \
atomic_preferences.enable_pie_menu_ui
config.enable_support_me_popup = \
atomic_preferences.enable_support_me_popup
config.include_fake_users = \
atomic_preferences.include_fake_users
@@ -88,14 +125,11 @@ def copy_prefs_to_config(self, context):
config.pie_menu_shift = \
atomic_preferences.pie_menu_shift
config.last_popup_day = \
atomic_preferences.last_popup_day
def update_pie_menu_hotkeys(self, context):
preferences = bpy.context.preferences
atomic_preferences = preferences.addons['atomic_data_manager'] \
.preferences
atomic_preferences = _get_addon_prefs()
if not atomic_preferences:
return
# add the hotkeys if the preference is enabled
if atomic_preferences.enable_pie_menu_ui:
@@ -167,12 +201,6 @@ class ATOMIC_PT_preferences_panel(bpy.types.AddonPreferences):
default=True
)
enable_support_me_popup: bpy.props.BoolProperty(
description="Occasionally display a popup asking if you would "
"like to support Remington Creative",
default=True
)
include_fake_users: bpy.props.BoolProperty(
description="Include data-blocks with only fake users in unused "
"data detection",
@@ -211,44 +239,7 @@ class ATOMIC_PT_preferences_panel(bpy.types.AddonPreferences):
default=False
)
last_popup_day: bpy.props.FloatProperty(
default=0
)
# add-on updater properties
auto_check_update: bpy.props.BoolProperty(
name="Auto-check for Update",
description="If enabled, auto-check for updates using an interval",
default=True,
)
updater_intrval_months: bpy.props.IntProperty(
name='Months',
description="Number of months between checking for updates",
default=0,
min=0,
max=6
)
updater_intrval_days: bpy.props.IntProperty(
name='Days',
description="Number of days between checking for updates",
default=7,
min=0,
)
updater_intrval_hours: bpy.props.IntProperty(
name='Hours',
description="Number of hours between checking for updates",
default=0,
min=0,
max=23
)
updater_intrval_minutes: bpy.props.IntProperty(
name='Minutes',
description="Number of minutes between checking for updates",
default=0,
min=0,
max=59
)
# updater properties removed
def draw(self, context):
layout = self.layout
@@ -265,13 +256,6 @@ class ATOMIC_PT_preferences_panel(bpy.types.AddonPreferences):
text="Show Missing File Warning"
)
# enable support me popup toggle
col.prop(
self,
"enable_support_me_popup",
text="Show \"Support Me\" Popup"
)
# right column
col = split.column()
@@ -321,8 +305,7 @@ class ATOMIC_PT_preferences_panel(bpy.types.AddonPreferences):
separator = layout.row() # extra space
# add-on updater box
addon_updater_ops.update_settings_ui(self, context)
# updater UI removed
# update config with any new preferences
copy_prefs_to_config(None, None)
@@ -29,9 +29,9 @@ it.
import bpy
from bpy.utils import register_class
from bpy.utils import unregister_class
from atomic_data_manager.stats import count
from atomic_data_manager.stats import misc
from atomic_data_manager.ui.utils import ui_layouts
from ..stats import count
from ..stats import misc
from .utils import ui_layouts
# Atomic Data Manager Statistics SubPanel
@@ -28,8 +28,8 @@ import time
from bpy.utils import register_class
from bpy.utils import unregister_class
from bpy.app.handlers import persistent
from atomic_data_manager import config
from atomic_data_manager.ui import preferences_ui
from .. import config
from . import preferences_ui
def get_current_day():
@@ -0,0 +1,29 @@
"""
Copyright (C) 2019 Remington Creative
This file is part of Atomic Data Manager.
Atomic Data Manager is free software: you can redistribute
it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Atomic Data Manager is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with Atomic Data Manager. If not, see <https://www.gnu.org/licenses/>.
---
This package contains utility modules for version detection and API compatibility.
"""
from . import version
from . import compat
__all__ = ['version', 'compat']
@@ -0,0 +1,154 @@
"""
Copyright (C) 2019 Remington Creative
This file is part of Atomic Data Manager.
Atomic Data Manager is free software: you can redistribute
it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Atomic Data Manager is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with Atomic Data Manager. If not, see <https://www.gnu.org/licenses/>.
---
This module provides API compatibility functions for handling differences
between Blender 4.2 LTS, 4.5 LTS, and 5.0.
"""
import bpy
from bpy.utils import register_class, unregister_class
from . import version
def safe_register_class(cls):
    """
    Safely register a class, handling any version-specific registration issues.

    Args:
        cls: The class to register

    Returns:
        bool: True if registration succeeded, False otherwise
    """
    try:
        register_class(cls)
    except Exception as e:
        print(f"Warning: Failed to register {cls.__name__}: {e}")
        return False
    return True
def safe_unregister_class(cls):
    """
    Safely unregister a class, handling any version-specific unregistration issues.

    Args:
        cls: The class to unregister

    Returns:
        bool: True if unregistration succeeded, False otherwise
    """
    try:
        unregister_class(cls)
    except Exception as e:
        print(f"Warning: Failed to unregister {cls.__name__}: {e}")
        return False
    return True
def get_addon_prefs():
    """
    Get the addon preferences instance, compatible across versions.

    Returns:
        AddonPreferences or None: The addon preferences instance if found
    """
    for addon in bpy.context.preferences.addons.values():
        ap = getattr(addon, "preferences", None)
        # identify our preferences by a property unique to this add-on
        if ap and hasattr(ap, "enable_missing_file_warning"):
            return ap
    return None
def get_geometry_nodes_modifier_node_group(modifier):
    """
    Get the node group from a geometry nodes modifier, handling version differences.

    Args:
        modifier: The modifier object

    Returns:
        NodeGroup or None: The node group if available (falsy node groups
        are normalized to None, matching the original contract)
    """
    if getattr(modifier, 'type', None) != 'NODES':
        return None
    # node_group exists on all supported versions; normalize falsy to None
    return getattr(modifier, 'node_group', None) or None
def is_geometry_nodes_modifier(modifier):
    """
    Check if a modifier is a geometry nodes modifier, compatible across versions.

    Args:
        modifier: The modifier object

    Returns:
        bool: True if the modifier is a geometry nodes modifier
    """
    # getattr default covers modifiers lacking a 'type' attribute
    return getattr(modifier, 'type', None) == 'NODES'
def get_node_tree_from_node(node):
    """
    Get the node tree from a node, handling version differences.

    Args:
        node: The node object

    Returns:
        NodeTree or None: The node tree if available (falsy trees are
        normalized to None, matching the original contract)
    """
    tree = getattr(node, 'node_tree', None)
    return tree or None
def get_scene_compositor_node_tree(scene):
    """
    Get the compositor node tree from a scene, handling version differences.

    In Blender 4.2/4.5: scene.node_tree
    In Blender 5.0+: scene.compositing_node_tree

    Args:
        scene: The scene object

    Returns:
        NodeTree or None: The compositor node tree if available
    """
    # Blender 5.0+ uses compositing_node_tree
    if version.is_version_at_least(5, 0, 0):
        if hasattr(scene, 'compositing_node_tree') and scene.compositing_node_tree:
            return scene.compositing_node_tree
    else:
        # Blender 4.2/4.5 uses node_tree
        if hasattr(scene, 'node_tree') and scene.node_tree:
            return scene.node_tree
    # no compositor tree configured (or attribute absent on this build)
    return None
@@ -0,0 +1,128 @@
"""
Copyright (C) 2019 Remington Creative
This file is part of Atomic Data Manager.
Atomic Data Manager is free software: you can redistribute
it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Atomic Data Manager is distributed in the hope that it will
be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with Atomic Data Manager. If not, see <https://www.gnu.org/licenses/>.
---
This module provides version detection and comparison utilities for
multi-version Blender support (4.2 LTS, 4.5 LTS, and 5.0).
"""
import bpy
# Version constants
VERSION_4_2_LTS = (4, 2, 0)
VERSION_4_5_LTS = (4, 5, 0)
VERSION_5_0 = (5, 0, 0)
def get_blender_version():
    """
    Returns the current Blender version as a tuple (major, minor, patch).

    Returns:
        tuple: (major, minor, patch) version numbers
    """
    # bpy.app.version is already a (major, minor, patch) int tuple
    return bpy.app.version
def get_version_string():
    """
    Returns the current Blender version as a string (e.g., "4.2.0").

    Returns:
        str: Version string in format "major.minor.patch"
    """
    major, minor, patch = get_blender_version()[:3]
    return f"{major}.{minor}.{patch}"
def is_version_at_least(major, minor=0, patch=0):
    """
    Check if the current Blender version is at least the specified version.

    Args:
        major (int): Major version number
        minor (int): Minor version number (default: 0)
        patch (int): Patch version number (default: 0)

    Returns:
        bool: True if current version >= specified version
    """
    # lexicographic tuple comparison is exactly the original
    # major -> minor -> patch cascade
    return tuple(get_blender_version()[:3]) >= (major, minor, patch)
def is_version_less_than(major, minor=0, patch=0):
    """
    Check if the current Blender version is less than the specified version.

    Args:
        major (int): Major version number
        minor (int): Minor version number (default: 0)
        patch (int): Patch version number (default: 0)

    Returns:
        bool: True if current version < specified version
    """
    # strict complement of is_version_at_least
    return not is_version_at_least(major, minor, patch)
def get_version_category():
    """
    Returns the version category string for the current Blender version.

    Returns:
        str: '4.2', '4.5', or '5.0' based on the current version
        (note: 4.3/4.4 report as '4.2'; older majors fall back to
        "major.minor")
    """
    major, minor = get_blender_version()[:2]
    if major >= 5:
        return '5.0'
    if major == 4:
        # 4.0-4.4 grouped under '4.2', 4.5+ under '4.5'
        return '4.2' if minor < 5 else '4.5'
    # fallback for older versions
    return f"{major}.{minor}"
def is_version_4_2():
    """Check if running Blender 4.2 LTS (4.2.x only, not 4.3 or 4.4)."""
    major, minor = get_blender_version()[:2]
    return (major, minor) == (4, 2)
def is_version_4_5():
    """Check if running Blender 4.5 LTS (4.5.0 <= version < 5.0.0)."""
    return is_version_at_least(4, 5, 0) and is_version_less_than(5, 0, 0)
def is_version_5_0():
    """Check if running Blender 5.0 or later (not only 5.0.x)."""
    return is_version_at_least(5, 0, 0)
@@ -0,0 +1,59 @@
# BasedPlayblast
**Easily create playblasts from Blender**
BasedPlayblast is a Blender addon that streamlines the process of creating video playblasts for animation review. It provides optimized render settings for fast preview generation while maintaining visual quality suitable for review purposes.
## Features
- **Fast Playblast Creation**: Optimized render settings for different preview modes (Solid, Material, Rendered)
- **Multiple Display Modes**: Support for Wireframe, Solid, Material Preview, and Rendered modes
- **Flexible Resolution**: Scene, preset, or custom resolution options
- **Video Format Support**: MP4, MOV, AVI, MKV with various codecs (H.264, H.265, AV1, etc.)
- **Metadata Integration**: Automatic inclusion of frame numbers, camera info, and custom notes
- **Settings Management**: Apply and restore render settings without losing your project configuration
- **Flamenco Support**: Custom Flamenco Job Script with a simple, non-destructive workflow
## Installation
### Via BlenderKit's Extension Repository (Recommended)
1. Open Blender (4.2 LTS or newer)
2. Install BlenderKit via https://www.blenderkit.com/get-blenderkit/
3. Open Preferences (Ctrl + ,)
4. Switch to the **Get Extensions** tab
5. Search for "BasedPlayblast"
6. Click **Install**
7. Enjoy automatic updating!
### Manual Installation
1. Download the latest release (or the most recent release that supports your Blender version)
2. In Blender, go to **Edit > Preferences > Add-ons**
3. Click **Install from Disk** and select the downloaded file
4. Enable the addon in the list
## Usage
1. **Locate the Panel**: Go to **Properties > Output > BasedPlayblast**
2. **Configure Settings**: Set your output path, resolution, and display mode
3. **Create Playblast**: Click the **PLAYBLAST** button
4. **View Result**: Click **VIEW** to open the generated video
- **Apply Blast Settings**: Use this button to apply optimized render settings without rendering
- Intended particularly for Flamenco. Apply, check the resultant render settings to ensure they're correct, then send to Flamenco using the BasedPlayblast custom Job type.
- **Restore Original Settings**: Return to your original render configuration
- **Display Modes**:
- **Wireframe/Solid**
- Fast workbench viewport rendering. Recommended for short and/or locally-blasted projects.
- **Material**
- **Rendered**
## Requirements
- Blender 4.2 LTS or newer (validated on 4.2 LTS, 4.5 LTS, and 5.0+)
- Python 3.x (included with Blender)
## Support
- **Documentation**: [GitHub Repository](https://github.com/RaincloudTheDragon/BasedPlayblast)
- **Issues**: Report bugs or request features on GitHub
- **License**: GPL-3.0-or-later
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,28 @@
schema_version = "1.0.0"
id = "basedplayblast"
name = "BasedPlayblast"
tagline = "Easily create playblasts from Blender and Flamenco"
version = "2.6.0"
type = "add-on"
maintainer = "RaincloudTheDragon <raincloudthedragon@gmail.com>"
license = ["GPL-3.0-or-later"]
blender_version_min = "4.2.0"
website = "https://github.com/RaincloudTheDragon/BasedPlayblast"
tags = ["Animation", "Render", "Workflow", "Video"]
[permissions]
files = "Import/export files and data"
[build]
paths_exclude_pattern = [
"__pycache__/",
"*.pyc",
".git/",
".github/",
"addon_updater*",
"basedplayblast_updater/"
]
@@ -0,0 +1,164 @@
import bpy # type: ignore
RAINYS_EXTENSIONS_REPO_NAME = "Rainy's Extensions"
RAINYS_EXTENSIONS_REPO_URL = (
"https://raw.githubusercontent.com/RaincloudTheDragon/rainys-blender-extensions/refs/heads/main/index.json"
)
_BOOTSTRAP_DONE = False
def _log(message: str) -> None:
print(f"RainysExtensionsCheck: {message}")
def ensure_rainys_extensions_repo(_deferred: bool = False) -> None:
    """
    Ensure the Rainy's Extensions repository is registered in Blender.

    Safe to import and call from multiple add-ons; the helper guards against
    doing the work more than once per Blender session via _BOOTSTRAP_DONE.

    Args:
        _deferred: internal flag set when re-invoked from the timer retry;
            prevents endless rescheduling while the context stays restricted.

    Bugfix: ``_ensure_attr`` previously fell through and returned ``None``
    when an attribute already held the desired value, so ``changed |= ...``
    raised ``TypeError: unsupported operand type(s) for |: 'bool' and
    'NoneType'`` in the common already-configured case. It now always
    returns a bool.
    """
    global _BOOTSTRAP_DONE
    if _BOOTSTRAP_DONE:
        return
    _log("starting repository verification")

    # During add-on registration bpy.context is a _RestrictContext and
    # preferences are inaccessible; retry once via a short one-shot timer.
    context_class_name = type(bpy.context).__name__
    if context_class_name == "_RestrictContext":
        if _deferred:
            _log("context still restricted after deferral; aborting repo check")
            return
        _log("context restricted; scheduling repo check retry")

        def _retry():
            ensure_rainys_extensions_repo(_deferred=True)
            return None  # returning None makes the timer one-shot

        bpy.app.timers.register(_retry, first_interval=0.5)
        return

    prefs = getattr(bpy.context, "preferences", None)
    if prefs is None:
        _log("no preferences available on context; skipping")
        return

    preferences_changed = False

    # NOTE(review): __name__ here is this helper module's name; whether it
    # matches the key used in prefs.addons depends on how the add-on is
    # packaged -- confirm against the actual registration name.
    addon_entry = None
    addons = getattr(prefs, "addons", None)
    if hasattr(addons, "get"):
        addon_entry = addons.get(__name__)
    elif addons is not None:
        try:
            addon_entry = addons[__name__]
        except Exception:
            addon_entry = None
    addon_prefs = getattr(addon_entry, "preferences", None) if addon_entry else None

    # Enable the experimental extension platform when the build exposes it.
    experimental = getattr(prefs, "experimental", None)
    if experimental and hasattr(experimental, "use_extension_platform"):
        if not experimental.use_extension_platform:
            experimental.use_extension_platform = True
            preferences_changed = True
            _log("enabled experimental extension platform")

    # Locate the extension-repository collection across Blender versions.
    repositories = None
    extensions_obj = getattr(prefs, "extensions", None)
    if extensions_obj:
        if hasattr(extensions_obj, "repos"):
            repositories = extensions_obj.repos
        elif hasattr(extensions_obj, "repositories"):
            repositories = extensions_obj.repositories
    if repositories is None:
        filepaths = getattr(prefs, "filepaths", None)
        repositories = getattr(filepaths, "extension_repos", None) if filepaths else None
    if repositories is None:
        _log("extension repositories collection missing; skipping")
        return

    def _repo_matches(repo) -> bool:
        # Match by remote URL under either attribute spelling.
        return getattr(repo, "remote_url", "") == RAINYS_EXTENSIONS_REPO_URL or getattr(
            repo, "url", ""
        ) == RAINYS_EXTENSIONS_REPO_URL

    matching_indices = [idx for idx, repo in enumerate(repositories) if _repo_matches(repo)]
    target_repo = None
    if matching_indices:
        target_repo = repositories[matching_indices[0]]
        # Drop duplicate entries pointing at the same remote URL.
        if len(matching_indices) > 1 and hasattr(repositories, "remove"):
            for dup_idx in reversed(matching_indices[1:]):
                try:
                    repositories.remove(dup_idx)
                    _log(f"removed duplicate repository entry at index {dup_idx}")
                except Exception as exc:
                    _log(f"could not remove duplicate repository at index {dup_idx}: {exc}")
    else:
        # Fall back to matching by display name (e.g. the URL was edited).
        target_repo = next(
            (
                repo
                for repo in repositories
                if getattr(repo, "name", "") == RAINYS_EXTENSIONS_REPO_NAME
            ),
            None,
        )

    if target_repo is None:
        _log("repo missing; creating new entry")
        if hasattr(repositories, "new"):
            target_repo = repositories.new()
        elif hasattr(repositories, "add"):
            target_repo = repositories.add()
        else:
            _log("repository collection does not support creation; aborting")
            return
    else:
        _log("repo entry already present; validating fields")

    changed = preferences_changed

    def _ensure_attr(obj, attr, value):
        # Returns True iff the attribute was actually updated.
        if hasattr(obj, attr) and getattr(obj, attr) != value:
            setattr(obj, attr, value)
            return True
        if not hasattr(obj, attr):
            _log(f"repository entry missing attribute '{attr}', skipping field")
            return False
        # attribute already holds the desired value (bugfix: was implicit None)
        return False

    changed |= _ensure_attr(target_repo, "name", RAINYS_EXTENSIONS_REPO_NAME)
    changed |= _ensure_attr(target_repo, "module", "rainys_extensions")
    changed |= _ensure_attr(target_repo, "use_remote_url", True)
    changed |= _ensure_attr(target_repo, "remote_url", RAINYS_EXTENSIONS_REPO_URL)
    changed |= _ensure_attr(target_repo, "use_sync_on_startup", True)
    changed |= _ensure_attr(target_repo, "use_cache", True)
    changed |= _ensure_attr(target_repo, "use_access_token", False)

    # Mark this add-on's own bootstrap flag the first time through.
    if addon_prefs and hasattr(addon_prefs, "repo_initialized") and not addon_prefs.repo_initialized:
        addon_prefs.repo_initialized = True
        changed = True

    if not changed:
        _log("repository already configured; skipping preference save")
        _BOOTSTRAP_DONE = True
        return

    if hasattr(bpy.ops, "wm") and hasattr(bpy.ops.wm, "save_userpref"):
        try:
            bpy.ops.wm.save_userpref()
            _log("preferences updated and saved")
        except Exception as exc:  # pragma: no cover
            print(f"RainysExtensionsCheck: could not save preferences after repo update -> {exc}")
    else:
        _log("preferences API unavailable; changes not persisted")
    _BOOTSTRAP_DONE = True
@@ -0,0 +1,12 @@
"""
Utility helpers for BasedPlayblast.
Grouped here so Blender version/compatibility helpers stay isolated from the
main add-on module.
"""
from . import version, compat
__all__ = ["version", "compat"]
@@ -0,0 +1,232 @@
"""
Compatibility helpers wrapping Blender version-specific logic.
Anything that differs between Blender 4.2 LTS, 4.5 LTS, and 5.0+ should live here
so the main add-on stays focused on user-facing behavior.
"""
from __future__ import annotations
import os
from typing import Iterable, Optional
try:
import bpy # type: ignore
from bpy.utils import register_class, unregister_class # type: ignore
except ImportError: # pragma: no cover - for static tooling
bpy = None # type: ignore
register_class = unregister_class = lambda cls: None # type: ignore
from . import version
# -- Registration helpers --------------------------------------------------
def safe_register_class(cls) -> bool:
    """Register *cls* with Blender without raising.

    Failures are logged to the console instead of propagating, so one bad
    class cannot abort the whole add-on registration. Returns True on
    success, False on failure.
    """
    try:
        register_class(cls)
    except Exception as exc:  # pragma: no cover - Blender runtime logging
        print(f"[BasedPlayblast] register fail: {cls.__name__}: {exc}")
        return False
    return True
def safe_unregister_class(cls) -> bool:
    """Unregister *cls* from Blender without raising.

    Mirrors safe_register_class: logs and returns False on failure instead
    of propagating, so teardown always completes. Returns True on success.
    """
    try:
        unregister_class(cls)
    except Exception as exc:  # pragma: no cover
        print(f"[BasedPlayblast] unregister fail: {cls.__name__}: {exc}")
        return False
    return True
# -- Scene/helpers ---------------------------------------------------------
def get_compositor_tree(scene):
    """Return the scene's compositor node tree, or None if absent.

    Blender 5.0 renamed the scene attribute from ``node_tree`` to
    ``compositing_node_tree``; pick whichever matches the running version.
    """
    attr = "compositing_node_tree" if version.is_version_at_least(5, 0, 0) else "node_tree"
    return getattr(scene, attr, None)
def is_geometry_nodes_modifier(modifier) -> bool:
    """Return True when *modifier* is a Geometry Nodes ('NODES') modifier."""
    modifier_type = getattr(modifier, "type", None)
    return modifier_type == "NODES"
def get_geometry_nodes_node_group(modifier):
    """Return the node group of a Geometry Nodes modifier, else None."""
    if not is_geometry_nodes_modifier(modifier):
        return None
    return getattr(modifier, "node_group", None)
# -- Render IO -------------------------------------------------------------
def set_video_file_format(scene) -> bool:
    """
    Force Blender onto a video-friendly output format.

    Returns True when a directly usable format was chosen; False means the
    caller should warn/abort (or expect a manual frame-to-video encode).

    - Blender 4.2/4.5: FFMPEG can be set directly for direct video output.
    - Blender 5.0+: image_settings.file_format no longer includes video
      formats, so PNG frames are written and encoded to video manually.
    """
    render = getattr(scene, "render", None) if scene else None
    if not render:
        return False
    if not version.is_version_at_least(5, 0, 0):
        # Pre-5.0: FFMPEG is still a valid image_settings file format.
        try:
            render.image_settings.file_format = "FFMPEG"
        except Exception as exc:
            print(f"[BasedPlayblast] FFMPEG set failed: {exc}")
            return False
        return True
    # Blender 5.0+: only still-image formats remain. Prefer lossless PNG
    # with zero compression for fast per-frame writes.
    if hasattr(render, "ffmpeg"):
        try:
            render.image_settings.file_format = "PNG"
            render.image_settings.compression = 0  # 0% compression = fastest PNG writes
            print("[BasedPlayblast] Blender 5.0: Using PNG with 0% compression "
                  "(fast, lossless quality). Blender 5.0 removed video formats from "
                  "image_settings.file_format, so we encode frames to video manually.")
            return True
        except Exception as exc:
            print(f"[BasedPlayblast] PNG with 0% compression failed: {exc}")
    # Last resort: PNG with default compression; returns False so the
    # caller knows no direct video format could be configured.
    try:
        render.image_settings.file_format = "PNG"
        print("[BasedPlayblast] video fallback -> PNG sequence (will encode manually)")
        return False
    except Exception as exc:
        print(f"[BasedPlayblast] PNG fallback failed: {exc}")
    return False
def viewport_opengl_render(context, area=None, region=None):
    """
    Invoke viewport OpenGL animation render with overrides tuned per version.

    Tries a context-overridden INVOKE_DEFAULT call first (so the render uses
    the supplied 3D viewport area/region); on TypeError — some Blender 5
    builds reject the keyword set — retries with an explicit
    ``view_context=False``. Returns True on success; any other exception is
    logged and re-raised. Requires a live ``bpy`` (raises RuntimeError when
    imported outside Blender).
    """
    if bpy is None:
        raise RuntimeError("bpy unavailable")
    is_blender_5 = version.is_version_at_least(5, 0, 0)

    def _call(**override_kwargs):
        # Pre-5.0 builds want view_context=True for a viewport render;
        # 5.0+ omits the flag entirely on the first attempt.
        with context.temp_override(**override_kwargs):
            bpy.ops.render.opengl(
                "INVOKE_DEFAULT",
                animation=True,
                sequencer=False,
                write_still=False,
                **({"view_context": True} if not is_blender_5 else {}),
            )

    def _resolve_region(target_area, candidate_region):
        # Prefer the caller's region if it is a WINDOW region; otherwise
        # scan the area for its WINDOW region.
        if candidate_region and getattr(candidate_region, "type", None) == "WINDOW":
            return candidate_region
        if target_area:
            for reg in target_area.regions:
                if getattr(reg, "type", None) == "WINDOW":
                    return reg
        return None

    target_region = _resolve_region(area, region)
    try:
        if area and target_region:
            override = context.copy()
            override["area"] = area
            override["region"] = target_region
            _call(**override)
            return True
        # No usable area/region override: fall back to the current context.
        bpy.ops.render.opengl(
            "INVOKE_DEFAULT", animation=True, sequencer=False, write_still=False
        )
        return True
    except TypeError:
        # Blender 5 requires explicit view_context flag in some builds
        if area and target_region:
            override = context.copy()
            override["area"] = area
            override["region"] = target_region
            with context.temp_override(**override):
                bpy.ops.render.opengl(
                    "INVOKE_DEFAULT",
                    animation=True,
                    sequencer=False,
                    write_still=False,
                    view_context=False,
                )
            return True
        bpy.ops.render.opengl(
            "INVOKE_DEFAULT",
            animation=True,
            sequencer=False,
            write_still=False,
            view_context=False,
        )
        return True
    except Exception as exc:
        print(f"[BasedPlayblast] OpenGL render failed: {exc}")
        raise
# -- Studio lights helpers --------------------------------------------------
def iter_studio_light_dirs(blender_binary_path: Optional[str]) -> Iterable[str]:
    """Yield candidate 'studiolights/world' directories for this install.

    Candidates are yielded in priority order without duplicates; existence
    is NOT checked here — callers filter (see find_first_existing_path).
    """
    if not blender_binary_path:
        return []
    blender_dir = os.path.dirname(blender_binary_path)
    # Compatibility bucket (e.g. "4.5"), with any "+" suffix stripped.
    version_token = version.get_version_category().split("+")[0]
    parent_dir = os.path.dirname(blender_dir)
    grandparent_dir = os.path.dirname(os.path.dirname(blender_binary_path))
    candidates = (
        os.path.join(blender_dir, "datafiles", "studiolights", "world"),
        os.path.join(blender_dir, version_token, "datafiles", "studiolights", "world"),
        os.path.join(parent_dir, version_token, "datafiles", "studiolights", "world"),
        os.path.join(grandparent_dir, version_token, "datafiles", "studiolights", "world"),
        os.path.join("C:\\Program Files\\Blender Foundation", f"Blender {version_token}", version_token, "datafiles", "studiolights", "world"),
    )
    # dict.fromkeys keeps first-seen order while dropping duplicates.
    yield from dict.fromkeys(path for path in candidates if path)
def find_first_existing_path(paths: Iterable[str]) -> Optional[str]:
    """Return the first non-empty path in *paths* that exists on disk, or None."""
    return next((p for p in paths if p and os.path.exists(p)), None)
def resolve_hdri_path(studio_dir: Optional[str]) -> Optional[str]:
    """Pick an HDRI (.exr) file inside *studio_dir*.

    Checks a list of preferred bundled names first, then falls back to the
    first .exr found in the directory listing. Returns None when nothing
    matches or the directory cannot be listed.
    """
    if not studio_dir:
        return None
    preferred = (
        "forest.exr",
        "studio.exr",
        "city.exr",
        "courtyard.exr",
        "night.exr",
        "sunrise.exr",
        "sunset.exr",
    )
    for name in preferred:
        path = os.path.join(studio_dir, name)
        if os.path.exists(path):
            return path
    # Fallback: any .exr in listing order (case-insensitive extension match).
    try:
        for entry in os.listdir(studio_dir):
            if entry.lower().endswith(".exr"):
                return os.path.join(studio_dir, entry)
    except Exception as exc:
        print(f"[BasedPlayblast] Studio dir listing failed: {exc}")
    return None
@@ -0,0 +1,65 @@
"""
Blender version helpers for BasedPlayblast.
Keeps the add-on logic clean by centralizing all version comparisons and
common constants for the supported tracks (4.2 LTS, 4.5 LTS, 5.0+).
"""
from __future__ import annotations
try: # Blender runtime
import bpy # type: ignore
except ImportError: # During static analysis or packaging
bpy = None # type: ignore
# Targeted anchors: the baseline release of each Blender track this add-on
# supports (used by is_supported() and the version helpers below).
VERSION_4_2_LTS = (4, 2, 0)  # minimum supported version
VERSION_4_5_LTS = (4, 5, 0)
VERSION_5_0 = (5, 0, 0)
def _current_version() -> tuple[int, int, int]:
    """Return Blender's (major, minor, patch) tuple, or (0, 0, 0) outside Blender."""
    if bpy is None:
        return (0, 0, 0)
    detected = getattr(bpy.app, "version", None)
    if not detected:
        return (0, 0, 0)
    return detected  # type: ignore[return-value]
def get_blender_version() -> tuple[int, int, int]:
    """Public accessor for the running Blender version tuple."""
    return _current_version()
def get_version_string() -> str:
    """Format the running Blender version as 'major.minor.patch'."""
    current = _current_version()
    return ".".join((str(current[0]), str(current[1]), str(current[2])))
def is_version_at_least(major: int, minor: int = 0, patch: int = 0) -> bool:
    """True when the running Blender version is >= (major, minor, patch)."""
    return _current_version() >= (major, minor, patch)
def is_version_less_than(major: int, minor: int = 0, patch: int = 0) -> bool:
    """True when the running Blender version is < (major, minor, patch)."""
    return _current_version() < (major, minor, patch)
def get_version_category() -> str:
    """
    Collapse Blender versions into the compatibility buckets we actively test.

    Returns "4.2" for 4.x below 4.5, "4.5" for the rest of 4.x, "5.0+" for
    5.x and newer, and "major.minor" verbatim for anything older than 4.
    """
    major, minor, _patch = _current_version()
    if major >= 5:
        return "5.0+"
    if major == 4:
        return "4.2" if minor < 5 else "4.5"
    return f"{major}.{minor}"
def is_supported() -> bool:
    """Check if the detected version meets our minimum target (4.2 LTS)."""
    return _current_version() >= VERSION_4_2_LTS
@@ -1,20 +1,6 @@
bl_info = {
"name": "Raincloud's Bulk Scene Tools",
"author": "RaincloudTheDragon",
"version": (0, 9, 1),
"blender": (4, 5, 0),
"location": "View3D > Sidebar > Edit Tab",
"description": "Tools for bulk operations on scene data",
"warning": "",
"doc_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
"category": "Scene",
"maintainer": "RaincloudTheDragon",
"support": "COMMUNITY",
}
import bpy # type: ignore
from bpy.types import AddonPreferences, Operator, Panel # type: ignore
from bpy.props import BoolProperty, IntProperty # type: ignore
from bpy.types import AddonPreferences, Panel # type: ignore
from bpy.props import BoolProperty # type: ignore
from .panels import bulk_viewport_display
from .panels import bulk_data_remap
from .panels import bulk_path_management
@@ -23,27 +9,12 @@ from .ops.AutoMatExtractor import AutoMatExtractor, AUTOMAT_OT_summary_dialog
from .ops.Rename_images_by_mat import Rename_images_by_mat, RENAME_OT_summary_dialog
from .ops.FreeGPU import BST_FreeGPU
from .ops import ghost_buster
from . import updater
from . import rainys_repo_bootstrap
# Addon preferences class for update settings
class BST_AddonPreferences(AddonPreferences):
bl_idname = __package__
# Auto Updater settings
check_for_updates: BoolProperty(
name="Check for Updates on Startup",
description="Automatically check for new versions of the addon when Blender starts",
default=True,
)
update_check_interval: IntProperty( # type: ignore
name="Update check interval (hours)",
description="How often to check for updates (in hours)",
default=24,
min=1,
max=168 # 1 week max
)
# AutoMat Extractor settings
automat_common_outside_blend: BoolProperty(
name="Place 'common' folder outside 'blend' folder",
@@ -54,30 +25,6 @@ class BST_AddonPreferences(AddonPreferences):
def draw(self, context):
layout = self.layout
# Custom updater UI
box = layout.box()
box.label(text="Update Settings")
row = box.row()
row.prop(self, "check_for_updates")
row = box.row()
row.prop(self, "update_check_interval")
# Check for updates button
row = box.row()
row.operator("bst.check_for_updates", icon='FILE_REFRESH')
# Show update status if available
if updater.UpdaterState.update_available:
box.label(text=f"Update available: v{updater.UpdaterState.update_version}")
row = box.row()
row.operator("bst.install_update", icon='IMPORT')
row = box.row()
row.operator("wm.url_open", text="Download Update").url = updater.UpdaterState.update_download_url
elif updater.UpdaterState.checking_for_updates:
box.label(text="Checking for updates...")
elif updater.UpdaterState.error_message:
box.label(text=f"Error checking for updates: {updater.UpdaterState.error_message}")
# AutoMat Extractor settings
box = layout.box()
box.label(text="AutoMat Extractor Settings")
@@ -124,13 +71,6 @@ def register():
except Exception as e:
print(f"Error accessing preferences: {str(e)}")
# Register the updater module
updater.register()
# Check for updates on startup
if hasattr(updater, "check_for_updates"):
updater.check_for_updates()
# Register modules
bulk_scene_general.register()
bulk_viewport_display.register()
@@ -150,6 +90,8 @@ def register():
addon_keymaps.append((km, kmi))
bpy.types.Scene._bst_keymaps = addon_keymaps
rainys_repo_bootstrap.register()
def unregister():
# Remove keybinds
addon_keymaps = getattr(bpy.types.Scene, '_bst_keymaps', [])
@@ -183,11 +125,7 @@ def unregister():
bulk_scene_general.unregister()
except Exception:
pass
# Unregister the updater module
try:
updater.unregister()
except Exception:
pass
rainys_repo_bootstrap.unregister()
# Unregister classes from this module
for cls in reversed(classes):
try:
@@ -0,0 +1,29 @@
schema_version = "1.0.0"
id = "rainclouds_bulk_scene_tools"
name = "Raincloud's Bulk Scene Tools"
tagline = "Bulk utilities for optimizing scene data"
version = "0.12.0"
type = "add-on"
maintainer = "RaincloudTheDragon <raincloudthedragon@gmail.com>"
license = ["GPL-3.0-or-later"]
blender_version_min = "4.2.0"
website = "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools"
tags = ["Scene", "Workflow", "Materials"]
[permissions]
files = "Read and write external resources referenced by scenes"
[build]
paths_exclude_pattern = [
"__pycache__/",
"*.pyc",
".git/",
".github/",
"docs/",
"tests/",
]
@@ -0,0 +1,155 @@
# v0.12.0
- Integrate Rainy's Extension Repo bootstrapper
- Set minimum Blender version to 4.2 for #9
# v0.11.0
- Ported to the Blender extension type
- Removed CGCookie autoupdater
# v 0.10.0
- **AutoMat Extractor**
- Added UDIM/tiled image detection so multi-tile textures are organized alongside standard images without errors. #8
- Path builder now emits UDIM filename templates (e.g., `name.[UDIM].png`) plus per-tile targets (e.g., `name.1001.png`), preventing collisions during relocation.
- Remapping helper sets tile-level `filepath` values and ensures directories exist before saving.
- Saving routine attempts whole-image writes first, then falls back to per-tile saves via the Image Editor context, with summary logs noting UDIM sets processed.
- **Viewport Colors**
- Added a Refresh Material Previews button that clears thumbnails, assigns each material to a temporary preview mesh, and forces `preview_ensure()` so stubborn viewport colors now reliably pick up thumbnail data. #7
# v 0.9.1
- **Convert Relations to Constraint**
- Bugfix: Now converts bone parenting as intended
# v 0.9.0
- **Convert Relations to Constraint**: Operator in Animation Data section that converts regular parenting relationships to Child Of constraints for selected objects, maintaining world position and transform hierarchy
- Bugfix: Adapted old operator that wasn't drawing due to using the wrong icon string name.
# v 0.8.1
- Delete Single Keyframe Actions: fixed bug caused by not ignoring linked files
# v 0.8.0
## New Features
- **Delete Single Keyframe Actions**: New operator to remove unwanted animation actions (no keyframes, single keyframe, or all keyframes on same frame)
- **Find Material Users**: New operator with native material selector interface that displays detailed material usage analysis in a popup dialog, showing:
- Object users with material slots
- Node tree references
- Material node tree usage
- Blender's internal user count and fake user status
- **Remove Unused Material Slots**: New operator to clean up unused material slots from all mesh objects in the scene
- **Enhanced Bulk Scene General Panel**: Reorganized panel with new sections:
- Materials section containing material analysis and cleanup tools
- Animation Data section for keyframe/action management
- All new operators integrated with consistent UI and project formatting standards
## Fixes
- PathMan
- Automat summary no longer gives invoke error
- Fixed timing/cancellation error when cancelling Rename Flat Colors operation
- Pack files operator no longer throws AttributeError for is_generated (now uses img.source != 'GENERATED')
- Pack files operator now properly skips special Blender images like "Render Result" and "Viewer Node" that can't be packed
- General
- Removed debug print statement that was showing "Subdivision Surface modifiers removed from all objects" on every addon load
# v 0.7.1
## Ghost Buster Enhancements
### Added
- **Low Priority Ghost Detection**: New option to delete objects not in scenes with no legitimate use and users < 2
- **Smart Instance Collection Detection**: Ghost Buster now properly detects when objects are used by instance collections in scenes
- **Enhanced Legitimacy Checks**: Improved detection of objects with valid uses outside scenes (constraints, modifiers, particle systems only count if the using object is in a scene)
### Improved
- **More Accurate Ghost Detection**: Eliminated false positives by checking if instance collection targets are actually being used by scene objects
- **Better Classification**: Objects are now classified as "Legitimate", "Ghosts (users >= 2)", or "Low Priority (users < 2)" with clearer reasoning
- Cleaned UI
### Technical Changes
- Added `is_object_used_by_scene_instance_collections()` function for precise instance collection detection
- Enhanced `is_object_legitimate_outside_scene()` with scene-aware checks for modifiers, constraints, and particle systems
- Updated ghost analysis and removal logic to use more precise categorization
- Added scene property `ghost_buster_delete_low_priority` for user preference storage
# v 0.7.0
## New: Ghost Detection System
- **Universal Object Analysis**: Expanded ghost detection from CC-objects only to all object types (meshes, empties, curves, etc.)
- **Enhanced Safety Framework**: Added comprehensive protection for legitimate objects outside scenes:
- WGT rig widgets (`WGT-*` objects)
- Modifier targets (curve modifiers, constraints)
- Constraint targets and references
- Particle system objects
- Collection instance objects (linked collection references)
- **Smart Classification**: Objects not in scenes now categorized as:
- `LEGITIMATE`: Has valid use outside scenes (protected)
- `LOW PRIORITY`: Only collection reference (preserved)
- `GHOST`: Multiple users but not in scenes (removed)
- **Conservative Cleanup Logic**: Only removes objects with 2+ users that have no legitimate purpose
- **Updated UI**: Ghost Detector popup now shows "Ghost Objects Analysis" with enhanced categorization and object type details
- **Improved Safety**: All linked/library content automatically protected from ghost detection
# v 0.6.1
## Bug Fixes
- **Fixed flat color detection**: Redesigned algorithm with exact pixel matching and smart sampling
- **Fixed AutoMat Extractor**: Now properly organizes images by material instead of dumping everything to common folder
- **Fixed viewport color setting**: Resolved context restriction errors with deferred color application
- **Fixed timer performance**: Reduced timer frequency and improved cancellation reliability
- **Enhanced debugging**: Added comprehensive console reporting for all bulk operations
## Improvements
- Better performance with optimized sampling
- More reliable cancellation system
- Context-safe operations that don't interfere with Blender's drawing state
# v 0.6.0
- **Enhancement: Progress Reporting & Cancellation**
- Some of PathMan's operators are quite resource-intensive. Due to Python's GIL, I haven't been able to figure out how to run some of these more efficiently. Without the console window, you're flying blind, so I've integrated a loading bar with progress reporting for the following operators:
- Flat Color Texture Renamer
- Remove Extensions
- Save All to Image Paths
- Remap Selected
- Rename by Material
- AutoMat Extractor
# v 0.5.1
- **Enhanced AutoMat Extractor:**
- Added a crucial safety check to prevent textures from overwriting each other if they resolve to the same filename (e.g., `Image.001.png` and `Image.002.png` both becoming `Image.png`).
- The operator now correctly sanitizes names with numerical suffixes before saving.
- A new summary dialog now appears after the operation, reporting how many files were extracted successfully and listing any files that were skipped due to naming conflicts.
- Added a user preference to control the location of the `common` folder, allowing it to be placed either inside or outside the blend file's specific texture folder. A checkbox for this setting was added to the UI.
- **Improved Suffix Handling:**
- The "Rename by Material" tool now correctly preserves spaces in packed texture names (e.g., `Flow Pack` instead of `FlowPack`).
- Added support for underscore-separated packed texture names (e.g., `flow_pack`).
- **Bug Fixes:**
- Resolved multiple `AttributeError` and `TypeError` exceptions that occurred due to incorrect addon name lookups and invalid icon names, making the UI and addon registration more robust.
# v 0.5.0
- **Integrated Scene General: Free GPU VRAM**
- **Integrated PathMan: Automatic Material Extractor**
- **Integrated PathMan: Rename Image Textures by Material**: Added comprehensive texture suffix recognition
- Recognizes many Character Creator suffixes
- Recognizes most standard material suffixes
- Images with unrecognized suffixes are skipped instead of renamed, preventing unintended modifications
- Enhanced logging: Unrecognized suffix images are listed separately for easy identification
- **UI Improvements**:
- Rearranged workflow layout: Make Paths Relative/Absolute moved to main workflow section
- Remap Selected moved under path preview for better workflow progression
- Rename by Material and AutoMat Extractor repositioned after Remap Selected
- Added Autopack toggle at beginning of workflow sections (both Node Editor and 3D Viewport)
- Consolidated draw functions: Node Editor panel now serves as master template for both panels
# v 0.4.1
- Fixed traceback error causing remap to fail to draw buttons
# v 0.4.0
Overhaul! Added new Scene General panel, major enhancements to all panels and functions.
# v0.3.0
- Added image path remapping for unpacked images, keeping them organized.
@@ -0,0 +1,540 @@
import bpy
import os
import re
from ..panels.bulk_path_management import (
get_image_extension,
bulk_remap_paths,
set_image_paths,
ensure_directory_for_path,
)
class AUTOMAT_OT_summary_dialog(bpy.types.Operator):
    """Show AutoMat Extractor operation summary.

    Informational popup invoked by AutoMatExtractor after the extraction
    finishes; all data is passed in through the operator properties below.
    """
    bl_idname = "bst.automat_summary_dialog"
    bl_label = "AutoMat Extractor Summary"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Properties to store summary data (populated by the caller via
    # bpy.ops.bst.automat_summary_dialog('INVOKE_DEFAULT', ...)).
    total_selected: bpy.props.IntProperty(default=0)
    success_count: bpy.props.IntProperty(default=0)
    overwrite_skipped_count: bpy.props.IntProperty(default=0)
    failed_remap_count: bpy.props.IntProperty(default=0)
    # Newline-separated detail text; rendered one label per line in draw().
    overwrite_details: bpy.props.StringProperty(default="")
    failed_remap_details: bpy.props.StringProperty(default="")

    def draw(self, context):
        # Headline, counts box, then optional detail boxes for problems.
        layout = self.layout
        layout.label(text="AutoMat Extractor - Summary", icon='INFO')
        layout.separator()
        box = layout.box()
        col = box.column(align=True)
        col.label(text=f"Total selected images: {self.total_selected}")
        col.label(text=f"Successfully extracted: {self.success_count}", icon='CHECKMARK')
        # Only surface problem counts when something actually went wrong.
        if self.overwrite_skipped_count > 0:
            col.label(text=f"Skipped to prevent overwrite: {self.overwrite_skipped_count}", icon='ERROR')
        if self.failed_remap_count > 0:
            col.label(text=f"Failed to remap (path issue): {self.failed_remap_count}", icon='ERROR')
        if self.overwrite_details:
            layout.separator()
            box = layout.box()
            box.label(text="Overwrite Conflicts (Skipped):", icon='FILE_TEXT')
            for line in self.overwrite_details.split('\n'):
                if line.strip():
                    box.label(text=line)
        if self.failed_remap_details:
            layout.separator()
            box = layout.box()
            box.label(text="Failed Remaps:", icon='FILE_TEXT')
            for line in self.failed_remap_details.split('\n'):
                if line.strip():
                    box.label(text=line)

    def execute(self, context):
        # Nothing to do on confirm; the popup is informational only.
        return {'FINISHED'}

    def invoke(self, context, event):
        # Popup wide enough to show long file paths without wrapping.
        return context.window_manager.invoke_popup(self, width=500)
class AutoMatExtractor(bpy.types.Operator):
bl_idname = "bst.automatextractor"
bl_label = "AutoMatExtractor"
bl_description = "Pack selected images and extract them with organized paths by blend file and material"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
    """Validate the selection and start the timer-driven extraction.

    The heavy lifting happens in _process_step(), registered as an
    application timer so the UI stays responsive during long operations.
    Returns {'CANCELLED'} when preferences are unavailable or no images
    are selected; {'FINISHED'} once the timer is scheduled.
    """
    # Resolve addon preferences; __package__ is dotted (e.g. "pkg.ops"),
    # so the addon registry key is the first component.
    addon_name = __package__.split('.')[0]
    addon = context.preferences.addons.get(addon_name)
    if addon is None:
        # Fix: .get(...) can return None; dereferencing .preferences on it
        # raised AttributeError. Cancel with a clear message instead.
        self.report({'ERROR'}, f"Add-on preferences not found for '{addon_name}'")
        return {'CANCELLED'}
    prefs = addon.preferences
    common_outside = prefs.automat_common_outside_blend
    # Get selected images (bst_selected is a per-image selection flag)
    selected_images = [img for img in bpy.data.images if hasattr(img, "bst_selected") and img.bst_selected]
    if not selected_images:
        self.report({'WARNING'}, "No images selected for extraction")
        return {'CANCELLED'}
    # Set up progress tracking
    props = context.scene.bst_path_props
    props.is_operation_running = True
    props.operation_progress = 0.0
    props.operation_status = f"Preparing AutoMat extraction for {len(selected_images)} images..."
    # Store state for the timer-driven step machine (_process_step).
    self.selected_images = selected_images
    self.common_outside = common_outside
    self.current_step = 0
    self.current_index = 0
    self.packed_count = 0
    self.success_count = 0
    self.overwrite_skipped = []
    self.failed_list = []
    self.path_mapping = {}
    self.udim_summary = {
        "found": 0,
        "saved": 0,
    }
    # Start timer for processing
    bpy.app.timers.register(self._process_step)
    return {'FINISHED'}
def _process_step(self):
    """Process AutoMat extraction in steps to avoid blocking the UI.

    Runs as a bpy.app.timers callback: returning a float re-schedules the
    next tick after that many seconds; returning None stops the timer.
    All state (current_step/current_index, counters, path_mapping) lives
    on self and is initialized by execute().
    """
    props = bpy.context.scene.bst_path_props
    # Check for cancellation (set from the UI)
    if props.cancel_operation:
        props.is_operation_running = False
        props.operation_progress = 0.0
        props.operation_status = "Operation cancelled"
        props.cancel_operation = False
        return None  # stop the timer
    if self.current_step == 0:
        # Step 1: Pack images (progress 0-25%)
        if self.current_index >= len(self.selected_images):
            # Packing complete, move to next step
            self.current_step = 1
            self.current_index = 0
            props.operation_status = "Removing extensions from image names..."
            props.operation_progress = 25.0
            return 0.01
        # Pack current image
        img = self.selected_images[self.current_index]
        props.operation_status = f"Packing {img.name}..."
        if not img.packed_file:
            try:
                img.pack()
                self.packed_count += 1
            except Exception as e:
                # Continue even if packing fails
                pass
        self.current_index += 1
        progress = (self.current_index / len(self.selected_images)) * 25.0
        props.operation_progress = progress
    elif self.current_step == 1:
        # Step 2: Remove extensions (this is a quick operation)
        try:
            bpy.ops.bst.remove_extensions()
        except Exception as e:
            pass  # Continue even if this fails
        self.current_step = 2
        self.current_index = 0
        props.operation_status = "Analyzing material usage..."
        props.operation_progress = 30.0
    elif self.current_step == 2:
        # Step 3: Organize images by material usage (progress 30-50%)
        if self.current_index >= len(self.selected_images):
            # Analysis complete, move to path building
            self.current_step = 3
            self.current_index = 0
            props.operation_status = "Building path mapping..."
            props.operation_progress = 50.0
            return 0.01
        # Get material mapping for all selected images (first tick only)
        if self.current_index == 0:
            self.material_mapping = self.get_image_material_mapping(self.selected_images)
            print(f"DEBUG: Material mapping created for {len(self.selected_images)} images")
        # This step is quick, just mark progress
        self.current_index += 1
        progress = 30.0 + (self.current_index / len(self.selected_images)) * 20.0
        props.operation_progress = progress
    elif self.current_step == 3:
        # Step 4: Build path mapping (progress 50-70%)
        if self.current_index >= len(self.selected_images):
            # Path building complete, move to remapping
            self.current_step = 4
            self.current_index = 0
            props.operation_status = "Remapping image paths..."
            props.operation_progress = 70.0
            return 0.01
        # Build path for current image
        img = self.selected_images[self.current_index]
        props.operation_status = f"Building path for {img.name}..."
        # Get blend file name
        blend_name = bpy.path.basename(bpy.data.filepath)
        if blend_name:
            blend_name = os.path.splitext(blend_name)[0]
        else:
            blend_name = "untitled"
        blend_name = self.sanitize_filename(blend_name)
        # Determine common path
        if self.common_outside:
            common_path_part = "common"
        else:
            common_path_part = f"{blend_name}\\common"
        # Get extension and build path
        extension = get_image_extension(img)
        sanitized_base_name = self.sanitize_filename(img.name)
        filename = f"{sanitized_base_name}{extension}"
        if img.name.startswith('#'):
            # Flat colors go to FlatColors subfolder
            base_folder = f"//textures\\{common_path_part}\\FlatColors"
        else:
            # Check material usage for this image
            materials_using_image = self.material_mapping.get(img.name, [])
            if not materials_using_image:
                # No materials found, put in common folder
                base_folder = f"//textures\\{common_path_part}"
                print(f"DEBUG: {img.name} - No materials found, using common folder")
            elif len(materials_using_image) == 1:
                # Used by exactly one material, organize by material name
                material_name = self.sanitize_filename(materials_using_image[0])
                base_folder = f"//textures\\{blend_name}\\{material_name}"
                print(f"DEBUG: {img.name} - Used by {material_name}, organizing by material")
            else:
                # Used by multiple materials, put in common folder
                base_folder = f"//textures\\{common_path_part}"
                print(f"DEBUG: {img.name} - Used by multiple materials: {materials_using_image}, using common folder")
        is_udim = self.is_udim_image(img)
        if is_udim:
            # UDIM sets map to a dict entry: filename template + per-tile paths.
            udim_mapping = self.build_udim_mapping(base_folder, sanitized_base_name, extension, img)
            self.path_mapping[img.name] = udim_mapping
            self.udim_summary["found"] += 1
            print(f"DEBUG: {img.name} - UDIM detected with {len(udim_mapping.get('tiles', {}))} tiles")
        else:
            # Fix: use the sanitized filename built above; the previous
            # placeholder path made every non-UDIM image collide on the
            # same "(unknown)" name (and left `filename` unused).
            path = f"{base_folder}\\{filename}"
            self.path_mapping[img.name] = path
        self.current_index += 1
        progress = 50.0 + (self.current_index / len(self.selected_images)) * 20.0
        props.operation_progress = progress
    elif self.current_step == 4:
        # Step 5: Remap paths (progress 70-85%)
        if self.current_index >= len(self.path_mapping):
            # Remapping complete, move to saving
            self.current_step = 5
            self.current_index = 0
            props.operation_status = "Saving images to new locations..."
            props.operation_progress = 85.0
            return 0.01
        # Remap current image
        img_name = list(self.path_mapping.keys())[self.current_index]
        mapping_entry = self.path_mapping[img_name]
        props.operation_status = f"Remapping {img_name}..."
        if isinstance(mapping_entry, dict) and mapping_entry.get("udim"):
            success = set_image_paths(
                img_name,
                mapping_entry.get("template", ""),
                tile_paths=mapping_entry.get("tiles", {})
            )
        else:
            success = set_image_paths(img_name, mapping_entry)
        if success:
            self.success_count += 1
        else:
            self.failed_list.append(img_name)
        self.current_index += 1
        progress = 70.0 + (self.current_index / len(self.path_mapping)) * 15.0
        props.operation_progress = progress
    elif self.current_step == 5:
        # Step 6: Save images (progress 85-100%)
        if self.current_index >= len(self.selected_images):
            # Operation complete
            props.is_operation_running = False
            props.operation_progress = 100.0
            props.operation_status = f"Completed! Extracted {self.success_count} images{f', {len(self.failed_list)} failed' if self.failed_list else ''}"
            # Show summary dialog
            self.show_summary_dialog(
                bpy.context,
                total_selected=len(self.selected_images),
                success_count=self.success_count,
                overwrite_skipped_list=self.overwrite_skipped,
                failed_remap_list=self.failed_list
            )
            # Console summary
            print(f"\n=== AUTOMAT EXTRACTION SUMMARY ===")
            print(f"Total images processed: {len(self.selected_images)}")
            print(f"Successfully extracted: {self.success_count}")
            print(f"Failed to remap: {len(self.failed_list)}")
            # Show organization breakdown
            material_organized = 0
            common_organized = 0
            flat_colors = 0
            for img_name, path in self.path_mapping.items():
                current_path = path["template"] if isinstance(path, dict) else path
                if "FlatColors" in current_path:
                    flat_colors += 1
                elif "common" in current_path:
                    common_organized += 1
                else:
                    material_organized += 1
            print(f"\nOrganization breakdown:")
            print(f" Material-specific folders: {material_organized}")
            print(f" Common folder: {common_organized}")
            print(f" Flat colors: {flat_colors}")
            # Show material organization details
            if material_organized > 0:
                print(f"\nMaterial organization details:")
                material_folders = {}
                for img_name, path in self.path_mapping.items():
                    if "FlatColors" not in path and "common" not in path:
                        # Extract material name from path
                        if isinstance(path, dict):
                            continue
                        path_parts = path.split('\\')
                        if len(path_parts) >= 3:
                            material_name = path_parts[-2]
                            if material_name not in material_folders:
                                material_folders[material_name] = []
                            material_folders[material_name].append(img_name)
                for material_name, images in material_folders.items():
                    print(f" {material_name}: {len(images)} images")
            print(f"=====================================\n")
            if self.udim_summary["found"]:
                print(f"UDIM images processed: {self.udim_summary['found']} (saved successfully: {self.udim_summary['saved']})")
            # Force UI update
            for area in bpy.context.screen.areas:
                area.tag_redraw()
            return None
        # Save current image
        img = self.selected_images[self.current_index]
        props.operation_status = f"Saving {img.name}..."
        mapping_entry = self.path_mapping.get(img.name)
        if isinstance(mapping_entry, dict) and mapping_entry.get("udim"):
            self.save_udim_image(img, mapping_entry)
        else:
            self.save_standard_image(img)
        self.current_index += 1
        progress = 85.0 + (self.current_index / len(self.selected_images)) * 15.0
        props.operation_progress = progress
    # Force UI update
    for area in bpy.context.screen.areas:
        area.tag_redraw()
    # Continue processing
    return 0.01
def show_summary_dialog(self, context, total_selected, success_count, overwrite_skipped_list, failed_remap_list):
    """Show a popup dialog summarizing the extraction run.

    overwrite_skipped_list / failed_remap_list are lists of
    (image_name, path) pairs; they are rendered one pair per line.
    """
    overwrite_details = "\n".join(
        f"'{name}' -> '{path}'" for name, path in overwrite_skipped_list
    )
    failed_remap_details = "\n".join(
        f"'{name}' -> '{path}'" for name, path in failed_remap_list
    )
    # Hand everything to the dedicated dialog operator for display.
    bpy.ops.bst.automat_summary_dialog(
        'INVOKE_DEFAULT',
        total_selected=total_selected,
        success_count=success_count,
        overwrite_skipped_count=len(overwrite_skipped_list),
        failed_remap_count=len(failed_remap_list),
        overwrite_details=overwrite_details,
        failed_remap_details=failed_remap_details,
    )
def sanitize_filename(self, filename):
    """Reduce *filename* to a base name safe on Windows/Mac/Linux."""
    # Strip Blender-style numeric duplicate suffixes first (.001, .002, ...)
    stem = re.sub(r'\.\d{3}$', '', filename)
    # Then drop a conventional file extension, if one remains.
    stem = os.path.splitext(stem)[0]
    # Replace characters that are invalid in file/folder names, and trim
    # leading/trailing spaces and dots (both problematic on Windows).
    cleaned = re.sub(r'[<>:"/\\|?*]', '_', stem).strip(' .')
    # Never return an empty name.
    return cleaned or "unnamed"
def get_image_material_mapping(self, images):
    """Map each image name in *images* to the material names that use it."""
    usage = {img.name: [] for img in images}
    for mat in bpy.data.materials:
        if not mat.use_nodes:
            continue
        # Collect each image once per material, even when several texture
        # nodes reference the same image.
        used_names = {
            node.image.name
            for node in mat.node_tree.nodes
            if node.type == 'TEX_IMAGE' and node.image
        }
        for name in used_names:
            if name in usage:
                usage[name].append(mat.name)
    return usage
def is_udim_image(self, image):
    """Return True when *image* carries UDIM/tiled data."""
    tiles = getattr(image, "tiles", None)
    # Multiple tiles always means UDIM, whatever the declared source.
    if tiles and len(tiles) > 1:
        return True
    # Otherwise trust the image source flag.
    return hasattr(image, "source") and image.source == 'TILED'
def build_udim_mapping(self, base_folder, base_name, extension, image):
    """Build the path-mapping dict for a UDIM image.

    Returns a dict with a ``<UDIM>`` template path plus one concrete
    path per tile, keyed by tile number (as a string).
    """
    template_path = f"{base_folder}\\{base_name}.<UDIM>{extension}"
    tile_paths = {}
    for tile in getattr(image, "tiles", []):
        # Default to 1001, the first tile in UDIM numbering.
        number = str(getattr(tile, "number", "1001"))
        tile_paths[number] = f"{base_folder}\\{base_name}.{number}{extension}"
    return {
        "udim": True,
        "template": template_path,
        "tiles": tile_paths,
    }
def save_udim_image(self, image, mapping):
    """Save a UDIM image, falling back to per-tile saving on failure.

    Increments self.udim_summary["saved"] when anything was written.
    """
    try:
        # Fast path: let Blender save all tiles in one call.
        image.save()
        saved = True
    except Exception as exc:
        print(f"DEBUG: UDIM bulk save failed for {image.name}: {exc}")
        # Slow path: save each tile through the image editor override.
        saved = self._save_udim_tiles_individually(image, mapping)
    if saved:
        self.udim_summary["saved"] += 1
    return saved
def save_standard_image(self, image):
    """Save a non-UDIM image in place.

    Returns True on success, False when the image has no ``save``
    support or the save itself raised.
    """
    try:
        if hasattr(image, 'save'):
            image.save()
            return True
        # Bug fix: previously fell through and returned None here, so
        # callers checking `is False` never saw the failure.
        return False
    except Exception as e:
        print(f"DEBUG: Failed to save image {image.name}: {e}")
        return False
def _save_udim_tiles_individually(self, image, mapping):
    """Fallback when image.save() fails on UDIMs: save tiles one by one.

    Returns True when at least one tile was written.
    """
    tile_paths = mapping.get("tiles", {})
    saved_any = False
    for tile in getattr(image, "tiles", []):
        number = str(getattr(tile, "number", "1001"))
        destination = tile_paths.get(number)
        # Tiles without a mapped destination are silently skipped.
        if not destination:
            continue
        try:
            ensure_directory_for_path(destination)
            self._save_tile_via_image_editor(image, number, destination)
        except Exception as exc:
            print(f"DEBUG: Failed to save UDIM tile {number} for {image.name}: {exc}")
        else:
            saved_any = True
    return saved_any
def _save_tile_via_image_editor(self, image, tile_number, filepath):
    """Use an IMAGE_EDITOR override to save a specific tile"""
    # Try to find an existing image editor to reuse Blender UI context
    for area in bpy.context.screen.areas:
        if area.type != 'IMAGE_EDITOR':
            continue
        # Build a context override pointing at this editor's window region.
        override = bpy.context.copy()
        override['area'] = area
        override['space_data'] = area.spaces.active
        region = next((r for r in area.regions if r.type == 'WINDOW'), None)
        if region is None:
            continue
        override['region'] = region
        # Point the editor at the image and select the requested tile so
        # the save operator writes the right tile.
        space = area.spaces.active
        space.image = image
        if hasattr(space, "image_user"):
            space.image_user.tile = int(tile_number)
        # NOTE(review): passing an override dict positionally to an operator
        # was deprecated in Blender 3.2 and removed in 4.0 -- confirm the
        # supported Blender version or migrate to Context.temp_override().
        bpy.ops.image.save(override, filepath=filepath)
        return
    # Fallback: attempt to set filepath and invoke save without override
    image.filepath = filepath
    image.save()
# Must register the new dialog class as well
classes = (
    AUTOMAT_OT_summary_dialog,
    AutoMatExtractor,
)


def register():
    """Register every add-on class with Blender."""
    for klass in classes:
        bpy.utils.register_class(klass)


def unregister():
    """Unregister classes in reverse order of registration."""
    for klass in reversed(classes):
        bpy.utils.unregister_class(klass)
@@ -0,0 +1,14 @@
import bpy
class BST_FreeGPU(bpy.types.Operator):
    """Free the GPU (OpenGL) texture memory held by material images."""
    bl_idname = "bst.free_gpu"
    bl_label = "Free VRAM"
    bl_description = "Unallocate all material images from VRAM"

    def execute(self, context):
        # Collect the unique images first: images are frequently shared
        # between materials/nodes, and the original code called gl_free()
        # once per referencing node instead of once per image.
        images = set()
        for mat in bpy.data.materials:
            if mat.use_nodes:
                for node in mat.node_tree.nodes:
                    if hasattr(node, 'image') and node.image:
                        images.add(node.image)
        for img in images:
            img.gl_free()
        return {"FINISHED"}
@@ -0,0 +1,29 @@
import bpy
class NoSubdiv(bpy.types.Operator):
    """Remove all subdivision surface modifiers from objects"""
    bl_idname = "bst.no_subdiv"
    bl_label = "No Subdiv"
    bl_options = {'REGISTER', 'UNDO'}

    only_selected: bpy.props.BoolProperty(
        name="Only Selected Objects",
        description="Apply only to selected objects",
        default=True
    )

    def execute(self, context):
        # Work on either the current selection or every object in the file.
        targets = context.selected_objects if self.only_selected else bpy.data.objects
        removed_count = 0
        for obj in targets:
            # Snapshot the SUBSURF modifiers before removing, since
            # removing mutates obj.modifiers.
            for mod in [m for m in obj.modifiers if m.type == 'SUBSURF']:
                obj.modifiers.remove(mod)
                removed_count += 1
        self.report({'INFO'}, f"Subdivision Surface modifiers removed from {'selected' if self.only_selected else 'all'} objects. ({removed_count} removed)")
        return {'FINISHED'}
@@ -0,0 +1,513 @@
import bpy
import re
class RENAME_OT_summary_dialog(bpy.types.Operator):
    """Show rename operation summary"""
    bl_idname = "bst.rename_summary_dialog"
    bl_label = "Rename Summary"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Properties to store summary data; the caller fills these in when
    # invoking the operator with 'INVOKE_DEFAULT'.
    total_selected: bpy.props.IntProperty(default=0)
    renamed_count: bpy.props.IntProperty(default=0)
    shared_count: bpy.props.IntProperty(default=0)
    unused_count: bpy.props.IntProperty(default=0)
    cc3iid_count: bpy.props.IntProperty(default=0)
    flatcolor_count: bpy.props.IntProperty(default=0)
    already_correct_count: bpy.props.IntProperty(default=0)
    unrecognized_suffix_count: bpy.props.IntProperty(default=0)
    # Newline-separated "old -> new" lines for the detailed log section.
    rename_details: bpy.props.StringProperty(default="")

    def draw(self, context):
        """Lay out the summary statistics and the per-image rename log."""
        layout = self.layout
        # Title
        layout.label(text="Rename by Material - Summary", icon='INFO')
        layout.separator()
        # Statistics box; zero-valued skip categories are hidden entirely.
        box = layout.box()
        col = box.column(align=True)
        col.label(text=f"Total selected images: {self.total_selected}")
        col.label(text=f"Successfully renamed: {self.renamed_count}", icon='CHECKMARK')
        if self.already_correct_count > 0:
            col.label(text=f"Already correctly named: {self.already_correct_count}", icon='CHECKMARK')
        if self.shared_count > 0:
            col.label(text=f"Shared images skipped: {self.shared_count}", icon='RADIOBUT_OFF')
        if self.unused_count > 0:
            col.label(text=f"Unused images skipped: {self.unused_count}", icon='RADIOBUT_OFF')
        if self.cc3iid_count > 0:
            col.label(text=f"CC3 ID textures skipped: {self.cc3iid_count}", icon='RADIOBUT_OFF')
        if self.flatcolor_count > 0:
            col.label(text=f"Flat colors skipped: {self.flatcolor_count}", icon='RADIOBUT_OFF')
        if self.unrecognized_suffix_count > 0:
            col.label(text=f"Unrecognized suffixes skipped: {self.unrecognized_suffix_count}", icon='RADIOBUT_OFF')
        # Show detailed rename information if available
        if self.rename_details:
            layout.separator()
            box = layout.box()
            box.label(text="Renamed Images:", icon='FILE_TEXT')
            # Split the details by lines and show each one
            lines = self.rename_details.split('\n')
            for line in lines[:10]:  # Limit to first 10 to avoid overly long dialogs
                if line.strip():
                    box.label(text=line)
            if len(lines) > 10:
                box.label(text=f"... and {len(lines) - 10} more")

    def execute(self, context):
        # The dialog is informational only; nothing to do on confirm.
        return {'FINISHED'}

    def invoke(self, context, event):
        # invoke_popup renders draw() without OK/Cancel plumbing.
        return context.window_manager.invoke_popup(self, width=500)
class Rename_images_by_mat(bpy.types.Operator):
    """Rename selected images after the single material that uses each one,
    preserving a recognized texture-type suffix (Diffuse, Normal, ...)."""
    bl_idname = "bst.rename_images_by_mat"
    bl_label = "Rename Images by Material"
    bl_description = "Rename selected images based on their material usage, preserving texture type suffixes"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Walk the selected images, renaming each to <Material>_<Suffix>.

        Skips shared, unused, CC3-ID, flat-color, already-correct, and
        unrecognized-suffix images; reports everything in a summary dialog.
        """
        # Get selected images (bst_selected is an add-on-defined flag;
        # images without it are treated as unselected)
        selected_images = [img for img in bpy.data.images if hasattr(img, "bst_selected") and img.bst_selected]
        if not selected_images:
            self.report({'WARNING'}, "No images selected for renaming")
            return {'CANCELLED'}
        # Get image to material mapping
        image_to_materials = self.get_image_material_mapping(selected_images)
        renamed_count = 0
        shared_count = 0
        unused_count = 0
        cc3iid_count = 0  # Track CC3 ID textures
        flatcolor_count = 0  # Track flat color textures
        already_correct_count = 0  # Track images already correctly named
        unrecognized_suffix_count = 0  # Track images with unrecognized suffixes
        renamed_list = []  # Track renamed images for debug
        unrecognized_list = []  # Track images with unrecognized suffixes
        for img in selected_images:
            # Skip CC3 ID textures (ignore case)
            if img.name.lower().startswith('cc3iid'):
                cc3iid_count += 1
                print(f"DEBUG: Skipped CC3 ID texture: {img.name}")
                continue
            # Skip flat color textures (start with #)
            if img.name.startswith('#'):
                flatcolor_count += 1
                print(f"DEBUG: Skipped flat color texture: {img.name}")
                continue
            materials = image_to_materials.get(img.name, [])
            if len(materials) == 0:
                # Unused image - skip
                unused_count += 1
                print(f"DEBUG: Skipped unused image: {img.name}")
                continue
            elif len(materials) == 1:
                # Single material usage - check suffix recognition
                material_name = materials[0]
                suffix = self.extract_texture_suffix(img.name)
                original_name = img.name
                # Skip images with unrecognized suffixes (only if they have a potential suffix pattern)
                if suffix is None and self.has_potential_suffix(img.name):
                    unrecognized_suffix_count += 1
                    unrecognized_list.append(img.name)
                    print(f"DEBUG: Skipped image with unrecognized suffix: {img.name}")
                    continue
                if suffix:
                    # Capitalize the suffix properly
                    capitalized_suffix = self.capitalize_suffix(suffix)
                    expected_name = f"{material_name}_{capitalized_suffix}"
                else:
                    # No suffix detected, use material name only
                    expected_name = material_name
                # Check if the image is already correctly named
                if img.name == expected_name:
                    already_correct_count += 1
                    print(f"DEBUG: Skipped already correctly named: {img.name}")
                    continue
                # Avoid duplicate names
                new_name = self.ensure_unique_name(expected_name)
                img.name = new_name
                renamed_count += 1
                renamed_list.append((original_name, new_name, material_name, capitalized_suffix if suffix else None))
                # Bug fix: the old->new separator had been lost to a garbled
                # arrow character; restored as an explicit "->".
                print(f"DEBUG: Renamed '{original_name}' -> '{new_name}' (Material: {material_name}, Suffix: {capitalized_suffix if suffix else 'none'})")
            else:
                # Shared across multiple materials - skip
                shared_count += 1
                print(f"DEBUG: Skipped shared image: {img.name} (used by {len(materials)} materials: {', '.join(materials[:3])}{'...' if len(materials) > 3 else ''})")
        # Console debug summary (keep for development)
        print(f"\n=== RENAME BY MATERIAL SUMMARY ===")
        print(f"Total selected: {len(selected_images)}")
        print(f"Renamed: {renamed_count}")
        print(f"Already correct (skipped): {already_correct_count}")
        print(f"Shared (skipped): {shared_count}")
        print(f"Unused (skipped): {unused_count}")
        print(f"CC3 ID textures (skipped): {cc3iid_count}")
        print(f"Flat colors (skipped): {flatcolor_count}")
        print(f"Unrecognized suffixes (skipped): {unrecognized_suffix_count}")
        if renamed_list:
            print(f"\nDetailed rename log:")
            for original, new, material, suffix in renamed_list:
                suffix_info = f" (suffix: {suffix})" if suffix else " (no suffix)"
                print(f"  '{original}' -> '{new}' for material '{material}'{suffix_info}")
        if unrecognized_list:
            print(f"\nImages with unrecognized suffixes:")
            for img_name in unrecognized_list:
                print(f"  '{img_name}'")
        print(f"===================================\n")
        # Show popup summary dialog
        self.show_summary_dialog(context, len(selected_images), renamed_count, shared_count, unused_count, cc3iid_count, flatcolor_count, already_correct_count, unrecognized_suffix_count, renamed_list)
        return {'FINISHED'}

    def show_summary_dialog(self, context, total_selected, renamed_count, shared_count, unused_count, cc3iid_count, flatcolor_count, already_correct_count, unrecognized_suffix_count, renamed_list):
        """Show a popup dialog with the rename summary"""
        # Prepare detailed rename information for display
        details_text = ""
        if renamed_list:
            for original, new, material, suffix in renamed_list:
                suffix_info = f" ({suffix})" if suffix else ""
                # Bug fix: restore the "->" separator lost to encoding.
                details_text += f"'{original}' -> '{new}'{suffix_info}\n"
        # Invoke the summary dialog (return value discarded; the previous
        # unused `dialog =` assignment was removed).
        bpy.ops.bst.rename_summary_dialog('INVOKE_DEFAULT',
            total_selected=total_selected,
            renamed_count=renamed_count,
            shared_count=shared_count,
            unused_count=unused_count,
            cc3iid_count=cc3iid_count,
            flatcolor_count=flatcolor_count,
            already_correct_count=already_correct_count,
            unrecognized_suffix_count=unrecognized_suffix_count,
            rename_details=details_text.strip())

    def get_image_material_mapping(self, images):
        """Create mapping of image names to materials that use them"""
        image_to_materials = {}
        # Initialize mapping
        for img in images:
            image_to_materials[img.name] = []
        # Check all materials for image usage
        for material in bpy.data.materials:
            if not material.use_nodes:
                continue
            material_images = set()
            # Find all image texture nodes in this material
            for node in material.node_tree.nodes:
                if node.type == 'TEX_IMAGE' and node.image:
                    material_images.add(node.image.name)
            # Add this material to each image's usage list
            for img_name in material_images:
                if img_name in image_to_materials:
                    image_to_materials[img_name].append(material.name)
        return image_to_materials

    def extract_texture_suffix(self, name):
        """Extract texture type suffix from image name (case-insensitive).

        Returns the lowercase suffix string, a numeric suffix ("01"), or
        None when no known suffix pattern matches.
        """
        # Comprehensive list of texture suffixes
        suffixes = [
            # Standard PBR suffixes
            'diffuse', 'basecolor', 'base_color', 'albedo', 'color', 'col',
            'normal', 'norm', 'nrm', 'bump',
            'roughness', 'rough', 'rgh',
            'metallic', 'metal', 'mtl',
            'specular', 'spec', 'spc',
            'ao', 'ambient_occlusion', 'ambientocclusion', 'occlusion',
            'gradao',
            'height', 'displacement', 'disp', 'displace',
            'opacity', 'alpha', 'mask',
            'emission', 'emissive', 'emit',
            'subsurface', 'sss', 'transmission',
            # Character Creator / iClone suffixes
            'base', 'diffusemap', 'normalmap', 'roughnessmap', 'metallicmap',
            'aomap', 'opacitymap', 'emissionmap', 'heightmap', 'displacementmap',
            'detail_normal', 'detail_diffuse', 'detail_mask',
            'blend', 'id', 'cavity', 'curvature', 'transmap', 'rgbamask', 'sssmap', 'micronmask',
            'bcbmap', 'mnaomask', 'specmask', 'micron', 'cfulcmask', 'nmuilmask', 'nbmap', 'enmask', 'blend_multiply',
            # Hair-related compound suffixes (no spaces)
            'hairflowmap', 'hairidmap', 'hairrootmap', 'hairdepthmap',
            'flowmap', 'idmap', 'rootmap', 'depthmap',
            # Wrinkle map suffixes (Character Creator)
            'wrinkle_normal1', 'wrinkle_normal2', 'wrinkle_normal3',
            'wrinkle_roughness1', 'wrinkle_roughness2', 'wrinkle_roughness3',
            'wrinkle_diffuse1', 'wrinkle_diffuse2', 'wrinkle_diffuse3',
            'wrinkle_mask1', 'wrinkle_mask2', 'wrinkle_mask3',
            'wrinkle_flow1', 'wrinkle_flow2', 'wrinkle_flow3',
            # Character Creator pack suffixes (with spaces)
            'flow pack', 'msmnao pack', 'roughness pack', 'sstm pack',
            'flow_pack', 'msmnao_pack', 'roughness_pack', 'sstm_pack',
            # Hair-related multi-word suffixes (spaces)
            'hair flow map', 'hair id map', 'hair root map', 'hair depth map',
            'flow map', 'id map', 'root map', 'depth map',
            # Additional common variations
            'tex', 'map', 'img', 'texture',
            'd', 'n', 'r', 'm', 's', 'a', 'h', 'o', 'e'  # Single letter abbreviations
        ]
        # Remove file extension first
        base_name = re.sub(r'\.[^.]+$', '', name)
        # Sort suffixes by length (longest first) to prioritize more specific matches
        sorted_suffixes = sorted(suffixes, key=len, reverse=True)
        # First, try to find multi-word suffixes with spaces (case-insensitive)
        for suffix in sorted_suffixes:
            if ' ' in suffix:  # Multi-word suffix
                # Pattern: ends with space + suffix
                pattern = rf'\s+({re.escape(suffix)})$'
                match = re.search(pattern, base_name, re.IGNORECASE)
                if match:
                    return match.group(1).lower()
                # Pattern: ends with suffix (no space separator, but exact match)
                if base_name.lower().endswith(suffix.lower()) and len(base_name) > len(suffix):
                    # Check if there's a word boundary before the suffix
                    prefix_end = len(base_name) - len(suffix)
                    if prefix_end > 0 and base_name[prefix_end - 1] in ' _-':
                        return suffix.lower()
        # Then try single-word suffixes with traditional separators
        for suffix in sorted_suffixes:
            if ' ' not in suffix:  # Single word suffix
                # Pattern: ends with _suffix or -suffix or .suffix
                pattern = rf'[._-]({re.escape(suffix)})$'
                match = re.search(pattern, base_name, re.IGNORECASE)
                if match:
                    return match.group(1).lower()
        # Check for numeric suffixes (like _01, _02, etc.)
        numeric_match = re.search(r'[._-](\d+)$', base_name)
        if numeric_match:
            return numeric_match.group(1)
        return None

    def ensure_unique_name(self, proposed_name):
        """Ensure the proposed name is unique among all images"""
        if proposed_name not in bpy.data.images:
            return proposed_name
        # If name exists, add numerical suffix
        counter = 1
        while f"{proposed_name}.{counter:03d}" in bpy.data.images:
            counter += 1
        return f"{proposed_name}.{counter:03d}"

    def capitalize_suffix(self, suffix):
        """Properly capitalize texture type suffixes with correct formatting"""
        # Dictionary of common texture suffixes with proper capitalization.
        # Bug fix: the 'transmap' key appeared twice; the duplicate entry
        # was removed (Python keeps only the last one anyway).
        suffix_mapping = {
            # Standard PBR suffixes
            'diffuse': 'Diffuse',
            'basecolor': 'BaseColor',
            'base_color': 'BaseColor',
            'albedo': 'Albedo',
            'color': 'Color',
            'col': 'Color',
            'normal': 'Normal',
            'norm': 'Normal',
            'nrm': 'Normal',
            'bump': 'Bump',
            'roughness': 'Roughness',
            'rough': 'Roughness',
            'rgh': 'Roughness',
            'metallic': 'Metallic',
            'metal': 'Metallic',
            'mtl': 'Metallic',
            'specular': 'Specular',
            'spec': 'Specular',
            'spc': 'Specular',
            'ao': 'AO',
            'ambient_occlusion': 'AmbientOcclusion',
            'ambientocclusion': 'AmbientOcclusion',
            'occlusion': 'Occlusion',
            'gradao': 'GradAO',
            'height': 'Height',
            'displacement': 'Displacement',
            'disp': 'Displacement',
            'displace': 'Displacement',
            'opacity': 'Opacity',
            'alpha': 'Alpha',
            'mask': 'Mask',
            'transmap': 'TransMap',
            'emission': 'Emission',
            'emissive': 'Emission',
            'emit': 'Emission',
            'subsurface': 'Subsurface',
            'sss': 'SSS',
            'transmission': 'Transmission',
            # Character Creator / iClone suffixes
            'base': 'Base',
            'diffusemap': 'DiffuseMap',
            'normalmap': 'NormalMap',
            'roughnessmap': 'RoughnessMap',
            'metallicmap': 'MetallicMap',
            'aomap': 'AOMap',
            'opacitymap': 'OpacityMap',
            'emissionmap': 'EmissionMap',
            'heightmap': 'HeightMap',
            'displacementmap': 'DisplacementMap',
            'detail_normal': 'DetailNormal',
            'detail_diffuse': 'DetailDiffuse',
            'detail_mask': 'DetailMask',
            'blend': 'Blend',
            'id': 'ID',
            'cavity': 'Cavity',
            'curvature': 'Curvature',
            'rgbamask': 'RGBAMask',
            'sssmap': 'SSSMap',
            'micronmask': 'MicroNMask',
            'bcbmap': 'BCBMap',
            'mnaomask': 'MNAOMask',
            'specmask': 'SpecMask',
            'micron': 'MicroN',
            'cfulcmask': 'CFULCMask',
            'nmuilmask': 'NMUILMask',
            'nbmap': 'NBMap',
            'enmask': 'ENMask',
            'blend_multiply': 'Blend_Multiply',
            # Hair-related compound suffixes (no spaces)
            'hairflowmap': 'HairFlowMap',
            'hairidmap': 'HairIDMap',
            'hairrootmap': 'HairRootMap',
            'hairdepthmap': 'HairDepthMap',
            'flowmap': 'FlowMap',
            'idmap': 'IDMap',
            'rootmap': 'RootMap',
            'depthmap': 'DepthMap',
            # Wrinkle map suffixes (Character Creator)
            'wrinkle_normal1': 'Wrinkle_Normal1',
            'wrinkle_normal2': 'Wrinkle_Normal2',
            'wrinkle_normal3': 'Wrinkle_Normal3',
            'wrinkle_roughness1': 'Wrinkle_Roughness1',
            'wrinkle_roughness2': 'Wrinkle_Roughness2',
            'wrinkle_roughness3': 'Wrinkle_Roughness3',
            'wrinkle_diffuse1': 'Wrinkle_Diffuse1',
            'wrinkle_diffuse2': 'Wrinkle_Diffuse2',
            'wrinkle_diffuse3': 'Wrinkle_Diffuse3',
            'wrinkle_mask1': 'Wrinkle_Mask1',
            'wrinkle_mask2': 'Wrinkle_Mask2',
            'wrinkle_mask3': 'Wrinkle_Mask3',
            'wrinkle_flow1': 'Wrinkle_Flow1',
            'wrinkle_flow2': 'Wrinkle_Flow2',
            'wrinkle_flow3': 'Wrinkle_Flow3',
            # Character Creator pack suffixes (with spaces)
            'flow pack': 'Flow Pack',
            'msmnao pack': 'MSMNAO Pack',
            'roughness pack': 'Roughness Pack',
            'sstm pack': 'SSTM Pack',
            'flow_pack': 'Flow_Pack',
            'msmnao_pack': 'MSMNAO_Pack',
            'roughness_pack': 'Roughness_Pack',
            'sstm_pack': 'SSTM_Pack',
            # Hair-related multi-word suffixes
            'hair flow map': 'HairFlowMap',
            'hair id map': 'HairIDMap',
            'hair root map': 'HairRootMap',
            'hair depth map': 'HairDepthMap',
            'flow map': 'FlowMap',
            'id map': 'IDMap',
            'root map': 'RootMap',
            'depth map': 'DepthMap',
            # Additional common variations
            'tex': 'Texture',
            'map': 'Map',
            'img': 'Image',
            'texture': 'Texture',
            # Single letter abbreviations
            'd': 'Diffuse',
            'n': 'Normal',
            'r': 'Roughness',
            'm': 'Metallic',
            's': 'Specular',
            'a': 'Alpha',
            'h': 'Height',
            'o': 'Occlusion',
            'e': 'Emission'
        }
        # Get the proper capitalization from mapping, or capitalize first letter as fallback
        return suffix_mapping.get(suffix.lower(), suffix.capitalize())

    def has_potential_suffix(self, name):
        """Check if the image name has a potential suffix pattern that we should try to recognize"""
        # Remove file extension first
        base_name = re.sub(r'\.[^.]+$', '', name)
        # Check for common suffix patterns: _something, -something, .something, or space something
        suffix_patterns = [
            r'[._-][a-zA-Z0-9]+$',  # Underscore, dot, or dash followed by alphanumeric
            r'\s+[a-zA-Z0-9\s]+$',  # Space followed by alphanumeric (for multi-word suffixes)
        ]
        for pattern in suffix_patterns:
            if re.search(pattern, base_name):
                return True
        return False
# Registration classes - need to register both operators
classes = (
    RENAME_OT_summary_dialog,
    Rename_images_by_mat,
)


def register():
    """Register both operators with Blender."""
    for klass in classes:
        bpy.utils.register_class(klass)


def unregister():
    """Unregister in reverse order of registration."""
    for klass in reversed(classes):
        bpy.utils.unregister_class(klass)
@@ -0,0 +1,87 @@
import bpy
class ConvertRelationsToConstraint(bpy.types.Operator):
    """Convert regular parenting to Child Of constraints for all selected objects"""
    bl_idname = "bst.convert_relations_to_constraint"
    bl_label = "Convert Relations to Constraint"
    bl_description = "Convert regular parenting relationships to Child Of constraints for selected objects"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Delegate the actual work to the module-level helper.
        converted = convert_relations_to_constraint()
        if converted:
            self.report({'INFO'}, f"Converted {converted} objects to Child Of constraints")
        else:
            self.report({'WARNING'}, "No objects with parents found in selection")
        return {'FINISHED'}
def convert_relations_to_constraint():
    """Convert regular parenting to Child Of constraints for all selected objects.

    Returns the number of objects converted (0 when nothing was selected
    or no selected object had a parent).
    """
    # Get all selected objects
    selected_objects = bpy.context.selected_objects
    if not selected_objects:
        print("No objects selected!")
        return 0
    print(f"Converting parenting to Child Of constraints for {len(selected_objects)} objects...")
    converted_count = 0
    for obj in selected_objects:
        # Check if object has a parent
        if obj.parent is None:
            print(f"Skipping {obj.name}: No parent found")
            continue
        # Store bone information if parented to a bone
        parent_bone = obj.parent_bone if obj.parent_bone else None
        bone_info = f" (bone: {parent_bone})" if parent_bone else ""
        print(f"Processing {obj.name} -> {obj.parent.name}{bone_info}")
        # Store original parent and current world matrix so the object's
        # world-space transform can be restored after re-parenting.
        original_parent = obj.parent
        world_matrix = obj.matrix_world.copy()
        # Remove the parent relationship
        obj.parent = None
        obj.parent_bone = ""  # Clear the bone reference
        # Add Child Of constraint
        child_of_constraint = obj.constraints.new(type='CHILD_OF')
        child_of_constraint.name = f"Child_Of_{original_parent.name}"
        child_of_constraint.target = original_parent
        # Transfer bone information to constraint subtarget
        if parent_bone:
            child_of_constraint.subtarget = parent_bone
            print(f"  ✓ Transferred bone target: {parent_bone}")
        # Set the inverse matrix properly to maintain world position
        # This is equivalent to clicking "Set Inverse" in the UI
        # NOTE(review): for bone subtargets this uses only the parent
        # *object* matrix, not the bone's pose matrix — the world-matrix
        # restore below compensates, but confirm against a bone-parented rig.
        child_of_constraint.inverse_matrix = original_parent.matrix_world.inverted()
        # Restore the original world position
        obj.matrix_world = world_matrix
        # Set the constraint to be active
        child_of_constraint.influence = 1.0
        converted_count += 1
        print(f"  ✓ Converted {obj.name} to Child Of constraint")
    print(f"\nConversion complete! Converted {converted_count} objects.")
    # Report remaining parented objects (should be empty unless an object
    # was skipped or selection changed mid-run)
    remaining_parented = [obj for obj in bpy.context.selected_objects if obj.parent is not None]
    if remaining_parented:
        print(f"\nObjects that still have parents (not converted):")
        for obj in remaining_parented:
            print(f"  - {obj.name} -> {obj.parent.name}")
    return converted_count


# Run the conversion when executed directly from Blender's text editor
if __name__ == "__main__":
    convert_relations_to_constraint()
@@ -0,0 +1,47 @@
import bpy
from bpy.types import Operator
class CreateOrthoCamera(Operator):
    """Create an orthographic camera with predefined settings"""
    bl_idname = "bst.create_ortho_camera"
    bl_label = "Create Ortho Camera"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Add a camera; it becomes the active object.
        bpy.ops.object.camera_add()
        cam = context.active_object

        # Orthographic projection with a fixed framing scale.
        cam.data.type = 'ORTHO'
        cam.data.ortho_scale = 1.8

        # Place at x=0, y=-2m, z=1m, rotated 90 degrees around X
        # (1.5708 rad) so it faces the origin.
        cam.location = (0, -2, 1)
        cam.rotation_euler = (1.5708, 0, 0)

        # Ensure a "Camera" collection exists.
        target = bpy.data.collections.get("Camera")
        if target is None:
            target = bpy.data.collections.new("Camera")
            context.scene.collection.children.link(target)

        # Move the camera into it: unlink everywhere, then link once.
        for coll in cam.users_collection:
            coll.objects.unlink(cam)
        target.objects.link(cam)
        return {'FINISHED'}
def register():
    """Register the CreateOrthoCamera operator with Blender."""
    bpy.utils.register_class(CreateOrthoCamera)


def unregister():
    """Remove the CreateOrthoCamera operator from Blender."""
    bpy.utils.unregister_class(CreateOrthoCamera)


# Allow running directly from Blender's text editor
if __name__ == "__main__":
    register()
@@ -0,0 +1,39 @@
import bpy
class DeleteSingleKeyframeActions(bpy.types.Operator):
    """Delete actions that have no keyframes, only one keyframe, or all keyframes on the same frame"""
    bl_idname = "bst.delete_single_keyframe_actions"
    bl_label = "Delete Single Keyframe Actions"
    bl_description = "Delete actions with unwanted keyframe patterns (no keyframes, single keyframe, or all keyframes on same frame)"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        doomed = []
        for action in bpy.data.actions:
            # Collect the distinct frames touched by any fcurve keyframe.
            frames = {kf.co[0] for fc in action.fcurves for kf in fc.keyframe_points}
            # <= 1 distinct frame covers all three unwanted patterns:
            # zero keyframes, a single keyframe, and many keyframes that
            # all sit on the same frame.
            if len(frames) <= 1:
                doomed.append(action)
        deleted_count = 0
        for action in doomed:
            print(f"Deleting action '{action.name}' (unwanted keyframe pattern)")
            bpy.data.actions.remove(action)
            deleted_count += 1
        self.report({'INFO'}, f"Deleted {deleted_count} unwanted actions")
        return {'FINISHED'}
@@ -0,0 +1,157 @@
import bpy
class MATERIAL_USERS_OT_summary_dialog(bpy.types.Operator):
    """Show material users analysis in a popup dialog"""
    bl_idname = "bst.material_users_summary_dialog"
    bl_label = "Material Users Summary"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Properties to store summary data; the multi-item lists are packed
    # into '|'-separated strings by the caller.
    material_name: bpy.props.StringProperty(default="")
    users_count: bpy.props.IntProperty(default=0)
    fake_user: bpy.props.BoolProperty(default=False)
    object_users: bpy.props.StringProperty(default="")
    node_users: bpy.props.StringProperty(default="")
    material_node_users: bpy.props.StringProperty(default="")
    total_user_count: bpy.props.IntProperty(default=0)

    def draw(self, context):
        """Lay out the user counts and the three user-category lists."""
        layout = self.layout
        # Title
        layout.label(text=f"Material Users - '{self.material_name}'", icon='MATERIAL')
        layout.separator()
        # Basic info box
        box = layout.box()
        col = box.column(align=True)
        col.label(text=f"Blender Users Count: {self.users_count}")
        col.label(text=f"Fake User: {'Yes' if self.fake_user else 'No'}")
        col.label(text=f"Total Found Users: {self.total_user_count}")
        layout.separator()
        # Object users section
        if self.object_users:
            layout.label(text="Object Users:", icon='OBJECT_DATA')
            objects_box = layout.box()
            objects_col = objects_box.column(align=True)
            # Unpack the '|'-separated list, skipping empty fragments.
            for obj_name in self.object_users.split('|'):
                if obj_name.strip():
                    objects_col.label(text=f"{obj_name}", icon='RIGHTARROW_THIN')
        else:
            layout.label(text="Object Users: None", icon='OBJECT_DATA')
        # Node tree users section
        if self.node_users:
            layout.separator()
            layout.label(text="Node Tree Users:", icon='NODETREE')
            nodes_box = layout.box()
            nodes_col = nodes_box.column(align=True)
            for node_ref in self.node_users.split('|'):
                if node_ref.strip():
                    nodes_col.label(text=f"{node_ref}", icon='RIGHTARROW_THIN')
        # Material node tree users section
        if self.material_node_users:
            layout.separator()
            layout.label(text="Material Node Tree Users:", icon='MATERIAL')
            mat_nodes_box = layout.box()
            mat_nodes_col = mat_nodes_box.column(align=True)
            for mat_node_ref in self.material_node_users.split('|'):
                if mat_node_ref.strip():
                    mat_nodes_col.label(text=f"{mat_node_ref}", icon='RIGHTARROW_THIN')
        layout.separator()

    def execute(self, context):
        # Informational dialog only; nothing to do on confirm.
        return {'FINISHED'}

    def invoke(self, context, event):
        # invoke_popup shows draw() without OK/Cancel plumbing.
        return context.window_manager.invoke_popup(self, width=500)
class FindMaterialUsers(bpy.types.Operator):
"""Find all users of a specified material and display detailed information"""
bl_idname = "bst.find_material_users"
bl_label = "Find Material Users"
bl_description = "Find and display all users of a specified material"
bl_options = {'REGISTER'}
material_name: bpy.props.StringProperty(
name="Material",
description="Name of the material to analyze",
default="",
)
def draw(self, context):
layout = self.layout
# Set the material if we have a name
if self.material_name and self.material_name in bpy.data.materials:
context.scene.bst_temp_material = bpy.data.materials[self.material_name]
# Use template_ID to get the proper material selector (without new button)
layout.template_ID(context.scene, "bst_temp_material", text="Material")
def execute(self, context):
# Get the material from the temp property
material = getattr(context.scene, 'bst_temp_material', None)
if not material:
self.report({'ERROR'}, "No material selected")
return {'CANCELLED'}
# Update our material_name property
self.material_name = material.name
# Check objects
object_users = []
for obj in bpy.data.objects:
if obj.material_slots:
for slot in obj.material_slots:
if slot.material == material:
object_users.append(obj.name)
break
# Check node groups more thoroughly
node_users = []
for node_tree in bpy.data.node_groups:
for node in node_tree.nodes:
# Check material nodes
if hasattr(node, 'material') and node.material == material:
node_users.append(f"{node_tree.name}.{node.name}")
# Check material input sockets
for input_socket in node.inputs:
if hasattr(input_socket, 'default_value') and hasattr(input_socket.default_value, 'name'):
if input_socket.default_value.name == material.name:
node_users.append(f"{node_tree.name}.{node.name}.{input_socket.name}")
# Check material node trees
material_node_users = []
for mat in bpy.data.materials:
if mat.node_tree:
for node in mat.node_tree.nodes:
if hasattr(node, 'material') and node.material == material:
material_node_users.append(f"{mat.name}.{node.name}")
# Show summary dialog
self.show_summary_dialog(context, material, object_users, node_users, material_node_users)
return {'FINISHED'}
def show_summary_dialog(self, context, material, object_users, node_users, material_node_users):
    """Show the material users summary in a popup dialog"""
    # Total across all three user categories.
    total = len(object_users) + len(node_users) + len(material_node_users)
    # Invoke the summary dialog operator; the lists are flattened to
    # '|'-separated strings because operator properties cannot hold
    # Python lists.
    bpy.ops.bst.material_users_summary_dialog(
        'INVOKE_DEFAULT',
        material_name=material.name,
        users_count=material.users,
        fake_user=material.use_fake_user,
        object_users='|'.join(object_users),
        node_users='|'.join(node_users),
        material_node_users='|'.join(material_node_users),
        total_user_count=total,
    )
def invoke(self, context, event):
    # Show the operator's draw() UI as a properties dialog before execute().
    return context.window_manager.invoke_props_dialog(self)
@@ -0,0 +1,253 @@
import bpy
import bmesh
from mathutils import Color
def rgb_to_hex(r, g, b, a=1.0):
    """Convert RGBA channel values (0.0-1.0 range) to a hex color code.

    Each channel is clamped to [0.0, 1.0] before scaling so that HDR pixel
    data (floats above 1.0) or negative values cannot produce malformed
    codes such as "#12C0000" — Blender float images are not limited to the
    0-1 range.

    Returns:
        str: "#RRGGBB" when alpha rounds to fully opaque (255),
        otherwise "#RRGGBBAA".
    """
    def _to_byte(value):
        # Clamp, scale to 0-255, and round to the nearest integer.
        return int(round(min(1.0, max(0.0, value)) * 255))

    r_int = _to_byte(r)
    g_int = _to_byte(g)
    b_int = _to_byte(b)
    a_int = _to_byte(a)
    # If alpha is full (255), use RGB format, otherwise use RGBA
    if a_int == 255:
        return f"#{r_int:02X}{g_int:02X}{b_int:02X}"
    return f"#{r_int:02X}{g_int:02X}{b_int:02X}{a_int:02X}"
def is_flat_color_image_efficient(image, max_pixels_to_check=10000):
    """
    Efficiently check if an image has all pixels of the same color.
    Args:
        image: The image to check
        max_pixels_to_check: Maximum number of pixels to check (for performance)
    Returns:
        tuple: (is_flat, color) where is_flat is bool and color is RGBA tuple
    """
    # Bail out early on a missing image or an image without pixel data.
    if not image or not image.pixels:
        print(f" DEBUG: No image or no pixels")
        return False, None
    data = list(image.pixels)
    if not data:
        print(f" DEBUG: Empty pixel array")
        return False, None
    # Only RGB (3) and RGBA (4) layouts are supported.
    channels = image.channels
    if channels not in (3, 4):
        print(f" DEBUG: Unsupported channels: {channels}")
        return False, None
    # The first pixel is the reference every sampled pixel is compared to.
    reference = data[:channels]
    print(f" DEBUG: Reference color: {reference}")
    total_pixels = len(data) // channels
    print(f" DEBUG: Total pixels: {total_pixels}")
    pixels_to_check = min(total_pixels, max_pixels_to_check)
    if total_pixels <= max_pixels_to_check:
        # Small image: exhaustive scan.
        step = 1
        print(f" DEBUG: Checking all {total_pixels} pixels")
    else:
        # Large image: evenly spaced sampling.
        step = total_pixels // pixels_to_check
        print(f" DEBUG: Sampling {pixels_to_check} pixels with step {step}")
    checked_count = 0
    for index in range(0, total_pixels, step):
        offset = index * channels
        candidate = data[offset:offset + channels]
        checked_count += 1
        # Exact (bitwise) comparison against the reference pixel.
        for channel in range(channels):
            if candidate[channel] != reference[channel]:
                print(f" DEBUG: Pixel {index} differs at channel {channel}: {candidate[channel]} vs {reference[channel]}")
                print(f" DEBUG: Checked {checked_count} pixels before finding difference")
                return False, None
    print(f" DEBUG: All {checked_count} checked pixels are identical")
    # Every sampled pixel matched; report the flat color as RGBA.
    if channels == 3:
        return True, (reference[0], reference[1], reference[2], 1.0)
    return True, tuple(reference)
def is_flat_color_image(image):
    """Check if an image has all pixels of the same color."""
    # Convenience wrapper around the sampling-based check with its
    # default 10k-pixel cap.
    cap = 10000
    return is_flat_color_image_efficient(image, max_pixels_to_check=cap)
def safe_rename_image(image, new_name):
    """Safely rename an image datablock, working around context restrictions.

    Tries progressively more forceful strategies and returns True on the
    first one that succeeds, False when all fail.

    Fixes vs. the original: when no OUTLINER area existed, the second
    strategy's loop completed without raising and the function implicitly
    returned None without ever attempting the remaining strategies; and
    bare ``except:`` clauses (which also swallow KeyboardInterrupt /
    SystemExit) are narrowed to ``except Exception``.
    """
    # Strategy 1: direct assignment (works in most contexts).
    try:
        image.name = new_name
        return True
    except Exception:
        pass

    # Strategy 2: assignment under a temp_override of an Outliner area.
    try:
        for area in bpy.context.screen.areas:
            if area.type == 'OUTLINER':
                with bpy.context.temp_override(area=area):
                    image.name = new_name
                return True
        # No Outliner area found — fall through to the next strategy.
    except Exception:
        pass

    # Strategy 3: assignment under a copied context with edit_image set.
    try:
        bpy.context.view_layer.objects.active = None
        override_context = bpy.context.copy()
        override_context['edit_image'] = image
        with bpy.context.temp_override(**override_context):
            image.name = new_name
        return True
    except Exception:
        pass

    # Strategy 4: direct assignment bracketed by view-layer updates.
    try:
        bpy.context.view_layer.update()
        image.name = new_name
        bpy.context.view_layer.update()
        return True
    except Exception:
        return False
def rename_flat_color_textures():
    """Main function to find and rename flat color textures."""
    renamed_count = 0
    failed_count = 0
    processed_count = 0
    print("Scanning for flat color textures...")
    # First pass: collect (image, old name, hex name, color) tuples so the
    # actual renames happen in one batch after scanning finishes.
    pending = []
    for image in bpy.data.images:
        processed_count += 1
        # Skip images that expose no pixel data at all.
        if not hasattr(image, 'pixels') or len(image.pixels) == 0:
            print(f"Skipping '{image.name}': No pixel data available")
            continue
        flat, rgba = is_flat_color_image(image)
        if not (flat and rgba):
            print(f"'{image.name}': Not a flat color texture")
            continue
        current_name = image.name
        if current_name.startswith('#'):
            # Already hex-named, presumably from a previous run; leave it.
            print(f"Skipping '{current_name}': Already appears to be hex-named")
        else:
            pending.append((image, current_name, rgb_to_hex(*rgba), rgba))
    # Second pass: apply the queued renames.
    print(f"\nPerforming {len(pending)} rename operation(s)...")
    for image, original_name, hex_color, color in pending:
        if safe_rename_image(image, hex_color):
            print(f"Renamed '{original_name}' to '{hex_color}' (Color: RGBA{color})")
            renamed_count += 1
        else:
            print(f"Failed to rename '{original_name}' to '{hex_color}' - Context restriction")
            failed_count += 1
    print(f"\nSummary:")
    print(f"Processed: {processed_count} images")
    print(f"Successfully renamed: {renamed_count} flat color textures")
    if failed_count > 0:
        print(f"Failed to rename: {failed_count} textures (try running from Python Console instead)")
    return renamed_count
def reload_image_pixels():
    """Reload pixel data for all file-backed images.

    Useful when images are not yet loaded into memory; images without a
    file source or file path (generated/packed) are skipped.
    """
    print("Reloading pixel data for all images...")
    for image in bpy.data.images:
        if image.source == 'FILE' and image.filepath:
            # Bare "except:" narrowed to Exception so KeyboardInterrupt /
            # SystemExit are no longer swallowed.
            try:
                image.reload()
                print(f"Reloaded: {image.name}")
            except Exception:
                print(f"Failed to reload: {image.name}")
# Alternative function for running in restricted contexts
def print_rename_suggestions():
    """Print suggested renames without actually renaming (for restricted contexts)."""
    print("Scanning for flat color textures (suggestion mode)...")
    # Gather (old name, hex name, color) proposals without touching data.
    proposals = []
    for image in bpy.data.images:
        if not hasattr(image, 'pixels') or len(image.pixels) == 0:
            continue
        flat, rgba = is_flat_color_image(image)
        if flat and rgba and not image.name.startswith('#'):
            proposals.append((image.name, rgb_to_hex(*rgba), rgba))
    if not proposals:
        print("\nNo flat color textures found that need renaming.")
        return
    print(f"\nFound {len(proposals)} flat color texture(s) that could be renamed:")
    print("-" * 60)
    for original_name, hex_color, color in proposals:
        print(f"'{original_name}' -> '{hex_color}' (RGBA{color})")
    print("\nTo actually rename them, run this script from:")
    print("1. Blender's Python Console, or")
    print("2. Command line with: blender file.blend --python script.py")
# Main execution
if __name__ == "__main__":
    print("=" * 50)
    print("Flat Color Texture Renamer")
    print("=" * 50)
    # Optional: Reload images to ensure pixel data is available
    # Uncomment the line below if you want to force reload all images
    # reload_image_pixels()
    # Try to run the renaming process
    try:
        renamed_count = rename_flat_color_textures()
        if renamed_count > 0:
            print(f"\nSuccessfully renamed {renamed_count} flat color texture(s)!")
        else:
            print("\nNo flat color textures found to rename.")
    except Exception as e:
        # Report the underlying error: the original discarded `e`, making
        # genuine bugs indistinguishable from context restrictions.
        print(f"\nContext restriction detected. Running in suggestion mode...")
        print(f"Underlying error: {e}")
        print_rename_suggestions()
    print("Script completed.")

Some files were not shown because too many files have changed in this diff Show More