2026-02-16

+10 -11
@@ -10,14 +10,13 @@ D:\Work\9 iClone\Amazon\
 D:\Amazon\00_external-files\
 N:\1. CHARACTERS\remapping\
 [Recent]
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Assets\Blends\
-!ToDraw
-D:\2.ToDraw\Amazon Projects\
-G:\Amazon\2025\1. CHARACTERS\1. Current\241219_Cartoon\textures\
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\animations\leader\
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\textures\Paul\
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AM\
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Deliverable\Stills\Leader\
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\stills\Leader\
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\stills\
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\
+C:\Users\Nathan\AppData\Local\Temp\
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Assets\Blends\
+C:\Users\Nathan\Downloads\
+\\nas\amazon\2. ASSETS\Media\Motion Graphics\Ae\profiles (for speech bubble)\
+T:\1 BlenderAssets\Amazon\Char\Cartoon3\
+A:\1 Amazon_Active_Projects\1 BlenderAssets\Amazon\Nodes\
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Assets\Blends\textures\Products_1\Black Package\
+A:\1 Amazon_Active_Projects\1 BlenderAssets\Amazon\Char\Cartoon2\AS\
+A:\1 Amazon_Active_Projects\1 BlenderAssets\Amazon\Char\Cartoon3\
@@ -9,7 +9,7 @@
 "location": "F:\\jobs",
 "audience": "users",
 "platform": "windows",
-"shaman_enabled": false
+"shaman_enabled": true
 },
 "job_types": {
 "job_types": [
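Note: "shaman_enabled" toggles Flamenco's Shaman storage system, which places submitted job files in content-addressable shared storage so identical files are uploaded and kept only once across submissions.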
@@ -529,6 +529,139 @@
 "etag": "bbbc0874b45e8f3715c0cf727938604cba6c25cf",
 "description": "OPTIX GPU rendering + extra checkboxes for some experimental features + extra CLI args for Blender"
 },
+{
+"name": "TalkingHeads cycles-optix-gpu-greg-inspect",
+"label": "TalkingHeads Cycles OPTIX GPU",
+"settings": [
+{
+"key": "frames",
+"type": "string",
+"description": "Frame range to render. Examples: '47', '1-30', '3, 5-10, 47-327'",
+"eval": "f'{C.scene.frame_start}-{C.scene.frame_end}'",
+"eval_info": {
+"show_link_button": true,
+"description": "Scene frame range"
+},
+"required": true
+},
+{
+"key": "chunk_size",
+"type": "int32",
+"default": 1.0,
+"description": "Number of frames to render in one Blender render task",
+"visible": "submission"
+},
+{
+"key": "render_output_root",
+"type": "string",
+"description": "Base path where renders are stored, typically the project's Renders folder. If empty, derived automatically.",
+"eval": "__import__('os').path.normpath(__import__('os').path.join(((__import__('re').search(r'^(.*?)[\\/][Bb]lends[\\/]', bpy.data.filepath.replace('\\\\','/')) and __import__('re').search(r'^(.*?)[\\/][Bb]lends[\\/]', bpy.data.filepath.replace('\\\\','/')).group(1)) or __import__('os').path.dirname(bpy.data.filepath)), 'Renders'))",
+"eval_info": {
+"show_link_button": true,
+"description": "Auto-detect the project's Renders folder"
+},
+"required": false,
+"subtype": "dir_path",
+"visible": "submission"
+},
+{
+"key": "use_submodule",
+"type": "bool",
+"default": false,
+"description": "Include a submodule folder under Renders. Turn off to omit submodule entirely.",
+"label": "Use Submodule",
+"required": false,
+"visible": "submission"
+},
+{
+"key": "submodule",
+"type": "string",
+"description": "Optional submodule under Renders (e.g. 'Waterspider B'). If empty, omitted.",
+"eval": "(__import__('os').path.basename(__import__('os').path.dirname(bpy.data.filepath)) if settings.use_submodule else '')",
+"eval_info": {
+"show_link_button": true,
+"description": "Auto-fill with the current .blend file's parent folder"
+},
+"required": false,
+"visible": "submission"
+},
+{
+"key": "render_output_path",
+"type": "string",
+"description": "Final file path of where render output will be saved",
+"editable": false,
+"eval": "((lambda Path, abspath, os_path, settings_obj, blend: str(Path(abspath(settings_obj.render_output_root or '//')) / (((str(settings_obj.submodule or '').strip()) if (settings_obj.use_submodule and str(settings_obj.submodule or '').strip()) else ((os_path.basename(os_path.dirname(bpy.data.filepath))) if settings_obj.use_submodule else ''))) / blend / (blend + '_######')))(__import__('pathlib').Path, __import__('os').path.abspath, __import__('os').path, settings, __import__('os').path.splitext(__import__('os').path.basename(bpy.data.filepath))[0]))",
+"subtype": "file_path"
+},
+{
+"key": "experimental_gp3",
+"type": "bool",
+"description": "Experimental Flag: Grease Pencil 3",
+"label": "Experimental: GPv3",
+"required": false
+},
+{
+"key": "experimental_new_anim",
+"type": "bool",
+"description": "Experimental Flag: New Animation Data-block",
+"label": "Experimental: Baklava",
+"required": false
+},
+{
+"key": "blender_args_before",
+"type": "string",
+"description": "CLI arguments for Blender, placed before the .blend filename",
+"label": "Blender CLI args: Before",
+"required": false
+},
+{
+"key": "blender_args_after",
+"type": "string",
+"description": "CLI arguments for Blender, placed after the .blend filename",
+"label": "After",
+"required": false
+},
+{
+"key": "blendfile",
+"type": "string",
+"description": "Path of the Blend file to render",
+"eval": "bpy.data.filepath",
+"required": true,
+"visible": "web"
+},
+{
+"key": "fps",
+"type": "float",
+"eval": "C.scene.render.fps / C.scene.render.fps_base",
+"visible": "hidden"
+},
+{
+"key": "format",
+"type": "string",
+"eval": "C.scene.render.image_settings.file_format",
+"required": true,
+"visible": "web"
+},
+{
+"key": "image_file_extension",
+"type": "string",
+"description": "File extension used when rendering images",
+"eval": "C.scene.render.file_extension",
+"required": true,
+"visible": "hidden"
+},
+{
+"key": "has_previews",
+"type": "bool",
+"description": "Whether Blender will render preview images.",
+"eval": "C.scene.render.image_settings.use_preview",
+"required": false,
+"visible": "hidden"
+}
+],
+"etag": "1ac1baa75e355de50b11b56dde8118200d2a74b6",
+"description": "OPTIX GPU rendering + extra checkboxes for some experimental features + extra CLI args for Blender"
+},
 {
 "name": "cycles-optix-gpu",
 "label": "Cycles OPTIX GPU",
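Note: the one-line "eval" snippets above are hard to audit. Below is a readable Python sketch of what the "render_output_root" and "render_output_path" expressions compute, assuming (as these snippets do) that bpy is importable and settings holds the submission values; the two helper function names are hypothetical, not part of the job type.

```python
import os
import re
from pathlib import Path

import bpy


def render_output_root() -> str:
    """Walk up from the .blend to the folder containing 'Blends' and use its
    'Renders' sibling; fall back to the .blend's own directory."""
    filepath = bpy.data.filepath.replace("\\", "/")
    m = re.search(r"^(.*?)[\\/][Bb]lends[\\/]", filepath)
    base = (m and m.group(1)) or os.path.dirname(bpy.data.filepath)
    return os.path.normpath(os.path.join(base, "Renders"))


def render_output_path(settings) -> str:
    """Root / optional submodule / blend-name / blend-name_###### frame pattern."""
    blend = os.path.splitext(os.path.basename(bpy.data.filepath))[0]
    submodule = ""
    if settings.use_submodule:
        # An explicit submodule name wins; otherwise use the .blend's parent folder.
        submodule = str(settings.submodule or "").strip() or os.path.basename(
            os.path.dirname(bpy.data.filepath)
        )
    root = Path(os.path.abspath(settings.render_output_root or "//"))
    return str(root / submodule / blend / (blend + "_######"))
```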
@@ -1,4 +1,11 @@
 {
+"save_before_close": true,
+"save_interval": 2,
 "max_save_files": 0,
-"save_interval": 2
+"compress_files": true,
+"autosave_paths": [],
+"relative_to_blend": false,
+"save_images": true,
+"save_sculpt": true,
+"print_saves": true
 }
@@ -11,3 +11,4 @@
 {NVIDIA Corporation/NVIDIA GeForce RTX 4080 SUPER/NVIDIA 581.57}=SUPPORTED
 {NVIDIA Corporation/NVIDIA GeForce RTX 4080 SUPER/PCIe/SSE2/4.6.0 NVIDIA 581.57}=SUPPORTED
 {NVIDIA Corporation/NVIDIA GeForce RTX 4080 SUPER/NVIDIA 591.44}=SUPPORTED
+{NVIDIA Corporation/NVIDIA GeForce RTX 4080 SUPER/NVIDIA 591.74}=SUPPORTED
+30 -30
@@ -1,30 +1,30 @@
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Assets\Blends\non-con_animation 1_induct characters.blend
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\animations\leader\non-con_animation 1_leader.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\Sarah_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\Heidi_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\Paul_v3.3.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\Marvin_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\Kennedy_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AS\Ciara_v3.3.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AM\AM_Tamu_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AM\AM_Glenna_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AM\AM_Toni_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon2\AM\AM_Beth_v3.2.blend
-T:\1 BlenderAssets\Amazon\Char\Cartoon1\Chan_v4.3.blend
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\stills\Leader\Non-Con_15.blend
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\stills\Non-Con_15.blend
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\stills\Leader\Non-Con_6.blend
-A:\1 Amazon_Active_Projects\251216_NonCon_Media\Blends\stills\Non-Con_6.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\SSD_2B.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\SSD_1G.blend
-C:\Users\Nathan\SynologyDrive\work\2025 websitetalkingheads\2025-05-21 NewZoo Rigs Optimization\Charlie_Owl_009.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\SSD_2A.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\SSD_2K.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\SSD_2KA.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\SSD_2G_2H.blend
-A:\1 Amazon_Active_Projects\0 AssetArchive\Amazon\Char\Cartoon1\Kirk_v4.3.blend
-A:\1 Amazon_Active_Projects\251222_Same-Day-Delivery_Updates\Blends\animations\WS-A_1A.blend
-G:\Amazon\2025\250404_Dock-2.5\Blends\animations\Waterspider A\WS-A_1A.blend
-P:\251120_monty-python-crm\blends\castle-wall_shadow-test1.blend
-F:\jobs\2025-12-22-170344.595818-SSD_2J\SSD_2J.flamenco.blend
-F:\jobs\2025-12-22-154849.096298-SSD_1F\SSD_1F.flamenco.blend
+T:\260206_PAE_2026\Blends\animations\PAE_animation 8G.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_animation 8G.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 3C.blend
+C:\Users\Nathan\AppData\Local\Temp\2026-02-16_16-53_PAE_Animation 3C.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_animation 8G_alt.blend
+C:\Users\Nathan\Downloads\PAE_animation 8F.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_animation 8F.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 2B.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 9b.blend
+C:\Users\Nathan\Downloads\PAE_Animation 9b.blend
+A:\1 Amazon_Active_Projects\1 BlenderAssets\Amazon\Char\Cartoon3\Koto_v1.2.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 9a.blend
+C:\Users\Nathan\Downloads\PAE_Animation 1D.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 1D.blend
+C:\Users\Nathan\AppData\Local\Temp\2026-02-16_13-24_PAE_Animation 9b.blend
+C:\Users\Nathan\Downloads\PAE_Animation 11D.blend
+C:\Users\Nathan\Downloads\PAE_Animation 11C.blend
+A:\1 Amazon_Active_Projects\260206_Dock_Unified\Blends\animations\Waterspider B\WS-B_1G.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 10b.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_animation 7A.blend
+C:\Users\Nathan\Downloads\PAE_Animation 5C.blend
+C:\Users\Nathan\Downloads\PAE_Animation 5B.blend
+C:\Users\Nathan\Downloads\PAE_Animation 5A.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Assets\Blends\PAE_area_dock.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 5C.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 5B.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 5A.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 4B.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 3E.blend
+A:\1 Amazon_Active_Projects\260206_PAE_2026\Blends\animations\PAE_Animation 2A.blend
Binary files not shown.
File diffs suppressed because one or more lines are too long.
@@ -4,6 +4,11 @@
 ## [Unreleased]
 
 
+## [0.5.1] - 2026-02-07
+
+- Fix bug caused by a change in Blender 5.0's Python API for creating drivers. It caused the location component of the shake to not animate.
+
+
 ## [0.5.0] - 2025-02-10
 
 - Update code to work properly with the new slotted/layered Action APIs in Blender 4.4. Otherwise functionally identical to v0.4.0.
@@ -31,7 +36,8 @@ To be filled out.
 To be filled out.
 
 
-[Unreleased]: https://github.com/cessen/colorbox/compare/v0.5.0...HEAD
+[Unreleased]: https://github.com/cessen/colorbox/compare/v0.5.1...HEAD
+[0.5.1]: https://github.com/cessen/colorbox/compare/v0.5.0...v0.5.1
 [0.5.0]: https://github.com/cessen/colorbox/compare/v0.4.0...v0.5.0
 [0.4.0]: https://github.com/cessen/colorbox/compare/v0.3.0...v0.4.0
 [0.3.0]: https://github.com/cessen/colorbox/compare/v0.2.0...v0.3.0
@@ -284,9 +284,16 @@ def build_single_shake(camera, shake_item_index, collection, context):
     rot_constraint.mix_mode = 'AFTER'
 
     # Set up the location constraint driver.
-    driver = loc_constraint.driver_add("influence").driver
+    #
+    # Note: we clear the keyframes from the driver's fcurve to dodge some
+    # small-value rounding that Blender does internally when evaluating fcurves.
+    # This way the driver expression evaluation gets used directly, without any
+    # intermediate steps that might interfere.
+    fcurve = loc_constraint.driver_add("influence")
+    fcurve.keyframe_points.clear()
+    driver = fcurve.driver
     driver.type = 'SCRIPTED'
-    driver.expression = "{} * influence * location_scale / unit_scale".format(1.0 / (UNIT_SCALE_MAX * INFLUENCE_MAX * SCALE_MAX))
+    driver.expression = "{} * influence * location_scale / unit_scale * int(\"1\")".format(1.0 / (UNIT_SCALE_MAX * INFLUENCE_MAX * SCALE_MAX))
     if "influence" not in driver.variables:
         var = driver.variables.new()
         var.name = "influence"
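Note: driver_add() returns an F-curve whose .driver the code then configures; clearing keyframe_points removes the auto-generated curve so the expression's result passes through untouched. The appended * int("1") looks odd, but is presumably there to make the expression too complex for Blender's fast "simple expression" evaluator, forcing full Python evaluation. A minimal standalone sketch of the same pattern (the object, constraint, and custom-property names are hypothetical):

```python
import bpy

obj = bpy.data.objects["Camera"]           # hypothetical camera object
obj["shake_influence"] = 1.0               # hypothetical custom property
constraint = obj.constraints["Shake Loc"]  # hypothetical constraint

# driver_add() returns the FCurve; clear its keyframes so the driver's
# expression result is used directly.
fcurve = constraint.driver_add("influence")
fcurve.keyframe_points.clear()

driver = fcurve.driver
driver.type = 'SCRIPTED'
# A call like int("1") is not a "simple expression", so Blender falls back
# to the full Python evaluator instead of its fast built-in one.
driver.expression = 'influence * 1.0 * int("1")'

var = driver.variables.new()
var.name = "influence"
var.targets[0].id = obj
var.targets[0].data_path = '["shake_influence"]'
```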
@@ -310,7 +317,11 @@ def build_single_shake(camera, shake_item_index, collection, context):
     var.targets[0].data_path ='unit_settings.scale_length'
 
     # Set up the rotation constraint driver.
-    driver = rot_constraint.driver_add("influence").driver
+    #
+    # Note: see further-above note for why we clear the keyframes here.
+    fcurve = rot_constraint.driver_add("influence")
+    fcurve.keyframe_points.clear()
+    driver = fcurve.driver
     driver.type = 'SCRIPTED'
     driver.expression = "influence * {}".format(1.0 / INFLUENCE_MAX)
     if "influence" not in driver.variables:
@@ -633,7 +644,7 @@ def register():
 
     # The list of camera shakes active on an camera, along with each shake's parameters.
     bpy.types.Object.camera_shakes = bpy.props.CollectionProperty(type=CameraShakeInstance)
-    bpy.types.Object.camera_shakes_active_index = bpy.props.IntProperty(name="Camera Shake List Active Item Index")
+    bpy.types.Object.camera_shakes_active_index = bpy.props.IntProperty(name="Camera Shake List Active Item Index", options = set())
 
     bpy.types.WindowManager.camera_shake_show_utils = bpy.props.BoolProperty(name="Show Camera Shake Utils UI", default=False)
 
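Note: bpy.props property definitions default to options={'ANIMATABLE'}; passing options=set() clears that default, so the active-index property can no longer be animated or keyed. That is appropriate here since it is pure UI list state.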
@@ -1,7 +1,7 @@
 schema_version = "1.0.0"
 
 id = "camera_shakify"
-version = "0.5.0"
+version = "0.5.1"
 name = "Camera Shakify"
 tagline = "Add captured camera shake/wobble to your cameras"
 maintainer = "Nathan Vegadahl <cessen@cessen.com>"
@@ -1,6 +1,6 @@
 schema_version = "1.0.0"
 id = "datablock_utils"
-version = "1.2.3"
+version = "1.3.0"
 name = "Data-Block Utilities"
 tagline = "Show users, merge duplicates, find similar, and more"
 maintainer = "Leonardo Pike-Excell <leonardopike.excell@gmail.com>"
@@ -143,7 +143,8 @@ class NodeProperties:
             self._add_link(root_link, node_map)
             continue
 
-        if not links[socket].from_node.mute:
+        from_node = links[socket].from_node
+        if not from_node.mute and from_node.name in node_map:
             self._add_link(links[socket], node_map)
             continue
 
@@ -52,15 +52,6 @@ def get_path_to_light(
     return get_path_to_light(nested_users, users[0]) # type: ignore
 
 
-def get_node_editor() -> tuple[bpy.types.Area, bpy.types.Region]:
-    assert bpy.context
-    areas = [a for a in bpy.context.window.screen.areas if a.type == 'NODE_EDITOR']
-    area = areas[0] if len(areas) == 1 else next(
-        a for a in areas if not cast(SpaceNodeEditor, a.spaces[0]).pin)
-    region = next(r for r in area.regions if r.type == 'WINDOW')
-    return area, region
-
-
 def get_geometry_node_group(
     space: SpaceNodeEditor,
     id_data: bpy.types.GeometryNodeTree,
@@ -198,15 +189,34 @@ class DBU_OT_GoToDatablock(Operator):
 
         # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 
-        assert bpy.context
-        try:
-            area, region = get_node_editor()
-        except StopIteration:
+        areas = [a for a in context.window.screen.areas if a.type == 'NODE_EDITOR']
+
+        if not areas:
             self.report({'WARNING'}, "Node editor not open")
             return {'FINISHED'}
 
-        area.ui_type = 'GeometryNodeTree' if is_geo else 'ShaderNodeTree'
+        target_ui_type = 'GeometryNodeTree' if is_geo else 'ShaderNodeTree'
+
+        if len(areas) == 1:
+            area = areas[0]
+        else:
+            unpinned_areas = [a for a in areas if not cast(SpaceNodeEditor, a.spaces[0]).pin]
+
+            if not unpinned_areas:
+                self.report({'WARNING'}, "No unpinned node editor")
+                return {'FINISHED'}
+
+            area = next(
+                (a for a in unpinned_areas if a.ui_type == target_ui_type),
+                unpinned_areas[0],
+            )
+
+        area.ui_type = target_ui_type
+        region = next(r for r in area.regions if r.type == 'WINDOW')
+
+        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+        assert bpy.context
         with bpy.context.temp_override(area=area, region=region):
             space = cast(SpaceNodeEditor, context.space_data)
 
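Note: this refactor inlines the old get_node_editor() helper so the operator can report a warning instead of raising StopIteration, and it now prefers an unpinned editor that is already showing the target tree type. The temp_override pattern it relies on looks like this in isolation (a sketch with a hypothetical screen layout):

```python
import bpy

# Run an operator as if the cursor were over a specific area/region.
# Here: frame all nodes in the first node editor found.
area = next(a for a in bpy.context.window.screen.areas if a.type == 'NODE_EDITOR')
region = next(r for r in area.regions if r.type == 'WINDOW')

with bpy.context.temp_override(area=area, region=region):
    # Inside the override, context.space_data is that node editor's space.
    bpy.ops.node.view_all()
```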
@@ -1,13 +1,16 @@
-This add-on makes Blender's autosaves a bit more powerful and reliable.
+[Incremental Auto-Save](https://extensions.blender.org/add-ons/incremental-auto-save/) makes Blender's autosaves more configurable and reliable.
 
 
 
-I specifically created this add-on to be able to share my user preferences across multiple computers. Normally, when you specify an autosave path in your Blender preferences, and that path does not exist, autosaves will simply not work at all. Also, Blender's autosaves overwrite each other, so you only have one backup per file, making shorter autosave intervals risky, since you might want to go back more than a few minutes in your saves.
 
 ### Features:
-- Incremental autosave: Automatically save in configurable time intervals, without each autosave overwriting the previous one.
+- Incremental auto-save: Automatically save in configurable time intervals, without each autosave overwriting the previous one.
 - Customizable configuration: Set the maximum number of saves per file and the interval between saves according to your preferences.
 - Autosave on file switch: Automatically save the current file when opening another, ensuring your progress is always backed up.
 - Autosave for unsaved files: Automatically save files that have not been saved before, giving them the name "Unnamed.blend".
 - Support multiple computers: Specify a list of file paths, where the first valid one will be used for saving backups. Useful when you share your user preferences across multiple computers.
-- Invalid path fallback: If no valid specified file is found, create backups next to the current .blend, or the OS temp folder.
+- Save next to .blend: Instead of a global auto-save directory, your autosaves can go to an "Autosave" folder next to your .blend.
+- Invalid path fallback: If none of the given paths are valid, create backups next to the current .blend, or the OS temp folder.
+- Save modified images: For texture painting workflows, this will auto-save your images as well as the .blend file.
+- Save Mesh/Sculpt: If you spend a long time in Edit/Sculpt mode and Blender crashes, you will lose your work, because the mesh data is not written back into the object until you go to Object Mode. The add-on will periodically enter object mode for you, without interrupting any Sculpt/Paint/Transform operations.
+
+It goes without saying that this add-on can introduce periodic lag. This is simply the price of saving data to a disk. A slow disk or a large file will exacerbate this, and all you can do is adjust your save interval according to what you can put up with.
@@ -1,26 +1,36 @@
 # SPDX-License-Identifier: GPL-3.0-or-later
 
+import json
+import os
+import tempfile
+from datetime import datetime
+from pathlib import Path
+
+import bpy
+from bl_ui.generic_ui_list import draw_ui_list
+from bpy.app.handlers import persistent
+from bpy.props import (
+    BoolProperty,
+    CollectionProperty,
+    IntProperty,
+    StringProperty,
+)
+from bpy.types import AddonPreferences, PropertyGroup, UIList
+from rna_prop_ui import IDPropertyGroup
+
+
 bl_info = {
     "name": "Incremental Autosave",
     "author": "Demeter Dzadik",
-    "version": (1, 1, 0),
+    "version": (1, 1, 1),
     "blender": (2, 90, 0),
     "location": "blender",
     "description": "Autosaves in a way where subsequent autosaves don't overwrite previous ones",
     "category": "System",
 }
 
-import os, tempfile, json
-from datetime import datetime
-from pathlib import Path
-import bpy
-from bpy.props import BoolProperty, IntProperty, StringProperty, CollectionProperty
-from bpy.types import PropertyGroup, AddonPreferences, UIList
-from bpy.app.handlers import persistent
-from bl_ui.generic_ui_list import draw_ui_list
-
 # Timestamp format for prefixing autosave file names.
-TIME_FMT_STR = '%Y-%M-%d_%H-%M'
+TIME_FMT_STR = "%Y-%m-%d_%H-%M"
 
 # Timestamp of when Blender is launched. Used to avoid creating an autosave when opening Blender.
 LAUNCH_TIME = datetime.now()
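Note: the TIME_FMT_STR change is a genuine bug fix, not just a quoting cleanup: %M formats minutes while %m formats the month, so the old format stamped the minute into the month position. For example:

```python
from datetime import datetime

t = datetime(2026, 2, 16, 16, 53)
print(t.strftime('%Y-%M-%d_%H-%M'))  # 2026-53-16_16-53  (old: minute where month should be)
print(t.strftime('%Y-%m-%d_%H-%M'))  # 2026-02-16_16-53  (fixed)
```

The corrected format matches the autosave names visible in the recent-files diff above (e.g. 2026-02-16_16-53_PAE_Animation 3C.blend).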
@@ -30,47 +40,36 @@ class INCSAVE_UL_file_paths(UIList):
     def draw_item(
         self, context, layout, data, item, icon_value, active_data, active_propname
     ):
-        filepath = item
+        filepath: AutoSavePath = item
+
         row = layout.row()
         split = row.split(factor=0.2)
-        split.prop(item, 'name', text="")
+        split.prop(filepath, "name", text="")
         row = split.row()
-        if not os.path.exists(item.path):
+        if not os.path.exists(filepath.path):
             row.alert = True
-        row.prop(item, 'path', text="")
+        row.prop(filepath, "path", text="")
 
 
 def get_addon_prefs(context=None):
     context = context or bpy.context
     return context.preferences.addons[__name__].preferences
 
-def update_prefs_on_file(self, context):
+
+def update_prefs_on_file(self=None, context=None):
     prefs = get_addon_prefs(context)
     if not type(prefs).loading:
         prefs.save_prefs_to_file()
 
 
-class AutoSavePath(PropertyGroup):
-    name: StringProperty(
-        update=update_prefs_on_file
-    )
-    path: StringProperty(
-        name="Autosave Path",
-        description="An autosave path. If this filepath doesn't exist, the next existing one will be used. If none are valid, the Blender auto-save path will be used. If that's not valid either, the OS default temp folder will be used",
-        subtype='FILE_PATH',
-        default="",
-        update=update_prefs_on_file
-    )
-
-
 class PrefsFileSaveLoadMixin:
     """Mix-in class that can be used by any add-on to store their preferences in a file,
     so that they don't get lost when the add-on is disabled.
-    To use it, just do this:
+    To use it, copy this class and the function above it, and do this in your code:
+
     ```
-    import bpy
+    import bpy, json
+    from pathlib import Path
+
     class MyAddonPrefs(PrefsFileSaveLoadMixin, bpy.types.AddonPreferences):
         some_prop: bpy.props.IntProperty(update=update_prefs_on_file)
@@ -78,10 +77,16 @@ class PrefsFileSaveLoadMixin:
     def register():
         bpy.utils.register_class(MyAddonPrefs)
         MyAddonPrefs.register_autoload_from_file()
+
+    def unregister():
+        update_prefs_on_file()
     ```
+
     """
 
+    # List of property names to not write to disk.
+    omit_from_disk: list[str] = []
+
     loading = False
 
     @staticmethod
@@ -89,33 +94,48 @@ class PrefsFileSaveLoadMixin:
         def timer_func(_scene=None):
             prefs = get_addon_prefs()
             prefs.load_prefs_from_file()
 
         bpy.app.timers.register(timer_func, first_interval=delay)
 
-    @staticmethod
-    def prefs_to_dict_recursive(propgroup: 'IDPropertyGroup') -> dict:
+    def prefs_to_dict_recursive(self, propgroup: IDPropertyGroup) -> dict:
         """Recursively convert AddonPreferences to a dictionary.
         Note that AddonPreferences don't support PointerProperties,
         so this function doesn't either."""
-        from rna_prop_ui import IDPropertyGroup
         ret = {}
 
-        if hasattr(propgroup, 'bl_rna'):
-            rna_class = propgroup.bl_rna
+        rna_class = None
+        if isinstance(propgroup, bpy.types.AddonPreferences):
+            prop_dict = {
+                key: getattr(propgroup, key)
+                for key in propgroup.bl_rna.properties.keys()
+                if key not in ("rna_type", "bl_idname")
+            }
         else:
             property_group_class_name = type(propgroup).__name__
-            rna_class = bpy.types.PropertyGroup.bl_rna_get_subclass_py(property_group_class_name)
+            rna_class = bpy.types.PropertyGroup.bl_rna_get_subclass_py(
+                property_group_class_name
+            )
+            if not hasattr(rna_class, "properties"):
+                rna_class = None
+            prop_dict = {
+                key: getattr(propgroup, key)
+                for key in propgroup.bl_rna.properties.keys()
+                if key not in ("rna_type")
+            }
 
-        this_func = PrefsFileSaveLoadMixin.prefs_to_dict_recursive
-        for key, value in propgroup.items():
-            if type(value) == list:
-                ret[key] = [this_func(elem) for elem in value]
-            elif type(value) == IDPropertyGroup:
-                ret[key] = this_func(value)
+        for key, value in prop_dict.items():
+            if key in type(self).omit_from_disk:
+                continue
+            if type(value) in (list, bpy.types.bpy_prop_collection_idprop):
+                ret[key] = [self.prefs_to_dict_recursive(elem) for elem in value]
+            elif type(value) is IDPropertyGroup:
+                ret[key] = self.prefs_to_dict_recursive(value)
             else:
                 if (
-                    rna_class and
-                    key in rna_class.properties and
-                    hasattr(rna_class.properties[key], 'enum_items')
+                    rna_class
+                    and key in rna_class.properties
+                    and hasattr(rna_class.properties[key], "enum_items")
                 ):
                     # Save enum values as string, not int.
                     ret[key] = rna_class.properties[key].enum_items[value].identifier
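Note: with omit_from_disk in place, the mixin round-trips preferences through a JSON file in Blender's config directory. A minimal usage sketch, following the mixin's own docstring (MyAddonPrefs and some_prop are the docstring's placeholder names):

```python
import bpy


class MyAddonPrefs(PrefsFileSaveLoadMixin, bpy.types.AddonPreferences):
    bl_idname = __name__
    omit_from_disk = ["active_index"]  # UI-only state, not worth persisting
    some_prop: bpy.props.IntProperty(update=update_prefs_on_file)


def register():
    bpy.utils.register_class(MyAddonPrefs)
    # Load previously saved values shortly after registration.
    MyAddonPrefs.register_autoload_from_file()


def unregister():
    # Write current values out so they survive the add-on being disabled.
    update_prefs_on_file()
```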
@@ -123,32 +143,30 @@ class PrefsFileSaveLoadMixin:
                 ret[key] = value
         return ret
 
-    @staticmethod
-    def apply_prefs_from_dict_recursive(propgroup, data):
-        this_func = PrefsFileSaveLoadMixin.apply_prefs_from_dict_recursive
+    def apply_prefs_from_dict_recursive(self, propgroup, data):
         for key, value in data.items():
             if not hasattr(propgroup, key):
                 # Property got removed or renamed in the implementation.
                 continue
-            if type(value) == list:
+            if type(value) is list:
                 for elem in value:
                     collprop = getattr(propgroup, key)
-                    entry = collprop.get(elem['name'])
+                    entry = collprop.get(elem["name"])
                     if not entry:
                         entry = collprop.add()
-                    this_func(entry, elem)
-            elif type(value) == dict:
-                this_func(getattr(propgroup, key), value)
+                    self.apply_prefs_from_dict_recursive(entry, elem)
+            elif type(value) is dict:
+                self.apply_prefs_from_dict_recursive(getattr(propgroup, key), value)
             else:
                 setattr(propgroup, key, value)
 
     @staticmethod
     def get_prefs_filepath() -> Path:
         addon_name = __package__.split(".")[-1]
-        return Path(bpy.utils.user_resource('CONFIG')) / Path(addon_name + ".txt")
+        return Path(bpy.utils.user_resource("CONFIG")) / Path(addon_name + ".txt")
 
     def save_prefs_to_file(self, _context=None):
-        data_dict = self.prefs_to_dict_recursive(self)
+        data_dict = self.prefs_to_dict_recursive(propgroup=self)
+
         with open(self.get_prefs_filepath(), "w") as f:
             json.dump(data_dict, f, indent=4)
@@ -163,24 +181,38 @@ class PrefsFileSaveLoadMixin:
         type(self).loading = True
         try:
             self.apply_prefs_from_dict_recursive(self, addon_data)
-        except Exception as exc:
+        except Exception:
             # If we get an error raised here, and it isn't handled,
             # the add-on seems to break.
             print(f"Failed to load {__package__} preferences from file.")
+            # raise exc
         type(self).loading = False
 
 
+class AutoSavePath(PropertyGroup):
+    name: StringProperty(update=update_prefs_on_file)
+    path: StringProperty(
+        name="Autosave Path",
+        description="An autosave path. If this filepath doesn't exist, the next existing one will be used. If none are valid, the Blender auto-save path will be used. If that's not valid either, the OS default temp folder will be used",
+        subtype="FILE_PATH",
+        default="",
+        update=update_prefs_on_file,
+    )
+
+
 class IncrementalAutoSavePreferences(PrefsFileSaveLoadMixin, AddonPreferences):
     bl_idname = __name__
 
+    omit_from_disk = ["active_index"]
+
     save_before_close: BoolProperty(
-        name='Save Before File Open',
-        description='Save the current file before opening another file',
+        name="Save Before File Open",
+        description="Save the current file before opening another file",
         default=True,
         update=update_prefs_on_file,
     )
     save_interval: IntProperty(
-        name='Save Interval (Minutes)',
+        name="Save Interval (Minutes)",
         description="Number of minutes between each save while the add-on is enabled",
         default=5,
         min=1,
@@ -190,25 +222,53 @@ class IncrementalAutoSavePreferences(PrefsFileSaveLoadMixin, AddonPreferences):
     )
 
     max_save_files: bpy.props.IntProperty(
-        name='Max Backups Per File',
-        description='Maximum number of backups to save for each file, 0 means unlimited. Otherwise, the oldest file will be deleted after reaching the limit',
+        name="Max Backups Per File",
+        description="Maximum number of backups to save for each file, 0 means unlimited. Otherwise, the oldest file will be deleted after reaching the limit",
         default=10,
         min=0,
         max=100,
         update=update_prefs_on_file,
     )
     compress_files: bpy.props.BoolProperty(
-        name='Compress Files',
-        description='Save backups with compression enabled',
+        name="Compress Files",
+        description="Save backups with compression enabled",
         default=True,
         update=update_prefs_on_file,
     )
 
     autosave_paths: CollectionProperty(type=AutoSavePath)
+    relative_to_blend: BoolProperty(
+        name="Save Next To Blend",
+        description='When the .blend file is saved, save auto-saves next to it, into an "Autosave" folder.\nIf the .blend is not saved, it will fall back to the list of filepaths provided above, or system temp folder.',
+        default=False,
+    )
+    save_images: BoolProperty(
+        name="Save Images",
+        description="Useful for long periods of texture painting. This will auto-save images - both packed AND UNPACKED! Best to use in conjunction with a version control system like GitLFS/SVN!",
+        default=True,
+    )
+    save_sculpt: BoolProperty(
+        name="Save Mesh/Sculpt",
+        description="Useful for long periods of mesh editing/sculpting. Normally if Blender crashes during mesh edit/sculpt, you lose your changes since the last time you entered Object mode. If this is enabled, the add-on will periodically switch to Object mode for you, and back. This will only trigger after dependency graph updates, so it doesn't happen in the middle of a stroke.",
+        default=True,
+    )
+    print_saves: BoolProperty(
+        name="Print Saves",
+        description="Print in the System Console (aka Terminal) on each autosave.",
+        default=True,
+    )
     active_index: IntProperty()
 
-    def get_valid_autosave_path(self, context):
+    def get_relative_dir(self) -> str:
+        assert bpy.data.filepath
+        return os.sep.join((os.path.dirname(bpy.data.filepath), "Autosaves"))
+
+    def get_valid_autosave_path(self, context) -> str:
         """Return an autosave path that will always actually exist, no matter how desperate."""
+        prefs = get_addon_prefs(context)
+        if bpy.data.filepath and prefs.relative_to_blend:
+            return self.get_relative_dir()
+
         # Try the native autosave path first.
         default_path = bpy.context.preferences.filepaths.temporary_directory
         if os.path.exists(default_path):
@@ -219,9 +279,9 @@ class IncrementalAutoSavePreferences(PrefsFileSaveLoadMixin, AddonPreferences):
             if os.path.exists(path.path):
                 return path.path
 
-        # If none of those exist, return the .blend's directory.
+        # If none of those exist, return Autosave dir next to .blend.
         if bpy.data.filepath:
-            return os.path.dirname(bpy.data.filepath)
+            return self.get_relative_dir()
 
         # And if that doesn't exist either, fall back to the sys temp dir.
         sys_temp = tempfile.gettempdir()
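Note: a condensed view of get_valid_autosave_path() after this commit may help; the lines between the two hunks are elided in the diff, so the ordering of the middle steps below is an assumption based on the visible code and comments:

```python
import os
import tempfile

import bpy


def resolve_autosave_dir(prefs) -> str:
    """Sketch of the directory-resolution chain (middle steps assumed)."""
    # New: a saved .blend with "Save Next To Blend" enabled wins outright.
    if bpy.data.filepath and prefs.relative_to_blend:
        return os.sep.join((os.path.dirname(bpy.data.filepath), "Autosaves"))

    # Blender's native autosave path, then the user's configured path list.
    native = bpy.context.preferences.filepaths.temporary_directory
    if os.path.exists(native):
        return native
    for path in prefs.autosave_paths:
        if os.path.exists(path.path):
            return path.path

    # Last resorts: "Autosaves" next to the .blend, else the system temp dir.
    if bpy.data.filepath:
        return os.sep.join((os.path.dirname(bpy.data.filepath), "Autosaves"))
    return tempfile.gettempdir()
```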
@@ -232,53 +292,60 @@ class IncrementalAutoSavePreferences(PrefsFileSaveLoadMixin, AddonPreferences):
         layout.use_property_decorate = False
         layout.use_property_split = True
 
-        split = layout.split(factor=0.4)
-        split.row()
-        split.label(text="First valid path will be used:")
+        header, panel = layout.panel("Incremental Autosave: Paths", default_closed=True)
+        header.label(text="Autosave Paths")
+        if panel:
+            split = panel.split(factor=0.4)
+            split.row()
+            split.label(text="First valid path will be used:")
 
-        native_row = layout.row()
-        if not os.path.exists(context.preferences.filepaths.temporary_directory):
-            native_row.alert = True
-        native_row.prop(
-            context.preferences.filepaths,
-            'temporary_directory',
-            text="Native Autosave Path",
-        )
+            native_row = panel.row()
+            if not os.path.exists(context.preferences.filepaths.temporary_directory):
+                native_row.alert = True
+            native_row.prop(
+                context.preferences.filepaths,
+                "temporary_directory",
+                text="Native Autosave Path",
+            )
 
-        draw_ui_list(
-            layout,
-            context,
-            class_name='INCSAVE_UL_file_paths',
-            list_path=f'preferences.addons["{__package__}"].preferences.autosave_paths',
-            active_index_path=f'preferences.addons["{__package__}"].preferences.active_index',
-            insertion_operators=True,
-            move_operators=True,
-            unique_id='Incremental Autosave Path List',
-        )
+            draw_ui_list(
+                panel,
+                context,
+                class_name="INCSAVE_UL_file_paths",
+                list_path=f'preferences.addons["{__package__}"].preferences.autosave_paths',
+                active_index_path=f'preferences.addons["{__package__}"].preferences.active_index',
+                insertion_operators=True,
+                move_operators=True,
+                unique_id="Incremental Autosave Path List",
+            )
 
-        layout.separator()
+            panel.prop(self, "relative_to_blend")
 
         split = layout.split(factor=0.4)
-        split.row()
-        split.label(
-            text="Current autosave path: " + str(self.get_valid_autosave_path(context))
-        )
+        row = split.row()
+        row.alignment = "RIGHT"
+        row.label(text="Current Autosave Path")
+        split.label(text=str(self.get_valid_autosave_path(context)))
 
         layout.separator()
 
-        layout.prop(self, 'save_interval')
-        layout.prop(self, 'max_save_files')
-        layout.prop(self, 'save_before_close')
+        layout.prop(self, "save_interval")
+        layout.prop(self, "max_save_files")
+        layout.prop(self, "save_before_close")
         layout.separator()
-        layout.prop(self, 'compress_files')
+        layout.prop(self, "compress_files")
+        layout.prop(self, "print_saves")
+        layout.separator()
+        layout.prop(self, "save_images")
+        layout.prop(self, "save_sculpt")
 
 
 def save_file():
     addon_prefs = get_addon_prefs()
 
     basename = bpy.data.filepath
-    if basename == '':
-        basename = 'Unnamed.blend'
+    if basename == "":
+        basename = "Unnamed.blend"
     else:
         basename = bpy.path.basename(basename)
@@ -286,8 +353,10 @@ def save_file():
         save_dir = bpy.path.abspath(addon_prefs.get_valid_autosave_path(bpy.context))
         if not os.path.isdir(save_dir):
            os.mkdir(save_dir)
-    except:
-        print("Incremental Autosave: Error creating auto save directory.")
+    except PermissionError:
+        print(
+            f"Incremental Autosave: No permission to create auto save directory:\n{save_dir}"
+        )
         return
 
     # Delete old files, to limit the number of saves.
@@ -304,20 +373,51 @@ def save_file():
             old_file = os.path.join(save_dir, otherfiles[0])
             os.remove(old_file)
             otherfiles.pop(0)
-    except:
+    except PermissionError:
        print("Incremental Autosave: Unable to remove old files.")
 
     # Save the copy.
     time = datetime.now()
-    filename = time.strftime(TIME_FMT_STR) + '_' + basename
+    filename = time.strftime(TIME_FMT_STR) + "_" + basename
     backup_file = os.path.join(save_dir, filename)
     try:
+        if addon_prefs.save_images:
+            save_images()
+        if addon_prefs.save_sculpt:
+            save_sculpt()
         bpy.ops.wm.save_as_mainfile(
             filepath=backup_file, copy=True, compress=addon_prefs.compress_files
         )
-        print("Incremental Autosave: Saved file: ", backup_file)
-    except:
-        print('Incremental Autosave: Error auto saving file.')
+        if addon_prefs.print_saves:
+            print(f"Incremental Autosave: Saved file: {backup_file}")
+    except PermissionError:
+        print(f"Incremental Autosave: Error auto saving file: {backup_file}")
+
+
+def save_images():
+    for img in bpy.data.images:
+        if not img.is_dirty:
+            continue
+        try:
+            if img.source == "GENERATED" or img.packed_file:
+                img.pack()
+            else:
+                img.save()
+        except Exception:
+            # Should never happen, but I don't trust this API.
+            # https://projects.blender.org/blender/blender/issues/152638
+            print(f"Incremental Autosave: Failed to save dirty image: {img.name}")
+
+
+def save_sculpt():
+    """We just need to enter object mode and then go back to the original mode."""
+    context = bpy.context
+    if context.mode == "EDIT_MESH":
+        bpy.ops.object.mode_set(mode="OBJECT")
+        bpy.ops.object.mode_set(mode="EDIT")
+    if context.mode == "SCULPT":
+        bpy.ops.object.mode_set(mode="OBJECT")
+        bpy.ops.object.mode_set(mode="SCULPT")
 
 
 @persistent
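Note: img.is_dirty is true when an image has unsaved edits. Generated or packed images have no file on disk to write to, so pack() re-embeds the edited pixels in the .blend, while file-backed images are written back with save(). A quick way to preview what save_images() would touch on the next autosave:

```python
import bpy

# List the images with unsaved edits, and how they would be persisted.
for img in bpy.data.images:
    if img.is_dirty:
        kind = "packed/generated" if (img.packed_file or img.source == 'GENERATED') else "file-backed"
        print(f"{img.name}: unsaved edits ({kind})")
```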
@@ -327,20 +427,43 @@ def save_before_close(_dummy=None):
     save_file()
 
 
-def create_autosave():
+def create_autosave_on_timer():
     now = datetime.now()
     delta = now - LAUNCH_TIME
+    prefs = get_addon_prefs()
     if delta.seconds < 5:
-        return get_addon_prefs().save_interval * 60
+        return prefs.save_interval * 60
 
     if bpy.data.is_dirty:
-        save_file()
-    return get_addon_prefs().save_interval * 60
+        bpy.app.timers.register(save_after_modal_operation)
+    return prefs.save_interval * 60
+
+
+def save_after_modal_operation():
+    """Mesh edits, sculpt strokes, and texture paint strokes trigger a depsgraph update,
+    so it's better to make a save after one of those operations, rather than on a timer.
+    """
+
+    context = bpy.context
+
+    for modal in context.window.modal_operators:
+        # We probably don't want to save in the middle of a transform/sculpt operation.
+        if any(
+            modal.bl_idname.startswith(prefix)
+            for prefix in [
+                "TRANSFORM_OT_",
+                "SCULPT_OT_",
+                "PAINT_OT_",
+            ]
+        ):
+            return 0.5
+
+    save_file()
 
 
 @persistent
 def register_autosave_timer(_dummy=None):
-    bpy.app.timers.register(create_autosave)
+    bpy.app.timers.register(create_autosave_on_timer)
 
 
 def register():
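Note: bpy.app.timers callbacks are rescheduled by their return value — return a number of seconds to run again, or None to stop. That is what lets save_after_modal_operation() poll every 0.5 s while a transform/sculpt/paint operator is in flight and then call save_file() exactly once. A minimal sketch of the same pattern (the job itself is hypothetical; the modal-operator check is trimmed from the code above):

```python
import bpy


def deferred_job():
    # Re-run in half a second while something modal is in flight...
    if any(op.bl_idname.startswith("TRANSFORM_OT_")
           for op in bpy.context.window.modal_operators):
        return 0.5
    print("Safe to do the real work now.")
    return None  # ...and stop once the work is done.


bpy.app.timers.register(deferred_job)
```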
@@ -348,16 +471,17 @@ def register():
     bpy.utils.register_class(AutoSavePath)
     bpy.utils.register_class(IncrementalAutoSavePreferences)
     IncrementalAutoSavePreferences.register_autoload_from_file()
-    bpy.app.timers.register(create_autosave)
+    bpy.app.timers.register(create_autosave_on_timer)
     bpy.app.handlers.load_pre.append(save_before_close)
     bpy.app.handlers.load_post.append(register_autosave_timer)
 
 
 def unregister():
     save_before_close()
+    update_prefs_on_file()
     bpy.app.handlers.load_pre.remove(save_before_close)
     bpy.app.handlers.load_post.remove(register_autosave_timer)
-    bpy.app.timers.unregister(create_autosave)
+    bpy.app.timers.unregister(create_autosave_on_timer)
     bpy.utils.unregister_class(IncrementalAutoSavePreferences)
     bpy.utils.unregister_class(AutoSavePath)
     bpy.utils.unregister_class(INCSAVE_UL_file_paths)
@@ -1,21 +1,21 @@
 schema_version = "1.0.0"
 
 id = "incremental_auto_save"
-version = "1.1.0"
+version = "1.1.1"
 name = "Incremental Auto-Save"
 tagline = "Improvements to Blender's Autosave"
 maintainer = "Demeter Dzadik <demeter@blender.org>"
 type = "add-on"
-website = "https://projects.blender.org/Mets/CloudRig"
+website = "https://projects.blender.org/Mets/incremental-autosave"
 tags = ["System"]
 
 blender_version_min = "4.2.0"
 
 license = [
-"SPDX:GPL-3.0-or-later",
+"SPDX:GPL-3.0-or-later"
 ]
 copyright = [
-"2019-2024 Demeter Dzadik",
+"2019-2024 Demeter Dzadik"
 ]
 [permissions]
 files = "Save preferences & .blends in chosen directories"
@@ -0,0 +1,278 @@
name: Full Build (Espeak + Blender Addon)

on:
  workflow_dispatch:

jobs:
  build-windows:
    name: Build Espeak NG for Windows
    runs-on: windows-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # Clone the forked Espeak NG repo. The fork contains a fix that filters
      # out mbrola voices on Windows; without it, phonemizer tries to use
      # voices that don't exist.
      - name: Clone forked espeak NG
        run: git clone --depth 1 -b fix-windows-list-voices https://github.com/Charley3d/espeak-ng.git

      # Clone the pcaudiolib repo (required for the Windows build)
      - name: Clone pcaudiolib
        run: git clone --depth 1 https://github.com/espeak-ng/pcaudiolib.git espeak-ng/src/pcaudiolib

      - name: Modify config.h
        working-directory: espeak-ng
        shell: bash
        run: |
          CONFIG_FILE=src/windows/config.h
          echo "Updating $CONFIG_FILE..."

          # Add or update the definitions
          sed -i "s/^#define USE_KLATT.*/#define USE_KLATT 0/" $CONFIG_FILE || echo "#define USE_KLATT 0" >> $CONFIG_FILE
          sed -i "s/^#define USE_SPEECHPLAYER.*/#define USE_SPEECHPLAYER 0/" $CONFIG_FILE || echo "#define USE_SPEECHPLAYER 0" >> $CONFIG_FILE
          sed -i "s/^#define USE_MBROLA.*/#define USE_MBROLA 0/" $CONFIG_FILE || echo "#define USE_MBROLA 0" >> $CONFIG_FILE
          sed -i "s/^#define USE_SONIC.*/#define USE_SONIC 0/" $CONFIG_FILE || echo "#define USE_SONIC 0" >> $CONFIG_FILE
          sed -i "s/^#define USE_ASYNC.*/#define USE_ASYNC 0/" $CONFIG_FILE || echo "#define USE_ASYNC 0" >> $CONFIG_FILE

      # Build Espeak NG for Windows
      - name: Build with MSBuild
        working-directory: espeak-ng
        shell: cmd
        run: |
          cd src/windows
          "%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -latest -products * -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe > msbuild_path.txt
          set /p MSBUILD_PATH=<msbuild_path.txt
          call "%MSBUILD_PATH%" espeak-ng.sln /p:Configuration=Release

      # Copy the .dll to a temporary dist dir
      - name: Copy Windows DLL + Data
        run: |
          mkdir dist
          copy espeak-ng\src\windows\x64\Release\libespeak-ng.dll dist\

      # Zip artifacts
      - name: Zip Windows build
        shell: pwsh
        run: |
          Compress-Archive -Path dist\* -DestinationPath espeak-ng-windows.zip

      # Upload ZIP artifacts
      - name: Upload Windows Build of Espeak NG
        uses: actions/upload-artifact@v4
        with:
          name: espeak-ng-windows
          path: espeak-ng-windows.zip

  build-linux:
    name: Build Espeak NG for Linux
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Clone forked eSpeak NG
        run: git clone --depth 1 -b fix-windows-list-voices https://github.com/Charley3d/espeak-ng.git

      # Install build dependencies
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y make autoconf automake libtool pkg-config
          sudo apt-get install -y gcc g++
          sudo apt-get install -y libpcaudio-dev

      # Build & copy the lib and data folder to a temporary dist dir
      - name: Building
        working-directory: espeak-ng
        run: |
          ./autogen.sh
          ./configure --with-klatt=no --with-speechplayer=no --with-mbrola=no --with-sonic=no --with-async=no
          make
          mkdir -p ../dist
          cp src/.libs/libespeak-ng.so* ../dist/
          zip -r ../espeak-ng-data.zip espeak-ng-data
          cd ../dist
          zip -r ../espeak-ng-linux.zip .

      # Upload ZIP artifacts
      - name: Upload Linux lib
        uses: actions/upload-artifact@v4
        with:
          name: espeak-ng-linux
          path: espeak-ng-linux.zip

      # Upload the espeak-ng data folder
      - name: Upload espeak data folder
        uses: actions/upload-artifact@v4
        with:
          name: espeak-ng-data
          path: espeak-ng-data.zip

  build-arm64:
    name: Build Espeak NG for macOS (arm64)
    runs-on: macos-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Clone forked eSpeak NG
        run: git clone --depth 1 -b fix-windows-list-voices https://github.com/Charley3d/espeak-ng.git

      # Install build dependencies
      - name: Install dependencies on macOS
        run: |
          brew update
          brew install make gcc autoconf automake libtool pkg-config portaudio

      - name: Building arm64
        working-directory: espeak-ng
        run: |
          ./autogen.sh
          ./configure CFLAGS="-arch arm64" LDFLAGS="-arch arm64" --with-klatt=no --with-speechplayer=no --with-mbrola=no --with-sonic=no --with-async=no
          make
          cp src/.libs/libespeak-ng.dylib ../libespeak-ng-arm64.dylib

      - name: Upload arm64 lib + data
        uses: actions/upload-artifact@v4
        with:
          name: espeak-ng-arm64
          path: libespeak-ng-arm64.dylib

  build-x86_64:
    name: Build Espeak NG for macOS (x86_64)
    runs-on: macos-13

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Clone forked eSpeak NG
        run: git clone --depth 1 -b fix-windows-list-voices https://github.com/Charley3d/espeak-ng.git

      # Install build dependencies
      - name: Install dependencies on macOS
        run: |
          brew update
          brew install make gcc autoconf automake libtool pkg-config portaudio

      - name: Building x86_64
        working-directory: espeak-ng
        run: |
          ./autogen.sh
          ./configure CFLAGS="-arch x86_64" LDFLAGS="-arch x86_64" --with-klatt=no --with-speechplayer=no --with-mbrola=no --with-sonic=no --with-async=no
          make
          cp src/.libs/libespeak-ng.dylib ../libespeak-ng-x86_64.dylib

      - name: Upload x86_64 lib
        uses: actions/upload-artifact@v4
        with:
          name: espeak-ng-x86_64
          path: libespeak-ng-x86_64.dylib

  merge:
    name: Create Universal macOS Build
    runs-on: macos-latest
    needs: [build-arm64, build-x86_64]

    steps:
      - name: Download arm64 lib + data
        uses: actions/download-artifact@v4
        with:
          name: espeak-ng-arm64

      - name: Download x86_64 lib
        uses: actions/download-artifact@v4
        with:
          name: espeak-ng-x86_64

      - name: Merge with lipo and prepare bundle
        run: |
          mkdir -p dist
          lipo -create -output dist/libespeak-ng.dylib \
            libespeak-ng-arm64.dylib \
            libespeak-ng-x86_64.dylib
          file dist/libespeak-ng.dylib
          cd dist
          zip -r ../espeak-ng-darwin.zip .

      - name: Upload Universal lib
        uses: actions/upload-artifact@v4
        with:
          name: espeak-ng-darwin
          path: espeak-ng-darwin.zip

  collect-artifacts:
    name: Make Blender Addon
    runs-on: ubuntu-latest
    needs: [ build-windows, merge, build-linux ]

    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Install Blender via Snap
        run: sudo snap install blender --classic

      - name: Download eSpeak Data Folder
        uses: actions/download-artifact@v4
        with:
          name: espeak-ng-data
          path: temp/espeak-ng-data

      # Download each platform's build artifact
      - name: Download Windows build
        uses: actions/download-artifact@v4
        with:
          name: espeak-ng-windows
          path: temp/windows

      - name: Download macOS build
        uses: actions/download-artifact@v4
        with:
          name: espeak-ng-darwin
          path: temp/macos

      - name: Download Linux build
        uses: actions/download-artifact@v4
        with:
          name: espeak-ng-linux
          path: temp/linux

      # Move them to Assets/{platform}
      - name: Move artifacts to Assets
        run: |
          mkdir -p Assets/Archives/windows Assets/Archives/darwin Assets/Archives/linux
          cp -r temp/windows/* Assets/Archives/windows/
          cp -r temp/macos/* Assets/Archives/darwin/
          cp -r temp/linux/* Assets/Archives/linux/
          mkdir -p Assets/Archives/common && cp -r temp/espeak-ng-data/* Assets/Archives/common/

      # Remove temp files and folders + unwanted files for release
      - name: Clean up folder to zip Blender Addon
        run: |
          rm -rf temp || true
          rm -rf dist || true
          rm -f .gitignore || true
          rm -f dev_tools.py || true
          rm -rf .github || true
          rm -rf .idea || true

      - name: Build Extension with blender
        run: blender --command extension build

      - name: Get generated zip filename
        id: get-zip
        run: echo "ZIP_NAME=$(basename $(ls ./*.zip))" >> $GITHUB_OUTPUT

      # Upload the full release of the Blender add-on
      - name: Upload Blender Addon
        uses: actions/upload-artifact@v4
        with:
          name: lipsync-addon
          path: ${{ steps.get-zip.outputs.ZIP_NAME }}

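The per-platform artifacts end up under Assets/Archives/{windows,darwin,linux} plus a shared common folder with the espeak-ng data. One plausible way the add-on can consume them at runtime is a platform-switched ctypes loader; this is a sketch under that assumption, the actual loader is not part of this commit:

    import ctypes
    import platform
    from pathlib import Path

    ARCHIVES = Path(__file__).parent / "Assets" / "Archives"

    def load_espeak():
        system = platform.system()
        if system == "Windows":
            lib = ARCHIVES / "windows" / "libespeak-ng.dll"
        elif system == "Darwin":
            # Universal binary produced by the lipo merge job above
            lib = ARCHIVES / "darwin" / "libespeak-ng.dylib"
        else:
            lib = next((ARCHIVES / "linux").glob("libespeak-ng.so*"))
        return ctypes.CDLL(str(lib))
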
+118
@@ -0,0 +1,118 @@
name: Publish Release

on:
  push:
    branches: [ master ]
  workflow_dispatch:

jobs:
  build-extension:
    permissions:
      contents: write # to be able to publish a GitHub release
      issues: write # to be able to comment on released issues
      pull-requests: write # to be able to comment on released pull requests

    name: Blender Extension Build
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Get next version from semantic-release
        id: next_version
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          OUTPUT=$(npx semantic-release --dry-run 2>&1 || true)
          echo "$OUTPUT"

          VERSION=$(echo "$OUTPUT" | grep -oP 'The next release version is \K[0-9]+\.[0-9]+\.[0-9]+' || true)

          if [ -z "$VERSION" ]; then
            echo "⚠️ No new version detected (probably no semantic commits)."
            exit 0
          fi

          echo "✅ Found next version: $VERSION"
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Set up Python
        if: steps.next_version.outputs.version != ''
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Install tomlkit
        if: steps.next_version.outputs.version != ''
        run: pip install tomlkit

      - name: Update version in Blender Manifest
        if: steps.next_version.outputs.version != ''
        run: |
          python scripts/update_version.py ${{ steps.next_version.outputs.version }}

      - name: Install Blender via Snap
        run: sudo snap install blender --classic

      - name: Build Extension with blender
        run: blender --command extension build --split-platforms

      # Strip the other platforms' binaries from each platform-specific archive
      - name: Clean platform-specific archives
        run: |
          for zipfile in iocgpoly_lip_sync-*-*.zip; do
            echo "Examining $zipfile"
            unzip -l "$zipfile" | grep -i "archives"

            # Extract platform from filename (between the last - and _)
            platform=$(echo $zipfile | sed 's/.*-\(.*\)_.*/\1/')

            echo "Processing $zipfile (platform: $platform)"

            # Create temp dir
            temp_dir="${platform}_temp"
            unzip -q "$zipfile" -d "$temp_dir"

            # List the actual directory structure
            echo "Directory structure in temp_dir:"
            ls -la "$temp_dir/Assets/Archives/"

            rm "$zipfile"
            cd "$temp_dir"

            # Remove irrelevant platform folders based on the platform
            case $platform in
              linux)
                rm -rf Assets/Archives/darwin Assets/Archives/windows
                ;;
              macos)
                rm -rf Assets/Archives/linux Assets/Archives/windows
                ;;
              windows)
                rm -rf Assets/Archives/darwin Assets/Archives/linux
                ;;
            esac

            # List what remains
            echo "Remaining directories:"
            ls -la Assets/Archives/

            # Rezip with the same name
            zip -r "../$zipfile" .
            cd ..
            rm -rf "$temp_dir"
          done

      - name: Semantic release
        # if: github.event_name == 'push' && github.ref == 'refs/heads/master'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: npx semantic-release --debug true

      - name: Upload Blender Addon
        if: ${{ github.event_name == 'workflow_dispatch' || steps.next_version.outputs.version == '' }}
        uses: actions/upload-artifact@v4
        with:
          name: extension-builds
          path: ./*.zip

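The platform token is recovered from archive names like iocgpoly_lip_sync-1.0.6-linux_x64.zip. The sed expression `s/.*-\(.*\)_.*/\1/` is equivalent to this Python, shown here only to make the capture behavior explicit:

    import re

    def platform_of(zip_name: str) -> str:
        # Greedy ".*-" consumes through the last '-', and the greedy capture
        # stops at the last '_', mirroring the sed expression above.
        m = re.fullmatch(r".*-(.*)_.*", zip_name)
        return m.group(1) if m else ""

    assert platform_of("iocgpoly_lip_sync-1.0.6-linux_x64.zip") == "linux"
    assert platform_of("iocgpoly_lip_sync-1.0.6-macos_arm64.zip") == "macos"
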
+36
@@ -0,0 +1,36 @@
name: Upload to Extensions Platform

on:
  # release:
  #   types: [published]
  workflow_dispatch:

jobs:
  upload-to-extensions:
    runs-on: ubuntu-latest
    permissions:
      contents: read

    steps:
      - uses: actions/checkout@v4

      - name: Download Release Assets and Notes
        run: |
          mkdir assets
          gh release download -p "*.zip" -D assets/
          RELEASE_NOTES=$(gh release view --json body | jq -r .body)
          echo "$RELEASE_NOTES" > release_notes.txt
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload to Extensions Platform
        env:
          EXTENSION: iocgpoly_lip_sync
        run: |
          for zipfile in assets/*.zip; do
            echo "Uploading $zipfile to Blender Extensions Platform"
            curl -X POST https://extensions.blender.org/api/v1/extensions/${EXTENSION}/versions/upload/ \
              -H "Authorization:bearer ${{ secrets.BLENDER_EXTENSIONS_TOKEN }}" \
              -F "version_file=@${zipfile}" \
              -F "release_notes=<release_notes.txt"
          done

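The same upload can be exercised locally; a hedged Python equivalent of the curl call above, with the endpoint and form fields as in the workflow and the token and filename assumed:

    import os
    import requests  # third-party; pip install requests

    EXTENSION = "iocgpoly_lip_sync"
    url = f"https://extensions.blender.org/api/v1/extensions/{EXTENSION}/versions/upload/"

    with open("iocgpoly_lip_sync-1.0.6-linux_x64.zip", "rb") as zf, \
            open("release_notes.txt", "r", encoding="utf-8") as notes:
        resp = requests.post(
            url,
            headers={"Authorization": f"bearer {os.environ['BLENDER_EXTENSIONS_TOKEN']}"},
            files={"version_file": zf},
            data={"release_notes": notes.read()},
        )
    resp.raise_for_status()
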
@@ -0,0 +1,19 @@
{
  "repositoryUrl": "git@github.com:Charley3d/lip-sync.git",
  "debug": true,
  "plugins": [
    "@semantic-release/commit-analyzer",
    "@semantic-release/release-notes-generator",
    [
      "@semantic-release/github",
      {
        "assets": [
          { "path": "iocgpoly_lip_sync-*-linux_x64.zip", "label": "LipSync for Linux ${nextRelease.gitTag}" },
          { "path": "iocgpoly_lip_sync-*-macos_arm64.zip", "label": "LipSync for MacOS arm64 ${nextRelease.gitTag}" },
          { "path": "iocgpoly_lip_sync-*-macos_x64.zip", "label": "LipSync for MacOS x64 ${nextRelease.gitTag}" },
          { "path": "iocgpoly_lip_sync-*-windows_x64.zip", "label": "LipSync for Windows x64 ${nextRelease.gitTag}" }
        ]
      }
    ]
  ]
}

BIN
Binary file not shown.
BIN
Binary file not shown.
+30
-1
@@ -177,14 +177,43 @@ class LIPSYNC2D_PoseAssetsAnimator:
         :param interpolation: The interpolation type for the keyframes.
         :type interpolation: Literal["LINEAR"]
         """
+        # Determine where to read the pose asset fcurves from. In newer Blender
+        # APIs pose assets may not expose `.fcurves` directly; the pose data is
+        # stored in the action's first layer/strip channelbag. Fall back to
+        # `pose_action.fcurves` when available.
+        pose_fcurves = None
+
+        # Try to get fcurves from the pose asset's strip channelbag first
+        try:
+            if hasattr(pose_action, "layers") and len(pose_action.layers) > 0:
+                pose_strip = pose_action.layers[0].strips[0]
+                # Use the first slot if present
+                pose_slot = pose_action.slots[0] if getattr(pose_action, "slots", None) else None
+                if pose_strip is not None and pose_slot is not None:
+                    pose_channelbag = pose_strip.channelbag(pose_slot)
+                    pose_fcurves = getattr(pose_channelbag, "fcurves", None)
+        except Exception:
+            pose_fcurves = None
+
+        # Fallback to action.fcurves when present
+        if pose_fcurves is None:
+            pose_fcurves = getattr(pose_action, "fcurves", None)
+
+        if pose_fcurves is None:
+            # Nothing we can copy from
+            return
+
         for fcurve in self.channelbag.fcurves:
-            pose_asset_fcurve = pose_action.fcurves.find(
+            pose_asset_fcurve = pose_fcurves.find(
                 fcurve.data_path, index=fcurve.array_index
             )
             if pose_asset_fcurve is None:
                 continue

             # Since the Action is from a Pose Asset, we can safely assume that the first keyframe point holds the Pose
+            if len(pose_asset_fcurve.keyframe_points) == 0:
+                continue
+
             fcurve_value = pose_asset_fcurve.keyframe_points[0].co.y
             kframe = fcurve.keyframe_points.insert(
                 frame,

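For readers unfamiliar with the slotted Actions API this fallback targets: since the layered-action changes in recent Blender versions, an Action's F-Curves live in a channelbag keyed by a strip and a slot rather than in a flat `action.fcurves` list. A minimal sketch of a reader that handles both shapes, assuming a single-layer, single-strip action:

    import bpy

    def action_fcurves(action: bpy.types.Action):
        # Legacy API: the Action exposes a flat fcurves collection.
        if getattr(action, "fcurves", None):
            return action.fcurves
        # Slotted API: F-Curves live in the channelbag looked up from the
        # first layer's first strip plus a slot.
        strip = action.layers[0].strips[0]
        return strip.channelbag(action.slots[0]).fcurves
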
+41
-6
@@ -57,11 +57,29 @@ class LIPSYNC_SpriteSheetAnimator:
         :type obj: BpyObject
         :return: None
         """
-        action = obj.animation_data.action if obj.animation_data else None
-        if action:
-            for fcurve in action.fcurves:
+        if obj.animation_data is None or (action := obj.animation_data.action) is None:
+            return
+
+        # Retrieve the strip and channelbag correctly for Layered Actions,
+        # assuming the single layer/strip structure created by setup()
+        if not action.layers or not action.layers[0].strips:
+            return
+
+        strip = cast(BpyActionKeyframeStrip, action.layers[0].strips[0])
+
+        # Make sure we get the correct slot for the sprite sheet
+        slot = action.slots.get(f"OB{SLOT_SPRITE_SHEET_NAME}")
+        if not slot:
+            return
+
+        try:
+            channelbag = strip.channelbag(slot)
+            for fcurve in channelbag.fcurves:
                 if fcurve.data_path == "lipsync2d_props.lip_sync_2d_sprite_sheet_index":
                     fcurve.keyframe_points.clear()
+        except Exception:
+            # Stay silent if the channelbag lookup fails (it should exist if we are clearing)
+            pass

     def insert_keyframes(self, obj: BpyObject, props: BpyPropertyGroup, visemes_data: VisemeData,
                          word_timing: WordTiming,
@@ -203,12 +221,29 @@ class LIPSYNC_SpriteSheetAnimator:
         :type obj: BpyObject
         :return: None
         """
-        action = obj.animation_data.action if obj.animation_data else None
-
-        if action:
-            for fcurve in action.fcurves:
+        if obj.animation_data is None or (action := obj.animation_data.action) is None:
+            return
+
+        # We could use self.channelbag if it is already set up, but to be robust
+        # we re-fetch it the same way as clear_previous_keyframes to make sure
+        # we have the right one.
+        if not action.layers or not action.layers[0].strips:
+            return
+
+        strip = cast(BpyActionKeyframeStrip, action.layers[0].strips[0])
+
+        # Make sure we get the correct slot for the sprite sheet
+        slot = action.slots.get(f"OB{SLOT_SPRITE_SHEET_NAME}")
+        if not slot:
+            return
+
+        try:
+            channelbag = strip.channelbag(slot)
+            for fcurve in channelbag.fcurves:
                 for keyframe in fcurve.keyframe_points:
                     keyframe.interpolation = 'CONSTANT'
+        except Exception:
+            pass

     def setup(self, obj: BpyObject):
         self.setup_animation_properties(obj)

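One non-obvious detail in the slot lookup above: slot identifiers combine a two-character ID-type prefix with the slot name, and `OB` is the prefix for objects, hence the `f"OB{SLOT_SPRITE_SHEET_NAME}"` key. A toy sketch of that convention, with the constant's value assumed:

    import bpy

    SLOT_SPRITE_SHEET_NAME = "SpriteSheet"  # assumed value of the add-on's constant

    action = bpy.data.actions.new("LipSync")
    slot = action.slots.new(id_type='OBJECT', name=SLOT_SPRITE_SHEET_NAME)
    # The identifier is the ID-type prefix plus the name:
    assert slot.identifier == f"OB{SLOT_SPRITE_SHEET_NAME}"
    assert action.slots.get(f"OB{SLOT_SPRITE_SHEET_NAME}") == slot
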
@@ -53,8 +53,9 @@ phoneme_to_viseme_arkit_v2 = {
     "ŋ": "nn",
     "ɲ": "nn",
     "ɳ": "nn",
-    "l": "nn",
-    "ɫ": "nn",
+    # LL – lateral group
+    "l": "LL",
+    "ɫ": "LL",
     # aa – open and low/mid front vowels
     "a": "aa",
     "aː": "aa",
@@ -119,7 +120,8 @@ def viseme_items_mpeg4_v2(self, context):
     ("kk", "kk", "K, G (velar stops)"),
     ("CH", "CH", "CH, J (affricates)"),
     ("SS", "SS", "S, Z, SH, ZH (narrow fricatives)"),
-    ("nn", "nn", "N, NG, L (nasals and laterals)"),
+    ("nn", "nn", "N, NG (nasals)"),
+    ("LL", "LL", "L (lateral)"),
     ("RR", "RR", "R (r-like sounds)"),
     ("aa", "aa", "A, Æ (open/low vowels)"),
     ("E", "E", "E, Ø, Ə (mid front vowels)"),
@@ -141,6 +143,7 @@ def phonemes_to_default_sprite_index():
     "CH": 10,
     "SS": 2,
     "nn": 10,
+    "LL": 10,
     "RR": 3,
     "aa": 11,
     "E": 8,

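To see the effect of splitting laterals out of the nasal group: mapping a phonemized word through the table now yields a distinct LL viseme for "l" sounds. A toy example using the dictionary from the diff above:

    # Assumes phoneme_to_viseme_arkit_v2 as defined in the diff.
    word_phonemes = ["l", "ɪ", "p"]  # phonemization of "lip"
    visemes = [phoneme_to_viseme_arkit_v2.get(p, "sil") for p in word_phonemes]
    # Before this change "l" mapped to "nn"; it now maps to "LL".
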
@@ -62,7 +62,26 @@ class LIPSYNC2D_OT_AnalyzeAudio(bpy.types.Operator):
             return {"CANCELLED"}

         self.set_bake_range()
-        file_path = extract_audio()
+        props = context.active_object.lipsync2d_props  # type: ignore
+        target_channel = props.lip_sync_2d_bake_channel
+
+        muted_strips = []
+        try:
+            if target_channel != "ALL":
+                target_ch_int = int(target_channel)
+                for strip in context.scene.sequence_editor.strips_all:
+                    if strip.type == "SOUND" and not strip.mute:
+                        if strip.channel != target_ch_int:
+                            strip.mute = True
+                            muted_strips.append(strip)
+
+            file_path = extract_audio()
+
+        finally:
+            # Restore mute state
+            for strip in muted_strips:
+                strip.mute = False

         if not os.path.isfile(f"{file_path}"):
             self.report(
@@ -90,15 +109,20 @@ class LIPSYNC2D_OT_AnalyzeAudio(bpy.types.Operator):
         phonemes = LIPSYNC2D_DialogInspector.extract_phonemes(words, context)

         auto_obj = self.get_animator(obj)
+        props = obj.lipsync2d_props  # type: ignore
+        debug_entries = [] if props.lip_sync_2d_debug_output else None
+
         auto_obj.setup(obj)
         self.auto_insert_keyframes(
-            auto_obj, obj, recognized_words, dialog_inspector, total_words, phonemes
+            auto_obj, obj, recognized_words, dialog_inspector, total_words, phonemes, debug_entries
         )
         auto_obj.set_interpolation(obj)
         auto_obj.cleanup(obj)
         self.reset_bake_range()
+
+        if debug_entries is not None:
+            self.write_debug_output(debug_entries)

         if bpy.context.view_layer:
             bpy.context.view_layer.update()
@@ -116,6 +140,7 @@ class LIPSYNC2D_OT_AnalyzeAudio(bpy.types.Operator):
         dialog_inspector: LIPSYNC2D_DialogInspector,
         total_words,
         phonemes,
+        debug_entries: list | None = None,
     ):
         props = obj.lipsync2d_props  # type: ignore
         words = enumerate(recognized_words)
@@ -123,9 +148,19 @@ class LIPSYNC2D_OT_AnalyzeAudio(bpy.types.Operator):
         for index, recognized_word in words:
             is_last_word = index == total_words - 1
             word_timing = dialog_inspector.get_word_timing(recognized_word)
+            current_phonemes = phonemes[index]
             visemes_data = dialog_inspector.get_visemes(
-                phonemes[index], word_timing["duration"]
+                current_phonemes, word_timing["duration"]
             )
+
+            if debug_entries is not None:
+                debug_entries.append({
+                    "word": recognized_word["word"],
+                    "phonemes": current_phonemes,
+                    "visemes": visemes_data,
+                    "start": word_timing["word_frame_start"],
+                })
+
             next_word_timing = dialog_inspector.get_next_word_timing(
                 recognized_words, index
             )
@@ -151,6 +186,54 @@ class LIPSYNC2D_OT_AnalyzeAudio(bpy.types.Operator):
                 index,
             )

+    def write_debug_output(self, entries):
+        text_name = "LipSync Debug"
+        text = bpy.data.texts.get(text_name)
+        if text is None:
+            text = bpy.data.texts.new(text_name)
+        else:
+            text.clear()
+
+        # Header
+        output = [
+            f"{'Word':<15} {'Start':<10} {'Phonemes':<15} {'Viseme':<10} {'Frame':<10}",
+            "-" * 60
+        ]
+
+        for entry in entries:
+            word = entry['word']
+            start_frame = entry['start']
+            phonemes = entry['phonemes']  # list of phoneme strings
+            phonemes_str = " ".join(phonemes)
+
+            viseme_data = entry['visemes']
+            visemes_list = viseme_data['visemes']
+            part_duration = viseme_data['visemes_parts']
+
+            # If there are no visemes, just print the word info
+            if not visemes_list:
+                output.append(f"{word:<15} {start_frame:<10} {phonemes_str:<15}")
+                continue
+
+            # Print the first viseme on the same line as the word info
+            first_viseme_frame = f"{start_frame:.2f}"
+            output.append(f"{word:<15} {start_frame:<10} {phonemes_str:<15} {visemes_list[0]:<10} {first_viseme_frame:<10}")
+
+            # Print the remaining visemes, advancing by one part duration each
+            current_frame = start_frame
+            for i in range(1, len(visemes_list)):
+                current_frame += part_duration
+                viseme = visemes_list[i]
+                output.append(f"{'':<15} {'':<10} {'':<15} {viseme:<10} {current_frame:.2f}")
+
+        text.write("\n".join(output))
+
     @staticmethod
     def get_animator(obj: BpyObject) -> LIPSYNC2D_LipSyncAnimator:
         props = obj.lipsync2d_props  # type: ignore

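The mute-and-restore pattern wrapped around `extract_audio()` generalizes nicely to a context manager; a sketch of the same idea, written against the VSE attributes the diff itself uses:

    from contextlib import contextmanager

    @contextmanager
    def solo_sound_channel(scene, channel: int):
        """Temporarily mute every unmuted sound strip outside `channel`."""
        muted = []
        try:
            for strip in scene.sequence_editor.strips_all:
                if strip.type == "SOUND" and not strip.mute and strip.channel != channel:
                    strip.mute = True
                    muted.append(strip)
            yield
        finally:
            for strip in muted:
                strip.mute = False
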
@@ -95,6 +95,8 @@ class AnimatorPanelMixin:
         row = panel_body.row()
         row.prop(self.props, "lip_sync_2d_use_clear_keyframes")
         row = panel_body.row()
+        row.prop(self.props, "lip_sync_2d_debug_output")
+        row = panel_body.row()
         row.label(text="Range:")
         row = panel_body.row()
         row.prop(self.props, "lip_sync_2d_use_bake_range")
@@ -102,3 +104,6 @@ class AnimatorPanelMixin:
         row.prop(self.props, "lip_sync_2d_bake_start", text="Start")
         row.prop(self.props, "lip_sync_2d_bake_end", text="End")
         row.enabled = self.props.lip_sync_2d_use_bake_range  # type: ignore
+
+        row = panel_body.row()
+        row.prop(self.props, "lip_sync_2d_bake_channel")

+73
-4
@@ -154,47 +154,93 @@ def get_lip_sync_type_items(self, context: BpyContext | None):


 def poll_pose_assets(self, obj: bpy.types.ID):
-    return bool(obj.asset_data)
+    # Ensure it's an Action
+    if not isinstance(obj, bpy.types.Action):
+        return False
+
+    # Must have asset data (be marked as a pose asset)
+    if not obj.asset_data:
+        return False
+
+    # Allow local, linked, and library override actions. This is necessary
+    # because linked library pose assets should still be usable.
+    return True
+
+
+def get_channel_items(self, context: BpyContext | None):
+    items = [("ALL", "All Channels", "Bake audio from all channels")]
+
+    if context is None or context.scene is None or context.scene.sequence_editor is None:
+        return intern_enum_items(items)
+
+    channels = set()
+    for strip in context.scene.sequence_editor.strips_all:
+        if strip.type == "SOUND" and not strip.mute:
+            channels.add(strip.channel)
+
+    for channel in sorted(channels):
+        c_data = context.scene.sequence_editor.channels[channel]
+        channel_name = f"{channel} - {c_data.name}"
+        if c_data.mute:
+            channel_name += " (Muted)"
+        items.append((str(channel), channel_name, f"Bake audio from Channel {channel}"))
+    return intern_enum_items(items)
+
+
 class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
+    lip_sync_2d_bake_channel: bpy.props.EnumProperty(
+        name="Channel",
+        description="Select specific channel to bake audio from",
+        items=get_channel_items
+    )  # type: ignore
+
     lip_sync_2d_initialized: bpy.props.BoolProperty(
         name="Initialize Lip Sync",
         description="Initialize Lip Sync on selection",
         default=False,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet: bpy.props.PointerProperty(
         name="Sprite Sheet",
         description="Sprite sheet image",
         type=bpy.types.Image,
         update=update_sprite_sheet,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_main_material: bpy.props.PointerProperty(
         name="Main Material",
         description="Material containing Sprite sheet",
         type=bpy.types.Material,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet_columns: bpy.props.IntProperty(
-        name="Columns", description="Total of columns in sprite sheet", default=1
+        name="Columns", description="Total of columns in sprite sheet", default=1,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet_rows: bpy.props.IntProperty(
         name="Rows",
         description="Total of rows in sprite sheet",
         update=update_sprite_sheet_rows,
         default=1,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet_sprite_scale: bpy.props.FloatProperty(
         name="Sprite",
         description="Adjust sprite scale so it fits in mouth area",
         default=1,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet_main_scale: bpy.props.FloatProperty(
-        name="Lips", description="Adjust Lips scale", default=1
+        name="Lips", description="Adjust Lips scale", default=1,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet_index: bpy.props.IntProperty(
         name="Sprite Index",
         description="Sprite Index. Start at 0, from Bottom Left to Top Right",
         default=1,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore
     lip_sync_2d_sprite_sheet_format: bpy.props.EnumProperty(
         name="Sprite sheet format",
@@ -215,6 +261,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         ],
         update=update_sprite_sheet_format,
         default=3,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_lips_type: bpy.props.EnumProperty(
@@ -223,6 +270,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         items=get_lip_sync_type_items,
         update=update_sprite_sheet_format,
         default=0,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_in_between_threshold: bpy.props.FloatProperty(
@@ -231,6 +279,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         default=0.0417,
         subtype="TIME",
         unit="TIME_ABSOLUTE",
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_sil_threshold: bpy.props.FloatProperty(
@@ -239,6 +288,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         default=0.22,
         subtype="TIME",
         unit="TIME_ABSOLUTE",
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_sps_in_between_threshold: bpy.props.FloatProperty(
@@ -247,6 +297,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         default=0.0417,
         subtype="TIME",
         unit="TIME_ABSOLUTE",
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_sps_sil_threshold: bpy.props.FloatProperty(
@@ -255,6 +306,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         default=0.22,
         subtype="TIME",
         unit="TIME_ABSOLUTE",
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_close_motion_duration: bpy.props.FloatProperty(
@@ -263,30 +315,41 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         default=0.2,
         subtype="TIME",
         unit="TIME_ABSOLUTE",
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_remove_animation_data: bpy.props.BoolProperty(
         name="Remove Animation",
         description="Also remove action, action slot and keyframes",
         default=True,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_remove_cgp_node_group: bpy.props.BoolProperty(
         name="Remove Nodes",
         description="Also remove node groups from Object's Materials",
         default=True,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_use_clear_keyframes: bpy.props.BoolProperty(
         name="Clear Keyframes",
         description="Clear Keyframes before Bake",
         default=True,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_use_bake_range: bpy.props.BoolProperty(
         name="Use Range",
         description="Only bake between specified range",
         default=False,
+        override={'LIBRARY_OVERRIDABLE'}
+    )  # type: ignore
+
+    lip_sync_2d_debug_output: bpy.props.BoolProperty(
+        name="Debug Output",
+        description="Output phoneme and viseme data to a text block",
+        default=False,
     )  # type: ignore

     lip_sync_2d_bake_start: bpy.props.IntProperty(
@@ -296,6 +359,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         min=0,
         set=set_bake_start,
         get=get_bake_start,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_bake_end: bpy.props.IntProperty(
@@ -305,6 +369,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         min=0,
         set=set_bake_end,
         get=get_bake_end,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_rig_type_basic: bpy.props.BoolProperty(
@@ -315,6 +380,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
         ),
         default=True,
         update=update_rig_type_basic,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_rig_type_advanced: bpy.props.BoolProperty(
@@ -325,6 +391,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
             "Only use this if Basic Rig is not working."
         ),
         update=update_rig_type_advanced,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     lip_sync_2d_prioritize_accuracy: bpy.props.BoolProperty(
@@ -335,6 +402,7 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
             "from being skipped when they occur in rapid succession."
         ),
         default=False,
+        override={'LIBRARY_OVERRIDABLE'}
     )  # type: ignore

     @classmethod
@@ -373,5 +441,6 @@ class LIPSYNC2D_PG_CustomProperties(bpy.types.PropertyGroup):
             name=f"Viseme {name}",
             description=desc,
             poll=poll_pose_assets,
-        ),
+            override={'LIBRARY_OVERRIDABLE'}
+        ),  # type: ignore
     )

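A side note on `intern_enum_items` in the dynamic `items=` callback: Blender does not hold references to strings returned by dynamic enum callbacks, so they can be garbage-collected and corrupt the UI. The helper's implementation is not shown in this commit; presumably it caches the tuples at module level, roughly like this hypothetical sketch:

    # Hypothetical sketch of what an intern_enum_items() helper guards against.
    # Cached lists keep the enum strings alive across redraws.
    _enum_cache: dict[tuple, list] = {}

    def intern_enum_items(items):
        key = tuple(items)
        return _enum_cache.setdefault(key, items)
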
@@ -38,7 +38,10 @@ def register():
     bpy.utils.register_class(LIPSYNC2D_PT_Edit)
     bpy.utils.register_class(LIPSYNC2D_OT_RemoveAnimations)
     bpy.utils.register_class(LIPSYNC2D_OT_refresh_pose_assets)
-    bpy.types.Object.lipsync2d_props = bpy.props.PointerProperty(type=LIPSYNC2D_PG_CustomProperties)  # type: ignore
+    bpy.types.Object.lipsync2d_props = bpy.props.PointerProperty(
+        type=LIPSYNC2D_PG_CustomProperties,
+        override={'LIBRARY_OVERRIDABLE'}
+    )  # type: ignore


 def unregister():

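Marking both the pointer property and the group's individual fields as `LIBRARY_OVERRIDABLE` is what lets animators change these settings on a linked character without editing the source file. A hedged usage sketch, with the object name assumed:

    import bpy

    # Assuming a character object linked from a library file:
    linked = bpy.data.objects["Character"]  # hypothetical linked object
    override = linked.override_create(remap_local_usages=True)
    # Because lipsync2d_props is LIBRARY_OVERRIDABLE, this edit now sticks:
    override.lipsync2d_props.lip_sync_2d_use_bake_range = True
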
@@ -1,6 +1,6 @@
 schema_version = "1.0.0"
 id = "iocgpoly_lip_sync"
-version = "2.3.2"
+version = "1.0.6"
 name = "Lip Sync"
 tagline = "Automatic lip sync for your Blender models"
 maintainer = "Charley 3D <charley@cgpoly.fr>"
@@ -30,11 +30,3 @@ paths_exclude_pattern = [
     "/scripts/",
     ".releaserc"
 ]
-
-
-# BEGIN GENERATED CONTENT.
-# This must not be included in source manifests.
-[build.generated]
-platforms = ["windows-x64"]
-wheels = ["./wheels/attrs-25.3.0-py3-none-any.whl", "./wheels/babel-2.17.0-py3-none-any.whl", "./wheels/cffi-1.17.1-cp311-cp311-win_amd64.whl", "./wheels/csvw-3.5.1-py2.py3-none-any.whl", "./wheels/dlinfo-2.0.0-py3-none-any.whl", "./wheels/isodate-0.7.2-py3-none-any.whl", "./wheels/joblib-1.4.2-py3-none-any.whl", "./wheels/jsonschema-4.23.0-py3-none-any.whl", "./wheels/jsonschema_specifications-2024.10.1-py3-none-any.whl", "./wheels/language_tags-1.2.0-py3-none-any.whl", "./wheels/phonemizer-3.3.0-py3-none-any.whl", "./wheels/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", "./wheels/rdflib-7.1.4-py3-none-any.whl", "./wheels/referencing-0.36.2-py3-none-any.whl", "./wheels/regex-2024.11.6-cp311-cp311-win_amd64.whl", "./wheels/rfc3986-1.5.0-py2.py3-none-any.whl", "./wheels/rpds_py-0.24.0-cp311-cp311-win_amd64.whl", "./wheels/segments-2.3.0-py2.py3-none-any.whl", "./wheels/six-1.17.0-py2.py3-none-any.whl", "./wheels/tqdm-4.67.1-py3-none-any.whl", "./wheels/typing_extensions-4.13.2-py3-none-any.whl", "./wheels/uritemplate-4.1.1-py2.py3-none-any.whl", "./wheels/vosk-0.3.41-py3-none-win_amd64.whl"]
-# END GENERATED CONTENT.

@@ -0,0 +1,169 @@
import hashlib
import os
import shutil
import zipfile
from collections import defaultdict
from glob import glob

import tomlkit


def update_wheels():
    """Sync the "wheels" entry in blender_manifest.toml with the ./wheels folder."""
    toml_path = "blender_manifest.toml"

    wheel_paths = glob("./wheels/**/*.whl", recursive=True)
    print(wheel_paths)
    clean = [path.replace("\\", "/") for path in wheel_paths]

    # Load the TOML file
    with open(toml_path, "r", encoding="utf-8") as f:
        toml_data = tomlkit.load(f)

    # Update the "wheels" entry
    toml_data["wheels"] = clean

    # Write back to the TOML file, preserving formatting and comments
    with open(toml_path, "w", encoding="utf-8") as f:
        f.write(tomlkit.dumps(toml_data))


def build_addon():
    current_dir = os.getcwd()
    exclude_patterns = [
        '.venv',
        '.gitignore',
        '.git',
        '.idea',
        '__pycache__',
        'dist',
        'dev_tools.py'
    ]

    dist_dir = os.path.join(current_dir, "dist")

    if not os.path.exists(dist_dir):
        os.makedirs(dist_dir)

    with open("blender_manifest.toml", "r") as f:
        parsed = tomlkit.parse(f.read())
        version = parsed["version"]
        addon_id = parsed["id"]

    with zipfile.ZipFile(f"{current_dir}/dist/{addon_id}_{version}.zip", "w") as zip_file:
        for root, dirs, files in os.walk(f"{current_dir}"):
            # Prune excluded directories in place so os.walk skips them
            dirs[:] = [d for d in dirs if os.path.relpath(os.path.join(root, d), current_dir) not in exclude_patterns]

            for file in files:
                file_path = os.path.relpath(os.path.join(root, file), current_dir)
                # Skip files based on exclude patterns
                if any(file_path.startswith(pattern) or file_path in exclude_patterns for pattern in exclude_patterns):
                    continue

                # Add the file to the zip archive
                zip_file.write(os.path.join(root, file), file_path)

    print(f"Build complete. Addon file saved to {dist_dir}")


def handle_duplicate_wheels(directory: str):
    """
    Find and handle duplicate .whl files in the given directory and its subdirectories.
    Keeps files in ./wheels/common and removes duplicates. If a file is not in ./wheels/common,
    moves one to ./wheels/common and deletes the others.

    :param directory: str: The path to the directory to search for .whl files.
    :return: None
    """
    # Path to the common directory
    common_path = os.path.join(directory, "common")
    # Create the common directory if it doesn't exist
    os.makedirs(common_path, exist_ok=True)

    # Dictionary to store .whl filenames and their paths
    files_dict = defaultdict(list)

    # Traverse the directory and its subdirectories
    for root, _, files in os.walk(directory):
        for file in files:
            print(f"Processing file: {file}")

            # Check if the file is a .whl file
            if file.endswith(".whl"):
                # Store the file and its full path in the dictionary
                full_path = os.path.join(root, file)
                files_dict[file].append(full_path)

    # Handle duplicates (files with more than one associated path)
    for file, paths in files_dict.items():
        if len(paths) > 1:  # Check if there are duplicates
            print(f"\nDuplicate found for: {file}")
            print("Locations:")
            for path in paths:
                print(f"  - {path}")

            # Check if the file already exists in the common directory
            common_file_path = os.path.join(common_path, file)
            if os.path.exists(common_file_path):
                # Remove all duplicates, as the file already exists in common
                print(f"File already in common directory: {common_file_path}")
                for path in paths:
                    if path != common_file_path:
                        print(f"Removing duplicate: {path}")
                        os.remove(path)
            else:
                # Move one file to common and remove the others
                print(f"Moving one copy to common directory: {common_file_path}")
                shutil.move(paths[0], common_file_path)  # Move the first file to common
                for path in paths[1:]:
                    print(f"Removing duplicate: {path}")
                    os.remove(path)
        else:
            print(f"No duplicates for: {file}")


def md5_for_folder(folder_path: str):
    """
    Compute the MD5 hash of the contents of a folder including file names and contents.

    :param folder_path: str: Path to the folder.
    :return: str: The MD5 hash of the folder.
    """
    md5_hash = hashlib.md5()

    # Walk through the directory
    for root, dirs, files in os.walk(folder_path):
        # Sort directories and files to ensure consistent order (important for consistent hash values)
        for names in sorted(dirs + files):
            # Update hash with file/folder name
            path = os.path.join(root, names)
            md5_hash.update(names.encode('utf-8'))

            # If it's a file, include its content in the hash
            if os.path.isfile(path):
                with open(path, 'rb') as f:
                    while chunk := f.read(8192):  # Read file in chunks
                        md5_hash.update(chunk)

    return md5_hash.hexdigest()


if __name__ == "__main__":
    update_wheels()
    # Example usage:
    # folder = "./Assets/Archives/darwin/espeak-ng-darwin/espeak-ng-data"
    # print(f"MD5 Hash for the folder '{folder}': {md5_for_folder(folder)}")

    # folder = "./Assets/Archives/linux/espeak-ng-data/lang"
    # print(f"MD5 Hash for the folder '{folder}': {md5_for_folder(folder)}")

    # folder = "./Assets/Archives/windows/espeak-ng-data/lang"
    # print(f"MD5 Hash for the folder '{folder}': {md5_for_folder(folder)}")

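One detail worth calling out in `build_addon()`: assigning to `dirs[:]` (rather than rebinding `dirs`) mutates the list that `os.walk` is iterating, which is the documented way to prune whole subtrees from the walk. A tiny self-contained demonstration:

    import os

    # Pruning example: skip every __pycache__ subtree during a walk.
    for root, dirs, files in os.walk("."):
        dirs[:] = [d for d in dirs if d != "__pycache__"]  # in place, so walk skips them
        for name in files:
            print(os.path.join(root, name))
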
@@ -0,0 +1,13 @@
import sys

from tomlkit import parse, dumps  # type: ignore

version = sys.argv[1]
toml_path = "blender_manifest.toml"

with open(toml_path, "r", encoding="utf-8") as f:
    doc = parse(f.read())

doc["version"] = version

with open(toml_path, "w", encoding="utf-8") as f:
    f.write(dumps(doc))

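The point of using tomlkit here rather than a plain TOML writer is round-trip fidelity: comments and key order in blender_manifest.toml survive the rewrite. A quick illustration:

    from tomlkit import parse, dumps

    doc = parse('# keep me\nversion = "1.0.5"\n')
    doc["version"] = "1.0.6"
    print(dumps(doc))  # comment and layout survive: '# keep me\nversion = "1.0.6"\n'
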
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
LFS
BIN
Binary file not shown.
LFS
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
@@ -1,62 +1,41 @@
-import bpy
-
-
-class RemoveStaticFcurvesOperator(bpy.types.Operator):
-    """Operator to remove static FCurves"""
-    bl_idname = "graph.remove_static_fcurves"
-    bl_label = "Remove Static FCurves"
-    bl_options = {"REGISTER", "UNDO"}
-
-    def execute(self, context):
-        has_selection = any(obj.select_get()
-                            for obj in bpy.context.selected_objects)
-
-        if not has_selection:
-            self.report({'ERROR_INVALID_INPUT'}, "Please select objects.")
-        else:
-            self.remove_static_fcurves()
-            self.report({'INFO'}, "Removed static animation channels.")
-        return {'FINISHED'}
-
-    @staticmethod
-    def is_static_fcurve(fcurve):
-        """Check if an FCurve is static (all keyframes have the same value)."""
-        keyframes = fcurve.keyframe_points
-        if len(keyframes) < 2:
-            return True  # A single keyframe is considered static
-
-        first_value = keyframes[0].co[1]
-        return all(kf.co[1] == first_value for kf in keyframes)
-
-    @staticmethod
-    def remove_static_fcurves():
-        """Remove static FCurves that have no data."""
-        for obj in bpy.context.selected_objects:
-            if obj.animation_data and obj.animation_data.action:
-                action = obj.animation_data.action
-                fcurves_to_remove = [
-                    fcurve for fcurve in action.fcurves
-                    if RemoveStaticFcurvesOperator.is_static_fcurve(fcurve)]
-
-                for fcurve in fcurves_to_remove:
-                    action.fcurves.remove(fcurve)
-
-
-def menu_func(self, context):
-    self.layout.operator(RemoveStaticFcurvesOperator.bl_idname)
+"""
+Remove Static FCurves - Blender Extension
+Removes animation channels with static (unchanging) values
+"""
+
+bl_info = {
+    "name": "Remove Static FCurves",
+    "author": "lokimckay",
+    "version": (0, 3, 0),
+    "blender": (5, 0, 0),
+    "location": "Graph Editor > Channel > Remove Static FCurves",
+    "description": "Remove animation channels with static (unchanging) values",
+    "category": "Animation",
+}
+
+# Handle module reloading
+if "bpy" in locals():
+    import importlib
+    if "operator" in locals():
+        importlib.reload(operator)
+    if "utils" in locals():
+        importlib.reload(utils)
+    print("[Remove Static FCurves] Reloading modules...")
+else:
+    from . import operator
+    from . import utils


 def register():
-    bpy.utils.register_class(RemoveStaticFcurvesOperator)
-    bpy.types.GRAPH_MT_channel.append(menu_func)
-    bpy.types.DOPESHEET_MT_channel.append(menu_func)
-    print("[Remove Static FCurves] registered")
+    """Register all classes and handlers"""
+    operator.register()
+    print("[Remove Static FCurves] Registered")


 def unregister():
-    bpy.utils.unregister_class(RemoveStaticFcurvesOperator)
-    bpy.types.GRAPH_MT_channel.remove(menu_func)
-    bpy.types.DOPESHEET_MT_channel.remove(menu_func)
-    print("[Remove Static FCurves] unregistered")
+    """Unregister all classes and handlers"""
+    operator.unregister()
+    print("[Remove Static FCurves] Unregistered")


 if __name__ == "__main__":

@@ -3,7 +3,7 @@ schema_version = "1.0.0"
 # Example of manifest file for a Blender extension
 # Change the values according to your extension
 id = "remove_static_fcurves"
-version = "0.2.1"
+version = "0.4.0"
 name = "Remove Static FCurves"
 tagline = "Clean up animation channels that have no data"
 maintainer = "Loki McKay <lokimckay@gmail.com>"
@@ -24,9 +24,7 @@ blender_version_min = "4.2.0"
 
 # License conforming to https://spdx.org/licenses/ (use "SPDX: prefix)
 # https://docs.blender.org/manual/en/dev/advanced/extensions/licenses.html
-license = [
-  "SPDX:GPL-3.0-or-later",
-]
+license = ["SPDX:GPL-3.0-or-later"]
 # # Optional: required by some licenses.
 # copyright = [
 #   "2002-2024 Developer Name",
@@ -72,4 +70,4 @@ license = [
 # "__pycache__/",
 # "/.git/",
 # "/*.zip",
 # ]
@@ -0,0 +1,129 @@
"""
Operators for removing static FCurves
"""

import bpy
from . import utils


class RemoveStaticFcurvesOperator(bpy.types.Operator):
    """Operator to remove static FCurves"""
    bl_idname = "graph.remove_static_fcurves"
    bl_label = "Remove Static FCurves"
    bl_options = {"REGISTER", "UNDO"}

    def execute(self, context):
        # Get objects with animation data
        objects_to_check = set()

        # Add selected objects
        objects_to_check.update(
            obj for obj in context.selected_objects if obj.animation_data)

        # Add visible objects from the Graph Editor if we're in that context
        if context.area and context.area.type == 'GRAPH_EDITOR':
            for obj in context.scene.objects:
                if obj.animation_data and obj.animation_data.action:
                    objects_to_check.add(obj)

        # Also check the active object specifically
        if context.active_object and context.active_object.animation_data:
            objects_to_check.add(context.active_object)

        if not objects_to_check:
            self.report({'WARNING'}, "No objects with animation data found.")
            return {'CANCELLED'}

        print("\n=== Remove Static FCurves ===")
        print(f"Checking {len(objects_to_check)} object(s)")

        removed_count = self.remove_static_fcurves(objects_to_check)

        if removed_count > 0:
            self.report(
                {'INFO'}, f"Removed {removed_count} static animation channel(s).")
        else:
            self.report({'INFO'}, "No static FCurves found to remove.")

        return {'FINISHED'}

    @staticmethod
    def is_static_fcurve(fcurve):
        """Check if an FCurve is static (all keyframes have the same value)."""
        # If the curve has modifiers, keep it
        if fcurve.modifiers and len(fcurve.modifiers) > 0:
            return False

        keyframes = fcurve.keyframe_points
        if len(keyframes) < 2:
            return True  # A single keyframe is considered static

        first_value = keyframes[0].co[1]
        tolerance = 0.0001  # Small tolerance for floating point comparison
        return all(abs(kf.co[1] - first_value) < tolerance for kf in keyframes)

    @staticmethod
    def remove_static_fcurves(objects):
        """Remove static FCurves from the given objects."""
        removed_count = 0

        for obj in objects:
            if not obj.animation_data or not obj.animation_data.action:
                continue

            print(f"\nProcessing object: {obj.name}")
            action = obj.animation_data.action
            print(f"  Action: {action.name}")

            # Get FCurves using the compatibility function
            fcurves_data = utils.compat.get_fcurves_from_object(obj)

            if not fcurves_data:
                print("  No FCurves found")
                continue

            print(f"  Found {len(fcurves_data)} FCurve(s)")

            # Check each FCurve
            for action, fcurve, slot in fcurves_data:
                is_static = RemoveStaticFcurvesOperator.is_static_fcurve(fcurve)

                if is_static:
                    data_path = fcurve.data_path
                    array_idx = fcurve.array_index
                    print(f"  Removing static FCurve: {data_path}[{array_idx}]")

                    # Remove using the compatibility function
                    if utils.compat.remove_action_fcurve(action, fcurve, slot):
                        removed_count += 1
                    else:
                        print("  Failed to remove FCurve")
                else:
                    print(
                        f"  Keeping animated FCurve: {fcurve.data_path}[{fcurve.array_index}]")

        return removed_count


def menu_func(self, context):
    self.layout.operator(RemoveStaticFcurvesOperator.bl_idname)


def register():
    bpy.utils.register_class(RemoveStaticFcurvesOperator)
    bpy.types.GRAPH_MT_channel.append(menu_func)
    bpy.types.DOPESHEET_MT_channel.append(menu_func)
    print("[RemoveStaticFcurvesOperator] registered")


def unregister():
    bpy.utils.unregister_class(RemoveStaticFcurvesOperator)
    bpy.types.GRAPH_MT_channel.remove(menu_func)
    bpy.types.DOPESHEET_MT_channel.remove(menu_func)
    print("[RemoveStaticFcurvesOperator] unregistered")


if __name__ == "__main__":
    register()
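Once registered, the operator above is reachable from the Channel menus it appends to, or directly through its `bl_idname` (a quick console sketch):

# "graph.remove_static_fcurves" registers as bpy.ops.graph.remove_static_fcurves
import bpy
bpy.ops.graph.remove_static_fcurves()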
@@ -0,0 +1,14 @@
Metadata-Version: 2.4
Name: remove-static-fcurves
Version: 0.1.0
Summary: Remove fcurves from Blender animations that don't change at all.
Author-email: lokimckay <lokimckay@gmail.com>
License: MIT
Requires-Python: ==3.10.*
Description-Content-Type: text/markdown
License-File: LICENSE
Dynamic: license-file

# Remove Static FCurves

Cleans up animation channels that don't contain any animation data
+12
@@ -0,0 +1,12 @@
LICENSE
README.md
pyproject.toml
src/__init__.py
src/operator.py
src/remove_static_fcurves.egg-info/PKG-INFO
src/remove_static_fcurves.egg-info/SOURCES.txt
src/remove_static_fcurves.egg-info/dependency_links.txt
src/remove_static_fcurves.egg-info/top_level.txt
src/utils/__init__.py
src/utils/compat.py
src/utils/version.py
+1
@@ -0,0 +1 @@

+3
@@ -0,0 +1,3 @@
__init__
operator
utils
@@ -0,0 +1,18 @@
"""
Utility modules for Remove Static FCurves extension
"""

# Handle module reloading
if "bpy" in locals():
    import importlib
    if "compat" in locals():
        importlib.reload(compat)
    if "version" in locals():
        importlib.reload(version)
else:
    from . import compat
    from . import version

import bpy

__all__ = ["compat", "version"]
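The `if "bpy" in locals()` guard in both `__init__` files is the usual Blender reload idiom: module globals persist across Blender's Reload Scripts, so on the first import no names are bound and the `else` branch runs, while on a reload the previously bound names are still present and get refreshed with `importlib.reload`. A stripped-down sketch (`submodule` is a placeholder, not part of the extension):

# Reload-idiom sketch: module globals survive Blender script reloads.
if "submodule" in locals():       # later loads: the name survived
    import importlib
    importlib.reload(submodule)   # pick up edited source
else:                             # first load: bind the name
    from . import submodule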
@@ -0,0 +1,144 @@
"""
API compatibility functions for handling differences between Blender versions.
"""
import bpy
from bpy_extras import anim_utils
from . import version


def get_action_fcurves(action):
    """
    Get the fcurves collection from an action, handling version differences.

    In Blender 4.x: action.fcurves
    In Blender 5.0+: Uses channelbag.fcurves via action slots

    Args:
        action: The action object

    Returns:
        Collection of fcurves, or None if not available
    """
    if version.is_version_at_least(5, 0, 0):
        # Blender 5.0+ uses channelbags and slots
        # Try to get fcurves from all slots in the action
        all_fcurves = []

        # Iterate through all slots in the action
        if hasattr(action, 'slots') and action.slots:
            for slot in action.slots:
                try:
                    channelbag = anim_utils.action_get_channelbag_for_slot(
                        action, slot)
                    if channelbag and hasattr(channelbag, 'fcurves'):
                        all_fcurves.extend(channelbag.fcurves)
                except (AttributeError, RuntimeError):
                    continue

        return all_fcurves if all_fcurves else None
    else:
        # Blender 4.x uses direct fcurves attribute
        if hasattr(action, 'fcurves'):
            return action.fcurves

    return None


def get_fcurves_from_object(obj):
    """
    Get FCurves from an object's animation data, handling version differences.

    Args:
        obj: The object with animation data

    Returns:
        List of (action, fcurve, slot) tuples, or empty list
    """
    if not obj.animation_data or not obj.animation_data.action:
        return []

    action = obj.animation_data.action
    fcurves_list = []

    if version.is_version_at_least(5, 0, 0):
        # Blender 5.0+: Get the channelbag for the object's current action slot
        if hasattr(obj.animation_data, 'action_slot') and obj.animation_data.action_slot:
            try:
                channelbag = anim_utils.action_get_channelbag_for_slot(
                    action,
                    obj.animation_data.action_slot
                )
                if channelbag and hasattr(channelbag, 'fcurves'):
                    for fcurve in channelbag.fcurves:
                        fcurves_list.append(
                            (action, fcurve, obj.animation_data.action_slot))
            except (AttributeError, RuntimeError) as e:
                print(f"Error getting channelbag for {obj.name}: {e}")
        else:
            # Fallback: try all slots if no specific slot is set
            if hasattr(action, 'slots') and action.slots:
                for slot in action.slots:
                    try:
                        channelbag = anim_utils.action_get_channelbag_for_slot(
                            action, slot)
                        if channelbag and hasattr(channelbag, 'fcurves'):
                            for fcurve in channelbag.fcurves:
                                fcurves_list.append((action, fcurve, slot))
                    except (AttributeError, RuntimeError):
                        continue
    else:
        # Blender 4.x: Direct access to fcurves
        if hasattr(action, 'fcurves'):
            for fcurve in action.fcurves:
                fcurves_list.append((action, fcurve, None))

    return fcurves_list


def remove_action_fcurve(action, fcurve, slot=None):
    """
    Remove an fcurve from an action, handling version differences.

    Args:
        action: The action object
        fcurve: The fcurve to remove
        slot: (Blender 5.0+) The action slot containing the fcurve

    Returns:
        bool: True if removal succeeded, False otherwise
    """
    if version.is_version_at_least(5, 0, 0):
        # Blender 5.0+: Need to remove from the channelbag
        if slot is None:
            # Try to find which slot contains this fcurve
            if hasattr(action, 'slots'):
                for try_slot in action.slots:
                    try:
                        channelbag = anim_utils.action_get_channelbag_for_slot(
                            action, try_slot)
                        if channelbag and hasattr(channelbag, 'fcurves'):
                            if fcurve in channelbag.fcurves:
                                channelbag.fcurves.remove(fcurve)
                                return True
                    except (ValueError, AttributeError, RuntimeError):
                        continue
        else:
            # We know which slot to use
            try:
                channelbag = anim_utils.action_get_channelbag_for_slot(
                    action, slot)
                if channelbag and hasattr(channelbag, 'fcurves'):
                    channelbag.fcurves.remove(fcurve)
                    return True
            except (ValueError, AttributeError, RuntimeError) as e:
                print(f"Error removing fcurve: {e}")
    else:
        # Blender 4.x: Direct removal
        if hasattr(action, 'fcurves'):
            try:
                action.fcurves.remove(fcurve)
                return True
            except (ValueError, AttributeError):
                pass

    return False
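All three helpers funnel callers toward one shape, `(action, fcurve, slot)` tuples where `slot` is `None` on Blender 4.x, so calling code never branches on the version itself. A hypothetical consumer (the import path assumes the extension is importable as `remove_static_fcurves`; adjust to your install):

# Sketch of consuming the compat layer above.
import bpy
from remove_static_fcurves.utils import compat  # assumed package name

obj = bpy.context.active_object
for action, fcurve, slot in compat.get_fcurves_from_object(obj):
    # slot is an action slot on 5.0+, None on 4.x; either way it can be
    # handed straight back to compat.remove_action_fcurve(action, fcurve, slot)
    print(action.name, fcurve.data_path, fcurve.array_index, slot)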
@@ -0,0 +1,70 @@
"""
Version detection and comparison utilities for multi-version Blender support.
"""

import bpy


def get_blender_version():
    """
    Returns the current Blender version as a tuple (major, minor, patch).

    Returns:
        tuple: (major, minor, patch) version numbers
    """
    return bpy.app.version


def get_version_string():
    """
    Returns the current Blender version as a string (e.g., "4.2.0").

    Returns:
        str: Version string in format "major.minor.patch"
    """
    version = get_blender_version()
    return f"{version[0]}.{version[1]}.{version[2]}"


def is_version_at_least(major, minor=0, patch=0):
    """
    Check if the current Blender version is at least the specified version.

    Args:
        major (int): Major version number
        minor (int): Minor version number (default: 0)
        patch (int): Patch version number (default: 0)

    Returns:
        bool: True if current version >= specified version
    """
    current = get_blender_version()
    target = (major, minor, patch)

    if current[0] != target[0]:
        return current[0] > target[0]
    if current[1] != target[1]:
        return current[1] > target[1]
    return current[2] >= target[2]


def is_version_less_than(major, minor=0, patch=0):
    """
    Check if the current Blender version is less than the specified version.

    Args:
        major (int): Major version number
        minor (int): Minor version number (default: 0)
        patch (int): Patch version number (default: 0)

    Returns:
        bool: True if current version < specified version
    """
    return not is_version_at_least(major, minor, patch)


def is_version_5_0():
    """Check if running Blender 5.0 or later."""
    return is_version_at_least(5, 0, 0)
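Since Python compares tuples lexicographically and `bpy.app.version` is already a `(major, minor, patch)` tuple, the three-step comparison in `is_version_at_least` collapses to a one-liner; a sketch of the equivalent form, not the shipped code:

# Equivalent: lexicographic tuple comparison matches the longhand above.
def is_version_at_least(major, minor=0, patch=0):
    return tuple(bpy.app.version) >= (major, minor, patch)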
@@ -1,59 +0,0 @@
# BasedPlayblast

**Easily create playblasts from Blender**

BasedPlayblast is a Blender addon that streamlines the process of creating video playblasts for animation review. It provides optimized render settings for fast preview generation while maintaining visual quality suitable for review purposes.

## Features

- **Fast Playblast Creation**: Optimized render settings for different preview modes (Solid, Material, Rendered)
- **Multiple Display Modes**: Support for Wireframe, Solid, Material Preview, and Rendered modes
- **Flexible Resolution**: Scene, preset, or custom resolution options
- **Video Format Support**: MP4, MOV, AVI, MKV with various codecs (H.264, H.265, AV1, etc.)
- **Metadata Integration**: Automatic inclusion of frame numbers, camera info, and custom notes
- **Settings Management**: Apply and restore render settings without losing your project configuration
- **Flamenco Support**: Custom Flamenco Job Script with a simple, non-destructive workflow

## Installation

### Via BlenderKit's Extension Repository (Recommended)
1. Open Blender (5.0+)
2. Install BlenderKit via https://www.blenderkit.com/get-blenderkit/
3. Open Preferences (Ctrl + ,)
4. Go to **Edit > Preferences > Get Extensions**
5. Search for "BasedPlayblast"
6. Click **Install**
7. Enjoy automatic updating!

### Manual Installation
1. Download the latest release, or the release that supports your intended Blender version
2. In Blender, go to **Edit > Preferences > Add-ons**
3. Click **Install from Disk** and select the downloaded file
4. Enable the addon in the list

## Usage

1. **Locate the Panel**: Go to **Properties > Output > BasedPlayblast**
2. **Configure Settings**: Set your output path, resolution, and display mode
3. **Create Playblast**: Click the **PLAYBLAST** button
4. **View Result**: Click **VIEW** to open the generated video

- **Apply Blast Settings**: Use this button to apply optimized render settings without rendering
  - Intended particularly for Flamenco. Apply, check the resultant render settings to ensure they're correct, then send to Flamenco using the BasedPlayblast custom Job type.
- **Restore Original Settings**: Return to your original render configuration
- **Display Modes**:
  - **Wireframe/Solid**
    - Fast workbench viewport rendering. Recommended for short and/or locally-blasted projects.
  - **Material**
  - **Rendered**

## Requirements

- Blender 5.0.0 or higher
- Python 3.x (included with Blender)

## Support

- **Documentation**: [GitHub Repository](https://github.com/RaincloudTheDragon/BasedPlayblast)
- **Issues**: Report bugs or request features on GitHub
- **License**: GPL-3.0-or-later
File diff suppressed because it is too large
@@ -1,28 +0,0 @@
schema_version = "1.0.0"

id = "basedplayblast"
name = "BasedPlayblast"
tagline = "Easily create playblasts from Blender and Flamenco"
version = "2.4.0"
type = "add-on"

maintainer = "RaincloudTheDragon <raincloudthedragon@gmail.com>"
license = ["GPL-3.0-or-later"]
blender_version_min = "4.2.0"

website = "https://github.com/RaincloudTheDragon/BasedPlayblast"

tags = ["Animation", "Render", "Workflow", "Video"]

[permissions]
files = "Import/export files and data"

[build]
paths_exclude_pattern = [
  "__pycache__/",
  "*.pyc",
  ".git/",
  ".github/",
  "addon_updater*",
  "basedplayblast_updater/"
]
@@ -1,133 +0,0 @@
import bpy  # type: ignore
from bpy.types import AddonPreferences, Panel  # type: ignore
from bpy.props import BoolProperty  # type: ignore
from .panels import bulk_viewport_display
from .panels import bulk_data_remap
from .panels import bulk_path_management
from .panels import bulk_scene_general
from .ops.AutoMatExtractor import AutoMatExtractor, AUTOMAT_OT_summary_dialog
from .ops.Rename_images_by_mat import Rename_images_by_mat, RENAME_OT_summary_dialog
from .ops.FreeGPU import BST_FreeGPU
from .ops import ghost_buster


# Addon preferences class for update settings
class BST_AddonPreferences(AddonPreferences):
    bl_idname = __package__

    # AutoMat Extractor settings
    automat_common_outside_blend: BoolProperty(
        name="Place 'common' folder outside 'blend' folder",
        description="If enabled, the 'common' folder for shared textures will be placed directly in 'textures/'. If disabled, it will be placed inside 'textures/<blend_name>/'",
        default=False,
    )

    def draw(self, context):
        layout = self.layout

        # AutoMat Extractor settings
        box = layout.box()
        box.label(text="AutoMat Extractor Settings")
        row = box.row()
        row.prop(self, "automat_common_outside_blend")


# Main panel for Bulk Scene Tools
class VIEW3D_PT_BulkSceneTools(Panel):
    """Bulk Scene Tools Panel"""
    bl_label = "Bulk Scene Tools"
    bl_idname = "VIEW3D_PT_bulk_scene_tools"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Edit'

    def draw(self, context):
        layout = self.layout
        layout.label(text="Tools for bulk operations on scene data")


# List of all classes in this module
classes = (
    VIEW3D_PT_BulkSceneTools,
    BST_AddonPreferences,
    AutoMatExtractor,
    AUTOMAT_OT_summary_dialog,
    Rename_images_by_mat,
    RENAME_OT_summary_dialog,
    BST_FreeGPU,
)


def register():
    # Register classes from this module (do this first to ensure preferences are available)
    for cls in classes:
        bpy.utils.register_class(cls)

    # Print debug info about preferences
    try:
        prefs = bpy.context.preferences.addons.get(__package__)
        if prefs:
            print(f"Addon preferences registered successfully: {prefs}")
        else:
            print("WARNING: Addon preferences not found after registration!")
            print(f"Available addons: {', '.join(bpy.context.preferences.addons.keys())}")
    except Exception as e:
        print(f"Error accessing preferences: {str(e)}")

    # Register modules
    bulk_scene_general.register()
    bulk_viewport_display.register()
    bulk_data_remap.register()
    bulk_path_management.register()
    ghost_buster.register()

    # Add keybind for Free GPU (global context)
    wm = bpy.context.window_manager
    kc = wm.keyconfigs.addon
    if kc:
        # Use Screen keymap for global shortcuts that work everywhere
        km = kc.keymaps.new(name='Screen', space_type='EMPTY')
        kmi = km.keymap_items.new('bst.free_gpu', 'M', 'PRESS', ctrl=True, alt=True, shift=True)
        # Store keymap for cleanup
        addon_keymaps = getattr(bpy.types.Scene, '_bst_keymaps', [])
        addon_keymaps.append((km, kmi))
        bpy.types.Scene._bst_keymaps = addon_keymaps


def unregister():
    # Remove keybinds
    addon_keymaps = getattr(bpy.types.Scene, '_bst_keymaps', [])
    for km, kmi in addon_keymaps:
        try:
            km.keymap_items.remove(kmi)
        except:
            pass
    addon_keymaps.clear()
    if hasattr(bpy.types.Scene, '_bst_keymaps'):
        delattr(bpy.types.Scene, '_bst_keymaps')

    # Unregister modules
    try:
        ghost_buster.unregister()
    except Exception:
        pass
    try:
        bulk_path_management.unregister()
    except Exception:
        pass
    try:
        bulk_data_remap.unregister()
    except Exception:
        pass
    try:
        bulk_viewport_display.unregister()
    except Exception:
        pass
    try:
        bulk_scene_general.unregister()
    except Exception:
        pass

    # Unregister classes from this module
    for cls in reversed(classes):
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            pass


if __name__ == "__main__":
    register()
-29
@@ -1,29 +0,0 @@
schema_version = "1.0.0"

id = "rainclouds_bulk_scene_tools"
name = "Raincloud's Bulk Scene Tools"
tagline = "Bulk utilities for optimizing scene data"
version = "0.11.0"
type = "add-on"

maintainer = "RaincloudTheDragon <raincloudthedragon@gmail.com>"
license = ["GPL-3.0-or-later"]
blender_version_min = "4.5.0"

website = "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools"

tags = ["Scene", "Workflow", "Materials"]

[permissions]
files = "Read and write external resources referenced by scenes"

[build]
paths_exclude_pattern = [
  "__pycache__/",
  "*.pyc",
  ".git/",
  ".github/",
  "docs/",
  "tests/",
]
@@ -1,147 +0,0 @@
# v 0.10.0
- **AutoMat Extractor**
  - Added UDIM/tiled image detection so multi-tile textures are organized alongside standard images without errors. #8
  - Path builder now emits UDIM filename templates (e.g., `name.[UDIM].png`) plus per-tile targets (e.g., `name.1001.png`), preventing collisions during relocation.
  - Remapping helper sets tile-level `filepath` values and ensures directories exist before saving.
  - Saving routine attempts whole-image writes first, then falls back to per-tile saves via the Image Editor context, with summary logs noting UDIM sets processed.
- **Viewport Colors**
  - Added a Refresh Material Previews button that clears thumbnails, assigns each material to a temporary preview mesh, and forces `preview_ensure()` so stubborn viewport colors now reliably pick up thumbnail data. #7

# v 0.9.1
- **Convert Relations to Constraint**
  - Bugfix: Now converts bone parenting as intended

# v 0.9.0
- **Convert Relations to Constraint**: Operator in Animation Data section that converts regular parenting relationships to Child Of constraints for selected objects, maintaining world position and transform hierarchy
- Bugfix: Adapted old operator that wasn't drawing due to using the wrong icon string name.

# v 0.8.1
- Delete Single Keyframe Actions: fixed bug caused by not ignoring linked files

# v 0.8.0

## New Features
- **Delete Single Keyframe Actions**: New operator to remove unwanted animation actions (no keyframes, single keyframe, or all keyframes on same frame)
- **Find Material Users**: New operator with native material selector interface that displays detailed material usage analysis in a popup dialog, showing:
  - Object users with material slots
  - Node tree references
  - Material node tree usage
  - Blender's internal user count and fake user status
- **Remove Unused Material Slots**: New operator to clean up unused material slots from all mesh objects in the scene
- **Enhanced Bulk Scene General Panel**: Reorganized panel with new sections:
  - Materials section containing material analysis and cleanup tools
  - Animation Data section for keyframe/action management
  - All new operators integrated with consistent UI and project formatting standards

## Fixes
- PathMan
  - Automat summary no longer gives invoke error
  - Fixed timing/cancellation error when cancelling Rename Flat Colors operation
  - Pack files operator no longer throws AttributeError for is_generated (now uses img.source != 'GENERATED')
  - Pack files operator now properly skips special Blender images like "Render Result" and "Viewer Node" that can't be packed
- General
  - Removed debug print statement that was showing "Subdivision Surface modifiers removed from all objects" on every addon load

# v 0.7.1

## Ghost Buster Enhancements

### Added
- **Low Priority Ghost Detection**: New option to delete objects not in scenes with no legitimate use and users < 2
- **Smart Instance Collection Detection**: Ghost Buster now properly detects when objects are used by instance collections in scenes
- **Enhanced Legitimacy Checks**: Improved detection of objects with valid uses outside scenes (constraints, modifiers, particle systems only count if the using object is in a scene)

### Improved
- **More Accurate Ghost Detection**: Eliminated false positives by checking if instance collection targets are actually being used by scene objects
- **Better Classification**: Objects are now classified as "Legitimate", "Ghosts (users >= 2)", or "Low Priority (users < 2)" with clearer reasoning
- Cleaned UI

### Technical Changes
- Added `is_object_used_by_scene_instance_collections()` function for precise instance collection detection
- Enhanced `is_object_legitimate_outside_scene()` with scene-aware checks for modifiers, constraints, and particle systems
- Updated ghost analysis and removal logic to use more precise categorization
- Added scene property `ghost_buster_delete_low_priority` for user preference storage

# v 0.7.0

## New: Ghost Detection System
- **Universal Object Analysis**: Expanded ghost detection from CC-objects only to all object types (meshes, empties, curves, etc.)
- **Enhanced Safety Framework**: Added comprehensive protection for legitimate objects outside scenes:
  - WGT rig widgets (`WGT-*` objects)
  - Modifier targets (curve modifiers, constraints)
  - Constraint targets and references
  - Particle system objects
  - Collection instance objects (linked collection references)
- **Smart Classification**: Objects not in scenes now categorized as:
  - `LEGITIMATE`: Has valid use outside scenes (protected)
  - `LOW PRIORITY`: Only collection reference (preserved)
  - `GHOST`: Multiple users but not in scenes (removed)
- **Conservative Cleanup Logic**: Only removes objects with 2+ users that have no legitimate purpose
- **Updated UI**: Ghost Detector popup now shows "Ghost Objects Analysis" with enhanced categorization and object type details
- **Improved Safety**: All linked/library content automatically protected from ghost detection

# v 0.6.1

## Bug Fixes
- **Fixed flat color detection**: Redesigned algorithm with exact pixel matching and smart sampling
- **Fixed AutoMat Extractor**: Now properly organizes images by material instead of dumping everything to common folder
- **Fixed viewport color setting**: Resolved context restriction errors with deferred color application
- **Fixed timer performance**: Reduced timer frequency and improved cancellation reliability
- **Enhanced debugging**: Added comprehensive console reporting for all bulk operations

## Improvements
- Better performance with optimized sampling
- More reliable cancellation system
- Context-safe operations that don't interfere with Blender's drawing state

# v 0.6.0

- **Enhancement: Progress Reporting & Cancellation**
  - Some of the PathMan's operators are pretty resource-intense. Due to Python's GIL, I haven't been able to figure out how to run some of these more efficiently. Without the console window, you're flying blind, so I've integrated a loading bar with progress reporting for the following operators:
    - Flat Color Texture Renamer
    - Remove Extensions
    - Save All to image Paths
    - Remap Selected
    - Rename by Material
    - AutoMat Extractor

# v 0.5.1

- **Enhanced AutoMat Extractor:**
  - Added a crucial safety check to prevent textures from overwriting each other if they resolve to the same filename (e.g., `Image.001.png` and `Image.002.png` both becoming `Image.png`).
  - The operator now correctly sanitizes names with numerical suffixes before saving.
  - A new summary dialog now appears after the operation, reporting how many files were extracted successfully and listing any files that were skipped due to naming conflicts.
  - Added a user preference to control the location of the `common` folder, allowing it to be placed either inside or outside the blend file's specific texture folder. A checkbox for this setting was added to the UI.
- **Improved Suffix Handling:**
  - The "Rename by Material" tool now correctly preserves spaces in packed texture names (e.g., `Flow Pack` instead of `FlowPack`).
  - Added support for underscore-separated packed texture names (e.g., `flow_pack`).
- **Bug Fixes:**
  - Resolved multiple `AttributeError` and `TypeError` exceptions that occurred due to incorrect addon name lookups and invalid icon names, making the UI and addon registration more robust.

# v 0.5.0

- **Integrated Scene General: Free GPU VRAM**
- **Integrated PathMan: Automatic Material Extractor**
- **Integrated PathMan: Rename Image Textures by Material**: Added comprehensive texture suffix recognition
  - Recognizes many Character Creator suffixes
  - Recognizes most standard material suffixes
  - Images with unrecognized suffixes are skipped instead of renamed, preventing unintended modifications
  - Enhanced logging: Unrecognized suffix images are listed separately for easy identification
- **UI Improvements**:
  - Rearranged workflow layout: Make Paths Relative/Absolute moved to main workflow section
  - Remap Selected moved under path preview for better workflow progression
  - Rename by Material and AutoMat Extractor repositioned after Remap Selected
  - Added Autopack toggle at beginning of workflow sections (both Node Editor and 3D Viewport)
  - Consolidated draw functions: Node Editor panel now serves as master template for both panels

# v 0.4.1

- Fixed traceback error causing remap to fail to draw buttons

# v 0.4.0

Overhaul! Added new Scene General panel, major enhancements to all panels and functions.

# v0.3.0

- Added image path remapping for unpacked images, keeping them organized.
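For reference, the UDIM templating described under v 0.10.0 pairs one template path (assigned to the image datablock) with concrete per-tile targets; a sketch with illustrative names and tile numbers:

# Sketch of the v 0.10.0 UDIM path templating. Blender expands the
# <UDIM> token per tile, so only one filepath lives on the datablock.
base, ext = "//textures/common/skin", ".png"
template = f"{base}.<UDIM>{ext}"                       # skin.<UDIM>.png
tiles = {n: f"{base}.{n}{ext}" for n in (1001, 1002)}  # skin.1001.png, ...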
-540
@@ -1,540 +0,0 @@
|
|||||||
import bpy
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
from ..panels.bulk_path_management import (
|
|
||||||
get_image_extension,
|
|
||||||
bulk_remap_paths,
|
|
||||||
set_image_paths,
|
|
||||||
ensure_directory_for_path,
|
|
||||||
)
|
|
||||||
|
|
||||||
class AUTOMAT_OT_summary_dialog(bpy.types.Operator):
|
|
||||||
"""Show AutoMat Extractor operation summary"""
|
|
||||||
bl_idname = "bst.automat_summary_dialog"
|
|
||||||
bl_label = "AutoMat Extractor Summary"
|
|
||||||
bl_options = {'REGISTER', 'INTERNAL'}
|
|
||||||
|
|
||||||
# Properties to store summary data
|
|
||||||
total_selected: bpy.props.IntProperty(default=0)
|
|
||||||
success_count: bpy.props.IntProperty(default=0)
|
|
||||||
overwrite_skipped_count: bpy.props.IntProperty(default=0)
|
|
||||||
failed_remap_count: bpy.props.IntProperty(default=0)
|
|
||||||
|
|
||||||
overwrite_details: bpy.props.StringProperty(default="")
|
|
||||||
failed_remap_details: bpy.props.StringProperty(default="")
|
|
||||||
|
|
||||||
def draw(self, context):
|
|
||||||
layout = self.layout
|
|
||||||
|
|
||||||
layout.label(text="AutoMat Extractor - Summary", icon='INFO')
|
|
||||||
layout.separator()
|
|
||||||
|
|
||||||
box = layout.box()
|
|
||||||
col = box.column(align=True)
|
|
||||||
col.label(text=f"Total selected images: {self.total_selected}")
|
|
||||||
col.label(text=f"Successfully extracted: {self.success_count}", icon='CHECKMARK')
|
|
||||||
|
|
||||||
if self.overwrite_skipped_count > 0:
|
|
||||||
col.label(text=f"Skipped to prevent overwrite: {self.overwrite_skipped_count}", icon='ERROR')
|
|
||||||
if self.failed_remap_count > 0:
|
|
||||||
col.label(text=f"Failed to remap (path issue): {self.failed_remap_count}", icon='ERROR')
|
|
||||||
|
|
||||||
if self.overwrite_details:
|
|
||||||
layout.separator()
|
|
||||||
box = layout.box()
|
|
||||||
box.label(text="Overwrite Conflicts (Skipped):", icon='FILE_TEXT')
|
|
||||||
for line in self.overwrite_details.split('\n'):
|
|
||||||
if line.strip():
|
|
||||||
box.label(text=line)
|
|
||||||
|
|
||||||
if self.failed_remap_details:
|
|
||||||
layout.separator()
|
|
||||||
box = layout.box()
|
|
||||||
box.label(text="Failed Remaps:", icon='FILE_TEXT')
|
|
||||||
for line in self.failed_remap_details.split('\n'):
|
|
||||||
if line.strip():
|
|
||||||
box.label(text=line)
|
|
||||||
|
|
||||||
def execute(self, context):
|
|
||||||
return {'FINISHED'}
|
|
||||||
|
|
||||||
def invoke(self, context, event):
|
|
||||||
return context.window_manager.invoke_popup(self, width=500)
|
|
||||||
|
|
||||||
class AutoMatExtractor(bpy.types.Operator):
|
|
||||||
bl_idname = "bst.automatextractor"
|
|
||||||
bl_label = "AutoMatExtractor"
|
|
||||||
bl_description = "Pack selected images and extract them with organized paths by blend file and material"
|
|
||||||
bl_options = {'REGISTER', 'UNDO'}
|
|
||||||
|
|
||||||
def execute(self, context):
|
|
||||||
# Get addon preferences
|
|
||||||
addon_name = __package__.split('.')[0]
|
|
||||||
prefs = context.preferences.addons.get(addon_name).preferences
|
|
||||||
common_outside = prefs.automat_common_outside_blend
|
|
||||||
|
|
||||||
# Get selected images
|
|
||||||
selected_images = [img for img in bpy.data.images if hasattr(img, "bst_selected") and img.bst_selected]
|
|
||||||
|
|
||||||
if not selected_images:
|
|
||||||
self.report({'WARNING'}, "No images selected for extraction")
|
|
||||||
return {'CANCELLED'}
|
|
||||||
|
|
||||||
# Set up progress tracking
|
|
||||||
props = context.scene.bst_path_props
|
|
||||||
props.is_operation_running = True
|
|
||||||
props.operation_progress = 0.0
|
|
||||||
props.operation_status = f"Preparing AutoMat extraction for {len(selected_images)} images..."
|
|
||||||
|
|
||||||
# Store data for timer processing
|
|
||||||
self.selected_images = selected_images
|
|
||||||
self.common_outside = common_outside
|
|
||||||
self.current_step = 0
|
|
||||||
self.current_index = 0
|
|
||||||
self.packed_count = 0
|
|
||||||
self.success_count = 0
|
|
||||||
self.overwrite_skipped = []
|
|
||||||
self.failed_list = []
|
|
||||||
self.path_mapping = {}
|
|
||||||
self.udim_summary = {
|
|
||||||
"found": 0,
|
|
||||||
"saved": 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
# Start timer for processing
|
|
||||||
bpy.app.timers.register(self._process_step)
|
|
||||||
|
|
||||||
return {'FINISHED'}
|
|
||||||
|
|
||||||
def _process_step(self):
|
|
||||||
"""Process AutoMat extraction in steps to avoid blocking the UI"""
|
|
||||||
props = bpy.context.scene.bst_path_props
|
|
||||||
|
|
||||||
# Check for cancellation
|
|
||||||
if props.cancel_operation:
|
|
||||||
props.is_operation_running = False
|
|
||||||
props.operation_progress = 0.0
|
|
||||||
props.operation_status = "Operation cancelled"
|
|
||||||
props.cancel_operation = False
|
|
||||||
return None
|
|
||||||
|
|
||||||
if self.current_step == 0:
|
|
||||||
# Step 1: Pack images
|
|
||||||
if self.current_index >= len(self.selected_images):
|
|
||||||
# Packing complete, move to next step
|
|
||||||
self.current_step = 1
|
|
||||||
self.current_index = 0
|
|
||||||
props.operation_status = "Removing extensions from image names..."
|
|
||||||
props.operation_progress = 25.0
|
|
||||||
return 0.01
|
|
||||||
|
|
||||||
# Pack current image
|
|
||||||
img = self.selected_images[self.current_index]
|
|
||||||
props.operation_status = f"Packing {img.name}..."
|
|
||||||
|
|
||||||
if not img.packed_file:
|
|
||||||
try:
|
|
||||||
img.pack()
|
|
||||||
self.packed_count += 1
|
|
||||||
except Exception as e:
|
|
||||||
# Continue even if packing fails
|
|
||||||
pass
|
|
||||||
|
|
||||||
self.current_index += 1
|
|
||||||
progress = (self.current_index / len(self.selected_images)) * 25.0
|
|
||||||
props.operation_progress = progress
|
|
||||||
|
|
||||||
elif self.current_step == 1:
|
|
||||||
# Step 2: Remove extensions (this is a quick operation)
|
|
||||||
try:
|
|
||||||
bpy.ops.bst.remove_extensions()
|
|
||||||
except Exception as e:
|
|
||||||
pass # Continue even if this fails
|
|
||||||
|
|
||||||
self.current_step = 2
|
|
||||||
self.current_index = 0
|
|
||||||
props.operation_status = "Analyzing material usage..."
|
|
||||||
props.operation_progress = 30.0
|
|
||||||
|
|
||||||
elif self.current_step == 2:
|
|
||||||
# Step 3: Organize images by material usage
|
|
||||||
if self.current_index >= len(self.selected_images):
|
|
||||||
# Analysis complete, move to path building
|
|
||||||
self.current_step = 3
|
|
||||||
self.current_index = 0
|
|
||||||
props.operation_status = "Building path mapping..."
|
|
||||||
props.operation_progress = 50.0
|
|
||||||
return 0.01
|
|
||||||
|
|
||||||
# Get material mapping for all selected images
|
|
||||||
if self.current_index == 0:
|
|
||||||
self.material_mapping = self.get_image_material_mapping(self.selected_images)
|
|
||||||
print(f"DEBUG: Material mapping created for {len(self.selected_images)} images")
|
|
||||||
|
|
||||||
# This step is quick, just mark progress
|
|
||||||
self.current_index += 1
|
|
||||||
progress = 30.0 + (self.current_index / len(self.selected_images)) * 20.0
|
|
||||||
props.operation_progress = progress
|
|
||||||
|
|
||||||
elif self.current_step == 3:
|
|
||||||
# Step 4: Build path mapping
|
|
||||||
if self.current_index >= len(self.selected_images):
|
|
||||||
# Path building complete, move to remapping
|
|
||||||
self.current_step = 4
|
|
||||||
self.current_index = 0
|
|
||||||
props.operation_status = "Remapping image paths..."
|
|
||||||
props.operation_progress = 70.0
|
|
||||||
return 0.01
|
|
||||||
|
|
||||||
# Build path for current image
|
|
||||||
img = self.selected_images[self.current_index]
|
|
||||||
props.operation_status = f"Building path for {img.name}..."
|
|
||||||
|
|
||||||
# Get blend file name
|
|
||||||
blend_name = bpy.path.basename(bpy.data.filepath)
|
|
||||||
if blend_name:
|
|
||||||
blend_name = os.path.splitext(blend_name)[0]
|
|
||||||
else:
|
|
||||||
blend_name = "untitled"
|
|
||||||
blend_name = self.sanitize_filename(blend_name)
|
|
||||||
|
|
||||||
# Determine common path
|
|
||||||
if self.common_outside:
|
|
||||||
common_path_part = "common"
|
|
||||||
else:
|
|
||||||
common_path_part = f"{blend_name}\\common"
|
|
||||||
|
|
||||||
# Get extension and build path
|
|
||||||
extension = get_image_extension(img)
|
|
||||||
sanitized_base_name = self.sanitize_filename(img.name)
|
|
||||||
filename = f"{sanitized_base_name}{extension}"
|
|
||||||
|
|
||||||
if img.name.startswith('#'):
|
|
||||||
# Flat colors go to FlatColors subfolder
|
|
||||||
base_folder = f"//textures\\{common_path_part}\\FlatColors"
|
|
||||||
else:
|
|
||||||
# Check material usage for this image
|
|
||||||
materials_using_image = self.material_mapping.get(img.name, [])
|
|
||||||
|
|
||||||
if not materials_using_image:
|
|
||||||
# No materials found, put in common folder
|
|
||||||
base_folder = f"//textures\\{common_path_part}"
|
|
||||||
print(f"DEBUG: {img.name} - No materials found, using common folder")
|
|
||||||
elif len(materials_using_image) == 1:
|
|
||||||
# Used by exactly one material, organize by material name
|
|
||||||
material_name = self.sanitize_filename(materials_using_image[0])
|
|
||||||
base_folder = f"//textures\\{blend_name}\\{material_name}"
|
|
||||||
print(f"DEBUG: {img.name} - Used by {material_name}, organizing by material")
|
|
||||||
else:
|
|
||||||
# Used by multiple materials, put in common folder
|
|
||||||
base_folder = f"//textures\\{common_path_part}"
|
|
||||||
print(f"DEBUG: {img.name} - Used by multiple materials: {materials_using_image}, using common folder")
|
|
||||||
|
|
||||||
is_udim = self.is_udim_image(img)
|
|
||||||
if is_udim:
|
|
||||||
udim_mapping = self.build_udim_mapping(base_folder, sanitized_base_name, extension, img)
|
|
||||||
self.path_mapping[img.name] = udim_mapping
|
|
||||||
self.udim_summary["found"] += 1
|
|
||||||
print(f"DEBUG: {img.name} - UDIM detected with {len(udim_mapping.get('tiles', {}))} tiles")
|
|
||||||
else:
|
|
||||||
path = f"{base_folder}\\{filename}"
|
|
||||||
self.path_mapping[img.name] = path
|
|
||||||
|
|
||||||
self.current_index += 1
|
|
||||||
progress = 50.0 + (self.current_index / len(self.selected_images)) * 20.0
|
|
||||||
props.operation_progress = progress
|
|
||||||
|
|
||||||
elif self.current_step == 4:
|
|
||||||
# Step 5: Remap paths
|
|
||||||
if self.current_index >= len(self.path_mapping):
|
|
||||||
# Remapping complete, move to saving
|
|
||||||
self.current_step = 5
|
|
||||||
self.current_index = 0
|
|
||||||
props.operation_status = "Saving images to new locations..."
|
|
||||||
props.operation_progress = 85.0
|
|
||||||
return 0.01
|
|
||||||
|
|
||||||
# Remap current image
|
|
||||||
img_name = list(self.path_mapping.keys())[self.current_index]
|
|
||||||
mapping_entry = self.path_mapping[img_name]
|
|
||||||
props.operation_status = f"Remapping {img_name}..."
|
|
||||||
|
|
||||||
if isinstance(mapping_entry, dict) and mapping_entry.get("udim"):
|
|
||||||
success = set_image_paths(
|
|
||||||
img_name,
|
|
||||||
mapping_entry.get("template", ""),
|
|
||||||
tile_paths=mapping_entry.get("tiles", {})
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
success = set_image_paths(img_name, mapping_entry)
|
|
||||||
if success:
|
|
||||||
self.success_count += 1
|
|
||||||
else:
|
|
||||||
self.failed_list.append(img_name)
|
|
||||||
|
|
||||||
self.current_index += 1
|
|
||||||
progress = 70.0 + (self.current_index / len(self.path_mapping)) * 15.0
|
|
||||||
props.operation_progress = progress
|
|
||||||
|
|
||||||
elif self.current_step == 5:
|
|
||||||
# Step 6: Save images
|
|
||||||
if self.current_index >= len(self.selected_images):
|
|
||||||
# Operation complete
|
|
||||||
props.is_operation_running = False
|
|
||||||
props.operation_progress = 100.0
|
|
||||||
props.operation_status = f"Completed! Extracted {self.success_count} images{f', {len(self.failed_list)} failed' if self.failed_list else ''}"
|
|
||||||
|
|
||||||
# Show summary dialog
|
|
||||||
self.show_summary_dialog(
|
|
||||||
bpy.context,
|
|
||||||
total_selected=len(self.selected_images),
|
|
||||||
success_count=self.success_count,
|
|
||||||
overwrite_skipped_list=self.overwrite_skipped,
|
|
||||||
failed_remap_list=self.failed_list
|
|
||||||
)
|
|
||||||
|
|
||||||
# Console summary
|
|
||||||
print(f"\n=== AUTOMAT EXTRACTION SUMMARY ===")
|
|
||||||
print(f"Total images processed: {len(self.selected_images)}")
|
|
||||||
print(f"Successfully extracted: {self.success_count}")
|
|
||||||
print(f"Failed to remap: {len(self.failed_list)}")
|
|
||||||
|
|
||||||
# Show organization breakdown
|
|
||||||
material_organized = 0
|
|
||||||
common_organized = 0
|
|
||||||
flat_colors = 0
|
|
||||||
|
|
||||||
for img_name, path in self.path_mapping.items():
|
|
||||||
current_path = path["template"] if isinstance(path, dict) else path
|
|
||||||
if "FlatColors" in current_path:
|
|
||||||
flat_colors += 1
|
|
||||||
elif "common" in current_path:
|
|
||||||
common_organized += 1
|
|
||||||
else:
|
|
||||||
material_organized += 1
|
|
||||||
|
|
||||||
print(f"\nOrganization breakdown:")
|
|
||||||
print(f" Material-specific folders: {material_organized}")
|
|
||||||
print(f" Common folder: {common_organized}")
|
|
||||||
print(f" Flat colors: {flat_colors}")
|
|
||||||
|
|
||||||
# Show material organization details
|
|
||||||
if material_organized > 0:
|
|
||||||
print(f"\nMaterial organization details:")
|
|
||||||
material_folders = {}
|
|
||||||
for img_name, path in self.path_mapping.items():
|
|
||||||
if "FlatColors" not in path and "common" not in path:
|
|
||||||
# Extract material name from path
|
|
||||||
if isinstance(path, dict):
|
|
||||||
continue
|
|
||||||
path_parts = path.split('\\')
|
|
||||||
if len(path_parts) >= 3:
|
|
||||||
material_name = path_parts[-2]
|
|
||||||
if material_name not in material_folders:
|
|
||||||
material_folders[material_name] = []
|
|
||||||
material_folders[material_name].append(img_name)
|
|
||||||
|
|
||||||
for material_name, images in material_folders.items():
|
|
||||||
print(f" {material_name}: {len(images)} images")
|
|
||||||
|
|
||||||
print(f"=====================================\n")
|
|
||||||
if self.udim_summary["found"]:
|
|
||||||
print(f"UDIM images processed: {self.udim_summary['found']} (saved successfully: {self.udim_summary['saved']})")
|
|
||||||
|
|
||||||
# Force UI update
|
|
||||||
for area in bpy.context.screen.areas:
|
|
||||||
area.tag_redraw()
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
# Save current image
|
|
||||||
img = self.selected_images[self.current_index]
|
|
||||||
props.operation_status = f"Saving {img.name}..."
|
|
||||||
|
|
||||||
mapping_entry = self.path_mapping.get(img.name)
|
|
||||||
if isinstance(mapping_entry, dict) and mapping_entry.get("udim"):
|
|
||||||
self.save_udim_image(img, mapping_entry)
|
|
||||||
else:
|
|
||||||
self.save_standard_image(img)
|
|
||||||
|
|
||||||
self.current_index += 1
|
|
||||||
progress = 85.0 + (self.current_index / len(self.selected_images)) * 15.0
|
|
||||||
props.operation_progress = progress
|
|
||||||
|
|
||||||
# Force UI update
|
|
||||||
for area in bpy.context.screen.areas:
|
|
||||||
area.tag_redraw()
|
|
||||||
|
|
||||||
# Continue processing
|
|
||||||
return 0.01
|
|
||||||
|
|
||||||
def show_summary_dialog(self, context, total_selected, success_count, overwrite_skipped_list, failed_remap_list):
|
|
||||||
"""Show a popup dialog with the extraction summary"""
|
|
||||||
overwrite_details = ""
|
|
||||||
if overwrite_skipped_list:
|
|
||||||
for name, path in overwrite_skipped_list:
|
|
||||||
overwrite_details += f"'{name}' -> '{path}'\n"
|
|
||||||
|
|
||||||
failed_remap_details = ""
|
|
||||||
if failed_remap_list:
|
|
||||||
for name, path in failed_remap_list:
|
|
||||||
failed_remap_details += f"'{name}' -> '{path}'\n"
|
|
||||||
|
|
||||||
bpy.ops.bst.automat_summary_dialog('INVOKE_DEFAULT',
|
|
||||||
total_selected=total_selected,
|
|
||||||
success_count=success_count,
|
|
||||||
overwrite_skipped_count=len(overwrite_skipped_list),
|
|
||||||
failed_remap_count=len(failed_remap_list),
|
|
||||||
overwrite_details=overwrite_details.strip(),
|
|
||||||
failed_remap_details=failed_remap_details.strip()
|
|
||||||
)
|
|
||||||
|
|
||||||
def sanitize_filename(self, filename):
|
|
||||||
"""Sanitize filename/folder name for filesystem compatibility"""
|
|
||||||
# First, remove potential file extensions, including numerical ones like .001
|
|
||||||
base_name = re.sub(r'\.\d{3}$', '', filename) # Remove .001, .002 etc.
|
|
||||||
base_name = os.path.splitext(base_name)[0] # Remove standard extensions
|
|
||||||
|
|
||||||
# Remove or replace invalid characters for Windows/Mac/Linux
|
|
||||||
sanitized = re.sub(r'[<>:"/\\|?*]', '_', base_name)
|
|
||||||
# Remove leading/trailing spaces and dots
|
|
||||||
sanitized = sanitized.strip(' .')
|
|
||||||
# Ensure it's not empty
|
|
||||||
if not sanitized:
|
|
||||||
sanitized = "unnamed"
|
|
||||||
return sanitized
|
|
||||||
|
|
||||||
def get_image_material_mapping(self, images):
|
|
||||||
"""Create mapping of image names to materials that use them"""
|
|
||||||
image_to_materials = {}
|
|
||||||
|
|
||||||
# Initialize mapping
|
|
||||||
for img in images:
|
|
||||||
image_to_materials[img.name] = []
|
|
||||||
|
|
||||||
# Check all materials for image usage
|
|
||||||
for material in bpy.data.materials:
|
|
||||||
if not material.use_nodes:
|
|
||||||
continue
|
|
||||||
|
|
||||||
material_images = set()
|
|
||||||
|
|
||||||
# Find all image texture nodes in this material
|
|
||||||
for node in material.node_tree.nodes:
|
|
||||||
if node.type == 'TEX_IMAGE' and node.image:
|
|
||||||
material_images.add(node.image.name)
|
|
||||||
|
|
||||||
# Add this material to each image's usage list
|
|
||||||
for img_name in material_images:
|
|
||||||
if img_name in image_to_materials:
|
|
||||||
image_to_materials[img_name].append(material.name)
|
|
||||||
|
|
||||||
return image_to_materials
|
|
||||||
|
|
||||||
def is_udim_image(self, image):
|
|
||||||
"""Return True when the image contains UDIM/tiled data"""
|
|
||||||
has_tiles = hasattr(image, "source") and image.source == 'TILED'
|
|
||||||
tiles_attr = getattr(image, "tiles", None)
|
|
||||||
if tiles_attr and len(tiles_attr) > 1:
|
|
||||||
return True
|
|
||||||
return has_tiles
|
|
||||||
|
|
||||||
def build_udim_mapping(self, base_folder, base_name, extension, image):
|
|
||||||
"""Create a path mapping structure for UDIM images"""
|
|
||||||
udim_token = "<UDIM>"
|
|
||||||
template_filename = f"{base_name}.{udim_token}{extension}"
|
|
||||||
template_path = f"{base_folder}\\{template_filename}"
|
|
||||||
tile_paths = {}
|
|
||||||
|
|
||||||
tiles = getattr(image, "tiles", [])
|
|
||||||
for tile in tiles:
|
|
||||||
tile_number = str(getattr(tile, "number", "1001"))
|
|
||||||
tile_filename = f"{base_name}.{tile_number}{extension}"
|
|
||||||
tile_paths[tile_number] = f"{base_folder}\\{tile_filename}"
|
|
||||||
|
|
||||||
return {
|
|
||||||
"udim": True,
|
|
||||||
"template": template_path,
|
|
||||||
"tiles": tile_paths,
|
|
||||||
}
|
|
||||||
|
|
||||||
def save_udim_image(self, image, mapping):
|
|
||||||
"""Attempt to save each tile for a UDIM image"""
        success = False
        try:
            image.save()
            success = True
        except Exception as e:
            print(f"DEBUG: UDIM bulk save failed for {image.name}: {e}")
            success = self._save_udim_tiles_individually(image, mapping)

        if success:
            self.udim_summary["saved"] += 1
        return success

    def save_standard_image(self, image):
        """Save a non-UDIM image safely"""
        try:
            if hasattr(image, 'save'):
                image.save()
                return True
        except Exception as e:
            print(f"DEBUG: Failed to save image {image.name}: {e}")
        return False

    def _save_udim_tiles_individually(self, image, mapping):
        """Fallback saving routine when image.save() fails on UDIMs"""
        tile_paths = mapping.get("tiles", {})
        any_saved = False

        for tile in getattr(image, "tiles", []):
            tile_number = str(getattr(tile, "number", "1001"))
            target_path = tile_paths.get(tile_number)
            if not target_path:
                continue
            try:
                ensure_directory_for_path(target_path)
                self._save_tile_via_image_editor(image, tile_number, target_path)
                any_saved = True
            except Exception as e:
                print(f"DEBUG: Failed to save UDIM tile {tile_number} for {image.name}: {e}")

        return any_saved

    def _save_tile_via_image_editor(self, image, tile_number, filepath):
        """Use an IMAGE_EDITOR override to save a specific tile"""
        # Try to find an existing image editor to reuse Blender UI context
        for area in bpy.context.screen.areas:
            if area.type != 'IMAGE_EDITOR':
                continue
            region = next((r for r in area.regions if r.type == 'WINDOW'), None)
            if region is None:
                continue
            space = area.spaces.active
            space.image = image
            if hasattr(space, "image_user"):
                space.image_user.tile = int(tile_number)
            # temp_override replaces the legacy positional override dict
            # (deprecated in Blender 3.2, removed in 4.0); save_as accepts the
            # explicit filepath that plain image.save() does not
            with bpy.context.temp_override(area=area, region=region):
                bpy.ops.image.save_as(filepath=filepath)
            return
        # Fallback: attempt to set filepath and save without an override
        image.filepath = filepath
        image.save()

# Must register the new dialog class as well
classes = (
    AUTOMAT_OT_summary_dialog,
    AutoMatExtractor,
)


def register():
    for cls in classes:
        bpy.utils.register_class(cls)


def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)

@@ -1,14 +0,0 @@
import bpy


class BST_FreeGPU(bpy.types.Operator):
    bl_idname = "bst.free_gpu"
    bl_label = "Free VRAM"
    bl_description = "Free all material images from VRAM"

    def execute(self, context):
        for mat in bpy.data.materials:
            if mat.use_nodes:
                for node in mat.node_tree.nodes:
                    if hasattr(node, 'image') and node.image:
                        node.image.gl_free()
        return {"FINISHED"}
@@ -1,29 +0,0 @@
import bpy


class NoSubdiv(bpy.types.Operator):
    """Remove all subdivision surface modifiers from objects"""
    bl_idname = "bst.no_subdiv"
    bl_label = "No Subdiv"
    bl_options = {'REGISTER', 'UNDO'}

    only_selected: bpy.props.BoolProperty(
        name="Only Selected Objects",
        description="Apply only to selected objects",
        default=True
    )

    def execute(self, context):
        # Choose objects based on the property
        if self.only_selected:
            objects = context.selected_objects
        else:
            objects = bpy.data.objects
        removed_count = 0
        for obj in objects:
            if obj.modifiers:
                subdiv_mods = [mod for mod in obj.modifiers if mod.type == 'SUBSURF']
                for mod in subdiv_mods:
                    obj.modifiers.remove(mod)
                    removed_count += 1
        self.report({'INFO'}, f"Subdivision Surface modifiers removed from {'selected' if self.only_selected else 'all'} objects. ({removed_count} removed)")
        return {'FINISHED'}
-513
@@ -1,513 +0,0 @@
import bpy
import re


class RENAME_OT_summary_dialog(bpy.types.Operator):
    """Show rename operation summary"""
    bl_idname = "bst.rename_summary_dialog"
    bl_label = "Rename Summary"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Properties to store summary data
    total_selected: bpy.props.IntProperty(default=0)
    renamed_count: bpy.props.IntProperty(default=0)
    shared_count: bpy.props.IntProperty(default=0)
    unused_count: bpy.props.IntProperty(default=0)
    cc3iid_count: bpy.props.IntProperty(default=0)
    flatcolor_count: bpy.props.IntProperty(default=0)
    already_correct_count: bpy.props.IntProperty(default=0)
    unrecognized_suffix_count: bpy.props.IntProperty(default=0)
    rename_details: bpy.props.StringProperty(default="")

    def draw(self, context):
        layout = self.layout

        # Title
        layout.label(text="Rename by Material - Summary", icon='INFO')
        layout.separator()

        # Statistics box
        box = layout.box()
        col = box.column(align=True)
        col.label(text=f"Total selected images: {self.total_selected}")
        col.label(text=f"Successfully renamed: {self.renamed_count}", icon='CHECKMARK')

        if self.already_correct_count > 0:
            col.label(text=f"Already correctly named: {self.already_correct_count}", icon='CHECKMARK')
        if self.shared_count > 0:
            col.label(text=f"Shared images skipped: {self.shared_count}", icon='RADIOBUT_OFF')
        if self.unused_count > 0:
            col.label(text=f"Unused images skipped: {self.unused_count}", icon='RADIOBUT_OFF')
        if self.cc3iid_count > 0:
            col.label(text=f"CC3 ID textures skipped: {self.cc3iid_count}", icon='RADIOBUT_OFF')
        if self.flatcolor_count > 0:
            col.label(text=f"Flat colors skipped: {self.flatcolor_count}", icon='RADIOBUT_OFF')
        if self.unrecognized_suffix_count > 0:
            col.label(text=f"Unrecognized suffixes skipped: {self.unrecognized_suffix_count}", icon='RADIOBUT_OFF')

        # Show detailed rename information if available
        if self.rename_details:
            layout.separator()
            box = layout.box()
            box.label(text="Renamed Images:", icon='FILE_TEXT')

            # Split the details by lines and show each one
            lines = self.rename_details.split('\n')
            for line in lines[:10]:  # Limit to first 10 to avoid overly long dialogs
                if line.strip():
                    box.label(text=line)

            if len(lines) > 10:
                box.label(text=f"... and {len(lines) - 10} more")

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        return context.window_manager.invoke_popup(self, width=500)


class Rename_images_by_mat(bpy.types.Operator):
    bl_idname = "bst.rename_images_by_mat"
    bl_label = "Rename Images by Material"
    bl_description = "Rename selected images based on their material usage, preserving texture type suffixes"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Get selected images
        selected_images = [img for img in bpy.data.images if hasattr(img, "bst_selected") and img.bst_selected]

        if not selected_images:
            self.report({'WARNING'}, "No images selected for renaming")
            return {'CANCELLED'}

        # Get image to material mapping
        image_to_materials = self.get_image_material_mapping(selected_images)

        renamed_count = 0
        shared_count = 0
        unused_count = 0
        cc3iid_count = 0  # Track CC3 ID textures
        flatcolor_count = 0  # Track flat color textures
        already_correct_count = 0  # Track images already correctly named
        unrecognized_suffix_count = 0  # Track images with unrecognized suffixes
        renamed_list = []  # Track renamed images for debug
        unrecognized_list = []  # Track images with unrecognized suffixes

        for img in selected_images:
            # Skip CC3 ID textures (ignore case)
            if img.name.lower().startswith('cc3iid'):
                cc3iid_count += 1
                print(f"DEBUG: Skipped CC3 ID texture: {img.name}")
                continue

            # Skip flat color textures (start with #)
            if img.name.startswith('#'):
                flatcolor_count += 1
                print(f"DEBUG: Skipped flat color texture: {img.name}")
                continue

            materials = image_to_materials.get(img.name, [])

            if len(materials) == 0:
                # Unused image - skip
                unused_count += 1
                print(f"DEBUG: Skipped unused image: {img.name}")
                continue
            elif len(materials) == 1:
                # Single material usage - check suffix recognition
                material_name = materials[0]
                suffix = self.extract_texture_suffix(img.name)
                original_name = img.name

                # Skip images with unrecognized suffixes (only if they have a potential suffix pattern)
                if suffix is None and self.has_potential_suffix(img.name):
                    unrecognized_suffix_count += 1
                    unrecognized_list.append(img.name)
                    print(f"DEBUG: Skipped image with unrecognized suffix: {img.name}")
                    continue

                if suffix:
                    # Capitalize the suffix properly
                    capitalized_suffix = self.capitalize_suffix(suffix)
                    expected_name = f"{material_name}_{capitalized_suffix}"
                else:
                    # No suffix detected, use material name only
                    expected_name = material_name

                # Check if the image is already correctly named
                if img.name == expected_name:
                    already_correct_count += 1
                    print(f"DEBUG: Skipped already correctly named: {img.name}")
                    continue

                # Avoid duplicate names
                new_name = self.ensure_unique_name(expected_name)

                img.name = new_name
                renamed_count += 1
                renamed_list.append((original_name, new_name, material_name, capitalized_suffix if suffix else None))
                print(f"DEBUG: Renamed '{original_name}' → '{new_name}' (Material: {material_name}, Suffix: {capitalized_suffix if suffix else 'none'})")
            else:
                # Shared across multiple materials - skip
                shared_count += 1
                print(f"DEBUG: Skipped shared image: {img.name} (used by {len(materials)} materials: {', '.join(materials[:3])}{'...' if len(materials) > 3 else ''})")

        # Console debug summary (keep for development)
        print("\n=== RENAME BY MATERIAL SUMMARY ===")
        print(f"Total selected: {len(selected_images)}")
        print(f"Renamed: {renamed_count}")
        print(f"Already correct (skipped): {already_correct_count}")
        print(f"Shared (skipped): {shared_count}")
        print(f"Unused (skipped): {unused_count}")
        print(f"CC3 ID textures (skipped): {cc3iid_count}")
        print(f"Flat colors (skipped): {flatcolor_count}")
        print(f"Unrecognized suffixes (skipped): {unrecognized_suffix_count}")

        if renamed_list:
            print("\nDetailed rename log:")
            for original, new, material, suffix in renamed_list:
                suffix_info = f" (suffix: {suffix})" if suffix else " (no suffix)"
                print(f" '{original}' → '{new}' for material '{material}'{suffix_info}")

        if unrecognized_list:
            print("\nImages with unrecognized suffixes:")
            for img_name in unrecognized_list:
                print(f" '{img_name}'")

        print("===================================\n")

        # Show popup summary dialog
        self.show_summary_dialog(context, len(selected_images), renamed_count, shared_count, unused_count, cc3iid_count, flatcolor_count, already_correct_count, unrecognized_suffix_count, renamed_list)

        return {'FINISHED'}

    def show_summary_dialog(self, context, total_selected, renamed_count, shared_count, unused_count, cc3iid_count, flatcolor_count, already_correct_count, unrecognized_suffix_count, renamed_list):
        """Show a popup dialog with the rename summary"""
        # Prepare detailed rename information for display
        details_text = ""
        if renamed_list:
            for original, new, material, suffix in renamed_list:
                suffix_info = f" ({suffix})" if suffix else ""
                details_text += f"'{original}' → '{new}'{suffix_info}\n"

        # Invoke the summary dialog (the operator call's status result is not needed)
        bpy.ops.bst.rename_summary_dialog('INVOKE_DEFAULT',
            total_selected=total_selected,
            renamed_count=renamed_count,
            shared_count=shared_count,
            unused_count=unused_count,
            cc3iid_count=cc3iid_count,
            flatcolor_count=flatcolor_count,
            already_correct_count=already_correct_count,
            unrecognized_suffix_count=unrecognized_suffix_count,
            rename_details=details_text.strip())

    def get_image_material_mapping(self, images):
        """Create mapping of image names to materials that use them"""
        image_to_materials = {}

        # Initialize mapping
        for img in images:
            image_to_materials[img.name] = []

        # Check all materials for image usage
        for material in bpy.data.materials:
            if not material.use_nodes:
                continue

            material_images = set()

            # Find all image texture nodes in this material
            for node in material.node_tree.nodes:
                if node.type == 'TEX_IMAGE' and node.image:
                    material_images.add(node.image.name)

            # Add this material to each image's usage list
            for img_name in material_images:
                if img_name in image_to_materials:
                    image_to_materials[img_name].append(material.name)

        return image_to_materials

    def extract_texture_suffix(self, name):
        """Extract texture type suffix from image name (case-insensitive)"""
        # Comprehensive list of texture suffixes
        suffixes = [
            # Standard PBR suffixes
            'diffuse', 'basecolor', 'base_color', 'albedo', 'color', 'col',
            'normal', 'norm', 'nrm', 'bump',
            'roughness', 'rough', 'rgh',
            'metallic', 'metal', 'mtl',
            'specular', 'spec', 'spc',
            'ao', 'ambient_occlusion', 'ambientocclusion', 'occlusion',
            'gradao',
            'height', 'displacement', 'disp', 'displace',
            'opacity', 'alpha', 'mask',
            'emission', 'emissive', 'emit',
            'subsurface', 'sss', 'transmission',

            # Character Creator / iClone suffixes
            'base', 'diffusemap', 'normalmap', 'roughnessmap', 'metallicmap',
            'aomap', 'opacitymap', 'emissionmap', 'heightmap', 'displacementmap',
            'detail_normal', 'detail_diffuse', 'detail_mask',
            'blend', 'id', 'cavity', 'curvature', 'transmap', 'rgbamask', 'sssmap', 'micronmask',
            'bcbmap', 'mnaomask', 'specmask', 'micron', 'cfulcmask', 'nmuilmask', 'nbmap', 'enmask', 'blend_multiply',

            # Hair-related compound suffixes (no spaces)
            'hairflowmap', 'hairidmap', 'hairrootmap', 'hairdepthmap',
            'flowmap', 'idmap', 'rootmap', 'depthmap',

            # Wrinkle map suffixes (Character Creator)
            'wrinkle_normal1', 'wrinkle_normal2', 'wrinkle_normal3',
            'wrinkle_roughness1', 'wrinkle_roughness2', 'wrinkle_roughness3',
            'wrinkle_diffuse1', 'wrinkle_diffuse2', 'wrinkle_diffuse3',
            'wrinkle_mask1', 'wrinkle_mask2', 'wrinkle_mask3',
            'wrinkle_flow1', 'wrinkle_flow2', 'wrinkle_flow3',

            # Character Creator pack suffixes (with spaces)
            'flow pack', 'msmnao pack', 'roughness pack', 'sstm pack',
            'flow_pack', 'msmnao_pack', 'roughness_pack', 'sstm_pack',

            # Hair-related multi-word suffixes (spaces)
            'hair flow map', 'hair id map', 'hair root map', 'hair depth map',
            'flow map', 'id map', 'root map', 'depth map',

            # Additional common variations
            'tex', 'map', 'img', 'texture',
            'd', 'n', 'r', 'm', 's', 'a', 'h', 'o', 'e'  # Single letter abbreviations
        ]

        # Remove file extension first
        base_name = re.sub(r'\.[^.]+$', '', name)

        # Sort suffixes by length (longest first) to prioritize more specific matches
        sorted_suffixes = sorted(suffixes, key=len, reverse=True)

        # First, try to find multi-word suffixes with spaces (case-insensitive)
        for suffix in sorted_suffixes:
            if ' ' in suffix:  # Multi-word suffix
                # Pattern: ends with space + suffix
                pattern = rf'\s+({re.escape(suffix)})$'
                match = re.search(pattern, base_name, re.IGNORECASE)
                if match:
                    return match.group(1).lower()

                # Pattern: ends with suffix (no space separator, but exact match)
                if base_name.lower().endswith(suffix.lower()) and len(base_name) > len(suffix):
                    # Check if there's a word boundary before the suffix
                    prefix_end = len(base_name) - len(suffix)
                    if prefix_end > 0 and base_name[prefix_end - 1] in ' _-':
                        return suffix.lower()

        # Then try single-word suffixes with traditional separators
        for suffix in sorted_suffixes:
            if ' ' not in suffix:  # Single word suffix
                # Pattern: ends with _suffix or -suffix or .suffix
                pattern = rf'[._-]({re.escape(suffix)})$'
                match = re.search(pattern, base_name, re.IGNORECASE)
                if match:
                    return match.group(1).lower()

        # Check for numeric suffixes (like _01, _02, etc.)
        numeric_match = re.search(r'[._-](\d+)$', base_name)
        if numeric_match:
            return numeric_match.group(1)

        return None

    def ensure_unique_name(self, proposed_name):
        """Ensure the proposed name is unique among all images"""
        if proposed_name not in bpy.data.images:
            return proposed_name

        # If name exists, add numerical suffix
        counter = 1
        while f"{proposed_name}.{counter:03d}" in bpy.data.images:
            counter += 1

        return f"{proposed_name}.{counter:03d}"

    def capitalize_suffix(self, suffix):
        """Properly capitalize texture type suffixes with correct formatting"""
        # Dictionary of common texture suffixes with proper capitalization
        suffix_mapping = {
            # Standard PBR suffixes
            'diffuse': 'Diffuse',
            'basecolor': 'BaseColor',
            'base_color': 'BaseColor',
            'albedo': 'Albedo',
            'color': 'Color',
            'col': 'Color',

            'normal': 'Normal',
            'norm': 'Normal',
            'nrm': 'Normal',
            'bump': 'Bump',

            'roughness': 'Roughness',
            'rough': 'Roughness',
            'rgh': 'Roughness',

            'metallic': 'Metallic',
            'metal': 'Metallic',
            'mtl': 'Metallic',

            'specular': 'Specular',
            'spec': 'Specular',
            'spc': 'Specular',

            'ao': 'AO',
            'ambient_occlusion': 'AmbientOcclusion',
            'ambientocclusion': 'AmbientOcclusion',
            'occlusion': 'Occlusion',
            'gradao': 'GradAO',

            'height': 'Height',
            'displacement': 'Displacement',
            'disp': 'Displacement',
            'displace': 'Displacement',

            'opacity': 'Opacity',
            'alpha': 'Alpha',
            'mask': 'Mask',

            'emission': 'Emission',
            'emissive': 'Emission',
            'emit': 'Emission',

            'subsurface': 'Subsurface',
            'sss': 'SSS',
            'transmission': 'Transmission',

            # Character Creator / iClone suffixes
            'base': 'Base',
            'diffusemap': 'DiffuseMap',
            'normalmap': 'NormalMap',
            'roughnessmap': 'RoughnessMap',
            'metallicmap': 'MetallicMap',
            'aomap': 'AOMap',
            'opacitymap': 'OpacityMap',
            'emissionmap': 'EmissionMap',
            'heightmap': 'HeightMap',
            'displacementmap': 'DisplacementMap',
            'detail_normal': 'DetailNormal',
            'detail_diffuse': 'DetailDiffuse',
            'detail_mask': 'DetailMask',
            'blend': 'Blend',
            'id': 'ID',
            'cavity': 'Cavity',
            'curvature': 'Curvature',
            'transmap': 'TransMap',
            'rgbamask': 'RGBAMask',
            'sssmap': 'SSSMap',
            'micronmask': 'MicroNMask',
            'bcbmap': 'BCBMap',
            'mnaomask': 'MNAOMask',
            'specmask': 'SpecMask',
            'micron': 'MicroN',
            'cfulcmask': 'CFULCMask',
            'nmuilmask': 'NMUILMask',
            'nbmap': 'NBMap',
            'enmask': 'ENMask',
            'blend_multiply': 'Blend_Multiply',

            # Hair-related compound suffixes (no spaces)
            'hairflowmap': 'HairFlowMap',
            'hairidmap': 'HairIDMap',
            'hairrootmap': 'HairRootMap',
            'hairdepthmap': 'HairDepthMap',
            'flowmap': 'FlowMap',
            'idmap': 'IDMap',
            'rootmap': 'RootMap',
            'depthmap': 'DepthMap',

            # Wrinkle map suffixes (Character Creator)
            'wrinkle_normal1': 'Wrinkle_Normal1',
            'wrinkle_normal2': 'Wrinkle_Normal2',
            'wrinkle_normal3': 'Wrinkle_Normal3',
            'wrinkle_roughness1': 'Wrinkle_Roughness1',
            'wrinkle_roughness2': 'Wrinkle_Roughness2',
            'wrinkle_roughness3': 'Wrinkle_Roughness3',
            'wrinkle_diffuse1': 'Wrinkle_Diffuse1',
            'wrinkle_diffuse2': 'Wrinkle_Diffuse2',
            'wrinkle_diffuse3': 'Wrinkle_Diffuse3',
            'wrinkle_mask1': 'Wrinkle_Mask1',
            'wrinkle_mask2': 'Wrinkle_Mask2',
            'wrinkle_mask3': 'Wrinkle_Mask3',
            'wrinkle_flow1': 'Wrinkle_Flow1',
            'wrinkle_flow2': 'Wrinkle_Flow2',
            'wrinkle_flow3': 'Wrinkle_Flow3',

            # Character Creator pack suffixes (with spaces)
            'flow pack': 'Flow Pack',
            'msmnao pack': 'MSMNAO Pack',
            'roughness pack': 'Roughness Pack',
            'sstm pack': 'SSTM Pack',
            'flow_pack': 'Flow_Pack',
            'msmnao_pack': 'MSMNAO_Pack',
            'roughness_pack': 'Roughness_Pack',
            'sstm_pack': 'SSTM_Pack',

            # Hair-related multi-word suffixes
            'hair flow map': 'HairFlowMap',
            'hair id map': 'HairIDMap',
            'hair root map': 'HairRootMap',
            'hair depth map': 'HairDepthMap',
            'flow map': 'FlowMap',
            'id map': 'IDMap',
            'root map': 'RootMap',
            'depth map': 'DepthMap',

            # Additional common variations
            'tex': 'Texture',
            'map': 'Map',
            'img': 'Image',
            'texture': 'Texture',

            # Single letter abbreviations
            'd': 'Diffuse',
            'n': 'Normal',
            'r': 'Roughness',
            'm': 'Metallic',
            's': 'Specular',
            'a': 'Alpha',
            'h': 'Height',
            'o': 'Occlusion',
            'e': 'Emission'
        }

        # Get the proper capitalization from mapping, or capitalize first letter as fallback
        return suffix_mapping.get(suffix.lower(), suffix.capitalize())

    def has_potential_suffix(self, name):
        """Check if the image name has a potential suffix pattern that we should try to recognize"""
        # Remove file extension first
        base_name = re.sub(r'\.[^.]+$', '', name)

        # Check for common suffix patterns: _something, -something, .something, or space something
        suffix_patterns = [
            r'[._-][a-zA-Z0-9]+$',  # Underscore, dot, or dash followed by alphanumeric
            r'\s+[a-zA-Z0-9\s]+$',  # Space followed by alphanumeric (for multi-word suffixes)
        ]

        for pattern in suffix_patterns:
            if re.search(pattern, base_name):
                return True

        return False


# Registration classes - need to register both operators
classes = (
    RENAME_OT_summary_dialog,
    Rename_images_by_mat,
)


def register():
    for cls in classes:
        bpy.utils.register_class(cls)


def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
-87
@@ -1,87 +0,0 @@
import bpy


class ConvertRelationsToConstraint(bpy.types.Operator):
    """Convert regular parenting to Child Of constraints for all selected objects"""
    bl_idname = "bst.convert_relations_to_constraint"
    bl_label = "Convert Relations to Constraint"
    bl_description = "Convert regular parenting relationships to Child Of constraints for selected objects"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        result = convert_relations_to_constraint()
        if result:
            self.report({'INFO'}, f"Converted {result} objects to Child Of constraints")
        else:
            self.report({'WARNING'}, "No objects with parents found in selection")
        return {'FINISHED'}


def convert_relations_to_constraint():
    """Convert regular parenting to Child Of constraints for all selected objects"""

    # Get all selected objects
    selected_objects = bpy.context.selected_objects

    if not selected_objects:
        print("No objects selected!")
        return 0

    print(f"Converting parenting to Child Of constraints for {len(selected_objects)} objects...")

    converted_count = 0

    for obj in selected_objects:
        # Check if object has a parent
        if obj.parent is None:
            print(f"Skipping {obj.name}: No parent found")
            continue

        # Store bone information if parented to a bone
        parent_bone = obj.parent_bone if obj.parent_bone else None
        bone_info = f" (bone: {parent_bone})" if parent_bone else ""
        print(f"Processing {obj.name} -> {obj.parent.name}{bone_info}")

        # Store original parent and current world matrix
        original_parent = obj.parent
        world_matrix = obj.matrix_world.copy()

        # Remove the parent relationship
        obj.parent = None
        obj.parent_bone = ""  # Clear the bone reference

        # Add Child Of constraint
        child_of_constraint = obj.constraints.new(type='CHILD_OF')
        child_of_constraint.name = f"Child_Of_{original_parent.name}"
        child_of_constraint.target = original_parent

        # Transfer bone information to constraint subtarget
        if parent_bone:
            child_of_constraint.subtarget = parent_bone
            print(f" ✓ Transferred bone target: {parent_bone}")

        # Set the inverse matrix properly to maintain world position
        # This is equivalent to clicking "Set Inverse" in the UI
        child_of_constraint.inverse_matrix = original_parent.matrix_world.inverted()
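        # Note: when a bone subtarget is set, the UI's "Set Inverse" also
        # factors in the bone's matrix; this object-level inverse is an
        # approximation that the world-matrix restore below compensates for.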

        # Restore the original world position
        obj.matrix_world = world_matrix

        # Set the constraint to be active
        child_of_constraint.influence = 1.0

        converted_count += 1
        print(f" ✓ Converted {obj.name} to Child Of constraint")

    print(f"\nConversion complete! Converted {converted_count} objects.")

    # Report remaining parented objects
    remaining_parented = [obj for obj in bpy.context.selected_objects if obj.parent is not None]
    if remaining_parented:
        print("\nObjects that still have parents (not converted):")
        for obj in remaining_parented:
            print(f" - {obj.name} -> {obj.parent.name}")

    return converted_count


# Run the conversion
if __name__ == "__main__":
    convert_relations_to_constraint()
-47
@@ -1,47 +0,0 @@
import bpy
from bpy.types import Operator


class CreateOrthoCamera(Operator):
    """Create an orthographic camera with predefined settings"""
    bl_idname = "bst.create_ortho_camera"
    bl_label = "Create Ortho Camera"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Create a new camera
        bpy.ops.object.camera_add()
        camera = context.active_object

        # Set camera to orthographic
        camera.data.type = 'ORTHO'
        camera.data.ortho_scale = 1.8  # Set orthographic scale

        # Set camera position
        camera.location = (0, -2, 1)  # x=0, y=-2m, z=1m

        # Set camera rotation (90 degrees around X axis)
        camera.rotation_euler = (1.5708, 0, 0)  # 90 degrees in radians

        # Get or create camera collection
        camera_collection = bpy.data.collections.get("Camera")
        if not camera_collection:
            camera_collection = bpy.data.collections.new("Camera")
            context.scene.collection.children.link(camera_collection)

        # Move camera to camera collection
        # First unlink from current collection
        for collection in camera.users_collection:
            collection.objects.unlink(camera)
        # Then link to camera collection
        camera_collection.objects.link(camera)

        return {'FINISHED'}


def register():
    bpy.utils.register_class(CreateOrthoCamera)


def unregister():
    bpy.utils.unregister_class(CreateOrthoCamera)


if __name__ == "__main__":
    register()
-39
@@ -1,39 +0,0 @@
import bpy


class DeleteSingleKeyframeActions(bpy.types.Operator):
    """Delete actions that have no keyframes, only one keyframe, or all keyframes on the same frame"""
    bl_idname = "bst.delete_single_keyframe_actions"
    bl_label = "Delete Single Keyframe Actions"
    bl_description = "Delete actions with unwanted keyframe patterns (no keyframes, single keyframe, or all keyframes on same frame)"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        actions = bpy.data.actions
        actions_to_delete = []

        for action in actions:
            keyframe_frames = set()
            total_keyframes = 0
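            # kf.co holds (frame, value), so co[0] is the frame number; a
            # one-element frame set means every key sits on the same frame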
            for fcurve in action.fcurves:
                for kf in fcurve.keyframe_points:
                    keyframe_frames.add(kf.co[0])
                    total_keyframes += 1

            # No keyframes
            if total_keyframes == 0:
                actions_to_delete.append(action)
            # Only one keyframe
            elif total_keyframes == 1:
                actions_to_delete.append(action)
            # All keyframes on the same frame
            elif len(keyframe_frames) == 1:
                actions_to_delete.append(action)

        deleted_count = 0
        for action in actions_to_delete:
            print(f"Deleting action '{action.name}' (unwanted keyframe pattern)")
            bpy.data.actions.remove(action)
            deleted_count += 1

        self.report({'INFO'}, f"Deleted {deleted_count} unwanted actions")
        return {'FINISHED'}
-157
@@ -1,157 +0,0 @@
import bpy


class MATERIAL_USERS_OT_summary_dialog(bpy.types.Operator):
    """Show material users analysis in a popup dialog"""
    bl_idname = "bst.material_users_summary_dialog"
    bl_label = "Material Users Summary"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Properties to store summary data
    material_name: bpy.props.StringProperty(default="")
    users_count: bpy.props.IntProperty(default=0)
    fake_user: bpy.props.BoolProperty(default=False)
    object_users: bpy.props.StringProperty(default="")
    node_users: bpy.props.StringProperty(default="")
    material_node_users: bpy.props.StringProperty(default="")
    total_user_count: bpy.props.IntProperty(default=0)

    def draw(self, context):
        layout = self.layout

        # Title
        layout.label(text=f"Material Users - '{self.material_name}'", icon='MATERIAL')
        layout.separator()

        # Basic info box
        box = layout.box()
        col = box.column(align=True)
        col.label(text=f"Blender Users Count: {self.users_count}")
        col.label(text=f"Fake User: {'Yes' if self.fake_user else 'No'}")
        col.label(text=f"Total Found Users: {self.total_user_count}")

        layout.separator()

        # Object users section
        if self.object_users:
            layout.label(text="Object Users:", icon='OBJECT_DATA')
            objects_box = layout.box()
            objects_col = objects_box.column(align=True)
            for obj_name in self.object_users.split('|'):
                if obj_name.strip():
                    objects_col.label(text=f"• {obj_name}", icon='RIGHTARROW_THIN')
        else:
            layout.label(text="Object Users: None", icon='OBJECT_DATA')

        # Node tree users section
        if self.node_users:
            layout.separator()
            layout.label(text="Node Tree Users:", icon='NODETREE')
            nodes_box = layout.box()
            nodes_col = nodes_box.column(align=True)
            for node_ref in self.node_users.split('|'):
                if node_ref.strip():
                    nodes_col.label(text=f"• {node_ref}", icon='RIGHTARROW_THIN')

        # Material node tree users section
        if self.material_node_users:
            layout.separator()
            layout.label(text="Material Node Tree Users:", icon='MATERIAL')
            mat_nodes_box = layout.box()
            mat_nodes_col = mat_nodes_box.column(align=True)
            for mat_node_ref in self.material_node_users.split('|'):
                if mat_node_ref.strip():
                    mat_nodes_col.label(text=f"• {mat_node_ref}", icon='RIGHTARROW_THIN')

        layout.separator()

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        return context.window_manager.invoke_popup(self, width=500)


class FindMaterialUsers(bpy.types.Operator):
    """Find all users of a specified material and display detailed information"""
    bl_idname = "bst.find_material_users"
    bl_label = "Find Material Users"
    bl_description = "Find and display all users of a specified material"
    bl_options = {'REGISTER'}

    material_name: bpy.props.StringProperty(
        name="Material",
        description="Name of the material to analyze",
        default="",
    )

    def draw(self, context):
        layout = self.layout

        # Set the material if we have a name
        if self.material_name and self.material_name in bpy.data.materials:
            context.scene.bst_temp_material = bpy.data.materials[self.material_name]

        # Use template_ID to get the proper material selector (without new button)
        layout.template_ID(context.scene, "bst_temp_material", text="Material")

    def execute(self, context):
        # Get the material from the temp property
        material = getattr(context.scene, 'bst_temp_material', None)

        if not material:
            self.report({'ERROR'}, "No material selected")
            return {'CANCELLED'}

        # Update our material_name property
        self.material_name = material.name
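        # material.users is Blender's datablock reference count (a fake user
        # adds one), so it is reported separately from the users discovered by
        # the scans below.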

        # Check objects
        object_users = []
        for obj in bpy.data.objects:
            if obj.material_slots:
                for slot in obj.material_slots:
                    if slot.material == material:
                        object_users.append(obj.name)
                        break

        # Check node groups more thoroughly
        node_users = []
        for node_tree in bpy.data.node_groups:
            for node in node_tree.nodes:
                # Check material nodes
                if hasattr(node, 'material') and node.material == material:
                    node_users.append(f"{node_tree.name}.{node.name}")
                # Check material input sockets
                for input_socket in node.inputs:
                    if hasattr(input_socket, 'default_value') and hasattr(input_socket.default_value, 'name'):
                        if input_socket.default_value.name == material.name:
                            node_users.append(f"{node_tree.name}.{node.name}.{input_socket.name}")

        # Check material node trees
        material_node_users = []
        for mat in bpy.data.materials:
            if mat.node_tree:
                for node in mat.node_tree.nodes:
                    if hasattr(node, 'material') and node.material == material:
                        material_node_users.append(f"{mat.name}.{node.name}")

        # Show summary dialog
        self.show_summary_dialog(context, material, object_users, node_users, material_node_users)
        return {'FINISHED'}

    def show_summary_dialog(self, context, material, object_users, node_users, material_node_users):
        """Show the material users summary in a popup dialog"""
        total_user_count = len(object_users) + len(node_users) + len(material_node_users)

        # Create and configure the summary dialog
        dialog_op = bpy.ops.bst.material_users_summary_dialog
        dialog_op('INVOKE_DEFAULT',
            material_name=material.name,
            users_count=material.users,
            fake_user=material.use_fake_user,
            object_users='|'.join(object_users),
            node_users='|'.join(node_users),
            material_node_users='|'.join(material_node_users),
            total_user_count=total_user_count)

    def invoke(self, context, event):
        return context.window_manager.invoke_props_dialog(self)
-253
@@ -1,253 +0,0 @@
import bpy


def rgb_to_hex(r, g, b, a=1.0):
    """Convert RGBA values (0-1 range) to hex color code."""
    # Convert to 0-255 range and format as hex
    r_int = int(round(r * 255))
    g_int = int(round(g * 255))
    b_int = int(round(b * 255))
    a_int = int(round(a * 255))

    # If alpha is full (255), use RGB format, otherwise use RGBA
    if a_int == 255:
        return f"#{r_int:02X}{g_int:02X}{b_int:02X}"
    else:
        return f"#{r_int:02X}{g_int:02X}{b_int:02X}{a_int:02X}"


def is_flat_color_image_efficient(image, max_pixels_to_check=10000):
    """
    Efficiently check if an image has all pixels of the same color.

    Args:
        image: The image to check
        max_pixels_to_check: Maximum number of pixels to check (for performance)

    Returns:
        tuple: (is_flat, color) where is_flat is bool and color is RGBA tuple
    """
    if not image or not image.pixels:
        print(" DEBUG: No image or no pixels")
        return False, None

    # Get pixel data
    pixels = image.pixels[:]

    if len(pixels) == 0:
        print(" DEBUG: Empty pixel array")
        return False, None

    # Images in Blender are typically RGBA, so 4 values per pixel
    channels = image.channels
    if channels not in [3, 4]:  # RGB or RGBA
        print(f" DEBUG: Unsupported channels: {channels}")
        return False, None

    # Get the first pixel color as reference
    first_pixel = pixels[:channels]
    print(f" DEBUG: Reference color: {first_pixel}")

    # Calculate total pixels
    total_pixels = len(pixels) // channels
    print(f" DEBUG: Total pixels: {total_pixels}")

    # Determine how many pixels to check
    pixels_to_check = min(total_pixels, max_pixels_to_check)

    # For small images, check every pixel
    if total_pixels <= max_pixels_to_check:
        step = 1
        print(f" DEBUG: Checking all {total_pixels} pixels")
    else:
        # For large images, sample evenly across the image
        step = total_pixels // pixels_to_check
        print(f" DEBUG: Sampling {pixels_to_check} pixels with step {step}")

    # Check pixels
    checked_count = 0
    for i in range(0, total_pixels, step):
        pixel_start = i * channels
        current_pixel = pixels[pixel_start:pixel_start + channels]
        checked_count += 1

        # Compare with reference pixel (exact match)
        for j in range(channels):
            if current_pixel[j] != first_pixel[j]:
                print(f" DEBUG: Pixel {i} differs at channel {j}: {current_pixel[j]} vs {first_pixel[j]}")
                print(f" DEBUG: Checked {checked_count} pixels before finding difference")
                return False, None

    print(f" DEBUG: All {checked_count} checked pixels are identical")

    # If we get here, all checked pixels are the same color
    if channels == 3:
        return True, (first_pixel[0], first_pixel[1], first_pixel[2], 1.0)
    else:
        return True, tuple(first_pixel)


def is_flat_color_image(image):
    """Check if an image has all pixels of the same color."""
    # Use the efficient version by default
    return is_flat_color_image_efficient(image, max_pixels_to_check=10000)


def safe_rename_image(image, new_name):
    """Safely rename an image datablock using context override."""
    try:
        # Method 1: Try direct assignment first (works in most contexts)
        image.name = new_name
        return True
    except Exception:
        pass
    try:
        # Method 2: Use context override with the outliner
        for area in bpy.context.screen.areas:
            if area.type == 'OUTLINER':
                with bpy.context.temp_override(area=area):
                    image.name = new_name
                    return True
    except Exception:
        pass
    try:
        # Method 3: Use a copied context with the image set as edit_image
        bpy.context.view_layer.objects.active = None

        # Create a temporary override context
        override_context = bpy.context.copy()
        override_context['edit_image'] = image

        with bpy.context.temp_override(**override_context):
            image.name = new_name
            return True
    except Exception:
        pass
    # Method 4: Try using the data API directly with forced update cycles
    try:
        bpy.context.view_layer.update()
        image.name = new_name
        bpy.context.view_layer.update()
        return True
    except Exception:
        return False


def rename_flat_color_textures():
    """Main function to find and rename flat color textures."""
    renamed_count = 0
    failed_count = 0
    processed_count = 0

    print("Scanning for flat color textures...")

    # Store rename operations to perform them in batch
    rename_operations = []

    for image in bpy.data.images:
        processed_count += 1

        # Skip if image has no pixel data
        if not hasattr(image, 'pixels') or len(image.pixels) == 0:
            print(f"Skipping '{image.name}': No pixel data available")
            continue

        # Check if image has flat color
        is_flat, color = is_flat_color_image(image)

        if is_flat and color:
            # Convert color to hex
            hex_color = rgb_to_hex(*color)

            # Store original name for logging
            original_name = image.name

            # Check if name is already a hex color (to avoid renaming again)
            if not original_name.startswith('#'):
                rename_operations.append((image, original_name, hex_color, color))
            else:
                print(f"Skipping '{original_name}': Already appears to be hex-named")
        else:
            print(f"'{image.name}': Not a flat color texture")

    # Perform rename operations
    print(f"\nPerforming {len(rename_operations)} rename operation(s)...")

    for image, original_name, hex_color, color in rename_operations:
        success = safe_rename_image(image, hex_color)
        if success:
            print(f"Renamed '{original_name}' to '{hex_color}' (Color: RGBA{color})")
            renamed_count += 1
        else:
            print(f"Failed to rename '{original_name}' to '{hex_color}' - Context restriction")
            failed_count += 1

    print("\nSummary:")
    print(f"Processed: {processed_count} images")
    print(f"Successfully renamed: {renamed_count} flat color textures")
    if failed_count > 0:
        print(f"Failed to rename: {failed_count} textures (try running from Python Console instead)")

    return renamed_count


def reload_image_pixels():
    """Reload pixel data for all images (useful if images aren't loaded)."""
    print("Reloading pixel data for all images...")

    for image in bpy.data.images:
        if image.source == 'FILE' and image.filepath:
            try:
                image.reload()
                print(f"Reloaded: {image.name}")
            except Exception:
                print(f"Failed to reload: {image.name}")


# Alternative function for running in restricted contexts
def print_rename_suggestions():
    """Print suggested renames without actually renaming (for restricted contexts)."""
    suggestions = []

    print("Scanning for flat color textures (suggestion mode)...")

    for image in bpy.data.images:
        if not hasattr(image, 'pixels') or len(image.pixels) == 0:
            continue

        is_flat, color = is_flat_color_image(image)

        if is_flat and color and not image.name.startswith('#'):
            hex_color = rgb_to_hex(*color)
            suggestions.append((image.name, hex_color, color))

    if suggestions:
        print(f"\nFound {len(suggestions)} flat color texture(s) that could be renamed:")
        print("-" * 60)
        for original_name, hex_color, color in suggestions:
            print(f"'{original_name}' -> '{hex_color}' (RGBA{color})")

        print("\nTo actually rename them, run this script from:")
        print("1. Blender's Python Console, or")
        print("2. Command line with: blender file.blend --python script.py")
    else:
        print("\nNo flat color textures found that need renaming.")


# Main execution
if __name__ == "__main__":
    print("=" * 50)
    print("Flat Color Texture Renamer")
    print("=" * 50)

    # Optional: Reload images to ensure pixel data is available
    # Uncomment the line below if you want to force reload all images
    # reload_image_pixels()

    # Try to run the renaming process
    try:
        renamed_count = rename_flat_color_textures()

        if renamed_count > 0:
            print(f"\nSuccessfully renamed {renamed_count} flat color texture(s)!")
        else:
            print("\nNo flat color textures found to rename.")
    except Exception as e:
        print(f"\nContext restriction detected ({e}). Running in suggestion mode...")
        print_rename_suggestions()

    print("Script completed.")
-690
@@ -1,690 +0,0 @@
import bpy


def safe_wgt_removal():
    """Safely remove only WGT widget objects that are clearly ghosts"""
    print("="*80)
    print("CONSERVATIVE WGT GHOST REMOVAL")
    print("="*80)

    # Find all WGT objects
    wgt_objects = []
    for obj in bpy.data.objects:
        if obj.name.startswith('WGT-'):
            wgt_objects.append(obj)

    print(f"Found {len(wgt_objects)} WGT objects")

    # Check which ones are actually used as bone custom shapes. custom_shape
    # lives on pose bones (not on armature data bones), so iterate each
    # armature object's pose; the previous data-bone check could never match.
    used_wgts = set()
    for arm_obj in bpy.data.objects:
        if arm_obj.type != 'ARMATURE' or arm_obj.pose is None:
            continue
        for pbone in arm_obj.pose.bones:
            if pbone.custom_shape:
                used_wgts.add(pbone.custom_shape.name)

    print(f"Found {len(used_wgts)} WGT objects actually used by armatures")

    # Remove unused WGT objects
    removed_wgts = 0
    for obj in wgt_objects:
        if obj.name not in used_wgts:
            try:
                # Skip linked objects (they're legitimate library content)
                if hasattr(obj, 'library') and obj.library is not None:
                    print(f" Skipping linked WGT: {obj.name} (from {obj.library.name})")
                    continue

                # Check if it's in the WGTS collection (typical ghost pattern)
                in_wgts_collection = False
                for collection in bpy.data.collections:
                    if 'WGTS' in collection.name and obj in collection.objects.values():
                        in_wgts_collection = True
                        break

                if in_wgts_collection:
                    print(f" Removing unused WGT: {obj.name}")
                    bpy.data.objects.remove(obj, do_unlink=True)
                    removed_wgts += 1
            except Exception as e:
                print(f" Failed to remove {obj.name}: {e}")

    print(f"Removed {removed_wgts} unused WGT objects")
    return removed_wgts


def is_collection_in_scene_hierarchy(collection, scene_collection):
    """Recursively check if a collection exists anywhere in the scene collection hierarchy"""
    if collection == scene_collection:
        return True

    for child_collection in scene_collection.children:
        if child_collection == collection:
            return True
        if is_collection_in_scene_hierarchy(collection, child_collection):
            return True

    return False


def clean_empty_collections():
    """Remove empty collections that are not linked to scenes"""

    print("\n" + "="*80)
    print("CLEANING EMPTY COLLECTIONS")
    print("="*80)

    removed_collections = 0
    collections_to_remove = []

    for collection in bpy.data.collections:
        # Check if collection is empty
        if len(collection.objects) == 0 and len(collection.children) == 0:
            # Skip linked collections (they're legitimate library content)
            if hasattr(collection, 'library') and collection.library is not None:
                print(f" Skipping linked empty collection: {collection.name}")
                continue

            # Check if it's anywhere in any scene's collection hierarchy
            linked_to_scene = False
            for scene in bpy.data.scenes:
                if is_collection_in_scene_hierarchy(collection, scene.collection):
                    linked_to_scene = True
                    print(f" Preserving empty collection: {collection.name} (in scene '{scene.name}')")
                    break

            if not linked_to_scene:
                collections_to_remove.append(collection)

    for collection in collections_to_remove:
        try:
            print(f" Removing empty collection: {collection.name}")
            bpy.data.collections.remove(collection)
            removed_collections += 1
        except Exception as e:
            print(f" Failed to remove collection {collection.name}: {e}")

    print(f"Removed {removed_collections} empty collections")
    return removed_collections


def is_object_used_by_scene_instance_collections(obj):
    """Check if object is in a collection that's being instanced by objects in scenes"""

    # Find all collections that contain this object
    obj_collections = []
    for collection in bpy.data.collections:
        if obj in collection.objects.values():
            obj_collections.append(collection)

    if not obj_collections:
        return False

    # Check if any of these collections are being instanced by objects in scenes
    for collection in obj_collections:
        # Find objects that instance this collection
        for other_obj in bpy.data.objects:
            if (other_obj.instance_type == 'COLLECTION' and
                    other_obj.instance_collection == collection):

                # Check if the instancing object is in any scene
                for scene in bpy.data.scenes:
                    if other_obj in scene.objects.values():
                        return True

    return False


def is_object_legitimate_outside_scene(obj):
    """Check if an object has legitimate reasons to exist outside scenes"""

    # WGT objects (rig widgets) are legitimate outside scenes
    if obj.name.startswith('WGT-'):
        return True

    # Collection instance objects (linked collection references) are legitimate
    if obj.instance_type == 'COLLECTION' and obj.instance_collection is not None:
        return True

    # Objects that are being used by instance collections in scenes are legitimate
    if is_object_used_by_scene_instance_collections(obj):
        return True

    # Objects used as curve modifiers, constraint targets, etc.
    # Check if object is used by modifiers on other objects that are in scenes
    for other_obj in bpy.data.objects:
        # Check if the other object is in any scene
        in_scene = False
        for scene in bpy.data.scenes:
            if other_obj in scene.objects.values():
                in_scene = True
                break

        if in_scene:
            for modifier in other_obj.modifiers:
                if hasattr(modifier, 'object') and modifier.object == obj:
                    return True
                if hasattr(modifier, 'target') and modifier.target == obj:
                    return True

    # Check if object is used by constraints on other objects that are in scenes
    for other_obj in bpy.data.objects:
        # Check if the other object is in any scene
        in_scene = False
        for scene in bpy.data.scenes:
            if other_obj in scene.objects.values():
                in_scene = True
                break

        if in_scene:
            for constraint in other_obj.constraints:
                if hasattr(constraint, 'target') and constraint.target == obj:
                    return True
                if hasattr(constraint, 'subtarget') and constraint.subtarget == obj.name:
                    return True

    # Check if object is used in particle systems on objects that are in scenes
    for other_obj in bpy.data.objects:
        # Check if the other object is in any scene
        in_scene = False
        for scene in bpy.data.scenes:
            if other_obj in scene.objects.values():
                in_scene = True
                break

        if in_scene:
            for modifier in other_obj.modifiers:
                if modifier.type == 'PARTICLE_SYSTEM':
                    settings = modifier.particle_system.settings
                    if hasattr(settings, 'object') and settings.object == obj:
                        return True
                    if hasattr(settings, 'instance_object') and settings.instance_object == obj:
                        return True

    return False


def clean_object_ghosts(delete_low_priority=False):
    """Remove objects that are not in any scene and have no legitimate purpose (potential ghosts)"""

    print("\n" + "="*80)
    print("OBJECT GHOST CLEANUP")
    print("="*80)

    # Get all objects, excluding cameras and lights by default (they're often not in scenes for good reasons)
    candidate_objects = [obj for obj in bpy.data.objects if obj.type not in ['CAMERA', 'LIGHT']]

    if not candidate_objects:
        print("No candidate objects found")
        return 0

    print(f"Found {len(candidate_objects)} candidate objects")

    removed_objects = 0
    ghosts_to_remove = []

    for obj in candidate_objects:
        # Skip linked objects (they're legitimate library content)
        if hasattr(obj, 'library') and obj.library is not None:
            continue

        # Check which scenes contain it
        in_scenes = []
        for scene in bpy.data.scenes:
            if obj in scene.objects.values():
                in_scenes.append(scene.name)

        # If not in any scene, check if it has legitimate reasons to exist
        if len(in_scenes) == 0:
            if is_object_legitimate_outside_scene(obj):
                print(f" Preserving object: {obj.name} (legitimate use outside scene)")
                continue

            # If not legitimate, it's a ghost - but be conservative with low user count objects
            should_remove = False
            removal_reason = ""
||||||
if obj.users >= 2:
|
|
||||||
# Higher user count ghosts are definitely safe to remove
|
|
||||||
should_remove = True
|
|
||||||
removal_reason = "ghost (users >= 2, no legitimate use found)"
|
|
||||||
elif obj.users < 2 and delete_low_priority:
|
|
||||||
# Low user count ghosts only if user enables the option
|
|
||||||
should_remove = True
|
|
||||||
removal_reason = "low priority ghost (users < 2, no legitimate use found)"
|
|
||||||
elif obj.users < 2:
|
|
||||||
print(f" Skipping low priority object: {obj.name} (users < 2, enable 'Delete Low Priority' to remove)")
|
|
||||||
|
|
||||||
if should_remove:
|
|
||||||
ghosts_to_remove.append(obj)
|
|
||||||
print(f" Marking ghost for removal: {obj.name} (type: {obj.type}) - {removal_reason}")
|
|
||||||
|
|
||||||
# Remove the ghost objects
|
|
||||||
for obj in ghosts_to_remove:
|
|
||||||
try:
|
|
||||||
print(f" Removing object ghost: {obj.name}")
|
|
||||||
bpy.data.objects.remove(obj, do_unlink=True)
|
|
||||||
removed_objects += 1
|
|
||||||
except Exception as e:
|
|
||||||
print(f" Failed to remove object {obj.name}: {e}")
|
|
||||||
|
|
||||||
print(f"Removed {removed_objects} ghost objects")
|
|
||||||
return removed_objects
|
|
||||||
|
|
||||||
def manual_object_analysis():
    """Manual analysis of objects - show info but don't auto-remove"""

    print("\n" + "="*80)
    print("OBJECT GHOST ANALYSIS (MANUAL REVIEW)")
    print("="*80)

    # Get all objects, excluding cameras and lights (they're often legitimately not in scenes)
    candidate_objects = [obj for obj in bpy.data.objects if obj.type not in ['CAMERA', 'LIGHT']]

    # Filter to only local objects not in any scene
    objects_not_in_scenes = []
    for obj in candidate_objects:
        # Skip linked objects for analysis
        if hasattr(obj, 'library') and obj.library is not None:
            continue

        in_any_scene = any(obj in scene.objects.values() for scene in bpy.data.scenes)
        if not in_any_scene:
            objects_not_in_scenes.append(obj)

    if not objects_not_in_scenes:
        print("No local objects found outside scenes")
        return

    print(f"Found {len(objects_not_in_scenes)} local objects not in any scene:")

    for obj in objects_not_in_scenes:
        print(f"\n  Object: {obj.name} (type: {obj.type})")
        print(f"    Users: {obj.users}")
        print(f"    Parent: {obj.parent.name if obj.parent else 'None'}")

        # Check collections
        in_collections = [c.name for c in bpy.data.collections if obj in c.objects.values()]
        print(f"    In collections: {in_collections}")

        # Show recommendation (every object falls into exactly one bucket,
        # since users >= 2 and users < 2 cover all remaining cases)
        if is_object_legitimate_outside_scene(obj):
            print("    -> LEGITIMATE: Has valid use outside scenes")
        elif obj.users >= 2:
            print("    -> GHOST: No legitimate use found, users >= 2 (will be removed)")
        else:
            print("    -> LOW PRIORITY: No legitimate use found, users < 2 (needs option enabled)")

def main(delete_low_priority=False):
    """Main conservative cleanup function"""

    print("CONSERVATIVE GHOST DATA CLEANUP")
    print("="*80)
    print("This script removes:")
    print("1. Unused local WGT widget objects")
    print("2. Empty unlinked collections")
    print("3. Objects not in any scene with no legitimate use")
    if delete_low_priority:
        print("   - Including low priority ghosts (no legitimate use, users < 2)")
    else:
        print("   - Excluding low priority ghosts (no legitimate use, users < 2)")
    print("="*80)

    initial_objects = len(bpy.data.objects)
    initial_collections = len(bpy.data.collections)

    # Safe operations only
    wgts_removed = safe_wgt_removal()
    collections_removed = clean_empty_collections()
    object_ghosts_removed = clean_object_ghosts(delete_low_priority)

    # Show remaining object analysis
    manual_object_analysis()

    # Final purge
    print("\n" + "="*80)
    print("FINAL SAFE PURGE")
    print("="*80)

    try:
        bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)
        print("Safe purge completed")
    except Exception as e:
        print(f"Purge had issues: {e}")

    final_objects = len(bpy.data.objects)
    final_collections = len(bpy.data.collections)

    print("\n" + "="*80)
    print("CONSERVATIVE CLEANUP SUMMARY")
    print("="*80)
    print(f"Objects: {initial_objects} -> {final_objects} (removed {initial_objects - final_objects})")
    print(f"Collections: {initial_collections} -> {final_collections} (removed {initial_collections - final_collections})")
    print(f"WGT objects removed: {wgts_removed}")
    print(f"Object ghosts removed: {object_ghosts_removed}")
    print("="*80)

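# Example (sketch): with this module loaded in Blender's Text Editor, the
# entry point above can be driven directly from the console; main() and its
# delete_low_priority flag are the only things assumed, and both are defined here.
#
#   main(delete_low_priority=False)  # conservative pass only
#   main(delete_low_priority=True)   # also remove users < 2 ghosts
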
class GhostBuster(bpy.types.Operator):
    """Conservative cleanup of ghost data (unused WGT objects, empty collections)"""
    bl_idname = "bst.ghost_buster"
    bl_label = "Ghost Buster"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        try:
            # Get the delete low priority setting from scene properties
            delete_low_priority = getattr(context.scene, "ghost_buster_delete_low_priority", False)

            # Call the main ghost buster function
            main(delete_low_priority)
            self.report({'INFO'}, "Ghost data cleanup completed")
            return {'FINISHED'}
        except Exception as e:
            self.report({'ERROR'}, f"Ghost buster failed: {str(e)}")
            return {'CANCELLED'}

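# Example (sketch): once the add-on is registered, the operator can be called
# from scripts. It reads the scene toggle via getattr above; the property itself
# is assumed to be registered elsewhere in the add-on.
#
#   bpy.context.scene.ghost_buster_delete_low_priority = False
#   bpy.ops.bst.ghost_buster()
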
class GhostDetector(bpy.types.Operator):
    """Detect and analyze ghost data without removing it"""
    bl_idname = "bst.ghost_detector"
    bl_label = "Ghost Detector"
    bl_options = {'REGISTER', 'INTERNAL'}

    # Properties to store analysis data
    total_wgt_objects: bpy.props.IntProperty(default=0)
    unused_wgt_objects: bpy.props.IntProperty(default=0)
    used_wgt_objects: bpy.props.IntProperty(default=0)
    empty_collections: bpy.props.IntProperty(default=0)
    ghost_objects: bpy.props.IntProperty(default=0)
    ghost_potential: bpy.props.IntProperty(default=0)
    ghost_legitimate: bpy.props.IntProperty(default=0)
    ghost_low_priority: bpy.props.IntProperty(default=0)
    wgt_details: bpy.props.StringProperty(default="")
    collection_details: bpy.props.StringProperty(default="")
    ghost_details: bpy.props.StringProperty(default="")

    def analyze_ghost_data(self):
        """Analyze ghost data similar to the ghost_buster functions"""

        # Analyze WGT objects
        wgt_objects = []
        for obj in bpy.data.objects:
            if obj.name.startswith('WGT-'):
                wgt_objects.append(obj)

        self.total_wgt_objects = len(wgt_objects)

        # Check which WGT objects are used as bone custom shapes.
        # Note: custom_shape lives on pose bones (object.pose.bones), not on
        # armature data bones, so walk armature objects rather than
        # bpy.data.armatures (the old data-bone lookup never matched anything).
        used_wgts = set()
        for arm_obj in bpy.data.objects:
            if arm_obj.type == 'ARMATURE' and arm_obj.pose:
                for pbone in arm_obj.pose.bones:
                    if pbone.custom_shape:
                        used_wgts.add(pbone.custom_shape.name)

        self.used_wgt_objects = len(used_wgts)

        # Count unused WGT objects
        unused_wgts = []
        wgt_details_list = []
        for obj in wgt_objects:
            if obj.name not in used_wgts:
                # Skip linked objects (they're legitimate library content)
                if hasattr(obj, 'library') and obj.library is not None:
                    continue

                # Check if it's in the WGTS collection (typical ghost pattern)
                in_wgts_collection = False
                for collection in bpy.data.collections:
                    if 'WGTS' in collection.name and obj in collection.objects.values():
                        in_wgts_collection = True
                        break

                if in_wgts_collection:
                    unused_wgts.append(obj)
                    wgt_details_list.append(f"• {obj.name} (in WGTS collection)")

        self.unused_wgt_objects = len(unused_wgts)
        self.wgt_details = "\n".join(wgt_details_list[:10])  # Limit to first 10
        if len(unused_wgts) > 10:
            self.wgt_details += f"\n... and {len(unused_wgts) - 10} more"

        # Analyze empty collections
        empty_collections = []
        collection_details_list = []
        for collection in bpy.data.collections:
            if len(collection.objects) == 0 and len(collection.children) == 0:
                # Skip linked collections (they're legitimate library content)
                if hasattr(collection, 'library') and collection.library is not None:
                    continue

                # Check if it's anywhere in any scene's collection hierarchy
                linked_to_scene = False
                for scene in bpy.data.scenes:
                    if is_collection_in_scene_hierarchy(collection, scene.collection):
                        linked_to_scene = True
                        break

                if not linked_to_scene:
                    empty_collections.append(collection)
                    collection_details_list.append(f"• {collection.name}")

        self.empty_collections = len(empty_collections)
        self.collection_details = "\n".join(collection_details_list[:10])  # Limit to first 10
        if len(empty_collections) > 10:
            self.collection_details += f"\n... and {len(empty_collections) - 10} more"

        # Analyze ghost objects (objects not in scenes)
        candidate_objects = [obj for obj in bpy.data.objects if obj.type not in ['CAMERA', 'LIGHT']]

        potential_ghosts = 0
        legitimate = 0
        low_priority = 0
        ghost_details_list = []

        for obj in candidate_objects:
            # Skip linked objects (they're legitimate library content)
            if hasattr(obj, 'library') and obj.library is not None:
                continue

            # Only analyze objects not in any scene
            in_any_scene = any(obj in scene.objects.values() for scene in bpy.data.scenes)
            if not in_any_scene:
                # Classify the object (each lands in exactly one bucket)
                if is_object_legitimate_outside_scene(obj):
                    legitimate += 1
                    status = "LEGITIMATE (has valid use outside scenes)"
                elif obj.users >= 2:
                    potential_ghosts += 1
                    status = "GHOST (no legitimate use found, users >= 2)"
                else:
                    low_priority += 1
                    status = "LOW PRIORITY (no legitimate use found, users < 2)"

                ghost_details_list.append(f"• {obj.name} ({obj.type}): {status}")

        # The three buckets partition the local objects outside scenes,
        # so their sum is the total count.
        self.ghost_objects = legitimate + potential_ghosts + low_priority
        self.ghost_potential = potential_ghosts
        self.ghost_legitimate = legitimate
        self.ghost_low_priority = low_priority
        self.ghost_details = "\n".join(ghost_details_list[:10])  # Limit to first 10
        if len(ghost_details_list) > 10:
            self.ghost_details += f"\n... and {len(ghost_details_list) - 10} more"

    def draw(self, context):
        layout = self.layout

        # Title
        layout.label(text="Ghost Data Analysis", icon='GHOST_ENABLED')
        layout.separator()

        # WGT Objects section
        box = layout.box()
        box.label(text="WGT Widget Objects", icon='ARMATURE_DATA')
        col = box.column(align=True)
        col.label(text=f"Total WGT objects: {self.total_wgt_objects}")
        col.label(text=f"Used by armatures: {self.used_wgt_objects}", icon='CHECKMARK')
        if self.unused_wgt_objects > 0:
            col.label(text=f"Unused (potential ghosts): {self.unused_wgt_objects}", icon='ERROR')
            if self.wgt_details:
                box.separator()
                details_col = box.column(align=True)
                for line in self.wgt_details.split('\n'):
                    if line.strip():
                        details_col.label(text=line)
        else:
            col.label(text="No unused WGT objects found", icon='CHECKMARK')

        # Empty Collections section
        box = layout.box()
        box.label(text="Empty Collections", icon='OUTLINER_COLLECTION')
        col = box.column(align=True)
        if self.empty_collections > 0:
            col.label(text=f"Empty unlinked collections: {self.empty_collections}", icon='ERROR')
            if self.collection_details:
                box.separator()
                details_col = box.column(align=True)
                for line in self.collection_details.split('\n'):
                    if line.strip():
                        details_col.label(text=line)
        else:
            col.label(text="No empty unlinked collections found", icon='CHECKMARK')

        # Ghost Objects section
        box = layout.box()
        box.label(text="Ghost Objects Analysis", icon='OBJECT_DATA')
        col = box.column(align=True)
        col.label(text=f"Objects not in scenes: {self.ghost_objects}")
        if self.ghost_objects > 0:
            if self.ghost_potential > 0:
                col.label(text=f"Ghosts (users >= 2): {self.ghost_potential}", icon='ERROR')
            if self.ghost_legitimate > 0:
                col.label(text=f"Legitimate objects: {self.ghost_legitimate}", icon='CHECKMARK')
            if self.ghost_low_priority > 0:
                col.label(text=f"Low priority (users < 2): {self.ghost_low_priority}", icon='QUESTION')

            if self.ghost_details:
                box.separator()
                details_col = box.column(align=True)
                for line in self.ghost_details.split('\n'):
                    if line.strip():
                        details_col.label(text=line)
        else:
            col.label(text="No ghost objects found", icon='CHECKMARK')

        # Summary
        layout.separator()
        summary_box = layout.box()
        summary_box.label(text="Summary", icon='INFO')
        total_issues = self.unused_wgt_objects + self.empty_collections + self.ghost_potential
        if total_issues > 0:
            summary_box.label(text=f"Found {total_issues} ghost data issues that will be removed", icon='ERROR')
            if self.ghost_low_priority > 0:
                summary_box.label(text=f"+ {self.ghost_low_priority} low priority issues (optional)", icon='QUESTION')
            summary_box.label(text="Use Ghost Buster to clean up safely")
        else:
            summary_box.label(text="No ghost data issues detected!", icon='CHECKMARK')
            if self.ghost_low_priority > 0:
                summary_box.label(text=f"({self.ghost_low_priority} low priority issues available)", icon='INFO')

    def execute(self, context):
        return {'FINISHED'}

    def invoke(self, context, event):
        # Analyze the ghost data before showing the dialog
        self.analyze_ghost_data()
        return context.window_manager.invoke_popup(self, width=500)

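# Example (sketch): the detector presents a popup, so invoke it with the
# default invocation context rather than calling execute() directly:
#
#   bpy.ops.bst.ghost_detector('INVOKE_DEFAULT')
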
class ResyncEnforce(bpy.types.Operator):
    """Resync Enforce: Fix broken library override hierarchies by rebuilding from linked references"""
    bl_idname = "bst.resync_enforce"
    bl_label = "Resync Enforce"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        # Only available if there are selected objects
        return bool(context.selected_objects)

    def execute(self, context):
        # Get selected objects
        selected_objects = context.selected_objects.copy()

        if not selected_objects:
            self.report({'WARNING'}, "No objects selected for resync enforce")
            return {'CANCELLED'}

        # Collect library override objects
        override_objects = [obj for obj in selected_objects if obj.override_library]

        if not override_objects:
            self.report({'WARNING'}, "No library override objects found in selection")
            return {'CANCELLED'}

        try:
            # Store the current selection
            original_selection = set(context.selected_objects)

            # Select only the override objects
            bpy.ops.object.select_all(action='DESELECT')
            for obj in override_objects:
                obj.select_set(True)

            # Call Blender's resync enforce operation
            result = bpy.ops.object.library_override_operation(
                'INVOKE_DEFAULT',
                type='OVERRIDE_LIBRARY_RESYNC_HIERARCHY_ENFORCE',
                selection_set='SELECTED'
            )

            if result == {'FINISHED'}:
                self.report({'INFO'}, f"Resync enforce completed on {len(override_objects)} override objects")
                return_code = {'FINISHED'}
            else:
                self.report({'WARNING'}, "Resync enforce operation was cancelled or failed")
                return_code = {'CANCELLED'}

            # Restore the original selection
            bpy.ops.object.select_all(action='DESELECT')
            for obj in original_selection:
                if obj.name in bpy.data.objects:  # Check if object still exists
                    obj.select_set(True)

            return return_code

        except Exception as e:
            self.report({'ERROR'}, f"Resync enforce failed: {str(e)}")
            return {'CANCELLED'}

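# Example (sketch): select the broken override hierarchy first, since poll()
# requires a selection. The object name below is hypothetical.
#
#   bpy.ops.object.select_all(action='DESELECT')
#   bpy.data.objects['CH-Rig'].select_set(True)  # hypothetical override root
#   bpy.ops.bst.resync_enforce()
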
# Note: main() is called by the operator, not automatically

# List of classes to register
classes = (
    GhostBuster,
    GhostDetector,
    ResyncEnforce,
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

def unregister():
    for cls in reversed(classes):
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            pass
-63
@@ -1,63 +0,0 @@
import bpy

class RemoveCustomSplitNormals(bpy.types.Operator):
    """Remove custom split normals and apply smooth shading to all accessible mesh objects"""
    bl_idname = "bst.remove_custom_split_normals"
    bl_label = "Remove Custom Split Normals"
    bl_options = {'REGISTER', 'UNDO'}

    only_selected: bpy.props.BoolProperty(
        name="Only Selected Objects",
        description="Apply only to selected objects",
        default=True
    )

    def execute(self, context):
        # Store the current context
        original_active = context.active_object
        original_selected = context.selected_objects.copy()

        # Get object names that are in the current view layer
        view_layer_object_names = set(context.view_layer.objects.keys())

        # Choose objects based on the property
        if self.only_selected:
            objects = [obj for obj in context.selected_objects if obj.type == 'MESH' and obj.name in view_layer_object_names]
        else:
            objects = [obj for obj in bpy.data.objects if obj.type == 'MESH' and obj.name in view_layer_object_names]

        processed_count = 0
        for obj in objects:
            mesh = obj.data
            if mesh.has_custom_normals:
                # Select and make active
                obj.select_set(True)
                context.view_layer.objects.active = obj
                bpy.ops.object.mode_set(mode='EDIT')
                bpy.ops.mesh.customdata_custom_splitnormals_clear()
                bpy.ops.object.mode_set(mode='OBJECT')
                bpy.ops.object.shade_smooth()
                obj.select_set(False)
                processed_count += 1
                self.report({'INFO'}, f"Removed custom split normals and applied smooth shading to: {obj.name}")

        # Restore original selection and active object
        context.view_layer.objects.active = original_active
        for obj in original_selected:
            if obj.name in view_layer_object_names:
                obj.select_set(True)

        self.report({'INFO'}, f"Done: custom split normals removed and smooth shading applied to {'selected' if self.only_selected else 'all'} mesh objects. ({processed_count} processed)")
        return {'FINISHED'}

# Registration (the registered class must match the name defined above;
# the old MESH_OT_RemoveCustomSplitNormals reference was a NameError)
def register():
    bpy.utils.register_class(RemoveCustomSplitNormals)

def unregister():
    bpy.utils.unregister_class(RemoveCustomSplitNormals)

# Only run if this script is run directly
if __name__ == "__main__":
    register()
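# Example (sketch): with the add-on enabled (and the corrected registration
# above), the operator's checkbox can be passed as a keyword:
#
#   bpy.ops.bst.remove_custom_split_normals(only_selected=False)
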
-57
@@ -1,57 +0,0 @@
import bpy

class RemoveUnusedMaterialSlots(bpy.types.Operator):
    """Remove unused material slots from all mesh objects"""
    bl_idname = "bst.remove_unused_material_slots"
    bl_label = "Remove Unused Material Slots"
    bl_description = "Remove unused material slots from all mesh objects in the scene"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        processed_objects = 0

        # Store the original active object and selection
        original_active = context.view_layer.objects.active
        original_selection = [obj for obj in context.selected_objects]

        try:
            # Remove unused material slots from all local mesh objects
            for obj in bpy.data.objects:
                if obj.type == 'MESH' and obj.material_slots and obj.library is None:
                    # Temporarily ensure the object is in the view layer by linking it to the master collection
                    was_linked = False
                    if obj.name not in context.view_layer.objects:
                        context.scene.collection.objects.link(obj)
                        was_linked = True

                    # Store the original selection state
                    original_obj_selection = obj.select_get()

                    # Select the object and make it active
                    obj.select_set(True)
                    context.view_layer.objects.active = obj

                    # Remove unused material slots
                    bpy.ops.object.material_slot_remove_unused()
                    processed_objects += 1

                    # Restore the original selection state
                    obj.select_set(original_obj_selection)

                    # Unlink if we linked it
                    if was_linked:
                        context.scene.collection.objects.unlink(obj)

        finally:
            # Restore the original active object and selection
            context.view_layer.objects.active = original_active
            # Clear all selections first
            for obj in context.selected_objects:
                obj.select_set(False)
            # Restore the original selection
            for obj in original_selection:
                if obj.name in context.view_layer.objects:
                    obj.select_set(True)

        self.report({'INFO'}, f"Removed unused material slots from {processed_objects} mesh objects")
        return {'FINISHED'}
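# Example (sketch): a data-API way to *report* unused slots without bpy.ops,
# by checking which material indices the polygons actually reference.
# Local editable meshes assumed; this only prints, it removes nothing.
#
#   for obj in bpy.data.objects:
#       if obj.type == 'MESH' and obj.library is None and obj.material_slots:
#           used = {p.material_index for p in obj.data.polygons}
#           unused = [i for i in range(len(obj.material_slots)) if i not in used]
#           if unused:
#               print(f"{obj.name}: unused slot indices {unused}")
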
-100
@@ -1,100 +0,0 @@
import bpy

def find_node_distance_to_basecolor(node, visited=None):
    """Find the shortest path distance from a node to any Base Color input"""
    if visited is None:
        visited = set()

    if node in visited:
        return float('inf')

    visited.add(node)

    # If this is a Principled BSDF node, check whether its Base Color input is
    # connected. Note: this returns 0 whenever Base Color is connected,
    # regardless of which input the traversal actually arrived through
    # (a deliberate approximation).
    if node.type == 'BSDF_PRINCIPLED':
        for socket in node.inputs:
            if socket.name == 'Base Color':
                # If this input is connected, return 0 (we found our target)
                if socket.links:
                    return 0
                return float('inf')

    # Check all outputs of this node
    min_distance = float('inf')
    for output in node.outputs:
        for link in output.links:
            # Recursively check connected nodes
            distance = find_node_distance_to_basecolor(link.to_node, visited.copy())
            if distance is not None and distance < min_distance:
                min_distance = distance + 1

    return min_distance if min_distance != float('inf') else None

def find_connected_basecolor_texture(node_tree):
    """Find any image texture directly connected to a Base Color input"""
    for node in node_tree.nodes:
        if node.type == 'BSDF_PRINCIPLED':
            base_color_input = node.inputs.get('Base Color')
            if base_color_input and base_color_input.links:
                # Get the node connected to Base Color
                connected_node = base_color_input.links[0].from_node
                # If it's an image texture, return it
                if connected_node.type == 'TEX_IMAGE' and connected_node.image:
                    return connected_node
    return None

def select_diffuse_nodes():
    # Get all materials in the blend file
    materials = bpy.data.materials

    # Counter for found nodes
    found_nodes = 0

    # Keywords to look for in image names (case insensitive)
    keywords = ['diffuse', 'basecolor', 'base_color', 'albedo', 'color']

    # Iterate through all materials
    for material in materials:
        # Skip materials without node trees
        if not material.use_nodes:
            continue

        node_tree = material.node_tree

        # First, try to find any image texture connected to Base Color
        base_color_texture = find_connected_basecolor_texture(node_tree)
        if base_color_texture:
            node_tree.nodes.active = base_color_texture
            base_color_texture.select = True
            found_nodes += 1
            print(f"Selected Base Color connected texture '{base_color_texture.image.name}' in material: {material.name}")
            continue

        # If no direct connection found, fall back to name-based search
        matching_nodes = []
        for node in node_tree.nodes:
            if node.type == 'TEX_IMAGE' and node.image:
                # Check if the image name contains any of our keywords
                image_name = node.image.name.lower()
                if any(keyword in image_name for keyword in keywords):
                    # Calculate distance to a Base Color input
                    distance = find_node_distance_to_basecolor(node)
                    if distance is not None:
                        matching_nodes.append((node, distance))

        # If we found any matching nodes, select the one with the shortest distance
        if matching_nodes:
            # Sort by distance (closest to Base Color first)
            matching_nodes.sort(key=lambda x: x[1])
            selected_node = matching_nodes[0][0]

            node_tree.nodes.active = selected_node
            selected_node.select = True
            found_nodes += 1
            print(f"Selected named texture '{selected_node.image.name}' in material: {material.name} (distance to Base Color: {matching_nodes[0][1]})")

    print(f"\nTotal texture nodes selected: {found_nodes}")

# Only run if this script is run directly
if __name__ == "__main__":
    select_diffuse_nodes()
-100
@@ -1,100 +0,0 @@
import bpy

class SpawnSceneStructure(bpy.types.Operator):
    """Create a standard scene collection structure: Env, Animation, Lgt with subcollections"""
    bl_idname = "bst.spawn_scene_structure"
    bl_label = "Spawn Scene Structure"
    bl_options = {'REGISTER', 'UNDO'}

    def find_layer_collection(self, layer_collection, collection_name):
        """Recursively find a layer collection by name"""
        if layer_collection.collection.name == collection_name:
            return layer_collection

        for child in layer_collection.children:
            result = self.find_layer_collection(child, collection_name)
            if result:
                return result
        return None

    def execute(self, context):
        scene = context.scene
        scene_collection = scene.collection

        # Define the structure to create
        structure = {
            "Env": ["ROOTS", "Dressing"],
            "Animation": ["Cam", "Char"],
            "Lgt": []
        }

        created_collections = []
        skipped_collections = []

        try:
            for main_collection_name, subcollections in structure.items():
                # Check if the main collection already exists
                main_collection = None
                for existing_collection in scene_collection.children:
                    if existing_collection.name == main_collection_name:
                        main_collection = existing_collection
                        skipped_collections.append(main_collection_name)
                        break

                # Create the main collection if it doesn't exist
                if main_collection is None:
                    main_collection = bpy.data.collections.new(main_collection_name)
                    scene_collection.children.link(main_collection)
                    created_collections.append(main_collection_name)

                # Create subcollections
                for subcollection_name in subcollections:
                    # Check if the subcollection already exists
                    subcollection_exists = False
                    existing_subcollection = None
                    for sub in main_collection.children:
                        if sub.name == subcollection_name:
                            subcollection_exists = True
                            existing_subcollection = sub
                            skipped_collections.append(f"{main_collection_name}/{subcollection_name}")
                            break

                    # Create the subcollection if it doesn't exist
                    if not subcollection_exists:
                        subcollection = bpy.data.collections.new(subcollection_name)
                        main_collection.children.link(subcollection)
                        created_collections.append(f"{main_collection_name}/{subcollection_name}")

                        # Apply special settings to the ROOTS collection
                        if subcollection_name == "ROOTS":
                            subcollection.hide_viewport = True  # Hide in all viewports
                            # Exclude from the view layer
                            view_layer = context.view_layer
                            layer_collection = self.find_layer_collection(view_layer.layer_collection, subcollection_name)
                            if layer_collection:
                                layer_collection.exclude = True
                    else:
                        # Apply settings to an existing ROOTS collection if it wasn't properly configured
                        if subcollection_name == "ROOTS" and existing_subcollection:
                            existing_subcollection.hide_viewport = True
                            view_layer = context.view_layer
                            layer_collection = self.find_layer_collection(view_layer.layer_collection, subcollection_name)
                            if layer_collection:
                                layer_collection.exclude = True

            # Report results
            if created_collections:
                created_list = ", ".join(created_collections)
                if skipped_collections:
                    skipped_list = ", ".join(skipped_collections)
                    self.report({'INFO'}, f"Created: {created_list}. Skipped existing: {skipped_list}")
                else:
                    self.report({'INFO'}, f"Created scene structure: {created_list}")
            else:
                self.report({'INFO'}, "Scene structure already exists - no collections created")

            return {'FINISHED'}

        except Exception as e:
            self.report({'ERROR'}, f"Failed to create scene structure: {str(e)}")
            return {'CANCELLED'}
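# Example (sketch): with the add-on enabled, spawn the structure and list
# what now sits under the scene collection:
#
#   bpy.ops.bst.spawn_scene_structure()
#   for top in bpy.context.scene.collection.children:
#       print(top.name, [c.name for c in top.children])
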
-1458
File diff suppressed because it is too large
-1639
File diff suppressed because it is too large
-104
@@ -1,104 +0,0 @@
import bpy
from ..ops.NoSubdiv import NoSubdiv
from ..ops.remove_custom_split_normals import RemoveCustomSplitNormals
from ..ops.create_ortho_camera import CreateOrthoCamera
from ..ops.spawn_scene_structure import SpawnSceneStructure
from ..ops.delete_single_keyframe_actions import DeleteSingleKeyframeActions
from ..ops.find_material_users import FindMaterialUsers, MATERIAL_USERS_OT_summary_dialog
from ..ops.remove_unused_material_slots import RemoveUnusedMaterialSlots
from ..ops.convert_relations_to_constraint import ConvertRelationsToConstraint

class BulkSceneGeneral(bpy.types.Panel):
    """Bulk Scene General Panel"""
    bl_label = "Scene General"
    bl_idname = "VIEW3D_PT_bulk_scene_general"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Edit'
    bl_parent_id = "VIEW3D_PT_bulk_scene_tools"
    bl_order = 0  # This will make it appear at the very top of the main panel

    def draw(self, context):
        layout = self.layout

        # Scene Structure section
        box = layout.box()
        box.label(text="Scene Structure")
        row = box.row()
        row.scale_y = 1.2
        row.operator("bst.spawn_scene_structure", text="Spawn Scene Structure", icon='OUTLINER_COLLECTION')

        # Mesh section
        box = layout.box()
        box.label(text="Mesh")
        # Add a checkbox for the only_selected property
        row = box.row()
        row.prop(context.window_manager, "bst_no_subdiv_only_selected", text="Selected Only")
        row = box.row(align=True)
        row.operator("bst.no_subdiv", text="No Subdiv", icon='MOD_SUBSURF').only_selected = context.window_manager.bst_no_subdiv_only_selected
        row.operator("bst.remove_custom_split_normals", text="Remove Custom Split Normals", icon='X').only_selected = context.window_manager.bst_no_subdiv_only_selected

        row = box.row(align=True)
        row.operator("bst.create_ortho_camera", text="Create Ortho Camera", icon='OUTLINER_DATA_CAMERA')
        row = box.row(align=True)
        row.operator("bst.free_gpu", text="Free GPU", icon='MEMORY')

        # Materials section
        box = layout.box()
        box.label(text="Materials")
        row = box.row(align=True)
        row.operator("bst.remove_unused_material_slots", text="Remove Unused Material Slots", icon='MATERIAL')
        row = box.row(align=True)
        row.operator("bst.find_material_users", text="Find Material Users", icon='VIEWZOOM')

        # Animation Data section
        box = layout.box()
        box.label(text="Animation Data")
        row = box.row(align=True)
        row.operator("bst.delete_single_keyframe_actions", text="Delete Single Keyframe Actions", icon='ANIM_DATA')
        row = box.row(align=True)
        row.operator("bst.convert_relations_to_constraint", text="Convert Relations to Constraint", icon_value=405)

# List of all classes in this module
classes = (
    BulkSceneGeneral,
    NoSubdiv,
    RemoveCustomSplitNormals,
    CreateOrthoCamera,
    SpawnSceneStructure,
    DeleteSingleKeyframeActions,
    FindMaterialUsers,
    MATERIAL_USERS_OT_summary_dialog,
    RemoveUnusedMaterialSlots,
    ConvertRelationsToConstraint,
)

# Registration
def register():
    for cls in classes:
        bpy.utils.register_class(cls)
    # Register the window manager property for the checkbox
    bpy.types.WindowManager.bst_no_subdiv_only_selected = bpy.props.BoolProperty(
        name="Selected Only",
        description="Apply only to selected objects",
        default=True
    )
    # Register a temporary material property for the Find Material Users operator
    bpy.types.Scene.bst_temp_material = bpy.props.PointerProperty(
        name="Temporary Material",
        description="Temporary material selection for the Find Material Users operator",
        type=bpy.types.Material
    )

def unregister():
    for cls in reversed(classes):
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            pass
    # Unregister the window manager property
    if hasattr(bpy.types.WindowManager, "bst_no_subdiv_only_selected"):
        del bpy.types.WindowManager.bst_no_subdiv_only_selected
    # Unregister the temporary material property
    if hasattr(bpy.types.Scene, "bst_temp_material"):
        del bpy.types.Scene.bst_temp_material
-1033
File diff suppressed because it is too large
@@ -1,41 +0,0 @@
# Raincloud's Bulk Scene Tools

A couple of Blender tools to help me automate some tedious tasks in scene optimization.

## Features

- Bulk Data Remap
- Bulk Viewport Display

Officially supports Blender 4.4.1, but may still work on older versions.

## Installation

1. Download the addon (zip file)
2. In Blender, go to Edit > Preferences > Add-ons
3. Click "Install..." and select the downloaded zip file, or drag and drop it if Blender allows.
4. Ensure the addon is enabled.

## Usage

1. Open the Blender file/scene to optimize
2. Open side panel > Edit tab > Bulk Scene Tools
3. Data remapper: choose the data types to remap. Currently supports Images, Materials, and Fonts; tick a data type to exclude it from remapping.
4. View the number of duplicates and use the dropdown menus to select which duplicate groups to exclude from remapping.
5. Remap. This action is undo-able!
6. If remapping worked to your liking, Purge Unused Data so that the Viewport Display function has fewer materials to calculate, unless you are applying it only to selected objects.
7. Activating Solid viewport shading mode is recommended so you can see what the Material Viewport function is doing. Change color from Material to Texture if you prefer; the function should find the diffuse texture if one exists.
8. Apply the material calculation to selected objects if preferred.
9. Manually set the display color for objects that couldn't be calculated, or weren't calculated to your preference.

## Workflow for unpacking and organizing all textures

These steps can also be driven from the Python console; a sketch follows the list.

1. Pack all images (File > External Data > Pack Resources, or BST > Bulk Path Management > Workflow > Pack)
2. Rename all image datablocks as preferred (easily done within the Bulk Operations dropdown, but the Simple Renaming extension from the Blender community is also recommended)
3. Remap all image paths as preferred (Bulk Operations)
4. Bulk Path Management > Save All (saves selected images if any are selected, otherwise all images in the file)
5. Remove pack
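A minimal console equivalent of the pack/save/unpack steps, using Blender's built-in operators as an approximation of the add-on's Pack and Save All buttons (operator defaults assumed; steps 2-3 still go through the add-on UI):

```python
import bpy

bpy.ops.file.pack_all()                       # 1. pack all external images
# 2-3. rename datablocks / remap paths via the add-on UI
bpy.ops.image.save_all_modified()             # 4. write modified images to disk
bpy.ops.file.unpack_all(method='USE_LOCAL')   # 5. remove pack, keeping local copies
```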
## Author

- **RaincloudTheDragon**
@@ -7,7 +7,7 @@
   "id": "basedplayblast",
   "name": "BasedPlayblast",
   "tagline": "Easily create playblasts from Blender and Flamenco",
-  "version": "2.6.1",
+  "version": "2.6.3",
   "type": "add-on",
   "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
   "license": [
@@ -24,16 +24,16 @@
   "Workflow",
   "Video"
   ],
-  "archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.1/BasedPlayblast.v2.6.1.zip",
+  "archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.3/BasedPlayblast.v2.6.3.zip",
-  "archive_size": 48471,
+  "archive_size": 49732,
-  "archive_hash": "sha256:ce9740ad252a00643f75202b53c9ef1e9c6ee8b5a2d34cbaf751b4084e78665c"
+  "archive_hash": "sha256:078b406105ce6f4802e75233569841e2f73d082e09cd1d954696681ebf72b627"
   },
   {
   "schema_version": "1.0.0",
   "id": "rainclouds_bulk_scene_tools",
   "name": "Raincloud's Bulk Scene Tools",
   "tagline": "Bulk utilities for optimizing scene data",
-  "version": "0.14.0",
+  "version": "0.16.0",
   "type": "add-on",
   "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
   "license": [
@@ -49,16 +49,16 @@
   "Workflow",
   "Materials"
   ],
-  "archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.14.0/Rainys_Bulk_Scene_Tools.v0.14.0.zip",
+  "archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.16.0/Rainys_Bulk_Scene_Tools.v0.16.0.zip",
-  "archive_size": 78363,
+  "archive_size": 80251,
-  "archive_hash": "sha256:943c723511fb8d7199bf079cb94ba63c552d6477b9a4e003bfffc185c169ea4b"
+  "archive_hash": "sha256:3e6fafe11caa39e48b94288c12b2a88e521c928955a854ffdd1bd0936e6bc70a"
   },
   {
   "schema_version": "1.0.0",
   "id": "atomic_data_manager",
   "name": "Atomic Data Manager",
   "tagline": "Smart cleanup and inspection of Blender data-blocks",
-  "version": "2.1.0",
+  "version": "2.5.0",
   "type": "add-on",
   "maintainer": "RaincloudTheDragon",
   "license": [
@@ -70,9 +70,31 @@
   "management",
   "cleanup"
   ],
-  "archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.1.0/Atomic_Data_Manager.v2.1.0.zip",
+  "archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.5.0/Atomic_Data_Manager.v2.5.0.zip",
-  "archive_size": 73646,
+  "archive_size": 114674,
-  "archive_hash": "sha256:a10f6b7eb9d7c437574c66dc15f73d74a0ff86e793c7460804d7bf5cb7cb29cc"
+  "archive_hash": "sha256:4b4834ed3910a428d4cb01f1891247ad80089b6c5324fc27c6862b09e81ff1c1"
+  },
+  {
+  "schema_version": "1.0.0",
+  "id": "sheepit_project_submitter",
+  "name": "SheepIt Project Submitter",
+  "tagline": "Submit projects to SheepIt render farm",
+  "version": "0.0.7",
+  "type": "add-on",
+  "maintainer": "RaincloudTheDragon",
+  "license": [
+  "GPL-3.0-or-later"
+  ],
+  "blender_version_min": "3.0.0",
+  "tags": [
+  "render",
+  "farm",
+  "submission",
+  "utility"
+  ],
+  "archive_url": "https://github.com/RaincloudTheDragon/sheepit_project_submitter/releases/download/v0.0.7/SheepIt_Project_Submitter.v0.0.7.zip",
+  "archive_size": 47250,
+  "archive_hash": "sha256:cb8dee48c45cc51dd8237981f4ab96d97d476b547c8c640606e9bbfd0390a055"
   }
   ]
 }
@@ -0,0 +1,100 @@
{
  "version": "v1",
  "blocklist": [],
  "data": [
    {
      "schema_version": "1.0.0",
      "id": "basedplayblast",
      "name": "BasedPlayblast",
      "tagline": "Easily create playblasts from Blender and Flamenco",
      "version": "2.6.3",
      "type": "add-on",
      "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
      "license": [
        "GPL-3.0-or-later"
      ],
      "blender_version_min": "4.2.0",
      "website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
      "permissions": {
        "files": "Import/export files and data"
      },
      "tags": [
        "Animation",
        "Render",
        "Workflow",
        "Video"
      ],
      "archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.3/BasedPlayblast.v2.6.3.zip",
      "archive_size": 49732,
      "archive_hash": "sha256:078b406105ce6f4802e75233569841e2f73d082e09cd1d954696681ebf72b627"
    },
    {
      "schema_version": "1.0.0",
      "id": "rainclouds_bulk_scene_tools",
      "name": "Raincloud's Bulk Scene Tools",
      "tagline": "Bulk utilities for optimizing scene data",
      "version": "0.16.0",
      "type": "add-on",
      "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
      "license": [
        "GPL-3.0-or-later"
      ],
      "blender_version_min": "4.2.0",
      "website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
      "permissions": {
        "files": "Read and write external resources referenced by scenes"
      },
      "tags": [
        "Scene",
        "Workflow",
        "Materials"
      ],
      "archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.16.0/Rainys_Bulk_Scene_Tools.v0.16.0.zip",
      "archive_size": 80251,
      "archive_hash": "sha256:3e6fafe11caa39e48b94288c12b2a88e521c928955a854ffdd1bd0936e6bc70a"
    },
    {
      "schema_version": "1.0.0",
      "id": "atomic_data_manager",
      "name": "Atomic Data Manager",
      "tagline": "Smart cleanup and inspection of Blender data-blocks",
      "version": "2.5.0",
      "type": "add-on",
      "maintainer": "RaincloudTheDragon",
      "license": [
        "GPL-3.0-or-later"
      ],
      "blender_version_min": "4.2.0",
      "tags": [
        "utility",
        "management",
        "cleanup"
      ],
      "archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.5.0/Atomic_Data_Manager.v2.5.0.zip",
      "archive_size": 114674,
      "archive_hash": "sha256:4b4834ed3910a428d4cb01f1891247ad80089b6c5324fc27c6862b09e81ff1c1"
    },
    {
      "schema_version": "1.0.0",
      "id": "sheepit_project_submitter",
      "name": "SheepIt Project Submitter",
      "tagline": "Submit projects to SheepIt render farm",
      "version": "0.0.6",
      "type": "add-on",
      "maintainer": "RaincloudTheDragon",
      "license": [
        "GPL-3.0-or-later"
      ],
      "blender_version_min": "3.0.0",
      "tags": [
        "render",
        "farm",
        "submission",
        "utility"
      ],
      "archive_url": "https://github.com/RaincloudTheDragon/sheepit_project_submitter/releases/download/v0.0.6/SheepIt_Project_Submitter.v0.0.6.zip",
      "archive_size": 46535,
      "archive_hash": "sha256:c465fe190fc2e487f9a0bd7e335d7fbaa8da1000bfbedb785dc821775fbc82a5"
    }
  ]
}
+31
-10
@@ -7,13 +7,13 @@
   "id": "basedplayblast",
   "name": "BasedPlayblast",
   "tagline": "Easily create playblasts from Blender and Flamenco",
-  "version": "2.3.1",
+  "version": "2.6.2",
   "type": "add-on",
   "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
   "license": [
   "GPL-3.0-or-later"
   ],
-  "blender_version_min": "5.0.0",
+  "blender_version_min": "4.2.0",
   "website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
   "permissions": {
   "files": "Import/export files and data"
@@ -24,22 +24,22 @@
   "Workflow",
   "Video"
   ],
-  "archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.3.1/BasedPlayblast.v2.3.1.zip",
+  "archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.2/BasedPlayblast.v2.6.2.zip",
-  "archive_size": 38295,
+  "archive_size": 48968,
-  "archive_hash": "sha256:98f978a96fb8d15bae60987f305901ba0acd7a37ddb45627724326809e43622d"
+  "archive_hash": "sha256:c359a24fccb10b9d8df2941b0d75435eb0f7ac89db61836edb6d993b86354952"
   },
   {
   "schema_version": "1.0.0",
   "id": "rainclouds_bulk_scene_tools",
   "name": "Raincloud's Bulk Scene Tools",
   "tagline": "Bulk utilities for optimizing scene data",
-  "version": "0.11.0",
+  "version": "0.15.1",
   "type": "add-on",
   "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
   "license": [
   "GPL-3.0-or-later"
   ],
-  "blender_version_min": "4.5.0",
+  "blender_version_min": "4.2.0",
   "website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
   "permissions": {
   "files": "Read and write external resources referenced by scenes"
@@ -49,9 +49,30 @@
   "Workflow",
   "Materials"
   ],
-  "archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.11.0/Rainys_Bulk_Scene_Tools.v0.11.0.zip",
+  "archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.15.1/Rainys_Bulk_Scene_Tools.v0.15.1.zip",
-  "archive_size": 72969,
+  "archive_size": 81044,
-  "archive_hash": "sha256:827451b11808488e8682f4bdd4bfff8b6f1f0fe2aa5bbc53845fbf9a13e15757"
+  "archive_hash": "sha256:a72f7dbf7c35fda94a7b67df79ef131391e0fe2ac4f416703b07ef59afd7235b"
+  },
+  {
+  "schema_version": "1.0.0",
+  "id": "atomic_data_manager",
+  "name": "Atomic Data Manager",
+  "tagline": "Smart cleanup and inspection of Blender data-blocks",
+  "version": "2.4.1",
+  "type": "add-on",
+  "maintainer": "RaincloudTheDragon",
+  "license": [
+  "GPL-3.0-or-later"
+  ],
+  "blender_version_min": "4.2.0",
+  "tags": [
+  "utility",
+  "management",
+  "cleanup"
+  ],
+  "archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.4.1/Atomic_Data_Manager.v2.4.1.zip",
+  "archive_size": 108842,
+  "archive_hash": "sha256:4086ada3e9e8c852fd02d455f11f2f20fd19ca68acd10b101ab3aa0fae2be210"
   }
   ]
 }
@@ -0,0 +1,100 @@
+{
+"version": "v1",
+"blocklist": [],
+"data": [
+{
+"schema_version": "1.0.0",
+"id": "basedplayblast",
+"name": "BasedPlayblast",
+"tagline": "Easily create playblasts from Blender and Flamenco",
+"version": "2.6.3",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
+"permissions": {
+"files": "Import/export files and data"
+},
+"tags": [
+"Animation",
+"Render",
+"Workflow",
+"Video"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.3/BasedPlayblast.v2.6.3.zip",
+"archive_size": 49732,
+"archive_hash": "sha256:078b406105ce6f4802e75233569841e2f73d082e09cd1d954696681ebf72b627"
+},
+{
+"schema_version": "1.0.0",
+"id": "rainclouds_bulk_scene_tools",
+"name": "Raincloud's Bulk Scene Tools",
+"tagline": "Bulk utilities for optimizing scene data",
+"version": "0.16.0",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
+"permissions": {
+"files": "Read and write external resources referenced by scenes"
+},
+"tags": [
+"Scene",
+"Workflow",
+"Materials"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.16.0/Rainys_Bulk_Scene_Tools.v0.16.0.zip",
+"archive_size": 80251,
+"archive_hash": "sha256:3e6fafe11caa39e48b94288c12b2a88e521c928955a854ffdd1bd0936e6bc70a"
+},
+{
+"schema_version": "1.0.0",
+"id": "atomic_data_manager",
+"name": "Atomic Data Manager",
+"tagline": "Smart cleanup and inspection of Blender data-blocks",
+"version": "2.5.0",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"tags": [
+"utility",
+"management",
+"cleanup"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.5.0/Atomic_Data_Manager.v2.5.0.zip",
+"archive_size": 114674,
+"archive_hash": "sha256:4b4834ed3910a428d4cb01f1891247ad80089b6c5324fc27c6862b09e81ff1c1"
+},
+{
+"schema_version": "1.0.0",
+"id": "sheepit_project_submitter",
+"name": "SheepIt Project Submitter",
+"tagline": "Submit projects to SheepIt render farm",
+"version": "0.0.6",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "3.0.0",
+"tags": [
+"render",
+"farm",
+"submission",
+"utility"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/sheepit_project_submitter/releases/download/v0.0.6/SheepIt_Project_Submitter.v0.0.6.zip",
+"archive_size": 46535,
+"archive_hash": "sha256:c465fe190fc2e487f9a0bd7e335d7fbaa8da1000bfbedb785dc821775fbc82a5"
+}
+]
+}
@@ -0,0 +1,78 @@
+{
+"version": "v1",
+"blocklist": [],
+"data": [
+{
+"schema_version": "1.0.0",
+"id": "basedplayblast",
+"name": "BasedPlayblast",
+"tagline": "Easily create playblasts from Blender and Flamenco",
+"version": "2.6.2",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
+"permissions": {
+"files": "Import/export files and data"
+},
+"tags": [
+"Animation",
+"Render",
+"Workflow",
+"Video"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.2/BasedPlayblast.v2.6.2.zip",
+"archive_size": 48968,
+"archive_hash": "sha256:c359a24fccb10b9d8df2941b0d75435eb0f7ac89db61836edb6d993b86354952"
+},
+{
+"schema_version": "1.0.0",
+"id": "rainclouds_bulk_scene_tools",
+"name": "Raincloud's Bulk Scene Tools",
+"tagline": "Bulk utilities for optimizing scene data",
+"version": "0.15.1",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
+"permissions": {
+"files": "Read and write external resources referenced by scenes"
+},
+"tags": [
+"Scene",
+"Workflow",
+"Materials"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.15.1/Rainys_Bulk_Scene_Tools.v0.15.1.zip",
+"archive_size": 81044,
+"archive_hash": "sha256:a72f7dbf7c35fda94a7b67df79ef131391e0fe2ac4f416703b07ef59afd7235b"
+},
+{
+"schema_version": "1.0.0",
+"id": "atomic_data_manager",
+"name": "Atomic Data Manager",
+"tagline": "Smart cleanup and inspection of Blender data-blocks",
+"version": "2.4.1",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"tags": [
+"utility",
+"management",
+"cleanup"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.4.1/Atomic_Data_Manager.v2.4.1.zip",
+"archive_size": 108842,
+"archive_hash": "sha256:4086ada3e9e8c852fd02d455f11f2f20fd19ca68acd10b101ab3aa0fae2be210"
+}
+]
+}
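These repository index files are plain JSON, so an entry can be verified offline against its `archive_size` and `archive_hash` fields. A minimal sketch using only the Python standard library; the file paths in the usage comment are hypothetical:

```python
import hashlib
import json

def verify_entry(entry: dict, archive_path: str) -> bool:
    """Check a downloaded extension .zip against an index entry."""
    with open(archive_path, "rb") as fh:
        blob = fh.read()
    # archive_hash entries are "sha256:<hexdigest>"
    algo, _, expected = entry["archive_hash"].partition(":")
    if algo != "sha256":
        raise ValueError(f"unexpected hash algorithm: {algo}")
    return (len(blob) == entry["archive_size"]
            and hashlib.sha256(blob).hexdigest() == expected)

# Usage (paths hypothetical):
# with open("index.json") as fh:
#     index = json.load(fh)
# print(verify_entry(index["data"][0], "BasedPlayblast.v2.6.2.zip"))
```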
+30
-9
@@ -7,7 +7,7 @@
 "id": "basedplayblast",
 "name": "BasedPlayblast",
 "tagline": "Easily create playblasts from Blender and Flamenco",
-"version": "2.4.0",
+"version": "2.6.2",
 "type": "add-on",
 "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
 "license": [
@@ -24,22 +24,22 @@
 "Workflow",
 "Video"
 ],
-"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.4.0/BasedPlayblast.v2.4.0.zip",
+"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.2/BasedPlayblast.v2.6.2.zip",
-"archive_size": 40376,
+"archive_size": 48968,
-"archive_hash": "sha256:544369c72024681cb45a4ee073ae684b56f08f2e0d8d9906a75fcbb11e0a2196"
+"archive_hash": "sha256:c359a24fccb10b9d8df2941b0d75435eb0f7ac89db61836edb6d993b86354952"
 },
 {
 "schema_version": "1.0.0",
 "id": "rainclouds_bulk_scene_tools",
 "name": "Raincloud's Bulk Scene Tools",
 "tagline": "Bulk utilities for optimizing scene data",
-"version": "0.11.0",
+"version": "0.14.1",
 "type": "add-on",
 "maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
 "license": [
 "GPL-3.0-or-later"
 ],
-"blender_version_min": "4.5.0",
+"blender_version_min": "4.2.0",
 "website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
 "permissions": {
 "files": "Read and write external resources referenced by scenes"
@@ -49,9 +49,30 @@
 "Workflow",
 "Materials"
 ],
-"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.11.0/Rainys_Bulk_Scene_Tools.v0.11.0.zip",
+"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.14.1/Rainys_Bulk_Scene_Tools.v0.14.1.zip",
-"archive_size": 72969,
+"archive_size": 79521,
-"archive_hash": "sha256:827451b11808488e8682f4bdd4bfff8b6f1f0fe2aa5bbc53845fbf9a13e15757"
+"archive_hash": "sha256:0ae09f57cf81a971406f05f50dc0d9a25c8dfbbedfabb1a9d72655194c1a9250"
+},
+{
+"schema_version": "1.0.0",
+"id": "atomic_data_manager",
+"name": "Atomic Data Manager",
+"tagline": "Smart cleanup and inspection of Blender data-blocks",
+"version": "2.3.0",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"tags": [
+"utility",
+"management",
+"cleanup"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.3.0/Atomic_Data_Manager.v2.3.0.zip",
+"archive_size": 92609,
+"archive_hash": "sha256:be0304820428e461c3ecda4ab652d5c84d3df9c0548292870350ca86a9ba828c"
 }
 ]
 }
@@ -0,0 +1,78 @@
+{
+"version": "v1",
+"blocklist": [],
+"data": [
+{
+"schema_version": "1.0.0",
+"id": "basedplayblast",
+"name": "BasedPlayblast",
+"tagline": "Easily create playblasts from Blender and Flamenco",
+"version": "2.6.2",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"website": "https://github.com/RaincloudTheDragon/BasedPlayblast",
+"permissions": {
+"files": "Import/export files and data"
+},
+"tags": [
+"Animation",
+"Render",
+"Workflow",
+"Video"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/BasedPlayblast/releases/download/v2.6.2/BasedPlayblast.v2.6.2.zip",
+"archive_size": 48968,
+"archive_hash": "sha256:c359a24fccb10b9d8df2941b0d75435eb0f7ac89db61836edb6d993b86354952"
+},
+{
+"schema_version": "1.0.0",
+"id": "rainclouds_bulk_scene_tools",
+"name": "Raincloud's Bulk Scene Tools",
+"tagline": "Bulk utilities for optimizing scene data",
+"version": "0.15.1",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon <raincloudthedragon@gmail.com>",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"website": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools",
+"permissions": {
+"files": "Read and write external resources referenced by scenes"
+},
+"tags": [
+"Scene",
+"Workflow",
+"Materials"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/Rainys-Bulk-Scene-Tools/releases/download/v0.15.1/Rainys_Bulk_Scene_Tools.v0.15.1.zip",
+"archive_size": 81044,
+"archive_hash": "sha256:a72f7dbf7c35fda94a7b67df79ef131391e0fe2ac4f416703b07ef59afd7235b"
+},
+{
+"schema_version": "1.0.0",
+"id": "atomic_data_manager",
+"name": "Atomic Data Manager",
+"tagline": "Smart cleanup and inspection of Blender data-blocks",
+"version": "2.4.1",
+"type": "add-on",
+"maintainer": "RaincloudTheDragon",
+"license": [
+"GPL-3.0-or-later"
+],
+"blender_version_min": "4.2.0",
+"tags": [
+"utility",
+"management",
+"cleanup"
+],
+"archive_url": "https://github.com/RaincloudTheDragon/atomic-data-manager/releases/download/v2.4.1/Atomic_Data_Manager.v2.4.1.zip",
+"archive_size": 108842,
+"archive_hash": "sha256:4086ada3e9e8c852fd02d455f11f2f20fd19ca68acd10b101ab3aa0fae2be210"
+}
+]
+}
@@ -1,3 +1,102 @@
+## [v2.5.0] - 2026-01-28
+
+### Features
+- Missing file tools: add “Relink All” and improve replacement workflow.
+
+### Fixes
+- Missing file UI: fix text field paste + layout/truncation issues; center the detect-missing popup; refine replacement path handling (better dir vs file behavior).
+- RNA analysis: expand datablock coverage and refine dependency tracking to reduce false “unused” results.
+
+### Internal
+- Maintenance: remove deprecated recovery option; improve ignore rules for hidden dot-directories.
+
+## [v2.4.1] - 2026-01-14
+
+### Fixes
+- Fixed RNA analysis crashes when opening new blend files by rebuilding data-block type references dynamically
+- Fixed indentation errors that prevented RNA dump from processing most data-blocks
+- Fixed compositing nodetree detection by adding scenes as root nodes in dependency graph
+- RNA dump JSON file now always generated regardless of debug print settings
+- Refactored repetitive snapshotting code into `_safe_snapshot()` helper function
+
+## [v2.4.0] - 2026-01-13
+
+### Features
+- **Major Architecture Change: RNA-Based Analysis System**
+  - Replaced multi-process worker system with faster, more robust RNA-based dependency analysis
+  - All data types now use unified RNA introspection for dependency tracking
+  - Eliminated worker processes, job indexing, and subprocess overhead
+  - RNA data can be optionally dumped to JSON for debugging
+- Improved Clean dialog layout
+  - Increased dialog width to 1000px for better visibility
+  - Items now display in 4-column grid layout to reduce vertical scrolling
+
+### Fixes
+- Fixed node groups used by objects via Geometry Nodes modifiers not being detected as used
+- Fixed RigidBodyWorld and other scene-linked data-blocks incorrectly flagged as cleanable
+- Fixed area lights and other object data-blocks in scene collections not being marked as used
+- Added safety checks to prevent crashes during RNA extraction (recursion limits, data-block validation)
+- Fixed RNA extraction handling for objects' modifier node groups
+
+### Performance
+- Significantly faster scanning across all categories using RNA analysis
+- Single-pass dependency graph building shared across all category scans
+
+## [v2.3.1] - 2026-01-13
+
+### Fixes
+- Integrate proper UDIM detection
+
+## [v2.3.0] - 2026-01-06
+
+### Features
+- Added "Enable Debug Prints" preference to control debug console output
+  - Debug messages now only print when this preference is enabled (default: off)
+  - All debug print statements use centralized `config.debug_print()` helper
+
+### Fixes
+- Fixed preferences not displaying in Blender 5.0 extensions
+  - Preferences now correctly match the full module path (`bl_ext.vscode_development.atomic_data_manager`)
+- Added safe property setter to handle read-only context errors during file loading
+- Fixed node groups used only by unused materials/objects not being detected as unused (#5)
+  - Node groups now recursively check if parent node groups are unused
+- Fixed compositor node tree detection to use reference comparison instead of name
+- Fixed missing import error in node_group_compositors()
+- Made Clean execute deletion synchronous for faster performance
+- Fixed callback-initiated scan state not being preserved, causing scans to fail
+- Fixed instanced collection usage detection
+
+## [v2.2.0] - 2026-01-05
+
+### Features
+- Add loading bars; non-blocking timer-based UI (#10)
+  - Operations no longer freeze the UI during scanning
+  - Real-time progress updates with cancel support at any time
+  - Descriptive status messages showing current operation details
+- Unified Smart Select and Clean scanning logic
+  - Eliminated code duplication between operations
+  - Clean now only scans selected categories (more efficient)
+  - Both operations use consistent incremental scanning for images and worlds
+- Added manual cache clear operator for testing and debugging
+
+### Performance
+- Optimized deep scan functions with caching and fast-path checks
+  - Image scanning now uses cached results to avoid redundant scene scans
+  - Early exit for clearly unused images using Blender's built-in user count
+- Incremental processing for large datasets
+  - Images processed in batches (5 per callback) to maintain UI responsiveness
+  - Worlds processed one at a time incrementally
+
+### Fixes
+- Fixed images used only by unused objects being incorrectly flagged as unused (#5)
+- Fixed material detection in brushes and node groups (#6, #7)
+- Fixed Clean operator not showing dialog when invoked programmatically (#8)
+- Improved material detection in inspection tools (brushes, node groups)
+
+### Internal
+- Refactored scanning architecture for maintainability
+- Added comprehensive debug output for troubleshooting
+
 ## [v2.1.0] - 2025-12-18
 
 ### Features
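The "unified RNA introspection" named in the v2.4.0 notes refers to walking each data-block's RNA property definitions rather than hard-coding per-type dependency rules. A minimal sketch of that primitive, assuming only Blender's `bpy` API; the function name `collect_pointers` is illustrative, not the add-on's actual API:

```python
import bpy

def collect_pointers(datablock):
    """Yield every ID data-block referenced by `datablock` through its
    RNA pointer and collection properties -- the introspection step
    behind a generic dependency graph."""
    for prop in datablock.bl_rna.properties:
        if prop.type == 'POINTER':
            value = getattr(datablock, prop.identifier, None)
            if isinstance(value, bpy.types.ID):
                yield value
        elif prop.type == 'COLLECTION':
            for item in getattr(datablock, prop.identifier, []):
                if isinstance(item, bpy.types.ID):
                    yield item

# Usage sketch: seed a reachability walk from scenes (the v2.4.1 notes
# mention adding scenes as root nodes) and mark everything reachable as used.
```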
@@ -224,6 +224,23 @@ class ATOMIC_PG_main(bpy.types.PropertyGroup):
     # search field for the inspect replace operator
     replace_field: bpy.props.StringProperty()
+
+    # progress tracking properties for timer-based operations
+    is_operation_running: bpy.props.BoolProperty(default=False)
+    operation_progress: bpy.props.FloatProperty(
+        default=0.0,
+        min=0.0,
+        max=100.0,
+        subtype='PERCENTAGE'  # This makes it display as percentage
+    )
+    operation_status: bpy.props.StringProperty(default="")
+    cancel_operation: bpy.props.BoolProperty(default=False)
+
+
+def _on_undo_pre(scene):
+    """Handler called before undo - invalidate cache."""
+    from .ops import main_ops
+    main_ops._invalidate_cache()
+
+
 def register():
     register_class(ATOMIC_PG_main)
@@ -233,6 +250,9 @@ def register():
     ui.register()
     ops.register()
+
+    # Register undo handler to invalidate cache
+    bpy.app.handlers.undo_pre.append(_on_undo_pre)
+
     # bootstrap Rainy's Extensions repository
     rainys_repo_bootstrap.register()
 
@@ -241,6 +261,10 @@ def unregister():
     # bootstrap unregistration
     rainys_repo_bootstrap.unregister()
+
+    # Remove undo handler
+    if _on_undo_pre in bpy.app.handlers.undo_pre:
+        bpy.app.handlers.undo_pre.remove(_on_undo_pre)
+
     # atomic package unregistration
     ui.unregister()
     ops.unregister()
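Since these are plain PropertyGroup members, any panel can surface them. A hedged sketch of a `draw()` that displays the progress; the panel class, label, and placement here are hypothetical, and only `context.scene.atomic` plus the property names come from the diff:

```python
import bpy

class ATOMIC_PT_progress_example(bpy.types.Panel):
    """Hypothetical panel surfacing the timer-driven progress properties."""
    bl_label = "Atomic Progress (sketch)"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = "Atomic"

    def draw(self, context):
        atom = context.scene.atomic
        if atom.is_operation_running:
            # The PERCENTAGE subtype renders the float as a progress-style slider
            self.layout.prop(atom, "operation_progress", text=atom.operation_status)
            self.layout.prop(atom, "cancel_operation", text="Cancel", toggle=True)
```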
@@ -2,7 +2,7 @@ schema_version = "1.0.0"
 
 id = "atomic_data_manager"
 name = "Atomic Data Manager"
-version = "2.1.0"
+version = "2.5.0"
 type = "add-on"
 author = "RaincloudTheDragon"
 maintainer = "RaincloudTheDragon"
@@ -32,6 +32,7 @@ Blender, not in here.
 enable_missing_file_warning = True
 include_fake_users = False
 enable_pie_menu_ui = True
+enable_debug_prints = False
 
 # hidden atomic preferences
 pie_menu_type = "D"
@@ -39,4 +40,13 @@ pie_menu_alt = False
 pie_menu_any = False
 pie_menu_ctrl = False
 pie_menu_oskey = False
 pie_menu_shift = False
+
+
+def debug_print(*args, **kwargs):
+    """
+    Print debug messages only if enable_debug_prints is True.
+    Usage: debug_print("message") or debug_print(f"formatted {value}")
+    """
+    if enable_debug_prints:
+        print(*args, **kwargs)
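Because `debug_print` checks the module-level flag at call time, callers never need their own gating. A usage sketch; the import path is illustrative (in a packaged extension it would resolve under the `bl_ext` namespace mentioned in the changelog):

```python
from atomic_data_manager import config  # illustrative import path

config.debug_print("rebuilding dependency graph...")  # silent by default
config.enable_debug_prints = True                     # normally set via add-on preferences
config.debug_print("now visible in the console")      # prints
```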
@@ -30,6 +30,9 @@ from ..utils import compat
 from .utils import delete
 from .utils import duplicate
+
+# Module-level state for inspection delete
+_inspect_delete_state = None
+
 
 def _check_library_or_override(datablock):
     """Check if datablock is library-linked or override, return error message if so."""
@@ -488,115 +491,181 @@ class ATOMIC_OT_inspection_delete(bpy.types.Operator):
     bl_label = "Delete Data-Block"
 
     def execute(self, context):
-        atom = bpy.context.scene.atomic
+        atom = context.scene.atomic
         inspection = atom.active_inspection
 
-        if inspection == 'COLLECTIONS':
-            key = atom.collections_field
-            collections = bpy.data.collections
-
-            if key in collections.keys():
-                collection = collections[key]
-                error = _check_library_or_override(collection)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.collection(key)
-                atom.collections_field = ""
-
-        elif inspection == 'IMAGES':
-            key = atom.images_field
-            images = bpy.data.images
-
-            if key in images.keys():
-                image = images[key]
-                error = _check_library_or_override(image)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.image(key)
-                atom.images_field = ""
-
-        elif inspection == 'LIGHTS':
-            key = atom.lights_field
-            lights = bpy.data.lights
-
-            if key in lights.keys():
-                light = lights[key]
-                error = _check_library_or_override(light)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.light(key)
-                atom.lights_field = ""
-
-        elif inspection == 'MATERIALS':
-            key = atom.materials_field
-            materials = bpy.data.materials
-
-            if key in materials.keys():
-                material = materials[key]
-                error = _check_library_or_override(material)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.material(key)
-                atom.materials_field = ""
-
-        elif inspection == 'NODE_GROUPS':
-            key = atom.node_groups_field
-            node_groups = bpy.data.node_groups
-
-            if key in node_groups.keys():
-                node_group = node_groups[key]
-                error = _check_library_or_override(node_group)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.node_group(key)
-                atom.node_groups_field = ""
-
-        elif inspection == 'PARTICLES':
-            key = atom.particles_field
-            particles = bpy.data.particles
-            if key in particles.keys():
-                particle = particles[key]
-                error = _check_library_or_override(particle)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.particle(key)
-                atom.particles_field = ""
-
-        elif inspection == 'TEXTURES':
-            key = atom.textures_field
-            textures = bpy.data.textures
-
-            if key in textures.keys():
-                texture = textures[key]
-                error = _check_library_or_override(texture)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.texture(key)
-                atom.textures_field = ""
-
-        elif inspection == 'WORLDS':
-            key = atom.worlds_field
-            worlds = bpy.data.worlds
-
-            if key in worlds.keys():
-                world = worlds[key]
-                error = _check_library_or_override(world)
-                if error:
-                    self.report({'ERROR'}, error)
-                    return {'CANCELLED'}
-                delete.world(key)
-                atom.worlds_field = ""
-
+        # Initialize progress tracking
+        atom.is_operation_running = True
+        atom.operation_progress = 0.0
+        atom.operation_status = f"Deleting {inspection.lower()}..."
+        atom.cancel_operation = False
+
+        # Store state in module-level variable for timer processing
+        global _inspect_delete_state
+        _inspect_delete_state = {
+            'inspection': inspection
+        }
+
+        # Start timer for processing (even though it's quick, keep UI responsive)
+        bpy.app.timers.register(_process_inspect_delete_step)
+
         return {'FINISHED'}
+
+
+def _process_inspect_delete_step():
+    """Process inspection delete in steps to avoid blocking the UI"""
+    atom = bpy.context.scene.atomic
+    global _inspect_delete_state
+
+    if _inspect_delete_state is None:
+        return None
+
+    inspection = _inspect_delete_state['inspection']
+
+    # Check for cancellation
+    if atom.cancel_operation:
+        atom.is_operation_running = False
+        atom.operation_progress = 0.0
+        atom.operation_status = "Operation cancelled"
+        atom.cancel_operation = False
+        _inspect_delete_state = None
+        # Force UI update
+        for area in bpy.context.screen.areas:
+            area.tag_redraw()
+        return None
+
+    atom.operation_progress = 50.0
+
+    # Perform deletion
+    try:
+        if inspection == 'COLLECTIONS':
+            key = atom.collections_field
+            collections = bpy.data.collections
+
+            if key in collections.keys():
+                collection = collections[key]
+                error = _check_library_or_override(collection)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.collection(key)
+                atom.collections_field = ""
+
+        elif inspection == 'IMAGES':
+            key = atom.images_field
+            images = bpy.data.images
+
+            if key in images.keys():
+                image = images[key]
+                error = _check_library_or_override(image)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.image(key)
+                atom.images_field = ""
+
+        elif inspection == 'LIGHTS':
+            key = atom.lights_field
+            lights = bpy.data.lights
+
+            if key in lights.keys():
+                light = lights[key]
+                error = _check_library_or_override(light)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.light(key)
+                atom.lights_field = ""
+
+        elif inspection == 'MATERIALS':
+            key = atom.materials_field
+            materials = bpy.data.materials
+
+            if key in materials.keys():
+                material = materials[key]
+                error = _check_library_or_override(material)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.material(key)
+                atom.materials_field = ""
+
+        elif inspection == 'NODE_GROUPS':
+            key = atom.node_groups_field
+            node_groups = bpy.data.node_groups
+
+            if key in node_groups.keys():
+                node_group = node_groups[key]
+                error = _check_library_or_override(node_group)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.node_group(key)
+                atom.node_groups_field = ""
+
+        elif inspection == 'PARTICLES':
+            key = atom.particles_field
+            particles = bpy.data.particles
+            if key in particles.keys():
+                particle = particles[key]
+                error = _check_library_or_override(particle)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.particle(key)
+                atom.particles_field = ""
+
+        elif inspection == 'TEXTURES':
+            key = atom.textures_field
+            textures = bpy.data.textures
+
+            if key in textures.keys():
+                texture = textures[key]
+                error = _check_library_or_override(texture)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.texture(key)
+                atom.textures_field = ""
+
+        elif inspection == 'WORLDS':
+            key = atom.worlds_field
+            worlds = bpy.data.worlds
+
+            if key in worlds.keys():
+                world = worlds[key]
+                error = _check_library_or_override(world)
+                if error:
+                    atom.is_operation_running = False
+                    atom.operation_status = ""
+                    return None
+                delete.world(key)
+                atom.worlds_field = ""
+    except:
+        pass  # Handle any errors gracefully
+
+    # Operation complete
+    atom.is_operation_running = False
+    atom.operation_progress = 100.0
+    atom.operation_status = ""
+
+    # Clear state
+    _inspect_delete_state = None
+
+    # Force UI update
+    for area in bpy.context.screen.areas:
+        area.tag_redraw()
+
+    return None  # Stop timer
+
+
 reg_list = [
     ATOMIC_OT_inspection_rename,
     ATOMIC_OT_inspection_replace,
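The rewrite above leans on `bpy.app.timers` semantics: the registered callable runs on Blender's main thread, returning `None` unregisters the timer, and returning a float reschedules it after that many seconds. A minimal sketch of the same batch pattern in isolation; the queue contents and batch size here are hypothetical:

```python
import bpy

_work = list(range(100))  # hypothetical work queue

def _step():
    # Process a small batch per tick so the UI stays responsive.
    for _ in range(5):
        if not _work:
            return None  # returning None stops the timer
        _work.pop()
    return 0.0  # reschedule for the next available tick

bpy.app.timers.register(_step)
```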
File diff suppressed because it is too large
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff