2025-07-01

2026-03-17 14:30:01 -06:00
parent f9a22056dd
commit 62b5978595
4579 changed files with 1257472 additions and 0 deletions
@@ -0,0 +1,15 @@
The Auto-Rig Pro addon source code in this folder and subfolders is released under the GNU GPL license.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
{'c_foot_bank_01': ('Transformation', (0.0, 0.0, 0.0, 0.0, -37.836708068847656, 37.836708068847656), 4.100761890411377), 'c_foot_bank_02': ('Transformation', (0.0, 0.0, 0.0, 0.0, -37.83671188354492, 37.83671188354492), 4.100762844085693), 'c_foot_heel': ('Transformation', (-75.67342376708984, 75.67342376708984, 0.0, 0.0, 0.0, 0.0), 4.100762844085693), 'c_toes_end': ('Transformation', (-50.0000114440918, 50.0000114440918, 0.0, 0.0, 0.0, 0.0), 2.68825364112854)}
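The preset above is a Python-literal dict mapping each controller bone to a constraint type, its min/max transform limits, and a trailing float. A minimal sketch of loading such a file, assuming it is saved as 'foot_preset.py' (the file name and unpacked field names are hypothetical; the ast.literal_eval approach mirrors how the addon reads its own .prefs file below):
import ast

# Hypothetical: assumes the preset above was saved as 'foot_preset.py'.
with open('foot_preset.py', 'r', encoding='utf8') as f:
    presets = ast.literal_eval(f.read())

for bone_name, (cns_type, limits, value) in presets.items():
    # e.g. 'c_foot_bank_01', 'Transformation', (0.0, ..., 37.83), 4.10
    print(bone_name, cns_type, limits, value)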
File diff suppressed because it is too large
@@ -0,0 +1,235 @@
import bpy, os, platform, sys, ast
def update_all_tab_names(self, context):
try:
from . import auto_rig
auto_rig.update_arp_tab()
except:
pass
try:
from . import auto_rig_ge
auto_rig_ge.update_arp_tab()
except:
pass
try:
from . import auto_rig_smart
auto_rig_smart.update_arp_tab()
except:
pass
try:
from . import auto_rig_remap
auto_rig_remap.update_arp_tab()
except:
pass
try:
from . import rig_functions
rig_functions.update_arp_tab()
except:
pass
def get_documents_path(other_folder):
home = os.path.expanduser("~") # get the user documents path
documents = os.path.join(home, "Documents")
p = os.path.join(documents, 'AutoRigPro')
p = os.path.join(p, other_folder)
return p
def get_prefs():
if bpy.app.version >= (4,2,0):
return bpy.context.preferences.addons[__package__[:-4]].preferences
else:
return bpy.context.preferences.addons[__package__.split('.')[0]].preferences
class ARP_OT_save_prefs(bpy.types.Operator):
"""Save addon preferences to file, to preserve them when installing a new version"""
bl_idname = 'arp.prefs_save'
bl_label = 'Save ARP prefs'
def execute(self, context):
scn = bpy.context.scene
fp = os.path.abspath(__file__)#get_prefs().prefs_presets_path
fp = os.path.dirname(fp)
fp = os.path.dirname(fp)
fp = os.path.dirname(fp)#Blender addons folder
#print(fp)
if not (fp.endswith("\\") or fp.endswith('/')):
fp += '/'
fp = fp+'autorigpro.prefs'
fp = os.path.abspath(fp) # automatically adds the drive letter if the path does not contain one
#print(fp)
if not os.path.exists(os.path.dirname(fp)):
try:
os.makedirs(os.path.dirname(fp))
except:
pass
file = open(fp, 'w', encoding='utf8', newline='\n')
prefs_settings = {
'arp_tab_name':get_prefs().arp_tab_name,
'arp_tools_tab_name':get_prefs().arp_tools_tab_name,
'beginner_mode': get_prefs().beginner_mode,
'custom_armatures_path': get_prefs().custom_armatures_path,
'custom_limb_path': get_prefs().custom_limb_path,
'rig_layers_path': get_prefs().rig_layers_path,
'remap_presets_path': get_prefs().remap_presets_path,
'ge_presets_path': get_prefs().ge_presets_path,
#'prefs_presets_path': get_prefs().prefs_presets_path,
'default_ikfk_arm':get_prefs().default_ikfk_arm,
'default_ikfk_leg': get_prefs().default_ikfk_leg,
'default_head_lock': get_prefs().default_head_lock,
'remove_existing_arm_mods': get_prefs().remove_existing_arm_mods,
'remove_existing_vgroups': get_prefs().remove_existing_vgroups,
'show_export_popup': get_prefs().show_export_popup,
'arp_debug_mode': scn.arp_debug_mode,
'arp_debug_bind': scn.arp_debug_bind,
'arp_experimental_mode': scn.arp_experimental_mode,
'arp_disable_smart_fx': scn.arp_disable_smart_fx,
}
file.write(str(prefs_settings))
file.close()
print("Auto-Rig Pro preferences saved successfully!")
print(fp)
return {'FINISHED'}
class ARP_MT_arp_addon_preferences(bpy.types.AddonPreferences):
bl_idname = __package__[:-4] if bpy.app.version >= (4,2,0) else __package__.split('.')[0]
arp_tab_name : bpy.props.StringProperty(name='Interface Tab', description='Name of the tab to display the interface in', default='ARP', update=update_all_tab_names)
arp_tools_tab_name : bpy.props.StringProperty(name='Tools Interface Tab', description='Name of the tab to display the tools (IK-FK snap...) interface in', default='Tool', update=update_all_tab_names)
beginner_mode: bpy.props.BoolProperty(name='Beginner Mode', default=True)
custom_armatures_path: bpy.props.StringProperty(name='Armatures', subtype='FILE_PATH', default=get_documents_path('Armatures Presets'), description='Path to store armature presets')
custom_limb_path: bpy.props.StringProperty(name='Limbs', subtype='FILE_PATH', default=get_documents_path('Custom Limbs'), description='Path to store custom limb presets')
rig_layers_path: bpy.props.StringProperty(name='Rig Layers', subtype='FILE_PATH', default=get_documents_path('Rig Layers'), description='Path to store rig layers presets')
remap_presets_path: bpy.props.StringProperty(name='Remap Presets', subtype='FILE_PATH', default=get_documents_path('Remap Presets'), description='Path to store remap presets')
ge_presets_path: bpy.props.StringProperty(name='Export Presets', subtype='FILE_PATH', default=get_documents_path('Game Engine Presets'), description='Path to store game engine export presets')
#prefs_presets_path: bpy.props.StringProperty(name='Preferences', subtype='FILE_PATH', default=get_documents_path('Preferences'), description='Path to store Auto-Rig Pro preferences')
default_ikfk_arm: bpy.props.EnumProperty(items=(('IK', 'IK', 'IK'), ('FK', 'FK', 'FK')), description='Default value for arms IK-FK switch', name='IK-FK Arms Default')
default_ikfk_leg: bpy.props.EnumProperty(items=(('IK', 'IK', 'IK'), ('FK', 'FK', 'FK')), description='Default value for legs IK-FK switch', name='IK-FK Legs Default')
default_head_lock: bpy.props.BoolProperty(default=True, name='Head Lock Default', description='Default value for the Head Lock switch')
remove_existing_arm_mods: bpy.props.BoolProperty(default=True, name='Remove Armature Modifiers', description='Remove existing armature modifiers when binding')
remove_existing_vgroups: bpy.props.BoolProperty(default=True, name='Remove Existing Vertex Groups', description='Remove existing vertex groups when binding')
rem_arm_mods_set: bpy.props.BoolProperty(default=False, description='Internal toggle set the first time binding is executed, used to apply the default preferences')
rem_vgroups_set: bpy.props.BoolProperty(default=False, description='Internal toggle set the first time binding is executed, used to apply the default preferences')
show_export_popup: bpy.props.BoolProperty(default=True, description='Show a popup notification on export completion')
def draw(self, context):
col = self.layout.column(align=True)
col.operator(ARP_OT_save_prefs.bl_idname, text='Save Preferences')
col.prop(self, 'beginner_mode', text='Beginner Mode (help buttons)')
col.separator()
col.label(text='Rig:')
col.prop(self, 'remove_existing_arm_mods', text='Remove Existing Armature Modifiers when Binding')
col.prop(self, 'remove_existing_vgroups', text='Remove Existing Vertex Groups when Binding')
col.separator()
col.prop(self, 'default_ikfk_arm', text='IK-FK Arms')
col.prop(self, 'default_ikfk_leg', text='IK-FK Legs')
col.prop(self, 'default_head_lock', text='Head Lock')
col.separator()
col.separator()
col.label(text='Interface:')
col.prop(self, 'arp_tab_name', text='Main ARP Tab')
col.prop(self, 'arp_tools_tab_name', text='Tools Tab')
col.prop(self, 'show_export_popup', text='Show Popup when Export Finished')
col.separator()
col.separator()
col.label(text='Paths:')
#col.prop(self, 'prefs_presets_path')
col.prop(self, 'custom_armatures_path')
col.prop(self, 'custom_limb_path')
col.prop(self, 'rig_layers_path')
col.prop(self, 'remap_presets_path')
col.prop(self, 'ge_presets_path')
col.separator()
col.separator()
col.label(text='Special-Debug:', icon='ERROR')
col.prop(context.scene, 'arp_disable_smart_fx')
col.prop(context.scene, 'arp_debug_mode')
col.prop(context.scene, 'arp_debug_bind')
col.prop(context.scene, 'arp_experimental_mode')
def load_arp_prefs():
fp = os.path.abspath(__file__)#get_prefs().prefs_presets_path
fp = os.path.dirname(fp)
fp = os.path.dirname(fp)
fp = os.path.dirname(fp)#Blender addons folder
#print(fp)
if not (fp.endswith("\\") or fp.endswith('/')):
fp += '/'
fp = fp+'autorigpro.prefs'
fp = os.path.abspath(fp) # automatically adds the drive letter if the path does not contain one
#print(fp)
if not os.path.exists(os.path.dirname(fp)):
print("Auto-Rig Pro preferences are not saved yet")
return
file = None
settings = None
try:
file = open(fp, 'r') if sys.version_info >= (3, 11) else open(fp, 'rU') # the 'rU' mode was removed in Python 3.11
file_lines = file.readlines()
settings= str(file_lines[0])
except:
print("Cannot read ARP prefs")
return
settings_dict = ast.literal_eval(settings)
for setting in settings_dict:
setattr(get_prefs(), setting, settings_dict[setting])
#print('Loaded setting', setting)
file.close()
print('Auto-Rig Pro preferences loaded successfully!')
def register():
from bpy.utils import register_class
try:
register_class(ARP_MT_arp_addon_preferences)
register_class(ARP_OT_save_prefs)
except:
pass
# load exported prefs on addon startup
load_arp_prefs()
bpy.types.Scene.arp_debug_mode = bpy.props.BoolProperty(name='Debug Mode', default=False, description='Run the addon in debug mode for debugging purposes.\nWarning, can generate earthquakes and solar tempests. Do not enable for normal usage!', options={'HIDDEN'})
bpy.types.Scene.arp_debug_bind = bpy.props.BoolProperty(name='Debug Bind', default=False, description='Enable Debug mode for bind functions, for debugging purposes.\nWarning, will break tools and generate earthquakes!\nDo not enable for normal usage!', options={'HIDDEN'})
bpy.types.Scene.arp_experimental_mode = bpy.props.BoolProperty(name='Experimental Mode', default=False, description='Enable experimental, unstable tools. Warning, can lead to errors. Use at your own risk.', options={'HIDDEN'})
bpy.types.Scene.arp_disable_smart_fx = bpy.props.BoolProperty(name='Disable Smart FX', default=False, description='Disable the Smart markers FX for debugging purposes, e.g. for Mac systems that do not support some graphics features. Safe to use.', options={'HIDDEN'})
def unregister():
from bpy.utils import unregister_class
unregister_class(ARP_MT_arp_addon_preferences)
del bpy.types.Scene.arp_debug_mode
del bpy.types.Scene.arp_debug_bind
del bpy.types.Scene.arp_experimental_mode
del bpy.types.Scene.arp_disable_smart_fx
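The save/load round trip above is simply str() on a plain dict followed by ast.literal_eval(); a minimal standalone sketch:
import ast

prefs_settings = {'arp_tab_name': 'ARP', 'default_head_lock': True}
text = str(prefs_settings)         # what ARP_OT_save_prefs writes to autorigpro.prefs
restored = ast.literal_eval(text)  # what load_arp_prefs reads back
assert restored == prefs_settings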
File diff suppressed because it is too large
@@ -0,0 +1,87 @@
#######################################################
## Reset functions for internal usage (Match to Rig)
## resets all controllers transforms (pose mode)
#######################################################
import bpy
def reset_all():
rig = bpy.context.active_object
def set_inverse_child(cns):
# direct inverse matrix method
if cns.subtarget != '':
if rig.data.bones.get(cns.subtarget):
cns.inverse_matrix = rig.pose.bones[cns.subtarget].matrix.inverted()
else:
print("Child Of constraint could not be reset, bone does not exist:", '"'+cns.subtarget+'" from', cns.name)
# Reset transforms------------------------------
bpy.ops.pose.select_all(action='SELECT')
bpy.ops.pose.loc_clear()
bpy.ops.pose.rot_clear()
# "scale clear" leads to resetting bbones_easeout/in value, we need to preserve them
bdict = {}
for b in rig.pose.bones:
bdict[b.name] = [b.bbone_easein, b.bbone_easeout]
bpy.ops.pose.scale_clear()
for bname in bdict:
pbone = rig.pose.bones[bname]
pbone.bbone_easein, pbone.bbone_easeout = bdict[bname]
for pbone in rig.pose.bones:
# Reset locked transforms
for i, rot in enumerate(pbone.rotation_euler):
if pbone.lock_rotation[i]:
pbone.rotation_euler[i] = 0.0
# Reset Properties
if len(pbone.keys()):
try:# in rare cases raises "RuntimeError: IDPropertyGroup changed size during iteration"
for key in pbone.keys():
if key == 'ik_fk_switch':
try:
pbone['ik_fk_switch'] = get_prop_setting(pbone, 'ik_fk_switch', 'default')
except:
if 'hand' in pbone.name:
pbone['ik_fk_switch'] = 1.0
else:
pbone['ik_fk_switch'] = 0.0
if key == 'stretch_length':
pbone[key] = 1.0
# don't set auto-stretch to 1 for now, it's not compatible with Fbx export
if key == 'leg_pin':
pbone[key] = 0.0
if key == 'elbow_min':
pbone[key] = 0.0
if key == 'bend_all':
pbone[key] = 0.0
except:
pass
reset_child_of_bones = {'c_leg_pole':'startswith', 'c_arms_pole':'startswith', 'hand':'in', 'foot':'in', 'head':'in', 'c_thumb':'startswith', 'c_index':'startswith', 'c_middle':'startswith', 'c_ring':'startswith', 'c_pinky':'startswith', 'c_eye_target':'startswith', 'c_toes_':'startswith'}
valid = False
if 'cc' not in pbone.keys():# do not set inverse for custom bones
for bname in reset_child_of_bones:
type = reset_child_of_bones[bname]
if type == 'startswith':
if pbone.name.startswith(bname):
valid = True
elif type == 'in':
if bname in pbone.name:
valid = True
if valid:
for cns in pbone.constraints:
if cns.type == 'CHILD_OF':
set_inverse_child(cns)
bpy.ops.pose.select_all(action='DESELECT')
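A minimal usage sketch for reset_all(); the rig object name is an assumption, and the function expects the armature to be the active object in Pose Mode:
import bpy

rig = bpy.data.objects.get('rig')  # hypothetical armature object name
if rig is not None:
    bpy.context.view_layer.objects.active = rig
    bpy.ops.object.mode_set(mode='POSE')
    reset_all()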
File diff suppressed because it is too large
@@ -0,0 +1,345 @@
if "bpy" in locals():
import importlib
if "export_fbx_bin" in locals():
importlib.reload(export_fbx_bin)
import bpy
import addon_utils, sys
from bpy.props import (
StringProperty,
BoolProperty,
FloatProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
orientation_helper,
path_reference_mode,
axis_conversion,
)
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ARP_OT_export_fbx_wrap(bpy.types.Operator, ExportHelper):
"""Write a FBX file"""
bl_idname = "arp_export_scene.fbx"
bl_label = "Export ARP FBX"
bl_options = {'UNDO', 'PRESET'}
filename_ext = ".fbx"
filter_glob: StringProperty(default="*.fbx", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
ui_tab: EnumProperty(
items=(('MAIN', "Main", "Main basic settings"),
('GEOMETRY', "Geometries", "Geometry-related settings"),
('ARMATURE', "Armatures", "Armature-related settings"),
('ANIMATION', "Animation", "Animation-related settings"),
),
name="ui_tab",
description="Export options categories",
)
use_selection: BoolProperty(
name="Selected Objects",
description="Export selected and visible objects only",
default=False,
)
use_active_collection: BoolProperty(
name="Active Collection",
description="Export only objects from the active collection (and its children)",
default=False,
)
global_scale: FloatProperty(
name="Scale",
description="Scale all data (Some importers do not support scaled armatures!)",
min=0.001, max=1000.0,
soft_min=0.01, soft_max=1000.0,
default=1.0,
)
apply_unit_scale: BoolProperty(
name="Apply Unit",
description="Take into account current Blender units settings (if unset, raw Blender Units values are used as-is)",
default=True,
)
apply_scale_options: EnumProperty(
items=(('FBX_SCALE_NONE', "All Local",
"Apply custom scaling and units scaling to each object transformation, FBX scale remains at 1.0"),
('FBX_SCALE_UNITS', "FBX Units Scale",
"Apply custom scaling to each object transformation, and units scaling to FBX scale"),
('FBX_SCALE_CUSTOM', "FBX Custom Scale",
"Apply custom scaling to FBX scale, and units scaling to each object transformation"),
('FBX_SCALE_ALL', "FBX All",
"Apply custom scaling and units scaling to FBX scale"),
),
name="Apply Scalings",
description="How to apply custom and units scalings in generated FBX file "
"(Blender uses FBX scale to detect units on import, "
"but many other applications do not handle the same way)",
)
use_space_transform: BoolProperty(
name="Use Space Transform",
description="Apply global space transform to the object rotations. When disabled "
"only the axis space is written to the file and all object transforms are left as-is",
default=True,
)
bake_space_transform: BoolProperty(
name="!EXPERIMENTAL! Apply Transform",
description="Bake space transform into object data, avoids getting unwanted rotations to objects when "
"target space is not aligned with Blender's space "
"(WARNING! experimental option, use at own risks, known broken with armatures/animations)",
default=False,
)
colors_type: EnumProperty(
name="Vertex Colors",
items=(('NONE', "None", "Do not export color attributes"),
('SRGB', "sRGB", "Export colors in sRGB color space"),
('LINEAR', "Linear", "Export colors in linear color space"),
),
description="Export vertex color attributes",
default='SRGB',
)
prioritize_active_color: BoolProperty(
name="Prioritize Active Color",
description="Make sure active color will be exported first. Could be important "
"since some other software can discard other color attributes besides the first one",
default=False,
)
object_types: EnumProperty(
name="Object Types",
options={'ENUM_FLAG'},
items=(('EMPTY', "Empty", ""),
('CAMERA', "Camera", ""),
('LIGHT', "Lamp", ""),
('ARMATURE', "Armature", "WARNING: not supported in dupli/group instances"),
('MESH', "Mesh", ""),
('OTHER', "Other", "Other geometry types, like curve, metaball, etc. (converted to meshes)"),
),
description="Which kind of object to export",
default={'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'},
)
use_mesh_modifiers: BoolProperty(
name="Apply Modifiers",
description="Apply modifiers to mesh objects (except Armature ones) - "
"WARNING: prevents exporting shape keys",
default=True,
)
use_mesh_modifiers_render: BoolProperty(
name="Use Modifiers Render Setting",
description="Use render settings when applying modifiers to mesh objects",
default=True,
)
mesh_smooth_type: EnumProperty(
name="Smoothing",
items=(('OFF', "Normals Only", "Export only normals instead of writing edge or face smoothing data"),
('FACE', "Face", "Write face smoothing"),
('EDGE', "Edge", "Write edge smoothing"),
),
description="Export smoothing information "
"(prefer 'Normals Only' option if your target importer understand split normals)",
default='OFF',
)
use_subsurf: BoolProperty(
name="Export Subdivision Surface",
description="Export the last Catmull-Rom subdivision modifier as FBX subdivision "
"(does not apply the modifier even if 'Apply Modifiers' is enabled)",
default=False,
)
use_mesh_edges: BoolProperty(
name="Loose Edges",
description="Export loose edges (as two-vertices polygons)",
default=False,
)
use_tspace: BoolProperty(
name="Tangent Space",
description="Add binormal and tangent vectors, together with normal they form the tangent space "
"(will only work correctly with tris/quads only meshes!)",
default=False,
)
use_triangles: BoolProperty(
name="Triangulate Faces",
description="Convert all faces to triangles",
default=False,
)
use_custom_props: BoolProperty(
name="Custom Properties",
description="Export custom properties",
default=False,
)
add_leaf_bones: BoolProperty(
name="Add Leaf Bones",
description="Append a final bone to the end of each chain to specify last bone length "
"(use this when you intend to edit the armature from exported data)",
default=True # False for commit!
)
primary_bone_axis: EnumProperty(
name="Primary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='Y',
)
secondary_bone_axis: EnumProperty(
name="Secondary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='X',
)
use_armature_deform_only: BoolProperty(
name="Only Deform Bones",
description="Only write deforming bones (and non-deforming ones when they have deforming children)",
default=False,
)
armature_nodetype: EnumProperty(
name="Armature FBXNode Type",
items=(('NULL', "Null", "'Null' FBX node, similar to Blender's Empty (default)"),
('ROOT', "Root", "'Root' FBX node, supposed to be the root of chains of bones..."),
('LIMBNODE', "LimbNode", "'LimbNode' FBX node, a regular joint between two bones..."),
),
description="FBX type of node (object) used to represent Blender's armatures "
"(use Null one unless you experience issues with other app, other choices may no import back "
"perfectly in Blender...)",
default='NULL',
)
bake_anim: BoolProperty(
name="Baked Animation",
description="Export baked keyframe animation",
default=True,
)
bake_anim_use_all_bones: BoolProperty(
name="Key All Bones",
description="Force exporting at least one key of animation for all bones "
"(needed with some target applications, like UE4)",
default=True,
)
bake_anim_use_nla_strips: BoolProperty(
name="NLA Strips",
description="Export each non-muted NLA strip as a separated FBX's AnimStack, if any, "
"instead of global scene animation",
default=True,
)
bake_anim_use_all_actions: BoolProperty(
name="All Actions",
description="Export each action as a separated FBX's AnimStack, instead of global scene animation "
"(note that animated objects will get all actions compatible with them, "
"others will get no animation at all)",
default=True,
)
bake_anim_force_startend_keying: BoolProperty(
name="Force Start/End Keying",
description="Always add a keyframe at start and end of actions for animated channels",
default=True,
)
bake_anim_force_startend_keying_sk: BoolProperty(
name="Force Start/End Keying for Shape Keys",
description="Always add a keyframe at start and end of actions for animated channels",
default=False,
)
bake_anim_step: FloatProperty(
name="Sampling Rate",
description="How often to evaluate animated values (in frames)",
min=0.01, max=100.0,
soft_min=0.1, soft_max=10.0,
default=1.0,
)
bake_anim_simplify_factor: FloatProperty(
name="Simplify",
description="How much to simplify baked values (0.0 to disable, the higher the more simplified)",
min=0.0, max=100.0, # No simplification to up to 10% of current magnitude tolerance.
soft_min=0.0, soft_max=10.0,
default=1.0, # default: min slope: 0.005, max frame step: 10.
)
path_mode: path_reference_mode
embed_textures: BoolProperty(
name="Embed Textures",
description="Embed textures in FBX binary file (only for \"Copy\" path mode!)",
default=False,
)
batch_mode: EnumProperty(
name="Batch Mode",
items=(('OFF', "Off", "Active scene to file"),
('SCENE', "Scene", "Each scene as a file"),
('COLLECTION', "Collection",
"Each collection (data-block ones) as a file, does not include content of children collections"),
('SCENE_COLLECTION', "Scene Collections",
"Each collection (including master, non-data-block ones) of each scene as a file, "
"including content from children collections"),
('ACTIVE_SCENE_COLLECTION', "Active Scene Collections",
"Each collection (including master, non-data-block one) of the active scene as a file, "
"including content from children collections"),
),
)
use_batch_own_dir: BoolProperty(
name="Batch Own Dir",
description="Create a dir for each exported file",
default=True,
)
use_metadata: BoolProperty(
name="Use Metadata",
default=True,
options={'HIDDEN'},
)
#humanoid_actions: BoolProperty(name="Humanoid Actions Only", default=True)
shape_keys_baked_data: StringProperty(name="sk data", default="")
mesh_names_data: StringProperty(name="mesh names", default="")
export_action_only: StringProperty(name="", default="")
@property
def check_extension(self):
return self.batch_mode == 'OFF'
def execute(self, context):
from mathutils import Matrix
if not self.filepath:
raise Exception("filepath not set")
global_matrix = (axis_conversion(to_forward=self.axis_forward,
to_up=self.axis_up,
).to_4x4()
if self.use_space_transform else Matrix())
keywords = self.as_keywords(ignore=("check_existing",
"filter_glob",
"ui_tab",
))
keywords["global_matrix"] = global_matrix
from . import export_fbx_bin
return export_fbx_bin.arp_save(self, context, **keywords)
def register():
if bpy.app.version >= (4,1,0):
from bpy.utils import register_class
try:
register_class(ARP_OT_export_fbx_wrap)
except:
pass
def unregister():
if bpy.app.version >= (4,1,0):
from bpy.utils import unregister_class
try:
unregister_class(ARP_OT_export_fbx_wrap)
except:
pass
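Once registered (Blender 4.1+ only, per register() above), the wrapper is invoked like the stock FBX exporter; a minimal sketch with an assumed output path:
import bpy

# Hypothetical call: exports the selected objects through ARP's FBX wrapper.
bpy.ops.arp_export_scene.fbx(filepath='/tmp/character.fbx', use_selection=True)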
@@ -0,0 +1,62 @@
# SPDX-FileCopyrightText: 2006-2012 assimp team
# SPDX-FileCopyrightText: 2013 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later
BOOL = b'B'[0]
CHAR = b'C'[0]
INT8 = b'Z'[0]
INT16 = b'Y'[0]
INT32 = b'I'[0]
INT64 = b'L'[0]
FLOAT32 = b'F'[0]
FLOAT64 = b'D'[0]
BYTES = b'R'[0]
STRING = b'S'[0]
INT32_ARRAY = b'i'[0]
INT64_ARRAY = b'l'[0]
FLOAT32_ARRAY = b'f'[0]
FLOAT64_ARRAY = b'd'[0]
BOOL_ARRAY = b'b'[0]
BYTE_ARRAY = b'c'[0]
# Some other misc defines
# Known combinations so far - supposed meaning: A = animatable, A+ = animated, U = UserProp
# VALID_NUMBER_FLAGS = {b'A', b'A+', b'AU', b'A+U'} # Not used...
# array types - actual length may vary (depending on underlying C implementation)!
import array
# For now, bytes and bool are assumed always 1byte.
ARRAY_BOOL = 'b'
ARRAY_BYTE = 'B'
ARRAY_INT32 = None
ARRAY_INT64 = None
for _t in 'ilq':
size = array.array(_t).itemsize
if size == 4:
ARRAY_INT32 = _t
elif size == 8:
ARRAY_INT64 = _t
if ARRAY_INT32 and ARRAY_INT64:
break
if not ARRAY_INT32:
raise Exception("Impossible to get a 4-bytes integer type for array!")
if not ARRAY_INT64:
raise Exception("Impossible to get an 8-bytes integer type for array!")
ARRAY_FLOAT32 = None
ARRAY_FLOAT64 = None
for _t in 'fd':
size = array.array(_t).itemsize
if size == 4:
ARRAY_FLOAT32 = _t
elif size == 8:
ARRAY_FLOAT64 = _t
if ARRAY_FLOAT32 and ARRAY_FLOAT64:
break
if not ARRAY_FLOAT32:
raise Exception("Impossible to get a 4-bytes float type for array!")
if not ARRAY_FLOAT64:
raise Exception("Impossible to get an 8-bytes float type for array!")
@@ -0,0 +1,434 @@
# SPDX-FileCopyrightText: 2013 Campbell Barton
#
# SPDX-License-Identifier: GPL-2.0-or-later
try:
from . import data_types
from .fbx_utils_threading import MultiThreadedTaskConsumer
except:
import data_types
from fbx_utils_threading import MultiThreadedTaskConsumer
from struct import pack
from contextlib import contextmanager
import array
import numpy as np
import zlib
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
_ELEM_META_FORMAT = ...
_ELEM_META_SIZE = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
# fbx has very strict CRC rules, all based on file timestamp
# until we figure these out, write files at a fixed time. (workaround!)
# Assumes: CreationTime
_TIME_ID = b'1970-01-01 10:00:00:000'
_FILE_ID = b'\x28\xb3\x2a\xeb\xb6\x24\xcc\xc2\xbf\xc8\xb0\x2a\xa9\x2b\xfc\xf1'
_FOOT_ID = b'\xfa\xbc\xab\x09\xd0\xc8\xd4\x66\xb1\x76\xfb\x83\x1c\xf7\x26\x7e'
# Awful exceptions: those "classes" of elements seem to need block sentinel even when having no children and some props.
_ELEMS_ID_ALWAYS_BLOCK_SENTINEL = {b"AnimationStack", b"AnimationLayer"}
class FBXElem:
__slots__ = (
"id",
"props",
"props_type",
"elems",
"_props_length", # combine length of props
"_end_offset", # byte offset from the start of the file.
)
def __init__(self, id):
assert(len(id) < 256) # length must fit in a uint8
self.id = id
self.props = []
self.props_type = bytearray()
self.elems = []
self._end_offset = -1
self._props_length = -1
@classmethod
@contextmanager
def enable_multithreading_cm(cls):
"""Temporarily enable multithreaded array compression.
The context manager handles starting up and shutting down the threads.
Only exits once all the threads are done (either all tasks were completed or an error occurred and the threads
were stopped prematurely).
Writing to a file is temporarily disabled as a safeguard."""
# __enter__()
orig_func = cls._add_compressed_array_helper
orig_write = cls._write
def insert_compressed_array(props, insert_at, data, length):
# zlib.compress releases the GIL, so can be multithreaded.
data = zlib.compress(data, 1)
comp_len = len(data)
encoding = 1
data = pack('<3I', length, encoding, comp_len) + data
props[insert_at] = data
with MultiThreadedTaskConsumer.new_cpu_bound_cm(insert_compressed_array) as wrapped_func:
try:
def _add_compressed_array_helper_multi(self, data, length):
# Append a dummy value that will be replaced with the compressed array data later.
self.props.append(...)
# The index to insert the compressed array into.
insert_at = len(self.props) - 1
# Schedule the array to be compressed on a separate thread and then inserted into the hierarchy at
# `insert_at`.
wrapped_func(self.props, insert_at, data, length)
# As an extra safeguard, temporarily replace the `_write` function to raise an error if called.
def temp_write(*_args, **_kwargs):
raise RuntimeError("Writing is not allowed until multithreaded array compression has been disabled")
cls._add_compressed_array_helper = _add_compressed_array_helper_multi
cls._write = temp_write
# Return control back to the caller of __enter__().
yield
finally:
# __exit__()
# Restore the original functions.
cls._add_compressed_array_helper = orig_func
cls._write = orig_write
# Exiting the MultiThreadedTaskConsumer context manager will wait for all scheduled tasks to complete.
def add_bool(self, data):
assert(isinstance(data, bool))
data = pack('?', data)
self.props_type.append(data_types.BOOL)
self.props.append(data)
def add_char(self, data):
assert(isinstance(data, bytes))
assert(len(data) == 1)
data = pack('<c', data)
self.props_type.append(data_types.CHAR)
self.props.append(data)
def add_int8(self, data):
assert(isinstance(data, int))
data = pack('<b', data)
self.props_type.append(data_types.INT8)
self.props.append(data)
def add_int16(self, data):
assert(isinstance(data, int))
data = pack('<h', data)
self.props_type.append(data_types.INT16)
self.props.append(data)
def add_int32(self, data):
assert(isinstance(data, int))
data = pack('<i', data)
self.props_type.append(data_types.INT32)
self.props.append(data)
def add_int64(self, data):
assert(isinstance(data, int))
data = pack('<q', data)
self.props_type.append(data_types.INT64)
self.props.append(data)
def add_float32(self, data):
assert(isinstance(data, float))
data = pack('<f', data)
self.props_type.append(data_types.FLOAT32)
self.props.append(data)
def add_float64(self, data):
assert(isinstance(data, float))
data = pack('<d', data)
self.props_type.append(data_types.FLOAT64)
self.props.append(data)
def add_bytes(self, data):
assert(isinstance(data, bytes))
data = pack('<I', len(data)) + data
self.props_type.append(data_types.BYTES)
self.props.append(data)
def add_string(self, data):
assert(isinstance(data, bytes))
data = pack('<I', len(data)) + data
self.props_type.append(data_types.STRING)
self.props.append(data)
def add_string_unicode(self, data):
assert(isinstance(data, str))
data = data.encode('utf8')
data = pack('<I', len(data)) + data
self.props_type.append(data_types.STRING)
self.props.append(data)
def _add_compressed_array_helper(self, data, length):
"""Note: This function may be swapped out by enable_multithreading_cm with an equivalent that supports
multithreading."""
data = zlib.compress(data, 1)
comp_len = len(data)
encoding = 1
data = pack('<3I', length, encoding, comp_len) + data
self.props.append(data)
def _add_array_helper(self, data, prop_type, length):
self.props_type.append(prop_type)
# mimic behavior of fbxconverter (also common sense)
# we could make this configurable.
encoding = 0 if len(data) <= 128 else 1
if encoding == 0:
data = pack('<3I', length, encoding, len(data)) + data
self.props.append(data)
elif encoding == 1:
self._add_compressed_array_helper(data, length)
def _add_parray_helper(self, data, array_type, prop_type):
assert (isinstance(data, array.array))
assert (data.typecode == array_type)
length = len(data)
if _IS_BIG_ENDIAN:
data = data[:]
data.byteswap()
data = data.tobytes()
self._add_array_helper(data, prop_type, length)
def _add_ndarray_helper(self, data, dtype, prop_type):
assert (isinstance(data, np.ndarray))
assert (data.dtype == dtype)
length = data.size
if _IS_BIG_ENDIAN and data.dtype.isnative:
data = data.byteswap()
data = data.tobytes()
self._add_array_helper(data, prop_type, length)
def add_int32_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.int32, data_types.INT32_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_INT32, data)
self._add_parray_helper(data, data_types.ARRAY_INT32, data_types.INT32_ARRAY)
def add_int64_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.int64, data_types.INT64_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_INT64, data)
self._add_parray_helper(data, data_types.ARRAY_INT64, data_types.INT64_ARRAY)
def add_float32_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.float32, data_types.FLOAT32_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_FLOAT32, data)
self._add_parray_helper(data, data_types.ARRAY_FLOAT32, data_types.FLOAT32_ARRAY)
def add_float64_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.float64, data_types.FLOAT64_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_FLOAT64, data)
self._add_parray_helper(data, data_types.ARRAY_FLOAT64, data_types.FLOAT64_ARRAY)
def add_bool_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, bool, data_types.BOOL_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_BOOL, data)
self._add_parray_helper(data, data_types.ARRAY_BOOL, data_types.BOOL_ARRAY)
def add_byte_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.byte, data_types.BYTE_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_BYTE, data)
self._add_parray_helper(data, data_types.ARRAY_BYTE, data_types.BYTE_ARRAY)
# -------------------------
# internal helper functions
def _calc_offsets(self, offset, is_last):
"""
Call before writing, calculates fixed offsets.
"""
assert(self._end_offset == -1)
assert(self._props_length == -1)
offset += _ELEM_META_SIZE # 3 uints (or 3 ulonglongs for FBX 7500 and later)
offset += 1 + len(self.id) # len + idname
props_length = 0
for data in self.props:
# 1 byte for the prop type
props_length += 1 + len(data)
self._props_length = props_length
offset += props_length
offset = self._calc_offsets_children(offset, is_last)
self._end_offset = offset
return offset
def _calc_offsets_children(self, offset, is_last):
if self.elems:
elem_last = self.elems[-1]
for elem in self.elems:
offset = elem._calc_offsets(offset, (elem is elem_last))
offset += _BLOCK_SENTINEL_LENGTH
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
offset += _BLOCK_SENTINEL_LENGTH
return offset
def _write(self, write, tell, is_last):
assert(self._end_offset != -1)
assert(self._props_length != -1)
write(pack(_ELEM_META_FORMAT, self._end_offset, len(self.props), self._props_length))
write(bytes((len(self.id),)))
write(self.id)
for i, data in enumerate(self.props):
write(bytes((self.props_type[i],)))
write(data)
self._write_children(write, tell, is_last)
if tell() != self._end_offset:
raise IOError("scope length not reached, "
"something is wrong (%d)" % (self._end_offset - tell()))
def _write_children(self, write, tell, is_last):
if self.elems:
elem_last = self.elems[-1]
for elem in self.elems:
assert(elem.id != b'')
elem._write(write, tell, (elem is elem_last))
write(_BLOCK_SENTINEL_DATA)
elif (not self.props and not is_last) or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
write(_BLOCK_SENTINEL_DATA)
def _write_timedate_hack(elem_root):
# perform 2 changes
# - set the FileID
# - set the CreationTime
ok = 0
for elem in elem_root.elems:
if elem.id == b'FileId':
assert(elem.props_type[0] == b'R'[0])
assert(len(elem.props_type) == 1)
elem.props.clear()
elem.props_type.clear()
elem.add_bytes(_FILE_ID)
ok += 1
elif elem.id == b'CreationTime':
assert(elem.props_type[0] == b'S'[0])
assert(len(elem.props_type) == 1)
elem.props.clear()
elem.props_type.clear()
elem.add_string(_TIME_ID)
ok += 1
if ok == 2:
break
if ok != 2:
print("Missing fields!")
# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, _ELEM_META_FORMAT, _ELEM_META_SIZE
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
_ELEM_META_FORMAT = ...
_ELEM_META_SIZE = ...
if fbx_version < 7500:
_ELEM_META_FORMAT = '<3I'
_ELEM_META_SIZE = 12
else:
_ELEM_META_FORMAT = '<3Q'
_ELEM_META_SIZE = 24
_BLOCK_SENTINEL_LENGTH = _ELEM_META_SIZE + 1
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
def write(fn, elem_root, version):
assert(elem_root.id == b'')
with open(fn, 'wb') as f:
write = f.write
tell = f.tell
init_version(version)
write(_HEAD_MAGIC)
write(pack('<I', version))
# hack since we don't decode time.
# ideally we would _not_ modify this data.
_write_timedate_hack(elem_root)
elem_root._calc_offsets_children(tell(), False)
elem_root._write_children(write, tell, False)
write(_FOOT_ID)
write(b'\x00' * 4)
# padding for alignment (values between 1 & 16 observed)
# if already aligned to 16, add a full 16 bytes padding.
ofs = tell()
pad = ((ofs + 15) & ~15) - ofs
if pad == 0:
pad = 16
write(b'\0' * pad)
write(pack('<I', version))
# unknown magic (always the same)
write(b'\0' * 120)
write(b'\xf8\x5a\x8c\x6a\xde\xf5\xd9\x7e\xec\xe9\x0c\xe3\x75\x8f\x29\x0b')
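A minimal sketch of driving this writer directly, not the addon's actual export path; the output file name and FBX version are assumptions:
# Build a tiny FBX tree: the root element must have an empty id (asserted in write()).
root = FBXElem(b'')
creator = FBXElem(b'Creator')
creator.add_string(b'encode_bin sketch')
root.elems.append(creator)
# 7400 selects the pre-7500 (uint32) binary layout; write() will warn that the
# bare tree is missing the FileId/CreationTime fields, but still produces a file.
write('/tmp/minimal.fbx', root, 7400)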
File diff suppressed because it is too large
@@ -0,0 +1,338 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Script copyright (C) 2006-2012, assimp team
# Script copyright (C) 2013 Blender Foundation
"""
Usage
=====
fbx2json [FILES]...
This script will write a JSON file for each FBX argument given.
Output
======
The JSON data is formatted into a list of nested lists of 4 items:
``[id, [data, ...], "data_types", [subtree, ...]]``
Where each list may be empty, and the items in
the subtree are formatted the same way.
data_types is a string, aligned with data, that specifies a type
for each property.
The types are as follows:
* 'Z': - INT8
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY
Note that key:value pairs aren't used since the ids are not
ensured to be unique.
"""
# ----------------------------------------------------------------------------
# FBX Binary Parser
from struct import unpack
import array
import zlib
# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple
def read_uint(read):
return unpack(b'<I', read(4))[0]
def read_uint64(read):
return unpack(b'<Q', read(8))[0]
def read_ubyte(read):
return unpack(b'B', read(1))[0]
def read_string_ubyte(read):
size = read_ubyte(read)
data = read(size)
return data
def unpack_array(read, array_type, array_stride, array_byteswap):
length = read_uint(read)
encoding = read_uint(read)
comp_len = read_uint(read)
data = read(comp_len)
if encoding == 0:
pass
elif encoding == 1:
data = zlib.decompress(data)
assert(length * array_stride == len(data))
data_array = array.array(array_type, data)
if array_byteswap and _IS_BIG_ENDIAN:
data_array.byteswap()
return data_array
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # 8 bit int
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int
b'F'[0]: lambda read: unpack(b'<f', read(4))[0], # 32 bit float
b'D'[0]: lambda read: unpack(b'<d', read(8))[0], # 64 bit float
b'L'[0]: lambda read: unpack(b'<q', read(8))[0], # 64 bit int
b'R'[0]: lambda read: read(read_uint(read)), # binary data
b'S'[0]: lambda read: read(read_uint(read)), # string data
b'f'[0]: lambda read: unpack_array(read, 'f', 4, False), # array (float)
b'i'[0]: lambda read: unpack_array(read, 'i', 4, True), # array (int)
b'd'[0]: lambda read: unpack_array(read, 'd', 8, False), # array (double)
b'l'[0]: lambda read: unpack_array(read, 'q', 8, True), # array (long)
b'b'[0]: lambda read: unpack_array(read, 'b', 1, False), # array (bool)
b'c'[0]: lambda read: unpack_array(read, 'B', 1, False), # array (ubyte)
}
# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, read_fbx_elem_uint
assert(_BLOCK_SENTINEL_LENGTH == ...)
assert(_BLOCK_SENTINEL_DATA == ...)
if fbx_version < 7500:
_BLOCK_SENTINEL_LENGTH = 13
read_fbx_elem_uint = read_uint
else:
_BLOCK_SENTINEL_LENGTH = 25
read_fbx_elem_uint = read_uint64
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
def read_elem(read, tell, use_namedtuple):
# [0] the offset at which this block ends
# [1] the number of properties in the scope
# [2] the length of the property list
end_offset = read_fbx_elem_uint(read)
if end_offset == 0:
return None
prop_count = read_fbx_elem_uint(read)
prop_length = read_fbx_elem_uint(read)
elem_id = read_string_ubyte(read) # elem name of the scope/key
elem_props_type = bytearray(prop_count) # elem property types
elem_props_data = [None] * prop_count # elem properties (if any)
elem_subtree = [] # elem children (if any)
for i in range(prop_count):
data_type = read(1)[0]
elem_props_data[i] = read_data_dict[data_type](read)
elem_props_type[i] = data_type
if tell() < end_offset:
while tell() < (end_offset - _BLOCK_SENTINEL_LENGTH):
elem_subtree.append(read_elem(read, tell, use_namedtuple))
if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
raise IOError("failed to read nested block sentinel, "
"expected all bytes to be 0")
if tell() != end_offset:
raise IOError("scope length not reached, something is wrong")
args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
return FBXElem(*args) if use_namedtuple else args
def parse_version(fn):
"""
Return the FBX version,
if the file isn't a binary FBX return zero.
"""
with open(fn, 'rb') as f:
read = f.read
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
return 0
return read_uint(read)
def parse(fn, use_namedtuple=True):
root_elems = []
with open(fn, 'rb') as f:
read = f.read
tell = f.tell
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
raise IOError("Invalid header")
fbx_version = read_uint(read)
init_version(fbx_version)
while True:
elem = read_elem(read, tell, use_namedtuple)
if elem is None:
break
root_elems.append(elem)
args = (b'', [], bytearray(0), root_elems)
return FBXElem(*args) if use_namedtuple else args, fbx_version
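A minimal sketch of walking the parsed tree, assuming an existing 'scene.fbx':
# 'scene.fbx' is an assumed input file name.
elem_root, fbx_version = parse('scene.fbx')
for elem in elem_root.elems:
    print(elem.id, len(elem.props), len(elem.elems))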
# ----------------------------------------------------------------------------
# Inline Modules
# pyfbx.data_types
data_types = type(array)("data_types")
data_types.__dict__.update(
dict(
INT8 = b'Z'[0],
INT16 = b'Y'[0],
BOOL = b'C'[0],
INT32 = b'I'[0],
FLOAT32 = b'F'[0],
FLOAT64 = b'D'[0],
INT64 = b'L'[0],
BYTES = b'R'[0],
STRING = b'S'[0],
FLOAT32_ARRAY = b'f'[0],
INT32_ARRAY = b'i'[0],
FLOAT64_ARRAY = b'd'[0],
INT64_ARRAY = b'l'[0],
BOOL_ARRAY = b'b'[0],
BYTE_ARRAY = b'c'[0],
))
# pyfbx.parse_bin
parse_bin = type(array)("parse_bin")
parse_bin.__dict__.update(
dict(
parse = parse
))
# ----------------------------------------------------------------------------
# JSON Converter
# from pyfbx import parse_bin, data_types
import json
import array
def fbx2json_property_as_string(prop, prop_type):
if prop_type == data_types.STRING:
prop_str = prop.decode('utf-8')
prop_str = prop_str.replace('\x00\x01', '::')
return json.dumps(prop_str)
else:
prop_py_type = type(prop)
if prop_py_type == bytes:
return json.dumps(repr(prop)[2:-1])
elif prop_py_type == bool:
return json.dumps(prop)
elif prop_py_type == array.array:
return repr(list(prop))
return repr(prop)
def fbx2json_properties_as_string(fbx_elem):
return ", ".join(fbx2json_property_as_string(*prop_item)
for prop_item in zip(fbx_elem.props,
fbx_elem.props_type))
def fbx2json_recurse(fw, fbx_elem, ident, is_last):
fbx_elem_id = fbx_elem.id.decode('utf-8')
fw('%s["%s", ' % (ident, fbx_elem_id))
fw('[%s], ' % fbx2json_properties_as_string(fbx_elem))
fw('"%s", ' % (fbx_elem.props_type.decode('ascii')))
fw('[')
if fbx_elem.elems:
fw('\n')
ident_sub = ident + " "
for fbx_elem_sub in fbx_elem.elems:
fbx2json_recurse(fw, fbx_elem_sub, ident_sub,
fbx_elem_sub is fbx_elem.elems[-1])
fw(']')
fw(']%s' % ('' if is_last else ',\n'))
def fbx2json(fn):
import os
fn_json = "%s.json" % os.path.splitext(fn)[0]
print("Writing: %r " % fn_json, end="")
fbx_root_elem, fbx_version = parse(fn, use_namedtuple=True)
print("(Version %d) ..." % fbx_version)
with open(fn_json, 'w', encoding="ascii", errors='xmlcharrefreplace') as f:
fw = f.write
fw('[\n')
ident_sub = " "
for fbx_elem_sub in fbx_root_elem.elems:
fbx2json_recurse(f.write, fbx_elem_sub, ident_sub,
fbx_elem_sub is fbx_root_elem.elems[-1])
fw(']\n')
# ----------------------------------------------------------------------------
# Command Line
def main():
import sys
if "--help" in sys.argv:
print(__doc__)
return
for arg in sys.argv[1:]:
try:
fbx2json(arg)
except:
print("Failed to convert %r, error:" % arg)
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
File diff suppressed because it is too large
@@ -0,0 +1,194 @@
# SPDX-FileCopyrightText: 2023 Blender Foundation
#
# SPDX-License-Identifier: GPL-2.0-or-later
from contextlib import contextmanager, nullcontext
import os
from queue import SimpleQueue
# Note: `bpy` cannot be imported here because this module is also used by the fbx2json.py and json2fbx.py scripts.
# For debugging/profiling purposes, can be modified at runtime to force single-threaded execution.
_MULTITHREADING_ENABLED = True
# The concurrent.futures module may not work or may not be available on WebAssembly platforms wasm32-emscripten and
# wasm32-wasi.
try:
from concurrent.futures import ThreadPoolExecutor
except ModuleNotFoundError:
_MULTITHREADING_ENABLED = False
ThreadPoolExecutor = None
else:
try:
# The module may be available, but not be fully functional. An error may be raised when attempting to start a
# new thread.
with ThreadPoolExecutor() as tpe:
# Attempt to start a thread by submitting a callable.
tpe.submit(lambda: None)
except Exception:
# Assume that multithreading is not supported and fall back to single-threaded execution.
_MULTITHREADING_ENABLED = False
def get_cpu_count():
"""Get the number of cpus assigned to the current process if that information is available on this system.
If not available, get the total number of cpus.
If the cpu count is indeterminable, it is assumed that there is only 1 cpu available."""
sched_getaffinity = getattr(os, "sched_getaffinity", None)
if sched_getaffinity is not None:
# Return the number of cpus assigned to the current process.
return len(sched_getaffinity(0))
count = os.cpu_count()
return count if count is not None else 1
class MultiThreadedTaskConsumer:
"""Helper class that encapsulates everything needed to run a function on separate threads, with a single-threaded
fallback if multithreading is not available.
Lower overhead than typical use of ThreadPoolExecutor because no Future objects are returned, which makes this class
more suitable for running many smaller tasks.
As with any threaded parallelization, because of Python's Global Interpreter Lock, only one thread can execute
Python code at a time, so threaded parallelization is only useful when the functions used release the GIL, such as
many IO related functions."""
# A special task value used to signal task consumer threads to shut down.
_SHUT_DOWN_THREADS = object()
__slots__ = ("_consumer_function", "_shared_task_queue", "_task_consumer_futures", "_executor",
"_max_consumer_threads", "_shutting_down", "_max_queue_per_consumer")
def __init__(self, consumer_function, max_consumer_threads, max_queue_per_consumer=5):
# It's recommended to use MultiThreadedTaskConsumer.new_cpu_bound_cm() instead of creating new instances
# directly.
# __init__ should only be called after checking _MULTITHREADING_ENABLED.
assert(_MULTITHREADING_ENABLED)
# The function that will be called on separate threads to consume tasks.
self._consumer_function = consumer_function
# All the threads share a single queue. This is a simplistic approach, but it is unlikely to be problematic
# unless the main thread is expected to wait a long time for the consumer threads to finish.
self._shared_task_queue = SimpleQueue()
# Reference to each thread is kept through the returned Future objects. This is used as part of determining when
# new threads should be started and is used to be able to receive and handle exceptions from the threads.
self._task_consumer_futures = []
# Create the executor.
self._executor = ThreadPoolExecutor(max_workers=max_consumer_threads)
# Technically the max workers of the executor is accessible through its `._max_workers`, but since it's private,
# meaning it could be changed without warning, we'll store the max workers/consumers ourselves.
self._max_consumer_threads = max_consumer_threads
# The maximum task queue size (before another consumer thread is started) increases by this amount with every
# additional consumer thread.
self._max_queue_per_consumer = max_queue_per_consumer
# When shutting down the threads, this is set to True as an extra safeguard to prevent new tasks being
# scheduled.
self._shutting_down = False
@classmethod
def new_cpu_bound_cm(cls, consumer_function, other_cpu_bound_threads_in_use=1, hard_max_threads=32):
"""Return a context manager that, when entered, returns a wrapper around `consumer_function` that schedules
`consumer_function` to be run on a separate thread.
If the system can't use multithreading, then the context manager's returned function will instead be the input
`consumer_function` argument, causing tasks to be run immediately on the calling thread.
When exiting the context manager, it waits for all scheduled tasks to complete and prevents the creation of new
tasks, similar to calling ThreadPoolExecutor.shutdown(). For these reasons, the wrapped function should only be
called from the thread that entered the context manager, otherwise there is no guarantee that all tasks will get
scheduled before the context manager exits.
Any task that fails with an exception will cause all task consumer threads to stop.
The maximum number of threads used matches the number of cpus available up to a maximum of `hard_max_threads`.
`hard_max_threads`'s default of 32 matches ThreadPoolExecutor's default behaviour.
The maximum number of threads used is decreased by `other_cpu_bound_threads_in_use`. Defaulting to `1`, assuming
that the calling thread will also be doing CPU-bound work.
Most IO-bound tasks can probably use a ThreadPoolExecutor directly instead because there will typically be fewer
tasks and, on average, each individual task will take longer.
If needed, `cls.new_cpu_bound_cm(consumer_function, -4)` could be suitable for lots of small IO-bound tasks,
because it ensures a minimum of 5 threads, like the default ThreadPoolExecutor."""
if _MULTITHREADING_ENABLED:
max_threads = get_cpu_count() - other_cpu_bound_threads_in_use
max_threads = min(max_threads, hard_max_threads)
if max_threads > 0:
return cls(consumer_function, max_threads)._wrap_executor_cm()
# Fall back to single-threaded.
return nullcontext(consumer_function)
def _task_consumer_callable(self):
"""Callable that is run by each task consumer thread.
Signals the other task consumer threads to stop when stopped intentionally or when an exception occurs."""
try:
while True:
# Blocks until it can get a task.
task_args = self._shared_task_queue.get()
if task_args is self._SHUT_DOWN_THREADS:
# This special value signals that it's time for all the threads to stop.
break
else:
# Call the task consumer function.
self._consumer_function(*task_args)
finally:
# Either the thread has been told to shut down because it received _SHUT_DOWN_THREADS or an exception has
# occurred.
# Add _SHUT_DOWN_THREADS to the queue so that the other consumer threads will also shut down.
self._shared_task_queue.put(self._SHUT_DOWN_THREADS)
def _schedule_task(self, *args):
"""Task consumer threads are only started as tasks are added.
To mitigate starting lots of threads if many tasks are scheduled in quick succession, new threads are only
started if the number of queued tasks grows too large.
This function is a slight misuse of ThreadPoolExecutor. Normally each task to be scheduled would be submitted
through ThreadPoolExecutor.submit, but doing so is noticeably slower for small tasks. We could start new Thread
instances manually without using ThreadPoolExecutor, but ThreadPoolExecutor gives us a higher level API for
waiting for threads to finish and handling exceptions without having to implement an API using Thread ourselves.
"""
if self._shutting_down:
# Shouldn't occur through normal usage.
raise RuntimeError("Cannot schedule new tasks after shutdown")
# Schedule the task by adding it to the task queue.
self._shared_task_queue.put(args)
# Check if more consumer threads need to be added to account for the rate at which tasks are being scheduled
# compared to the rate at which tasks are being consumed.
current_consumer_count = len(self._task_consumer_futures)
if current_consumer_count < self._max_consumer_threads:
# The max queue size increases as new threads are added, otherwise, by the time the next task is added, it's
# likely that the queue size will still be over the max, causing another new thread to be added immediately.
# Increasing the max queue size whenever a new thread is started gives some time for the new thread to start
# up and begin consuming tasks before it's determined that another thread is needed.
max_queue_size_for_current_consumers = self._max_queue_per_consumer * current_consumer_count
if self._shared_task_queue.qsize() > max_queue_size_for_current_consumers:
# Add a new consumer thread because the queue has grown too large.
self._task_consumer_futures.append(self._executor.submit(self._task_consumer_callable))
@contextmanager
def _wrap_executor_cm(self):
"""Wrap the executor's context manager to instead return self._schedule_task and such that the threads
automatically start shutting down before the executor itself starts shutting down."""
# .__enter__()
# Exiting the context manager of the executor will wait for all threads to finish and prevent new
# threads from being created, as if its shutdown() method had been called.
with self._executor:
try:
yield self._schedule_task
finally:
# .__exit__()
self._shutting_down = True
# Signal all consumer threads to finish up and shut down so that the executor can shut down.
# When this is run on the same thread that schedules new tasks, this guarantees that no more tasks will
# be scheduled after the consumer threads start to shut down.
self._shared_task_queue.put(self._SHUT_DOWN_THREADS)
# Because `self._executor` was entered with a context manager, it will wait for all the consumer threads
# to finish even if we propagate an exception from one of the threads here.
for future in self._task_consumer_futures:
# .exception() waits for the future to finish and returns its raised exception or None.
ex = future.exception()
if ex is not None:
# If one of the threads raised an exception, propagate it to the main thread.
# Only the first exception will be propagated if there were multiple.
raise ex
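A minimal usage sketch, assuming zlib-style CPU-bound tasks; this mirrors how encode_bin.py drives the class for array compression:
import zlib

results = {}

def compress_task(key, payload):
    # zlib.compress releases the GIL, so these tasks can run concurrently.
    results[key] = zlib.compress(payload, 1)

with MultiThreadedTaskConsumer.new_cpu_bound_cm(compress_task) as schedule:
    for i in range(8):
        schedule(i, b'some repetitive payload ' * 256)
# Exiting the context manager waits for the queue to drain, so all
# scheduled tasks are guaranteed to be finished here.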
File diff suppressed because it is too large
@@ -0,0 +1,161 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Script copyright (C) 2014 Blender Foundation
"""
Usage
=====
json2fbx [FILES]...
This script will write a binary FBX file for each JSON argument given.
Input
======
The JSON data is formatted into a list of nested lists of 4 items:
``[id, [data, ...], "data_types", [subtree, ...]]``
Where each list may be empty, and the items in
the subtree are formatted the same way.
data_types is a string, aligned with data, that specifies a type
for each property.
The types are as follows:
* 'Z': - INT8
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY
Note that key:value pairs aren't used since the ids are not
guaranteed to be unique.
"""
def elem_empty(elem, name):
import encode_bin
sub_elem = encode_bin.FBXElem(name)
if elem is not None:
elem.elems.append(sub_elem)
return sub_elem
def parse_json_rec(fbx_root, json_node):
name, data, data_types, children = json_node
ver = 0
assert(len(data_types) == len(data))
e = elem_empty(fbx_root, name.encode())
for d, dt in zip(data, data_types):
if dt == "C":
e.add_bool(d)
elif dt == "Z":
e.add_int8(d)
elif dt == "Y":
e.add_int16(d)
elif dt == "I":
e.add_int32(d)
elif dt == "L":
e.add_int64(d)
elif dt == "F":
e.add_float32(d)
elif dt == "D":
e.add_float64(d)
elif dt == "R":
d = eval('b"""' + d + '"""')
e.add_bytes(d)
elif dt == "S":
d = d.encode().replace(b"::", b"\x00\x01")
e.add_string(d)
elif dt == "i":
e.add_int32_array(d)
elif dt == "l":
e.add_int64_array(d)
elif dt == "f":
e.add_float32_array(d)
elif dt == "d":
e.add_float64_array(d)
elif dt == "b":
e.add_bool_array(d)
elif dt == "c":
e.add_byte_array(d)
if name == "FBXVersion":
assert(data_types == "I")
ver = int(data[0])
for child in children:
_ver = parse_json_rec(e, child)
if _ver:
ver = _ver
return ver
def parse_json(json_root):
root = elem_empty(None, b"")
ver = 0
for n in json_root:
_ver = parse_json_rec(root, n)
if _ver:
ver = _ver
return root, ver
def json2fbx(fn):
import os
import json
import encode_bin
fn_fbx = "%s.fbx" % os.path.splitext(fn)[0]
print("Writing: %r " % fn_fbx, end="")
json_root = []
with open(fn) as f_json:
json_root = json.load(f_json)
fbx_root, fbx_version = parse_json(json_root)
print("(Version %d) ..." % fbx_version)
encode_bin.write(fn_fbx, fbx_root, fbx_version)
# ----------------------------------------------------------------------------
# Command Line
def main():
import sys
if "--help" in sys.argv:
print(__doc__)
return
for arg in sys.argv[1:]:
try:
json2fbx(arg)
except:
print("Failed to convert %r, error:" % arg)
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
@@ -0,0 +1,194 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) 2006-2012, assimp team
# Script copyright (C) 2013 Blender Foundation
__all__ = (
"parse",
"data_types",
"parse_version",
"FBXElem",
)
from struct import unpack
import array
import zlib
from . import data_types
# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple
def read_uint(read):
return unpack(b'<I', read(4))[0]
def read_uint64(read):
return unpack(b'<Q', read(8))[0]
def read_ubyte(read):
return unpack(b'B', read(1))[0]
def read_string_ubyte(read):
size = read_ubyte(read)
data = read(size)
return data
def unpack_array(read, array_type, array_stride, array_byteswap):
length = read_uint(read)
encoding = read_uint(read)
comp_len = read_uint(read)
data = read(comp_len)
if encoding == 0:
pass
elif encoding == 1:
data = zlib.decompress(data)
assert(length * array_stride == len(data))
data_array = array.array(array_type, data)
if array_byteswap and _IS_BIG_ENDIAN:
data_array.byteswap()
return data_array
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # 8 bit int (matches encode_bin's add_int8)
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int
b'F'[0]: lambda read: unpack(b'<f', read(4))[0], # 32 bit float
b'D'[0]: lambda read: unpack(b'<d', read(8))[0], # 64 bit float
b'L'[0]: lambda read: unpack(b'<q', read(8))[0], # 64 bit int
b'R'[0]: lambda read: read(read_uint(read)), # binary data
b'S'[0]: lambda read: read(read_uint(read)), # string data
b'f'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT32, 4, False), # array (float)
b'i'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT32, 4, True), # array (int)
b'd'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT64, 8, False), # array (double)
b'l'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT64, 8, True), # array (long)
b'b'[0]: lambda read: unpack_array(read, data_types.ARRAY_BOOL, 1, False), # array (bool)
b'c'[0]: lambda read: unpack_array(read, data_types.ARRAY_BYTE, 1, False), # array (ubyte)
}
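# Each property in the stream is a 1-byte type code (the keys above) followed
# by its type-specific payload, which read_elem dispatches through this dict.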
# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, read_fbx_elem_uint
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
if fbx_version < 7500:
_BLOCK_SENTINEL_LENGTH = 13
read_fbx_elem_uint = read_uint
else:
_BLOCK_SENTINEL_LENGTH = 25
read_fbx_elem_uint = read_uint64
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
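# e.g. init_version(7400) sets a 13-byte sentinel and uint32 element headers,
# while init_version(7500) sets a 25-byte sentinel and uint64 element headers.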
def read_elem(read, tell, use_namedtuple):
# [0] the offset at which this block ends
# [1] the number of properties in the scope
# [2] the length of the property list
end_offset = read_fbx_elem_uint(read)
if end_offset == 0:
return None
prop_count = read_fbx_elem_uint(read)
prop_length = read_fbx_elem_uint(read)
elem_id = read_string_ubyte(read) # elem name of the scope/key
elem_props_type = bytearray(prop_count) # elem property types
elem_props_data = [None] * prop_count # elem properties (if any)
elem_subtree = [] # elem children (if any)
for i in range(prop_count):
data_type = read(1)[0]
elem_props_data[i] = read_data_dict[data_type](read)
elem_props_type[i] = data_type
if tell() < end_offset:
while tell() < (end_offset - _BLOCK_SENTINEL_LENGTH):
elem_subtree.append(read_elem(read, tell, use_namedtuple))
if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
raise IOError("failed to read nested block sentinel, "
"expected all bytes to be 0")
if tell() != end_offset:
raise IOError("scope length not reached, something is wrong")
args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
return FBXElem(*args) if use_namedtuple else args
def parse_version(fn):
"""
Return the FBX version,
if the file isn't a binary FBX return zero.
"""
with open(fn, 'rb') as f:
read = f.read
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
return 0
return read_uint(read)
def parse(fn, use_namedtuple=True):
root_elems = []
with open(fn, 'rb') as f:
read = f.read
tell = f.tell
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
raise IOError("Invalid header")
fbx_version = read_uint(read)
init_version(fbx_version)
while True:
elem = read_elem(read, tell, use_namedtuple)
if elem is None:
break
root_elems.append(elem)
args = (b'', [], bytearray(0), root_elems)
return FBXElem(*args) if use_namedtuple else args, fbx_version
@@ -0,0 +1,337 @@
if "bpy" in locals():
import importlib
if "export_fbx_bin" in locals():
importlib.reload(export_fbx_bin)
import bpy
import addon_utils, sys
from bpy.props import (
StringProperty,
BoolProperty,
FloatProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
orientation_helper,
path_reference_mode,
axis_conversion,
)
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ARP_OT_export_fbx_wrap(bpy.types.Operator, ExportHelper):
"""Write a FBX file"""
bl_idname = "arp_export_scene.fbx"
bl_label = "Export ARP FBX"
bl_options = {'UNDO', 'PRESET'}
filename_ext = ".fbx"
filter_glob: StringProperty(default="*.fbx", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
ui_tab: EnumProperty(
items=(('MAIN', "Main", "Main basic settings"),
('GEOMETRY', "Geometries", "Geometry-related settings"),
('ARMATURE', "Armatures", "Armature-related settings"),
('ANIMATION', "Animation", "Animation-related settings"),
),
name="ui_tab",
description="Export options categories",
)
use_selection: BoolProperty(
name="Selected Objects",
description="Export selected and visible objects only",
default=False,
)
use_active_collection: BoolProperty(
name="Active Collection",
description="Export only objects from the active collection (and its children)",
default=False,
)
global_scale: FloatProperty(
name="Scale",
description="Scale all data (Some importers do not support scaled armatures!)",
min=0.001, max=1000.0,
soft_min=0.01, soft_max=1000.0,
default=1.0,
)
apply_unit_scale: BoolProperty(
name="Apply Unit",
description="Take into account current Blender units settings (if unset, raw Blender Units values are used as-is)",
default=True,
)
apply_scale_options: EnumProperty(
items=(('FBX_SCALE_NONE', "All Local",
"Apply custom scaling and units scaling to each object transformation, FBX scale remains at 1.0"),
('FBX_SCALE_UNITS', "FBX Units Scale",
"Apply custom scaling to each object transformation, and units scaling to FBX scale"),
('FBX_SCALE_CUSTOM', "FBX Custom Scale",
"Apply custom scaling to FBX scale, and units scaling to each object transformation"),
('FBX_SCALE_ALL', "FBX All",
"Apply custom scaling and units scaling to FBX scale"),
),
name="Apply Scalings",
description="How to apply custom and units scalings in generated FBX file "
"(Blender uses FBX scale to detect units on import, "
"but many other applications do not handle the same way)",
)
use_space_transform: BoolProperty(
name="Use Space Transform",
description="Apply global space transform to the object rotations. When disabled "
"only the axis space is written to the file and all object transforms are left as-is",
default=True,
)
bake_space_transform: BoolProperty(
name="!EXPERIMENTAL! Apply Transform",
description="Bake space transform into object data, avoids getting unwanted rotations to objects when "
"target space is not aligned with Blender's space "
"(WARNING! experimental option, use at own risks, known broken with armatures/animations)",
default=False,
)
colors_type: EnumProperty(
name="Vertex Colors",
items=(('NONE', "None", "Do not import color attributes"),
('SRGB', "sRGB", "Expect file colors in sRGB color space"),
('LINEAR', "Linear", "Expect file colors in linear color space"),
),
description="Import vertex color attributes",
default='SRGB',
)
prioritize_active_color: BoolProperty(
name="Prioritize Active Color",
description="Make sure active color will be exported first. Could be important "
"since some other software can discard other color attributes besides the first one",
default=False,
)
object_types: EnumProperty(
name="Object Types",
options={'ENUM_FLAG'},
items=(('EMPTY', "Empty", ""),
('CAMERA', "Camera", ""),
('LIGHT', "Lamp", ""),
('ARMATURE', "Armature", "WARNING: not supported in dupli/group instances"),
('MESH', "Mesh", ""),
('OTHER', "Other", "Other geometry types, like curve, metaball, etc. (converted to meshes)"),
),
description="Which kind of object to export",
default={'EMPTY', 'CAMERA', 'LIGHT', 'ARMATURE', 'MESH', 'OTHER'},
)
use_mesh_modifiers: BoolProperty(
name="Apply Modifiers",
description="Apply modifiers to mesh objects (except Armature ones) - "
"WARNING: prevents exporting shape keys",
default=True,
)
use_mesh_modifiers_render: BoolProperty(
name="Use Modifiers Render Setting",
description="Use render settings when applying modifiers to mesh objects",
default=True,
)
mesh_smooth_type: EnumProperty(
name="Smoothing",
items=(('OFF', "Normals Only", "Export only normals instead of writing edge or face smoothing data"),
('FACE', "Face", "Write face smoothing"),
('EDGE', "Edge", "Write edge smoothing"),
),
description="Export smoothing information "
"(prefer 'Normals Only' option if your target importer understand split normals)",
default='OFF',
)
use_subsurf: BoolProperty(
name="Export Subdivision Surface",
description="Export the last Catmull-Rom subdivision modifier as FBX subdivision "
"(does not apply the modifier even if 'Apply Modifiers' is enabled)",
default=False,
)
use_mesh_edges: BoolProperty(
name="Loose Edges",
description="Export loose edges (as two-vertices polygons)",
default=False,
)
use_tspace: BoolProperty(
name="Tangent Space",
description="Add binormal and tangent vectors, together with normal they form the tangent space "
"(will only work correctly with tris/quads only meshes!)",
default=False,
)
use_triangles: BoolProperty(
name="Triangulate Faces",
description="Convert all faces to triangles",
default=False,
)
use_custom_props: BoolProperty(
name="Custom Properties",
description="Export custom properties",
default=False,
)
add_leaf_bones: BoolProperty(
name="Add Leaf Bones",
description="Append a final bone to the end of each chain to specify last bone length "
"(use this when you intend to edit the armature from exported data)",
default=True # False for commit!
)
primary_bone_axis: EnumProperty(
name="Primary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='Y',
)
secondary_bone_axis: EnumProperty(
name="Secondary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='X',
)
use_armature_deform_only: BoolProperty(
name="Only Deform Bones",
description="Only write deforming bones (and non-deforming ones when they have deforming children)",
default=False,
)
armature_nodetype: EnumProperty(
name="Armature FBXNode Type",
items=(('NULL', "Null", "'Null' FBX node, similar to Blender's Empty (default)"),
('ROOT', "Root", "'Root' FBX node, supposed to be the root of chains of bones..."),
('LIMBNODE', "LimbNode", "'LimbNode' FBX node, a regular joint between two bones..."),
),
description="FBX type of node (object) used to represent Blender's armatures "
"(use Null one unless you experience issues with other app, other choices may no import back "
"perfectly in Blender...)",
default='NULL',
)
bake_anim: BoolProperty(
name="Baked Animation",
description="Export baked keyframe animation",
default=True,
)
bake_anim_use_all_bones: BoolProperty(
name="Key All Bones",
description="Force exporting at least one key of animation for all bones "
"(needed with some target applications, like UE4)",
default=True,
)
bake_anim_use_nla_strips: BoolProperty(
name="NLA Strips",
description="Export each non-muted NLA strip as a separated FBX's AnimStack, if any, "
"instead of global scene animation",
default=True,
)
bake_anim_use_all_actions: BoolProperty(
name="All Actions",
description="Export each action as a separated FBX's AnimStack, instead of global scene animation "
"(note that animated objects will get all actions compatible with them, "
"others will get no animation at all)",
default=True,
)
bake_anim_force_startend_keying: BoolProperty(
name="Force Start/End Keying",
description="Always add a keyframe at start and end of actions for animated channels",
default=True,
)
bake_anim_step: FloatProperty(
name="Sampling Rate",
description="How often to evaluate animated values (in frames)",
min=0.01, max=100.0,
soft_min=0.1, soft_max=10.0,
default=1.0,
)
bake_anim_simplify_factor: FloatProperty(
name="Simplify",
description="How much to simplify baked values (0.0 to disable, the higher the more simplified)",
min=0.0, max=100.0, # No simplification to up to 10% of current magnitude tolerance.
soft_min=0.0, soft_max=10.0,
default=1.0, # default: min slope: 0.005, max frame step: 10.
)
path_mode: path_reference_mode
embed_textures: BoolProperty(
name="Embed Textures",
description="Embed textures in FBX binary file (only for \"Copy\" path mode!)",
default=False,
)
batch_mode: EnumProperty(
name="Batch Mode",
items=(('OFF', "Off", "Active scene to file"),
('SCENE', "Scene", "Each scene as a file"),
('COLLECTION', "Collection",
"Each collection (data-block ones) as a file, does not include content of children collections"),
('SCENE_COLLECTION', "Scene Collections",
"Each collection (including master, non-data-block ones) of each scene as a file, "
"including content from children collections"),
('ACTIVE_SCENE_COLLECTION', "Active Scene Collections",
"Each collection (including master, non-data-block one) of the active scene as a file, "
"including content from children collections"),
),
)
use_batch_own_dir: BoolProperty(
name="Batch Own Dir",
description="Create a dir for each exported file",
default=True,
)
use_metadata: BoolProperty(
name="Use Metadata",
default=True,
options={'HIDDEN'},
)
#humanoid_actions: BoolProperty(name="Humanoid Actions Only", default=True)
shape_keys_baked_data: StringProperty(name="sk data", default="")
mesh_names_data: StringProperty(name="mesh names", default="")
export_action_only: StringProperty(name="", default="")
@property
def check_extension(self):
return self.batch_mode == 'OFF'
def execute(self, context):
from mathutils import Matrix
if not self.filepath:
raise Exception("filepath not set")
global_matrix = (axis_conversion(to_forward=self.axis_forward,
to_up=self.axis_up,
).to_4x4()
if self.use_space_transform else Matrix())
keywords = self.as_keywords(ignore=("check_existing",
"filter_glob",
"ui_tab",
))
keywords["global_matrix"] = global_matrix
from . import export_fbx_bin
return export_fbx_bin.arp_save(self, context, **keywords)
def register():
if bpy.app.version < (4,1,0):
from bpy.utils import register_class
register_class(ARP_OT_export_fbx_wrap)
def unregister():
if bpy.app.version < (4,1,0):
from bpy.utils import unregister_class
unregister_class(ARP_OT_export_fbx_wrap)
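# Invocation sketch (file path is hypothetical): once registered (Blender < 4.1),
# the wrapper can be called like the built-in FBX exporter via its bl_idname.
#
# bpy.ops.arp_export_scene.fbx(filepath="/tmp/out.fbx", use_selection=True)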
@@ -0,0 +1,61 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Script copyright (C) 2006-2012, assimp team
# Script copyright (C) 2013 Blender Foundation
BOOL = b'C'[0]
INT8 = b'Z'[0]
INT16 = b'Y'[0]
INT32 = b'I'[0]
INT64 = b'L'[0]
FLOAT32 = b'F'[0]
FLOAT64 = b'D'[0]
BYTES = b'R'[0]
STRING = b'S'[0]
INT32_ARRAY = b'i'[0]
INT64_ARRAY = b'l'[0]
FLOAT32_ARRAY = b'f'[0]
FLOAT64_ARRAY = b'd'[0]
BOOL_ARRAY = b'b'[0]
BYTE_ARRAY = b'c'[0]
# Some other misc defines
# Known combinations so far - supposed meaning: A = animatable, A+ = animated, U = UserProp
# VALID_NUMBER_FLAGS = {b'A', b'A+', b'AU', b'A+U'} # Not used...
# array types - actual length may vary (depending on underlying C implementation)!
import array
# For now, bytes and bool are assumed always 1 byte.
ARRAY_BOOL = 'b'
ARRAY_BYTE = 'B'
ARRAY_INT32 = None
ARRAY_INT64 = None
for _t in 'ilq':
size = array.array(_t).itemsize
if size == 4:
ARRAY_INT32 = _t
elif size == 8:
ARRAY_INT64 = _t
if ARRAY_INT32 and ARRAY_INT64:
break
if not ARRAY_INT32:
raise Exception("Impossible to get a 4-bytes integer type for array!")
if not ARRAY_INT64:
raise Exception("Impossible to get an 8-bytes integer type for array!")
ARRAY_FLOAT32 = None
ARRAY_FLOAT64 = None
for _t in 'fd':
size = array.array(_t).itemsize
if size == 4:
ARRAY_FLOAT32 = _t
elif size == 8:
ARRAY_FLOAT64 = _t
if ARRAY_FLOAT32 and ARRAY_FLOAT64:
break
if not ARRAY_FLOAT32:
raise Exception("Impossible to get a 4-bytes float type for array!")
if not ARRAY_FLOAT64:
raise Exception("Impossible to get an 8-bytes float type for array!")
@@ -0,0 +1,344 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Script copyright (C) 2013 Campbell Barton
try:
from . import data_types
except:
import data_types
from struct import pack
import array
import numpy as np
import zlib
_BLOCK_SENTINEL_LENGTH = 13
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
# fbx has very strict CRC rules, all based on file timestamp
# until we figure these out, write files at a fixed time. (workaround!)
# Assumes: CreationTime
_TIME_ID = b'1970-01-01 10:00:00:000'
_FILE_ID = b'\x28\xb3\x2a\xeb\xb6\x24\xcc\xc2\xbf\xc8\xb0\x2a\xa9\x2b\xfc\xf1'
_FOOT_ID = b'\xfa\xbc\xab\x09\xd0\xc8\xd4\x66\xb1\x76\xfb\x83\x1c\xf7\x26\x7e'
# Awful exceptions: those "classes" of elements seem to need block sentinel even when having no children and some props.
_ELEMS_ID_ALWAYS_BLOCK_SENTINEL = {b"AnimationStack", b"AnimationLayer"}
class FBXElem:
__slots__ = (
"id",
"props",
"props_type",
"elems",
"_props_length", # combine length of props
"_end_offset", # byte offset from the start of the file.
)
def __init__(self, id):
assert(len(id) < 256) # length must fit in a uint8
self.id = id
self.props = []
self.props_type = bytearray()
self.elems = []
self._end_offset = -1
self._props_length = -1
def add_bool(self, data):
assert(isinstance(data, bool))
data = pack('?', data)
self.props_type.append(data_types.BOOL)
self.props.append(data)
def add_int8(self, data):
assert(isinstance(data, int))
data = pack('<b', data)
self.props_type.append(data_types.INT8)
self.props.append(data)
def add_int16(self, data):
assert(isinstance(data, int))
data = pack('<h', data)
self.props_type.append(data_types.INT16)
self.props.append(data)
def add_int32(self, data):
assert(isinstance(data, int))
data = pack('<i', data)
self.props_type.append(data_types.INT32)
self.props.append(data)
def add_int64(self, data):
assert(isinstance(data, int))
data = pack('<q', data)
self.props_type.append(data_types.INT64)
self.props.append(data)
def add_float32(self, data):
assert(isinstance(data, float))
data = pack('<f', data)
self.props_type.append(data_types.FLOAT32)
self.props.append(data)
def add_float64(self, data):
assert(isinstance(data, float))
data = pack('<d', data)
self.props_type.append(data_types.FLOAT64)
self.props.append(data)
def add_bytes(self, data):
assert(isinstance(data, bytes))
data = pack('<I', len(data)) + data
self.props_type.append(data_types.BYTES)
self.props.append(data)
def add_string(self, data):
assert(isinstance(data, bytes))
data = pack('<I', len(data)) + data
self.props_type.append(data_types.STRING)
self.props.append(data)
def add_string_unicode(self, data):
assert(isinstance(data, str))
data = data.encode('utf8')
data = pack('<I', len(data)) + data
self.props_type.append(data_types.STRING)
self.props.append(data)
def _add_array_helper(self, data, prop_type, length):
# mimic behavior of fbxconverter (also common sense)
# we could make this configurable.
encoding = 0 if len(data) <= 128 else 1
if encoding == 0:
pass
elif encoding == 1:
data = zlib.compress(data, 1)
comp_len = len(data)
data = pack('<3I', length, encoding, comp_len) + data
self.props_type.append(prop_type)
self.props.append(data)
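# The serialized property payload is therefore:
# <uint32 length> <uint32 encoding> <uint32 comp_len> <raw or zlib-compressed bytes>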
def _add_parray_helper(self, data, array_type, prop_type):
assert (isinstance(data, array.array))
assert (data.typecode == array_type)
length = len(data)
if _IS_BIG_ENDIAN:
data = data[:]
data.byteswap()
data = data.tobytes()
self._add_array_helper(data, prop_type, length)
def _add_ndarray_helper(self, data, dtype, prop_type):
assert (isinstance(data, np.ndarray))
assert (data.dtype == dtype)
length = data.size
if _IS_BIG_ENDIAN and data.dtype.isnative:
data = data.byteswap()
data = data.tobytes()
self._add_array_helper(data, prop_type, length)
def add_int32_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.int32, data_types.INT32_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_INT32, data)
self._add_parray_helper(data, data_types.ARRAY_INT32, data_types.INT32_ARRAY)
def add_int64_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.int64, data_types.INT64_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_INT64, data)
self._add_parray_helper(data, data_types.ARRAY_INT64, data_types.INT64_ARRAY)
def add_float32_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.float32, data_types.FLOAT32_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_FLOAT32, data)
self._add_parray_helper(data, data_types.ARRAY_FLOAT32, data_types.FLOAT32_ARRAY)
def add_float64_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.float64, data_types.FLOAT64_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_FLOAT64, data)
self._add_parray_helper(data, data_types.ARRAY_FLOAT64, data_types.FLOAT64_ARRAY)
def add_bool_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, bool, data_types.BOOL_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_BOOL, data)
self._add_parray_helper(data, data_types.ARRAY_BOOL, data_types.BOOL_ARRAY)
def add_byte_array(self, data):
if isinstance(data, np.ndarray):
self._add_ndarray_helper(data, np.byte, data_types.BYTE_ARRAY)
else:
if not isinstance(data, array.array):
data = array.array(data_types.ARRAY_BYTE, data)
self._add_parray_helper(data, data_types.ARRAY_BYTE, data_types.BYTE_ARRAY)
# -------------------------
# internal helper functions
def _calc_offsets(self, offset, is_last):
"""
Call before writing, calculates fixed offsets.
"""
assert(self._end_offset == -1)
assert(self._props_length == -1)
offset += 12 # 3 uints
offset += 1 + len(self.id) # len + idname
props_length = 0
for data in self.props:
# 1 byte for the prop type
props_length += 1 + len(data)
self._props_length = props_length
offset += props_length
offset = self._calc_offsets_children(offset, is_last)
self._end_offset = offset
return offset
def _calc_offsets_children(self, offset, is_last):
if self.elems:
elem_last = self.elems[-1]
for elem in self.elems:
offset = elem._calc_offsets(offset, (elem is elem_last))
offset += _BLOCK_SENTINEL_LENGTH
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
offset += _BLOCK_SENTINEL_LENGTH
return offset
def _write(self, write, tell, is_last):
assert(self._end_offset != -1)
assert(self._props_length != -1)
write(pack('<3I', self._end_offset, len(self.props), self._props_length))
write(bytes((len(self.id),)))
write(self.id)
for i, data in enumerate(self.props):
write(bytes((self.props_type[i],)))
write(data)
self._write_children(write, tell, is_last)
if tell() != self._end_offset:
raise IOError("scope length not reached, "
"something is wrong (%d)" % (self._end_offset - tell()))
def _write_children(self, write, tell, is_last):
if self.elems:
elem_last = self.elems[-1]
for elem in self.elems:
assert(elem.id != b'')
elem._write(write, tell, (elem is elem_last))
write(_BLOCK_SENTINEL_DATA)
elif not self.props or self.id in _ELEMS_ID_ALWAYS_BLOCK_SENTINEL:
if not is_last:
write(_BLOCK_SENTINEL_DATA)
def _write_timedate_hack(elem_root):
# perform 2 changes
# - set the FileID
# - set the CreationTime
ok = 0
for elem in elem_root.elems:
if elem.id == b'FileId':
assert(elem.props_type[0] == b'R'[0])
assert(len(elem.props_type) == 1)
elem.props.clear()
elem.props_type.clear()
elem.add_bytes(_FILE_ID)
ok += 1
elif elem.id == b'CreationTime':
assert(elem.props_type[0] == b'S'[0])
assert(len(elem.props_type) == 1)
elem.props.clear()
elem.props_type.clear()
elem.add_string(_TIME_ID)
ok += 1
if ok == 2:
break
if ok != 2:
print("Missing fields!")
def write(fn, elem_root, version):
assert(elem_root.id == b'')
with open(fn, 'wb') as f:
write = f.write
tell = f.tell
write(_HEAD_MAGIC)
write(pack('<I', version))
# hack since we don't decode time.
# ideally we would _not_ modify this data.
_write_timedate_hack(elem_root)
elem_root._calc_offsets_children(tell(), False)
elem_root._write_children(write, tell, False)
write(_FOOT_ID)
write(b'\x00' * 4)
# padding for alignment (values between 1 & 16 observed)
# if already aligned to 16, add a full 16 bytes padding.
ofs = tell()
pad = ((ofs + 15) & ~15) - ofs
if pad == 0:
pad = 16
write(b'\0' * pad)
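# e.g. ofs == 70 -> pad == ((70 + 15) & ~15) - 70 == 10, padding up to offset 80;
# ofs == 64 is already 16-aligned, so a full 16-byte pad is written instead.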
write(pack('<I', version))
# unknown magic (always the same)
write(b'\0' * 120)
write(b'\xf8\x5a\x8c\x6a\xde\xf5\xd9\x7e\xec\xe9\x0c\xe3\x75\x8f\x29\x0b')
File diff suppressed because it is too large
@@ -0,0 +1,338 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Script copyright (C) 2006-2012, assimp team
# Script copyright (C) 2013 Blender Foundation
"""
Usage
=====
fbx2json [FILES]...
This script will write a JSON file for each FBX argument given.
Output
======
The JSON data is formatted into a list of nested lists of 4 items:
``[id, [data, ...], "data_types", [subtree, ...]]``
Where each list may be empty, and the items in
the subtree are formatted the same way.
data_types is a string, aligned with data, that specifies a type
for each property.
The types are as follows:
* 'Z': - INT8
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY
Note that key:value pairs aren't used since the ids are not
guaranteed to be unique.
"""
# ----------------------------------------------------------------------------
# FBX Binary Parser
from struct import unpack
import array
import zlib
# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple
def read_uint(read):
return unpack(b'<I', read(4))[0]
def read_uint64(read):
return unpack(b'<Q', read(8))[0]
def read_ubyte(read):
return unpack(b'B', read(1))[0]
def read_string_ubyte(read):
size = read_ubyte(read)
data = read(size)
return data
def unpack_array(read, array_type, array_stride, array_byteswap):
length = read_uint(read)
encoding = read_uint(read)
comp_len = read_uint(read)
data = read(comp_len)
if encoding == 0:
pass
elif encoding == 1:
data = zlib.decompress(data)
assert(length * array_stride == len(data))
data_array = array.array(array_type, data)
if array_byteswap and _IS_BIG_ENDIAN:
data_array.byteswap()
return data_array
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # 8 bit int
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int
b'F'[0]: lambda read: unpack(b'<f', read(4))[0], # 32 bit float
b'D'[0]: lambda read: unpack(b'<d', read(8))[0], # 64 bit float
b'L'[0]: lambda read: unpack(b'<q', read(8))[0], # 64 bit int
b'R'[0]: lambda read: read(read_uint(read)), # binary data
b'S'[0]: lambda read: read(read_uint(read)), # string data
b'f'[0]: lambda read: unpack_array(read, 'f', 4, False), # array (float)
b'i'[0]: lambda read: unpack_array(read, 'i', 4, True), # array (int)
b'd'[0]: lambda read: unpack_array(read, 'd', 8, False), # array (double)
b'l'[0]: lambda read: unpack_array(read, 'q', 8, True), # array (long)
b'b'[0]: lambda read: unpack_array(read, 'b', 1, False), # array (bool)
b'c'[0]: lambda read: unpack_array(read, 'B', 1, False), # array (ubyte)
}
# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, read_fbx_elem_uint
assert(_BLOCK_SENTINEL_LENGTH == ...)
assert(_BLOCK_SENTINEL_DATA == ...)
if fbx_version < 7500:
_BLOCK_SENTINEL_LENGTH = 13
read_fbx_elem_uint = read_uint
else:
_BLOCK_SENTINEL_LENGTH = 25
read_fbx_elem_uint = read_uint64
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
def read_elem(read, tell, use_namedtuple):
# [0] the offset at which this block ends
# [1] the number of properties in the scope
# [2] the length of the property list
end_offset = read_fbx_elem_uint(read)
if end_offset == 0:
return None
prop_count = read_fbx_elem_uint(read)
prop_length = read_fbx_elem_uint(read)
elem_id = read_string_ubyte(read) # elem name of the scope/key
elem_props_type = bytearray(prop_count) # elem property types
elem_props_data = [None] * prop_count # elem properties (if any)
elem_subtree = [] # elem children (if any)
for i in range(prop_count):
data_type = read(1)[0]
elem_props_data[i] = read_data_dict[data_type](read)
elem_props_type[i] = data_type
if tell() < end_offset:
while tell() < (end_offset - _BLOCK_SENTINEL_LENGTH):
elem_subtree.append(read_elem(read, tell, use_namedtuple))
if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
raise IOError("failed to read nested block sentinel, "
"expected all bytes to be 0")
if tell() != end_offset:
raise IOError("scope length not reached, something is wrong")
args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
return FBXElem(*args) if use_namedtuple else args
def parse_version(fn):
"""
Return the FBX version,
if the file isn't a binary FBX return zero.
"""
with open(fn, 'rb') as f:
read = f.read
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
return 0
return read_uint(read)
def parse(fn, use_namedtuple=True):
root_elems = []
with open(fn, 'rb') as f:
read = f.read
tell = f.tell
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
raise IOError("Invalid header")
fbx_version = read_uint(read)
init_version(fbx_version)
while True:
elem = read_elem(read, tell, use_namedtuple)
if elem is None:
break
root_elems.append(elem)
args = (b'', [], bytearray(0), root_elems)
return FBXElem(*args) if use_namedtuple else args, fbx_version
# ----------------------------------------------------------------------------
# Inline Modules
# pyfbx.data_types
data_types = type(array)("data_types")
data_types.__dict__.update(
dict(
INT8 = b'Z'[0],
INT16 = b'Y'[0],
BOOL = b'C'[0],
INT32 = b'I'[0],
FLOAT32 = b'F'[0],
FLOAT64 = b'D'[0],
INT64 = b'L'[0],
BYTES = b'R'[0],
STRING = b'S'[0],
FLOAT32_ARRAY = b'f'[0],
INT32_ARRAY = b'i'[0],
FLOAT64_ARRAY = b'd'[0],
INT64_ARRAY = b'l'[0],
BOOL_ARRAY = b'b'[0],
BYTE_ARRAY = b'c'[0],
))
# pyfbx.parse_bin
parse_bin = type(array)("parse_bin")
parse_bin.__dict__.update(
dict(
parse = parse
))
# ----------------------------------------------------------------------------
# JSON Converter
# from pyfbx import parse_bin, data_types
import json
import array
def fbx2json_property_as_string(prop, prop_type):
if prop_type == data_types.STRING:
prop_str = prop.decode('utf-8')
prop_str = prop_str.replace('\x00\x01', '::')
return json.dumps(prop_str)
else:
prop_py_type = type(prop)
if prop_py_type == bytes:
return json.dumps(repr(prop)[2:-1])
elif prop_py_type == bool:
return json.dumps(prop)
elif prop_py_type == array.array:
return repr(list(prop))
return repr(prop)
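# e.g. the FBX string property b'Model\x00\x01Mesh' is emitted into the JSON
# output as "Model::Mesh" (the inverse of json2fbx's "::" -> b"\x00\x01" mapping).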
def fbx2json_properties_as_string(fbx_elem):
return ", ".join(fbx2json_property_as_string(*prop_item)
for prop_item in zip(fbx_elem.props,
fbx_elem.props_type))
def fbx2json_recurse(fw, fbx_elem, ident, is_last):
fbx_elem_id = fbx_elem.id.decode('utf-8')
fw('%s["%s", ' % (ident, fbx_elem_id))
fw('[%s], ' % fbx2json_properties_as_string(fbx_elem))
fw('"%s", ' % (fbx_elem.props_type.decode('ascii')))
fw('[')
if fbx_elem.elems:
fw('\n')
ident_sub = ident + " "
for fbx_elem_sub in fbx_elem.elems:
fbx2json_recurse(fw, fbx_elem_sub, ident_sub,
fbx_elem_sub is fbx_elem.elems[-1])
fw(']')
fw(']%s' % ('' if is_last else ',\n'))
def fbx2json(fn):
import os
fn_json = "%s.json" % os.path.splitext(fn)[0]
print("Writing: %r " % fn_json, end="")
fbx_root_elem, fbx_version = parse(fn, use_namedtuple=True)
print("(Version %d) ..." % fbx_version)
with open(fn_json, 'w', encoding="ascii", errors='xmlcharrefreplace') as f:
fw = f.write
fw('[\n')
ident_sub = " "
for fbx_elem_sub in fbx_root_elem.elems:
fbx2json_recurse(f.write, fbx_elem_sub, ident_sub,
fbx_elem_sub is fbx_root_elem.elems[-1])
fw(']\n')
# ----------------------------------------------------------------------------
# Command Line
def main():
import sys
if "--help" in sys.argv:
print(__doc__)
return
for arg in sys.argv[1:]:
try:
fbx2json(arg)
except:
print("Failed to convert %r, error:" % arg)
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,161 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Script copyright (C) 2014 Blender Foundation
"""
Usage
=====
json2fbx [FILES]...
This script will write a binary FBX file for each JSON argument given.
Input
======
The JSON data is formatted into a list of nested lists of 4 items:
``[id, [data, ...], "data_types", [subtree, ...]]``
Where each list may be empty, and the items in
the subtree are formatted the same way.
data_types is a string, aligned with data, that specifies a type
for each property.
The types are as follows:
* 'Z': - INT8
* 'Y': - INT16
* 'C': - BOOL
* 'I': - INT32
* 'F': - FLOAT32
* 'D': - FLOAT64
* 'L': - INT64
* 'R': - BYTES
* 'S': - STRING
* 'f': - FLOAT32_ARRAY
* 'i': - INT32_ARRAY
* 'd': - FLOAT64_ARRAY
* 'l': - INT64_ARRAY
* 'b': - BOOL ARRAY
* 'c': - BYTE ARRAY
Note that key:value pairs aren't used since the ids are not
guaranteed to be unique.
"""
def elem_empty(elem, name):
import encode_bin
sub_elem = encode_bin.FBXElem(name)
if elem is not None:
elem.elems.append(sub_elem)
return sub_elem
def parse_json_rec(fbx_root, json_node):
name, data, data_types, children = json_node
ver = 0
assert(len(data_types) == len(data))
e = elem_empty(fbx_root, name.encode())
for d, dt in zip(data, data_types):
if dt == "C":
e.add_bool(d)
elif dt == "Z":
e.add_int8(d)
elif dt == "Y":
e.add_int16(d)
elif dt == "I":
e.add_int32(d)
elif dt == "L":
e.add_int64(d)
elif dt == "F":
e.add_float32(d)
elif dt == "D":
e.add_float64(d)
elif dt == "R":
d = eval('b"""' + d + '"""')
e.add_bytes(d)
elif dt == "S":
d = d.encode().replace(b"::", b"\x00\x01")
e.add_string(d)
elif dt == "i":
e.add_int32_array(d)
elif dt == "l":
e.add_int64_array(d)
elif dt == "f":
e.add_float32_array(d)
elif dt == "d":
e.add_float64_array(d)
elif dt == "b":
e.add_bool_array(d)
elif dt == "c":
e.add_byte_array(d)
if name == "FBXVersion":
assert(data_types == "I")
ver = int(data[0])
for child in children:
_ver = parse_json_rec(e, child)
if _ver:
ver = _ver
return ver
def parse_json(json_root):
root = elem_empty(None, b"")
ver = 0
for n in json_root:
_ver = parse_json_rec(root, n)
if _ver:
ver = _ver
return root, ver
def json2fbx(fn):
import os
import json
import encode_bin
fn_fbx = "%s.fbx" % os.path.splitext(fn)[0]
print("Writing: %r " % fn_fbx, end="")
json_root = []
with open(fn) as f_json:
json_root = json.load(f_json)
fbx_root, fbx_version = parse_json(json_root)
print("(Version %d) ..." % fbx_version)
encode_bin.write(fn_fbx, fbx_root, fbx_version)
# ----------------------------------------------------------------------------
# Command Line
def main():
import sys
if "--help" in sys.argv:
print(__doc__)
return
for arg in sys.argv[1:]:
try:
json2fbx(arg)
except:
print("Failed to convert %r, error:" % arg)
import traceback
traceback.print_exc()
if __name__ == "__main__":
main()
@@ -0,0 +1,194 @@
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Script copyright (C) 2006-2012, assimp team
# Script copyright (C) 2013 Blender Foundation
__all__ = (
"parse",
"data_types",
"parse_version",
"FBXElem",
)
from struct import unpack
import array
import zlib
from . import data_types
# at the end of each nested block, there is a NUL record to indicate
# that the sub-scope exists (i.e. to distinguish between P: and P : {})
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
_IS_BIG_ENDIAN = (__import__("sys").byteorder != 'little')
_HEAD_MAGIC = b'Kaydara FBX Binary\x20\x20\x00\x1a\x00'
from collections import namedtuple
FBXElem = namedtuple("FBXElem", ("id", "props", "props_type", "elems"))
del namedtuple
def read_uint(read):
return unpack(b'<I', read(4))[0]
def read_uint64(read):
return unpack(b'<Q', read(8))[0]
def read_ubyte(read):
return unpack(b'B', read(1))[0]
def read_string_ubyte(read):
size = read_ubyte(read)
data = read(size)
return data
def unpack_array(read, array_type, array_stride, array_byteswap):
length = read_uint(read)
encoding = read_uint(read)
comp_len = read_uint(read)
data = read(comp_len)
if encoding == 0:
pass
elif encoding == 1:
data = zlib.decompress(data)
assert(length * array_stride == len(data))
data_array = array.array(array_type, data)
if array_byteswap and _IS_BIG_ENDIAN:
data_array.byteswap()
return data_array
read_data_dict = {
b'Z'[0]: lambda read: unpack(b'<b', read(1))[0], # 8 bit int (matches encode_bin's add_int8)
b'Y'[0]: lambda read: unpack(b'<h', read(2))[0], # 16 bit int
b'C'[0]: lambda read: unpack(b'?', read(1))[0], # 1 bit bool (yes/no)
b'I'[0]: lambda read: unpack(b'<i', read(4))[0], # 32 bit int
b'F'[0]: lambda read: unpack(b'<f', read(4))[0], # 32 bit float
b'D'[0]: lambda read: unpack(b'<d', read(8))[0], # 64 bit float
b'L'[0]: lambda read: unpack(b'<q', read(8))[0], # 64 bit int
b'R'[0]: lambda read: read(read_uint(read)), # binary data
b'S'[0]: lambda read: read(read_uint(read)), # string data
b'f'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT32, 4, False), # array (float)
b'i'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT32, 4, True), # array (int)
b'd'[0]: lambda read: unpack_array(read, data_types.ARRAY_FLOAT64, 8, False), # array (double)
b'l'[0]: lambda read: unpack_array(read, data_types.ARRAY_INT64, 8, True), # array (long)
b'b'[0]: lambda read: unpack_array(read, data_types.ARRAY_BOOL, 1, False), # array (bool)
b'c'[0]: lambda read: unpack_array(read, data_types.ARRAY_BYTE, 1, False), # array (ubyte)
}
# FBX 7500 (aka FBX2016) introduces incompatible changes at binary level:
# * The NULL block marking end of nested stuff switches from 13 bytes long to 25 bytes long.
# * The FBX element metadata (end_offset, prop_count and prop_length) switch from uint32 to uint64.
def init_version(fbx_version):
global _BLOCK_SENTINEL_LENGTH, _BLOCK_SENTINEL_DATA, read_fbx_elem_uint
_BLOCK_SENTINEL_LENGTH = ...
_BLOCK_SENTINEL_DATA = ...
read_fbx_elem_uint = ...
if fbx_version < 7500:
_BLOCK_SENTINEL_LENGTH = 13
read_fbx_elem_uint = read_uint
else:
_BLOCK_SENTINEL_LENGTH = 25
read_fbx_elem_uint = read_uint64
_BLOCK_SENTINEL_DATA = (b'\0' * _BLOCK_SENTINEL_LENGTH)
def read_elem(read, tell, use_namedtuple):
# [0] the offset at which this block ends
# [1] the number of properties in the scope
# [2] the length of the property list
end_offset = read_fbx_elem_uint(read)
if end_offset == 0:
return None
prop_count = read_fbx_elem_uint(read)
prop_length = read_fbx_elem_uint(read)
elem_id = read_string_ubyte(read) # elem name of the scope/key
elem_props_type = bytearray(prop_count) # elem property types
elem_props_data = [None] * prop_count # elem properties (if any)
elem_subtree = [] # elem children (if any)
for i in range(prop_count):
data_type = read(1)[0]
elem_props_data[i] = read_data_dict[data_type](read)
elem_props_type[i] = data_type
if tell() < end_offset:
while tell() < (end_offset - _BLOCK_SENTINEL_LENGTH):
elem_subtree.append(read_elem(read, tell, use_namedtuple))
if read(_BLOCK_SENTINEL_LENGTH) != _BLOCK_SENTINEL_DATA:
raise IOError("failed to read nested block sentinel, "
"expected all bytes to be 0")
if tell() != end_offset:
raise IOError("scope length not reached, something is wrong")
args = (elem_id, elem_props_data, elem_props_type, elem_subtree)
return FBXElem(*args) if use_namedtuple else args
def parse_version(fn):
"""
Return the FBX version,
if the file isn't a binary FBX return zero.
"""
with open(fn, 'rb') as f:
read = f.read
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
return 0
return read_uint(read)
def parse(fn, use_namedtuple=True):
root_elems = []
with open(fn, 'rb') as f:
read = f.read
tell = f.tell
if read(len(_HEAD_MAGIC)) != _HEAD_MAGIC:
raise IOError("Invalid header")
fbx_version = read_uint(read)
init_version(fbx_version)
while True:
elem = read_elem(read, tell, use_namedtuple)
if elem is None:
break
root_elems.append(elem)
args = (b'', [], bytearray(0), root_elems)
return FBXElem(*args) if use_namedtuple else args, fbx_version
@@ -0,0 +1,453 @@
import bpy, sys, math
from .maths_geo import *
from .bone_pose import *
from .version import *
from .sys_print import *
def nla_exit_tweak():
active_obj = bpy.context.active_object
if active_obj.animation_data:
if active_obj.animation_data.use_tweak_mode:
print('NLA is in tweak mode, disable it')
active_action = active_obj.animation_data.action
active_obj.animation_data.use_tweak_mode = False
# on exit, the active action is set to None. Bring it back
active_obj.animation_data.action = active_action
return True
return False
def nla_restore_tweak(state):
active_obj = bpy.context.active_object
if state:
if active_obj.animation_data:
try:
print(' restore tweak mode')
# the active action must be set to None before re-enabling tweak mode
active_obj.animation_data.action = None
# set tweak state
active_obj.animation_data.use_tweak_mode = state
except:
pass
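# Typical pairing (illustrative): leave NLA tweak mode around an operation that
# needs direct action access, then restore the previous state.
#
# was_tweaking = nla_exit_tweak()
# ...  # bake, export, etc.
# nla_restore_tweak(was_tweaking)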
def nla_mute(obj):
muted_tracks = []
if obj is None:
return muted_tracks
if obj.animation_data:
if obj.animation_data.nla_tracks:
for track in obj.animation_data.nla_tracks:
if track.mute == False:
track.mute = True
muted_tracks.append(track.name)
return muted_tracks
def nla_unmute(obj, tracks_names):
if obj is None:
return
if obj.animation_data:
if obj.animation_data.nla_tracks:
for track_name in tracks_names:
track = obj.animation_data.nla_tracks.get(track_name)
track.mute = False
def clear_fcurve(fcurve):
found = True
while found:
try:
fcurve.keyframe_points.remove(fcurve.keyframe_points[0])
except:
found = False
def get_keyf_data(key):
# return keyframe point data
return [key.co[0], key.co[1], key.handle_left[0], key.handle_left[1], key.handle_right[0], key.handle_right[1],
key.handle_left_type, key.handle_right_type, key.easing]
def set_keyf_data(key, data):
# set keyframe point from data (list)
key.co[0] = data[0]
key.co[1] = data[1]
key.handle_left[0] = data[2]
key.handle_left[1] = data[3]
key.handle_right[0] = data[4]
key.handle_right[1] = data[5]
key.handle_left_type = data[6]
key.handle_right_type = data[7]
key.easing = data[8]
def bake_anim(frame_start=0, frame_end=10, only_selected=False, bake_bones=True, bake_object=False,
shape_keys=False, _self=None, action_export_name=None, new_action=True, new_action_name='Action',
interpolation_type='LINEAR', handle_type='DEFAULT',
keyframes_dict=None, sampling_rate=1.0,
support_constraints=False):
scn = bpy.context.scene
obj_data = []
bones_data = []
armature = bpy.data.objects.get(bpy.context.active_object.name)
def get_bones_matrix():
matrices_dict = {}
for pbone in armature.pose.bones:
if only_selected and not pbone.bone.select:
continue
def_matrix = None
constraint = None
bparent_name = ''
parent_type = ''
valid_constraint = True
if support_constraints:# counter transform the ChildOf/Armature constraints
if len(pbone.constraints):
for c in pbone.constraints:
if not c.mute and c.influence > 0.5:
if c.type == 'CHILD_OF':
if c.target:
#if bone
if c.target.type == 'ARMATURE':
bparent_name = c.subtarget
parent_type = "bone"
constraint = c
break
#if object
else:
bparent_name = c.target.name
parent_type = "object"
constraint = c
break
elif c.type == 'ARMATURE':
for tar in c.targets:
if tar.weight > 0.5:
bparent_name = tar.subtarget
parent_type = "bone"
constraint = c
break
if constraint:
if parent_type == 'bone':
if bparent_name == '':
valid_constraint = False
# apply constraint parent
if constraint and valid_constraint:
if parent_type == "bone":
bone_parent = get_pose_bone(bparent_name)
def_matrix = bone_parent.matrix_channel.inverted() @ pbone.matrix
if parent_type == "object":
rig = bpy.data.objects[bparent_name]
def_matrix = constraint.inverse_matrix.inverted() @ rig.matrix_world.inverted() @ pbone.matrix# def_matrix is unset in this branch, counter-transform the pose matrix as in the bone parent case
# apply armature object matrix
def_matrix = armature.convert_space(pose_bone=pbone, matrix=def_matrix, from_space="POSE", to_space="LOCAL")
else:
def_matrix = armature.convert_space(pose_bone=pbone, matrix=pbone.matrix, from_space="POSE", to_space="LOCAL")
matrices_dict[pbone.name] = def_matrix
return matrices_dict
def get_obj_matrix():
parent = armature.parent
matrix = armature.matrix_world
if parent:
return parent.matrix_world.inverted_safe() @ matrix
else:
return matrix.copy()
# make list of meshes with valid shape keys
sk_objects = []
if shape_keys and _self and action_export_name:# bake shape keys value for animation export
for ob_name in _self.char_objects:
ob = bpy.data.objects.get(ob_name+"_arpexp")
if ob.type != "MESH":
continue
if ob.data.shape_keys == None:
continue
if len(ob.data.shape_keys.key_blocks) <= 1:
continue
sk_objects.append(ob)
# store matrices
current_frame = scn.frame_current
f = float(int(frame_start))
while f <= int(frame_end):
f = round(f, 3)# round frame value because of decimals issues
scn.frame_set(math.floor(f), subframe=f-math.floor(f))
bpy.context.view_layer.update()
if bake_bones:
bones_data.append((f, get_bones_matrix()))
if bake_object:
obj_data.append((f, get_obj_matrix()))
# shape keys data (for animation export only)
#print('f', f)
for ob in sk_objects:
for i, sk in enumerate(ob.data.shape_keys.key_blocks):
if (sk.name == "Basis" or sk.name == "00_Basis") and i == 0:
continue
frame_in_action = float(f-int(frame_start))
frame_in_action = round(frame_in_action, 3)# round frame value because of decimals issues
if scn.arp_retro_ge_mesh == False:
obj_base = bpy.data.objects.get(ob.name.replace('_arpexp', ''))
obj_data_name = obj_base.data.name
else:
obj_data_name = ob.data.name
#print('Bake shape key', obj_data_name, sk.name, sk.value)
dict_entry = action_export_name+'|'+'BMesh#'+obj_data_name+'|Shape|BShape Key#'+sk.name+'|'+str(frame_in_action)
_self.shape_keys_data[dict_entry] = sk.value
print_progress_bar("Baking phase 1", f-frame_start, frame_end-frame_start)
f += sampling_rate
f = round(f, 3)# round frame value because of decimals issues
print("")
# set new action
action = None
if new_action:
action = bpy.data.actions.new(new_action_name)
anim_data = armature.animation_data_create()
anim_data.action = action
else:
action = armature.animation_data.action
def store_keyframe(bone_name, prop_type, fc_array_index, frame, value):
fc_data_path = 'pose.bones["' + bone_name + '"].' + prop_type
fc_key = (fc_data_path, fc_array_index)
if not keyframes.get(fc_key):
keyframes[fc_key] = []
keyframes[fc_key].extend((frame, value))
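# The flat [frame0, value0, frame1, value1, ...] layout built here matches what
# fcurve.keyframe_points.foreach_set('co', key_values) expects further below.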
# set transforms and store keyframes
if bake_bones:
bone_count = 0
total_bone_count = len(armature.pose.bones)
for pbone in armature.pose.bones:
bone_count += 1
print_progress_bar("Baking phase 2", bone_count, total_bone_count)
if only_selected and not pbone.bone.select:
continue
euler_prev = None
quat_prev = None
keyframes = {}
for (f, matrix) in bones_data:
# optional, only keyframe given frames
if keyframes_dict:
keyf_list = keyframes_dict[pbone.name]
if not f in keyf_list:
continue
pbone.matrix_basis = matrix[pbone.name].copy()
for arr_idx, value in enumerate(pbone.location):
store_keyframe(pbone.name, "location", arr_idx, f, value)
rotation_mode = pbone.rotation_mode
if rotation_mode == 'QUATERNION':
if quat_prev is not None:
quat = pbone.rotation_quaternion.copy()
if bpy.app.version >= (2,82,0):# previous versions don't know this function
quat.make_compatible(quat_prev)
pbone.rotation_quaternion = quat
quat_prev = quat
del quat
else:
quat_prev = pbone.rotation_quaternion.copy()
for arr_idx, value in enumerate(pbone.rotation_quaternion):
store_keyframe(pbone.name, "rotation_quaternion", arr_idx, f, value)
elif rotation_mode == 'AXIS_ANGLE':
for arr_idx, value in enumerate(pbone.rotation_axis_angle):
store_keyframe(pbone.name, "rotation_axis_angle", arr_idx, f, value)
else: # euler, XYZ, ZXY etc
if euler_prev is not None:
euler = pbone.matrix_basis.to_euler(pbone.rotation_mode, euler_prev)
pbone.rotation_euler = euler
del euler
euler_prev = pbone.rotation_euler.copy()
for arr_idx, value in enumerate(pbone.rotation_euler):
store_keyframe(pbone.name, "rotation_euler", arr_idx, f, value)
for arr_idx, value in enumerate(pbone.scale):
store_keyframe(pbone.name, "scale", arr_idx, f, value)
# Add keyframes
for fc_key, key_values in keyframes.items():
data_path, index = fc_key
fcurve = action.fcurves.find(data_path=data_path, index=index)
if new_action == False and fcurve:# for now, always remove existing keyframes when overwriting the current action; it must be driven by constraints only
action.fcurves.remove(fcurve)
fcurve = action.fcurves.new(data_path, index=index, action_group=pbone.name)
# set keyframes points
num_keys = len(key_values) // 2
fcurve.keyframe_points.add(num_keys)
fcurve.keyframe_points.foreach_set('co', key_values)
# set interpolation type
key_interp = interpolation_type
if 'const_interp' in pbone.bone.keys():
if pbone.bone['const_interp'] == True:
key_interp = 'CONSTANT'
if bpy.app.version >= (2,90,0):# internal error when doing so with Blender 2.83, only for Blender 2.90 and higher
interp_value = bpy.types.Keyframe.bl_rna.properties['interpolation'].enum_items[key_interp].value
fcurve.keyframe_points.foreach_set('interpolation', (interp_value,) * num_keys)
# set handle type
if handle_type != 'DEFAULT':
handle_enum_value = bpy.types.Keyframe.bl_rna.properties['handle_left_type'].enum_items[handle_type].value
fcurve.keyframe_points.foreach_set('handle_left_type', (handle_enum_value,) * num_keys)
fcurve.keyframe_points.foreach_set('handle_right_type', (handle_enum_value,) * num_keys)
else:
for kf in fcurve.keyframe_points:
# set interpolation type (pre Blender 2.90 versions)
kf.interpolation = key_interp
# set handle type (pre Blender 2.90 versions)
if handle_type != 'DEFAULT':
kf.handle_right_type = handle_type
kf.handle_left_type = handle_type
fcurve.update()
if bake_object:
euler_prev = None
quat_prev = None
for (f, matrix) in obj_data:
name = "Action Bake"
armature.matrix_basis = matrix
armature.keyframe_insert("location", index=-1, frame=f, group=name)
rotation_mode = armature.rotation_mode
if rotation_mode == 'QUATERNION':
if quat_prev is not None:
quat = armature.rotation_quaternion.copy()
if bpy.app.version >= (2,82,0):# previous versions don't know this function
quat.make_compatible(quat_prev)
armature.rotation_quaternion = quat
quat_prev = quat
del quat
else:
quat_prev = armature.rotation_quaternion.copy()
armature.keyframe_insert("rotation_quaternion", index=-1, frame=f, group=name)
elif rotation_mode == 'AXIS_ANGLE':
armature.keyframe_insert("rotation_axis_angle", index=-1, frame=f, group=name)
else: # euler, XYZ, ZXY etc
if euler_prev is not None:
euler = armature.rotation_euler.copy()
euler.make_compatible(euler_prev)
armature.rotation_euler = euler
euler_prev = euler
del euler
else:
euler_prev = armature.rotation_euler.copy()
armature.keyframe_insert("rotation_euler", index=-1, frame=f, group=name)
armature.keyframe_insert("scale", index=-1, frame=f, group=name)
# restore current frame
scn.frame_set(current_frame)
print("\n")
def get_bone_keyframes_list(pb, act):
# return a list of all keyframe frame numbers of the given pose bone
key_list = []
# loc
for i in range(0,3):
fc = act.fcurves.find('pose.bones["'+pb.name+'"].location', index=i)
if fc:
for k in fc.keyframe_points:
if not k.co[0] in key_list:
key_list.append(k.co[0])
# rot
_range = 3
rot_path = 'rotation_euler'
if pb.rotation_mode == 'QUATERNION':
_range = 4
rot_path = 'rotation_quaternion'
for i in range(0,_range):# rot
fc = act.fcurves.find('pose.bones["'+pb.name+'"].'+rot_path, index=i)
if fc:
for k in fc.keyframe_points:
if not k.co[0] in key_list:
key_list.append(k.co[0])
# scale
for i in range(0,3):
fc = act.fcurves.find('pose.bones["'+pb.name+'"].scale', index=i)
if fc:
for k in fc.keyframe_points:
if not k.co[0] in key_list:
key_list.append(k.co[0])
return key_list
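# Usage sketch (not part of the addon): collecting the keyframe frames of a pose bone
# from the active action. Assumes an active armature object with an assigned action;
# 'c_hand_ik.l' is a hypothetical bone name.
def _example_get_bone_keyframes_list():
    arm = bpy.context.active_object
    act = arm.animation_data.action if arm.animation_data else None
    pb = arm.pose.bones.get('c_hand_ik.l')
    if pb and act:
        frames = get_bone_keyframes_list(pb, act)
        print(sorted(frames))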
def copy_shapekeys_tracks(obj1, obj2):
# copy the NLA shape keys tracks from one object to another
if obj1.data.shape_keys == None:
    return
if obj1.data.shape_keys.animation_data == None:
    return
if obj2.data.shape_keys == None:# the target object has no shape keys, cannot receive the tracks
    return
for anim_track in obj1.data.shape_keys.animation_data.nla_tracks:
# copy sk tracks
if obj2.data.shape_keys.animation_data == None:
obj2.data.shape_keys.animation_data_create()
track2 = obj2.data.shape_keys.animation_data.nla_tracks.get(anim_track.name)
if track2 == None:
track2 = obj2.data.shape_keys.animation_data.nla_tracks.new()
track2.name = anim_track.name
for strip in anim_track.strips:
strip2 = track2.strips.get(strip.name)
if strip2 == None:
strip2 = track2.strips.new(strip.name, int(strip.frame_start), strip.action)
for setting in ['action_frame_end', 'action_frame_start', 'blend_in', 'blend_out', 'blend_type', 'extrapolation', 'frame_end', 'frame_start', 'mute', 'repeat']:
setattr(strip2, setting, getattr(strip, setting))
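# Usage sketch (not part of the addon): duplicating the shape-key NLA tracks from one
# mesh to another. Assumes both objects exist and share the same shape keys setup;
# 'body' and 'body_copy' are hypothetical object names.
def _example_copy_shapekeys_tracks():
    src = bpy.data.objects.get('body')
    tar = bpy.data.objects.get('body_copy')
    if src and tar:
        copy_shapekeys_tracks(src, tar)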
@@ -0,0 +1,211 @@
import bpy
from .. import auto_rig_datas as ard
from .version_arm_collec import *
def remove_bone_from_layer(bone, layer_type):
if bpy.app.version >= (4,0,0):
arma = bpy.context.active_object
col = get_armature_collections(arma).get(layer_type)
if col:
if bpy.context.mode == 'EDIT_ARMATURE':
col.unassign(bone)
else:
col.unassign(arma.data.bones[bone.name])
else:
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:
layer_idx = ard.layer_col_map[layer_type]
bone.layers[layer_idx] = False
def set_bone_layer(bone, layer_type, show_new_layer=False, multi=False):
if bpy.app.version >= (4,0,0):
arma = bpy.context.active_object
col = get_armature_collections(arma).get(layer_type)
if col == None:# create the collection if necessary
col = arma.data.collections.new(layer_type)
col.is_visible = show_new_layer
col['arp_collec'] = True# custom tag
if bpy.context.mode == 'EDIT_ARMATURE':
col.assign(bone)
else:
col.assign(arma.data.bones[bone.name])
if multi:
return
for col in get_armature_collections(arma):
if col.name != layer_type:
if bpy.context.mode == 'EDIT_ARMATURE':
col.unassign(bone)
else:
col.unassign(arma.data.bones[bone.name])
else:
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:# standard layer/collec conversion
layer_idx = ard.layer_col_map[layer_type]
bone.layers[layer_idx] = True
if multi:
return
for i, lay in enumerate(bone.layers):
if i != layer_idx:
bone.layers[i] = False
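# Usage sketch (not part of the addon): moving a bone to the 'Secondary' layer/collection
# exclusively, then adding it to an extra collection with multi=True so existing
# assignments are kept. Assumes an active armature in Pose mode; the bone name and the
# 'mch_secondary' collection name are hypothetical.
def _example_set_bone_layer():
    arma = bpy.context.active_object
    bone = arma.data.bones.get('c_cheek_smile.l')
    if bone:
        set_bone_layer(bone, 'Secondary')
        set_bone_layer(bone, 'mch_secondary', show_new_layer=False, multi=True)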
def is_arp_collec(col):
if col.name in ard.layer_col_map or col.name in ard.layer_col_map_special or col.name.startswith('color_body.') or col.name.startswith('mch_'):
return True
def is_bone_in_layer(bone_name, layer_type):
if bpy.app.version >= (4,0,0):
if bpy.context.mode == 'EDIT_ARMATURE':# in Edit mode, access edit bones only. Prone to error otherwise (bone data not up to date)
in_collection = [ebone.name for ebone in bpy.context.active_object.data.edit_bones if layer_type in ebone.collections]
return bone_name in in_collection
else:
return layer_type in bpy.context.active_object.data.bones.get(bone_name).collections
else:
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:# standard ARP layer-collec conversion
layer_idx = ard.layer_col_map[layer_type]
if bpy.context.mode == 'EDIT_ARMATURE':# in Edit mode, access edit bones only. Prone to error otherwise (bone data not up to date)
return bpy.context.active_object.data.edit_bones.get(bone_name).layers[layer_idx]
else:
return bpy.context.active_object.data.bones.get(bone_name).layers[layer_idx]
def is_layer_enabled(layer_type):
if bpy.app.version >= (4,0,0):
if layer_type == 'mch_disabled':# only there for backward-compatibility, this collection is no longer used
    col = get_armature_collections(bpy.context.active_object).get(layer_type)
    if col == None:
        bpy.context.active_object.data.collections.new(layer_type)
col = get_armature_collections(bpy.context.active_object).get(layer_type)
if col:
return col.is_visible
else:# old layer system
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:
layer_idx = ard.layer_col_map[layer_type]
return bpy.context.active_object.data.layers[layer_idx]
def hide_layer(layer_type):
if bpy.app.version >= (4,0,0):
col = get_armature_collections(bpy.context.active_object).get(layer_type)
col.is_visible = False
else:
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:
layer_idx = ard.layer_col_map[layer_type]
bpy.context.active_object.data.layers[layer_idx] = False
def enable_layer_exclusive(layer_type, use_solo=False):
if bpy.app.version >= (4,1,0) and use_solo:
for col in get_armature_collections(bpy.context.active_object):
col.is_solo = col.name == layer_type
if col.name == layer_type:
col.is_visible = True
return
if bpy.app.version >= (4,0,0):
for col in get_armature_collections(bpy.context.active_object):
# ensure to disable pinned collections (Blender 4.1+)
if bpy.app.version >= (4,1,0):
col.is_solo = False
if col.name == layer_type:
col.is_visible = True
else:
col.is_visible = False
else:
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:
layer_idx = ard.layer_col_map[layer_type]
bpy.context.active_object.data.layers[layer_idx] = True
for i in range(0, 32):
if i != layer_idx:
bpy.context.active_object.data.layers[i] = False
def enable_layer(layer_type):
if bpy.app.version >= (4,0,0):
col = get_armature_collections(bpy.context.active_object).get(layer_type)
col.is_visible = True
else:
if layer_type in ard.layer_col_map_special:# layer idx special cases
layer_idx = ard.layer_col_map_special[layer_type]
else:
layer_idx = ard.layer_col_map[layer_type]
bpy.context.active_object.data.layers[layer_idx] = True
def restore_armature_layers(layers_select):
if bpy.app.version >= (4,0,0):
for col_name in layers_select:
col = get_armature_collections(bpy.context.active_object).get(col_name)
if col:# may have been renamed or deleted
if bpy.app.version >= (4,1,0):
viz, solo = layers_select[col_name]
col.is_visible = viz
col.is_solo = solo
else:
col.is_visible = layers_select[col_name]
for col in get_armature_collections(bpy.context.active_object):#disable newly created layers
if not col.name in layers_select:
col.is_visible = False
else:
# at least one layer must be enabled at all times, so enable one first
bpy.context.active_object.data.layers[layers_select[0]] = True
# restore the armature layers visibility
for i in range(0, 32):
bpy.context.active_object.data.layers[i] = layers_select[i]
def enable_all_armature_layers():
# enable all layers/collections
# and return the list of each layer visibility
if bpy.app.version >= (4,0,0):
layers_select = {}
for col in get_armature_collections(bpy.context.active_object):
layers_select[col.name] = col.is_visible
if bpy.app.version >= (4,1,0):
layers_select[col.name] = col.is_visible, col.is_solo
col.is_solo = False
col.is_visible = True
return layers_select
else:
layers_select = []
_layers = bpy.context.active_object.data.layers
for i in range(0, 32):
layers_select.append(_layers[i])
for i in range(0, 32):
bpy.context.active_object.data.layers[i] = True
return layers_select
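# Usage sketch (not part of the addon): the save/enable-all/restore round trip used
# throughout this module, so bones living in hidden layers/collections stay editable.
# Assumes an active armature object.
def _example_layers_roundtrip():
    saved = enable_all_armature_layers()
    # ... operate on bones that may live in hidden layers/collections ...
    restore_armature_layers(saved)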
@@ -0,0 +1,164 @@
import bpy
from .objects import *
class ARP_BonesData:
custom_bones_list = []
softlink_bones = []
armature_name = ''
#renamed_bones = {}
def init_values(self):
self.custom_bones_list = []
self.softlink_bones = []
#self.renamed_bones = {}
self.const_interp_bones = []
def collect(self, arm_name):
self.armature_name = arm_name
arm = get_object(self.armature_name)
self.init_values()
def add_stretch_bones(b):
# the main stretch arm/leg bones must be added as well in case
# of Humanoid export and Secondary Controllers set to Twist
for f in ["arm", "forearm", "thigh", "leg"]:
s = get_bone_side(b.name)
if b.name == "c_"+f+"_stretch"+s:
self.softlink_bones.append(f+"_stretch"+s)
# collect props in Edit/Object mode
for b in arm.data.bones:
found_bone = False
if len(b.keys()):
if "custom_bone" in b.keys() or "cc" in b.keys():
found_bone = True
if "softlink" in b.keys():
if not b.name in self.softlink_bones:
self.softlink_bones.append(b.name)
add_stretch_bones(b)
#if 'rename' in b.keys():
# if not b.name in self.renamed_bones:
# self.renamed_bones[b.name] = b['rename']
if 'const_interp' in b.keys():
if not b.name in self.const_interp_bones:
self.const_interp_bones.append(b.name)
if b.name.startswith("cc_"):
found_bone = True
if found_bone and not b.name in self.custom_bones_list:
self.custom_bones_list.append(b.name)
if "b" in locals():
del b
# also collect props in Pose Mode
set_active_object(self.armature_name)
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action='DESELECT')
set_active_object(self.armature_name)
bpy.ops.object.mode_set(mode='POSE')
for b in arm.pose.bones:
if len(b.keys()):
if "custom_bone" in b.keys() or "cc" in b.keys():
if not b.name in self.custom_bones_list:
self.custom_bones_list.append(b.name)
if "softlink" in b.keys():
if not b.name in self.softlink_bones:
self.softlink_bones.append(b.name)
add_stretch_bones(b)
#if 'rename' in b.keys():
# if not b.name in self.renamed_bones:
# self.renamed_bones[b.name] = b['rename']
if 'const_interp' in b.keys():
if not b.name in self.const_interp_bones:
self.const_interp_bones.append(b.name)
arp_bones_data = ARP_BonesData()
def is_custom_bone(bone_name):
return bone_name in arp_bones_data.custom_bones_list
def exclude_custom_bone(bone_name):
arp_bones_data.custom_bones_list.remove(bone_name)
def is_softlink_bone(bone_name):
return bone_name in arp_bones_data.softlink_bones
def is_const_interp_bone(bone_name):
return bone_name in arp_bones_data.const_interp_bones
def get_renamed_bone(bone_name):
    renamed = getattr(arp_bones_data, 'renamed_bones', {})# the 'renamed_bones' store is currently disabled above, default to an empty dict
    if bone_name in renamed:
        return renamed[bone_name]
    return ''
def get_bone_base_name(bone_name):
base_name = bone_name[:-2]# head.x > head
if "_dupli_" in bone_name:
base_name = bone_name[:-12]
return base_name
def retarget_bone_side(bone_name, target_side, dupli_only=False):#"head.x", "_dupli_001.x"
current_side = get_bone_side(bone_name)#'.x'
base_name = get_bone_base_name(bone_name)#'head'
new_name = ""
if dupli_only:# we only want to set the dupli target side and preserve the left/right/center end letters
current_side_letters = bone_name[-2:]#.l
dupli_side = target_side[:-2]#'_dupli_001' or ''
new_name = base_name+dupli_side+current_side_letters #'eyelid'+'_dupli_001'+'.l'
else:
new_name = base_name+target_side#'head'+'_dupli_001.x'
#if bone_name != new_name:
# print("retarget bone side", bone_name, new_name)
return new_name
def get_bone_side(bone_name):
side = ""
if not "_dupli_" in bone_name:
side = bone_name[-2:]
else:
side = bone_name[-12:]
return side
def get_opposite_side(side):
if side.endswith('.l'):
return side[:-2] + '.r'
elif side.endswith('.r'):
return side[:-2] + '.l'
else:
return ''
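# Quick sanity checks (not part of the addon) illustrating the side-naming helpers on
# hypothetical bone names, following the '.l/.r/.x' and 12-character '_dupli_XXX.y'
# conventions assumed by this module.
def _example_bone_sides():
    assert get_bone_side('head.x') == '.x'
    assert get_bone_side('eyelid_dupli_001.l') == '_dupli_001.l'
    assert get_bone_base_name('eyelid_dupli_001.l') == 'eyelid'
    assert retarget_bone_side('head.x', '_dupli_001.x') == 'head_dupli_001.x'
    assert retarget_bone_side('eyelid.l', '_dupli_001.x', dupli_only=True) == 'eyelid_dupli_001.l'
    assert get_opposite_side('_dupli_001.l') == '_dupli_001.r'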
def get_data_bone(bonename):
return bpy.context.active_object.data.bones.get(bonename)
def duplicate(type=None):
# runs the operator to duplicate the selected objects/bones
if type == "EDIT_BONE":
bpy.ops.armature.duplicate_move(ARMATURE_OT_duplicate={}, TRANSFORM_OT_translate={"value": (0.0, 0.0, 0.0), "constraint_axis": (False, False, False),"orient_type": 'LOCAL', "mirror": False, "use_proportional_edit": False, "snap": False, "remove_on_cancel": False, "release_confirm": False})
elif type == "OBJECT":
bpy.ops.object.duplicate(linked=False, mode='TRANSLATION')
@@ -0,0 +1,144 @@
import bpy
from math import *
from mathutils import *
from .maths_geo import *
from .collections import *
from .. import auto_rig_datas as ard
def init_bone_coordinates(edit_bone):
edit_bone.head = [0,0,0]
edit_bone.tail = [0,0,0.1]
def is_deforming(bone):
if get_edit_bone(bone):
return get_edit_bone(bone).use_deform
def get_selected_edit_bones():
return bpy.context.selected_editable_bones
def get_edit_bone(name):
return bpy.context.object.data.edit_bones.get(name)
def move_bone_to_bone(bone1, bone2):
# move editbone bone1 to bone2 based on the head location
vec_delta = bone2.head - bone1.head
roll = bone1.roll
bone1.head += vec_delta
bone1.tail += vec_delta
bone1.roll = roll
def move_bone(bone, value, axis):
get_edit_bone(bone).head[axis] += value / bpy.context.scene.unit_settings.scale_length
get_edit_bone(bone).tail[axis] += value / bpy.context.scene.unit_settings.scale_length
def copy_bone_rotation(bone1, bone2):
# copy editbone bone1 rotation to bone2
bone1_vec = bone1.tail-bone1.head
bone2_length = (bone2.tail-bone2.head).magnitude
bone2.tail = bone2.head + (bone1_vec.normalized() * bone2_length)
bone2.roll = bone1.roll
def copy_bone_transforms(bone1, bone2):
# copy editbone bone1 transforms to bone2
if bone1 == None or bone2 == None:
return
bone2.head = bone1.head.copy()
bone2.tail = bone1.tail.copy()
bone2.roll = bone1.roll
def copy_bone_transforms_mirror(bone1, bone2):
bone01 = get_edit_bone(bone1 + ".l")
bone02 = get_edit_bone(bone2 + ".l")
bone02.head = bone01.head
bone02.tail = bone01.tail
bone02.roll = bone01.roll
bone01 = get_edit_bone(bone1 + ".r")
bone02 = get_edit_bone(bone2 + ".r")
bone02.head = bone01.head
bone02.tail = bone01.tail
bone02.roll = bone01.roll
def rotate_edit_bone(edit_bone, angle_radian, axis):
old_head = edit_bone.head.copy()
# rotate
R = Matrix.Rotation(angle_radian, 4, axis.normalized())
edit_bone.transform(R, roll=True)
# back to initial head pos
offset_vec = -(edit_bone.head - old_head)
new_x_axis = edit_bone.x_axis.copy()
edit_bone.head += offset_vec
edit_bone.tail += offset_vec
# preserve roll
align_bone_x_axis(edit_bone, new_x_axis)
def create_edit_bone(bone_name, deform=False, tag=None):
_b = bpy.context.active_object.data.edit_bones.get(bone_name)
if _b == None:
_b = bpy.context.active_object.data.edit_bones.new(bone_name)
_b.use_deform = deform
_b.head = Vector((0.0,0.0,0.0))
_b.tail = Vector((0.0,0.0,0.1))
#init_bone_coordinates(_b)
if tag:# optional tag as custom prop
_b[tag] = 1
return _b
def select_edit_bone(name, mode=1):
o = bpy.context.active_object
ebone = get_edit_bone(name)
if mode == 1:
o.data.bones.active = o.pose.bones[name].bone
elif mode == 2:
o.data.edit_bones.active = o.data.edit_bones[name]
o.data.edit_bones.active.select = True
ebone.select_head = True
ebone.select_tail = True
ebone.select = True
def delete_edit_bone(editbone):
bpy.context.active_object.data.edit_bones.remove(editbone)
def mirror_bones_transforms(ebones_list):
roll_copy = {}
for ebone in ebones_list:
roll_copy[ebone.name] = ebone.roll
# mirror head-tails
for ebone in ebones_list:
ebone.head[0] *= -1
# use_connect handling
found_connected_child = False
if len(ebone.children):
for ch in ebone.children:
if ch.use_connect:
found_connected_child = True
break
if not found_connected_child:
ebone.tail[0] *= -1
# mirror roll
for ebone in ebones_list:
ebone.roll = -roll_copy[ebone.name]
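# Usage sketch (not part of the addon): mirroring the currently selected edit bones
# along the X axis with the helper above. Assumes an active armature in Edit mode.
def _example_mirror_selected():
    sel = get_selected_edit_bones()
    if sel:
        mirror_bones_transforms(sel)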
@@ -0,0 +1,228 @@
import bpy
from .objects import *
from .version import blender_version
from .types_convert import *
from .armature import *
def get_selected_pose_bones():
return bpy.context.selected_pose_bones
def get_pose_bone(name):
return bpy.context.active_object.pose.bones.get(name)
def get_custom_shape_scale_prop_name():
if bpy.app.version >= (3,0,0):
return 'custom_shape_scale_xyz'
else:
return 'custom_shape_scale'
def set_custom_shape_scale(pbone, scale):
if bpy.app.version >= (3,0,0):
# uniform scale
if type(scale) == int or type(scale) == float:
for i in range(0,3):
pbone.custom_shape_scale_xyz[i] = scale
# array scale
else:
pbone.custom_shape_scale_xyz = scale
# pre-Blender 3.0
else:
pbone.custom_shape_scale = scale
def scale_custom_shape(custom_shape, scale, origin='cog'):
cs_base_name = custom_shape.name
cs_scaled_name = cs_base_name+'_scaled_'+str(scale)
cs_base_scaled = get_object(cs_scaled_name)
if cs_base_scaled:
return cs_base_scaled
# make
cs_base_scaled = duplicate_object(new_name=cs_scaled_name, method='data', obj=custom_shape)
cs_base_scaled.data.name = cs_scaled_name
cog = Vector((0.0,0.0,0.0))
if origin == 'cog':
for v in cs_base_scaled.data.vertices:
cog += v.co
cog = cog/len(cs_base_scaled.data.vertices)
elif origin == 'zero':
cog = Vector((0.0,0.0,0.0))
for v in cs_base_scaled.data.vertices:
scale_vec = cog - v.co
v.co = v.co + (scale_vec * (1-scale))
return cs_base_scaled
def get_custom_shape_scale(pbone, uniform=True, as_list=False):
if bpy.app.version >= (3,0,0):
if uniform:
# uniform scale
val = 0
for i in range(0,3):
val += pbone.custom_shape_scale_xyz[i]
return val/3
# array scale
else:
if as_list:
return vector_to_list(pbone.custom_shape_scale_xyz)
else:
return pbone.custom_shape_scale_xyz
# pre-Blender 3.0
else:
return pbone.custom_shape_scale
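# Usage sketch (not part of the addon): doubling a controller's custom shape display
# scale in a version-safe way, using the getter/setter pair above. Assumes an active
# armature in Pose mode; 'c_head.x' is a hypothetical bone name.
def _example_scale_shape():
    pb = get_pose_bone('c_head.x')
    if pb:
        cur = get_custom_shape_scale(pb, uniform=True)
        set_custom_shape_scale(pb, cur * 2)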
def set_bone_custom_shape(pbone, cs_name):
cs = get_object(cs_name)
if cs == None:
# load custom shape
append_from_arp(nodes=[cs_name], type='object')
cs = get_object(cs_name)
elif len(cs.users_collection) == 0:
# custom shape is found, but not in any collection. Fix it
cs_grp = None
for __o in bpy.context.scene.objects:
if __o.name.startswith('cs_grp') and __o.type == 'EMPTY':
cs_grp = __o
break
if cs_grp:
for col in cs_grp.users_collection:
col.objects.link(cs)
# assign custom shape
pbone.custom_shape = cs
def set_bone_custom_shape_rot(pbone, rot_angle, axis):
if bpy.app.version >= (3,0,0):
axis_int = 0
if axis == 'Y':
axis_int = 1
elif axis == 'Z':
axis_int = 2
pbone.custom_shape_rotation_euler[axis_int] = rot_angle
else:# no custom shape rot setting in older versions
return
def set_bone_color_group(obj, bone_data, grp_name, custom_color=None, custom_highlight=None, assign_only_if_empty=False, body_side=None):
grp_color = (0.5,0.5,0.5)# default color
color_collec = None
if grp_name:
if grp_name == 'body_mid':
grp_color = bpy.context.scene.color_set_middle
color_collec = 'color_body.x'
elif grp_name == 'body_left':
grp_color = bpy.context.scene.color_set_left
color_collec = 'color_body.l'
elif grp_name == 'body_right':
grp_color = bpy.context.scene.color_set_right
color_collec = 'color_body.r'
elif grp_name == 'yellow':
grp_color = (1.0, 1.0, 0.0)
elif grp_name == 'red':
grp_color = (1.0, 0.0, 0.0)
elif grp_name == 'kilt':
grp_color = (0.2, 1.0, 0.3)
if body_side:
if body_side.endswith('.l'):
grp_color = bpy.context.scene.color_set_left
grp_name = 'body.l'
color_collec = 'color_body.l'
elif body_side.endswith('.r'):
grp_color = bpy.context.scene.color_set_right
grp_name = 'body.r'
color_collec = 'color_body.r'
elif body_side.endswith('.x'):
grp_color = bpy.context.scene.color_set_middle
grp_name = 'body.x'
color_collec = 'color_body.x'
if custom_color:
grp_color = custom_color
if custom_highlight == None:
custom_highlight = [0.2, 0.4]
if bpy.app.version >= (4,0,0):
if color_collec:
set_bone_layer(bone_data, color_collec, multi=True)
if assign_only_if_empty:# do not change color group if a group is already assigned
if bone_data.color.palette != 'DEFAULT':
return
bone_data.color.palette = 'CUSTOM'
# set normal color
bone_data.color.custom.normal = grp_color
# set select, active colors
for col_idx in range(0,3):
bone_data.color.custom.select[col_idx] = grp_color[col_idx] + custom_highlight[0]
bone_data.color.custom.active[col_idx] = grp_color[col_idx] + custom_highlight[1]
else:
bone_pose = get_pose_bone(bone_data.name)
if assign_only_if_empty:# do not change color group if a group is already assigned
if bone_pose.bone_group != None:
return
grp = obj.pose.bone_groups.get(grp_name)
if grp == None:
grp = obj.pose.bone_groups.new(name=grp_name)
grp.color_set = 'CUSTOM'
# set normal color
grp.colors.normal = grp_color
# set select color/active color
for col_idx in range(0,3):
grp.colors.select[col_idx] = grp_color[col_idx] + custom_highlight[0]
grp.colors.active[col_idx] = grp_color[col_idx] + custom_highlight[1]
bone_pose.bone_group = grp
def get_bone_colors(bone_data, list=False):
if bone_data.color.palette == 'CUSTOM':
if list == False:
return bone_data.color.custom.normal, bone_data.color.custom.select, bone_data.color.custom.active
else:
return [i for i in bone_data.color.custom.normal], [i for i in bone_data.color.custom.select], [i for i in bone_data.color.custom.active]
else:
return bone_data.color.palette
def set_bone_color(bone_data, bcolors):
# Blender 4 and higher only
if type(bcolors) == str:# set the color palette string
bone_data.color.palette = bcolors
else:# set the color lists
col_normal, col_select, col_active = bcolors
bone_data.color.palette = 'CUSTOM'
bone_data.color.custom.normal = col_normal
bone_data.color.custom.select = col_select
bone_data.color.custom.active = col_active
def reset_pbone_transforms(pbone):
pbone.location = [0,0,0]
pbone.rotation_euler = [0,0,0]
pbone.rotation_quaternion = [1,0,0,0]
pbone.scale = [1,1,1]
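# Usage sketch (not part of the addon): resetting the transforms of all selected pose
# bones with the helper above. Assumes an active armature in Pose mode.
def _example_reset_selected():
    for pb in get_selected_pose_bones():
        reset_pbone_transforms(pb)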
@@ -0,0 +1,136 @@
import bpy
from .version_arm_collec import *
def get_arm_col_idx(armature, name):
for i, coll in enumerate(get_armature_collections(armature)):
if coll.name == name:
return i
def sort_armature_collections(armature, only_collection=None, custom_collection=None, to_index=None):
order = {'Main':0, 'Secondary':1, 'Deform':2, 'Reference':3}
# sort a specific custom collection with custom index
if custom_collection and to_index != None:
col = get_armature_collections(armature).get(custom_collection)
cur_idx = get_arm_col_idx(armature, custom_collection)
armature.data.collections.move(cur_idx, to_index)
return
# sort collections as defined in the "order" dict
for col_name in order:
if only_collection:
if only_collection != col_name:
continue
col = get_armature_collections(armature).get(col_name)
cur_idx = get_arm_col_idx(armature, col_name)
to_idx = order[col_name]
armature.data.collections.move(cur_idx, to_idx)
def get_parent_collections(target):
# return the list of all parent collections of the given target collection,
# found recursively. A string-based sub-function is used to ease the process
def get_parent_collections_string(target_name):
parent_collections = ""
found = None
for collec in bpy.data.collections:
for child in collec.children:
if child.name == target_name:
#print("found", collec.name)
parent_collections += collec.name + ","
parent_collections += get_parent_collections_string(collec.name)
return parent_collections
string_result = get_parent_collections_string(target.name)
to_list = [bpy.data.collections[i] for i in string_result[:-1].split(",") if i != ""]
return to_list
meshes_data = []
for child in children:
# store the mesh data for removal afterward
if child.data:
if not child.data.name in meshes_data:
meshes_data.append(child.data.name)
bpy.data.objects.remove(child, do_unlink=True, do_id_user=True, do_ui_user=True)
for data_name in meshes_data:
current_mesh = bpy.data.meshes.get(data_name)
if current_mesh:
bpy.data.meshes.remove(current_mesh, do_unlink=True, do_id_user=True, do_ui_user=True)
bpy.data.objects.remove(passed_node, do_unlink = True)
def get_all_collections_viewlayer():
def mt_traverse_tree(t):
yield t
for child in t.children:
yield from mt_traverse_tree(child)
colls = []
coll = bpy.context.view_layer.layer_collection
for c in mt_traverse_tree(coll):
colls.append(c)
return colls
def get_rig_collection(rig):
if rig == None:
return None
for col in rig.users_collection:
#if col.name.endswith('_rig'):
return col
return None
def get_master_collection(rig_col):
if rig_col == None:
return None
for col in bpy.data.collections:
if len(col.children):
for child_col in col.children:
if child_col == rig_col:
return col
return None
def get_cs_collection(col_master):
if col_master:
for child_col in col_master.children:
if len(child_col.objects):
for o in child_col.objects:
if o.name.startswith('cs_grp'):
return child_col
# the collection hasn't been found, the collection hierarchy isn't standard
# look for any collection called cs_grp
for collec in bpy.data.collections:
if collec.name.startswith("cs_grp"):
return collec
return None
def search_layer_collection(layerColl, collName):
# Recursively traverse layer_collection, looking for a particular name
found = None
if (layerColl.name == collName):
return layerColl
for layer in layerColl.children:
found = search_layer_collection(layer, collName)
if found:
return found
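# Usage sketch (not part of the addon): making a collection the active one by searching
# the view layer hierarchy; 'cs_grp' is the custom shapes collection name used above.
def _example_activate_collection():
    root = bpy.context.view_layer.layer_collection
    found = search_layer_collection(root, 'cs_grp')
    if found:
        bpy.context.view_layer.active_layer_collection = found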
@@ -0,0 +1,101 @@
import bpy
from .bone_pose import *
from mathutils import *
from math import *
def enable_constraint(cns, value):
if bpy.app.version >= (3,0,0):
cns.enabled = value
else:
cns.mute = not value
def set_constraint_inverse_matrix(cns):
# set the inverse matrix of Child Of constraint
subtarget_pbone = get_pose_bone(cns.subtarget)
if subtarget_pbone:
cns.inverse_matrix = subtarget_pbone.bone.matrix_local.to_4x4().inverted()
def add_copy_transf(p_bone, tar=None, subtar='', h_t=0.0, no_scale=False):
if tar == None:
tar = bpy.context.active_object
if no_scale:
cns1 = p_bone.constraints.new("COPY_LOCATION")
cns1.name = "Copy Location"
cns1.target = tar
cns1.subtarget = subtar
cns1.head_tail = h_t
cns2 = p_bone.constraints.new("COPY_ROTATION")
cns2.name = "Copy Rotation"
cns2.target = tar
cns2.subtarget = subtar
return cns1, cns2
else:
cns1 = p_bone.constraints.new("COPY_TRANSFORMS")
cns1.name = "Copy Transforms"
cns1.target = tar
cns1.subtarget = subtar
cns1.head_tail=h_t
return cns1, None
def get_constraint_index(pb, cns):
for i, c in enumerate(pb.constraints):
if c == cns:
return i
def move_constraint(pbone, cns, dir, repeat):
# must be in pose mode
armature = bpy.context.active_object
# the bone layer must be visible
enabled_layers = []
if bpy.app.version >= (4,0,0):
for collec in armature.data.collections:
if is_bone_in_layer(pbone.name, collec.name):
if collec.is_visible == False:
collec.is_visible = True
enabled_layers.append(collec.name)
else:
for i, lay in enumerate(pbone.bone.layers):
if lay and armature.data.layers[i] == False:
armature.data.layers[i] = True
enabled_layers.append(i)
# move
if bpy.app.version >= (2, 81, 16):
cns_idx = get_constraint_index(pbone, cns)
#print('cns_idx', cns_idx)
#print('repeat', repeat)
to_idx = cns_idx+repeat if dir == 'DOWN' else cns_idx-repeat
if to_idx > len(pbone.constraints)-1:
to_idx = len(pbone.constraints)-1
if to_idx < 0:
to_idx = 0
pbone.constraints.move(cns_idx, to_idx)
else:# backward-compatibility
bpy.context.active_object.data.bones.active = pbone.bone
my_context = bpy.context.copy()
my_context["constraint"] = cns
for i in range(0, repeat):
if dir == 'UP':
bpy.ops.constraint.move_up(my_context, constraint=cns.name, owner='BONE')
elif dir == 'DOWN':
bpy.ops.constraint.move_down(my_context, constraint=cns.name, owner='BONE')
# restore layers
for idx in enabled_layers:
if bpy.app.version >= (4,0,0):
armature.data.collections.get(idx).is_visible = False
else:
armature.data.layers[idx] = False
@@ -0,0 +1,34 @@
import bpy
def get_current_mode():
return bpy.context.mode
def restore_current_mode(current_mode):
if current_mode == 'EDIT_ARMATURE':
current_mode = 'EDIT'
if current_mode == "EDIT_MESH":
current_mode = "EDIT"
bpy.ops.object.mode_set(mode=current_mode)
def simplify_scene(self):
self.simplify_value = bpy.context.scene.render.use_simplify
self.simplify_subd = bpy.context.scene.render.simplify_subdivision
bpy.context.scene.render.use_simplify = True
bpy.context.scene.render.simplify_subdivision = 0
def restore_simplify(self):
bpy.context.scene.render.use_simplify = self.simplify_value
bpy.context.scene.render.simplify_subdivision = self.simplify_subd
def disable_autokeyf():
cur_state = bpy.context.scene.tool_settings.use_keyframe_insert_auto
bpy.context.scene.tool_settings.use_keyframe_insert_auto = False
return cur_state
def restore_autokeyf(cur_state):
bpy.context.scene.tool_settings.use_keyframe_insert_auto = cur_state
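# Usage sketch (not part of the addon): wrapping a scripted pose edit so it never
# records unwanted keyframes, using the disable/restore helpers above.
def _example_autokeyf_guard():
    state = disable_autokeyf()
    try:
        pass# ... edit pose bones here ...
    finally:
        restore_autokeyf(state)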
@@ -0,0 +1,196 @@
import bpy
from .objects import *
from .bone_pose import *
from .context import *
def add_driver_to_prop(obj, dr_dp, tar_dp, array_idx=-1, exp="var", multi_var=False):
if obj.animation_data == None:
obj.animation_data_create()
drivers_list = obj.animation_data.drivers
dr = drivers_list.find(dr_dp, index=array_idx)
if dr == None:
dr = obj.driver_add(dr_dp, array_idx)
if multi_var == False:
var = dr.driver.variables.get('var')
if var == None:
var = dr.driver.variables.new()
var.name = 'var'
var.type = 'SINGLE_PROP'
var.targets[0].id = obj
var.targets[0].data_path = tar_dp
else:# create multiple variables, tar_dp is a dict in that case
for var_name in tar_dp:
var = dr.driver.variables.get(var_name)
if var == None:
var = dr.driver.variables.new()
var.name = var_name
var.type = 'SINGLE_PROP'
var.targets[0].id = obj
var.targets[0].data_path = tar_dp[var_name]
dr.driver.expression = exp
def get_pbone_name_from_data_path(dp):
# return the pbone name from the driver data path
if not '"' in dp:
return None
return dp.split('"')[1]
def replace_driver_target_object(dr, current_obj_name, new_obj_name):
# replace the given driver target object as set in the variables, with a new one
for var in dr.driver.variables:
for tar in var.targets:
if tar.id == get_object(current_obj_name):
tar.id = get_object(new_obj_name)
def copy_driver_variables(variables, source_driver, suffix):
for v1 in variables:
# create a variable
clone_var = source_driver.driver.variables.new()
clone_var.name = v1.name
clone_var.type = v1.type
# copy variable path
try:
clone_var.targets[0].data_path = v1.targets[0].data_path
# increment bone data path name
if '.r"]' in v1.targets[0].data_path:
new_d_path = v1.targets[0].data_path
new_d_path = new_d_path.replace('.r"]', suffix + '"]')
if '.l"]' in v1.targets[0].data_path:
new_d_path = v1.targets[0].data_path
new_d_path = new_d_path.replace('.l"]', suffix + '"]')
clone_var.targets[0].data_path = new_d_path
except:
print("no data_path for: " + v1.name)
try:
clone_var.targets[0].bone_target = v1.targets[0].bone_target
if ".r" in v1.targets[0].bone_target:
clone_var.targets[0].bone_target = v1.targets[0].bone_target.replace(".r", suffix)
if ".l" in v1.targets[0].bone_target:
clone_var.targets[0].bone_target = v1.targets[0].bone_target.replace(".l", suffix)
except:
print("no bone_target for: " + v1.name)
try:
clone_var.targets[0].transform_type = v1.targets[0].transform_type
except:
print("no transform_type for: " + v1.name)
try:
clone_var.targets[0].transform_space = v1.targets[0].transform_space
except:
print("no transform_space for: " + v1.name)
try:
clone_var.targets[0].id_type = v1.targets[0].id_type
except:
print("no id_type for: " + v1.name)
try:
clone_var.targets[0].id = v1.targets[0].id
except:
print("no id for: " + v1.name)
def remove_duplicated_drivers():
arm = bpy.context.active_object
to_delete = []
for i, dr in enumerate(arm.animation_data.drivers):
found = False
# find duplicates only if the current one is not already found
for d in to_delete:
if d[0] == dr.data_path and d[1] == dr.array_index:
found = True
break
if not found:
dp = dr.data_path
array_idx = dr.array_index
for j, dr1 in enumerate(arm.animation_data.drivers):
if i != j:
if dp == dr1.data_path and array_idx == dr1.array_index:
to_delete.append([dp, array_idx])
print("Found", len(to_delete), "duplicated drivers, delete them...")
for dri in to_delete:
try:
arm.driver_remove(dri[0], dri[1])
except:
arm.driver_remove(dri[0], -1)
def remove_invalid_drivers():
obj = bpy.context.active_object
if obj.animation_data == None:
return
current_mode = bpy.context.mode
bpy.ops.object.mode_set(mode='POSE')
invalid_drivers_total = 0
def is_driver_valid(dr, bone_name):
if not dr.is_valid:
return False
if not obj.data.bones.get(bone_name):
return False
if "constraints" in dr.data_path:
cns_name = dr.data_path.split('"')[3]
target_bone = get_pose_bone(bone_name)
found_cns = False
if len(target_bone.constraints) > 0:
for cns in target_bone.constraints:
if cns.name == cns_name:
found_cns = True
if "cns" in locals():
del cns
if not found_cns:
return False
return True
for dr in obj.animation_data.drivers:
if dr.data_path.startswith('pose.bones'):
b = dr.data_path.split('"')[1]
if not is_driver_valid(dr, b):
# the driver is invalid
# assign a dummy but valid data path since we can't remove drivers
# with invalid data path
# print("Invalid driver found:", dr.data_path)
invalid_drivers_total += 1
dr.array_index = 0
dr.data_path = 'delta_scale'
if 'dr' in locals():
del dr
#print("Found", invalid_drivers_total, "invalid drivers")
count = 0
for dr in obj.animation_data.drivers:
if dr.data_path == "delta_scale":
obj.animation_data.drivers.remove(dr)
count += 1
#print(count, "invalid drivers deleted")
# restore saved mode
restore_current_mode(current_mode)
@@ -0,0 +1,21 @@
import bpy
from .version import *
def is_action_baked(action):
# check if the action is a baked one, for either humanoid or universal skeleton
scn = bpy.context.scene
if scn.arp_export_rig_type == 'HUMANOID' or scn.arp_export_rig_type == 'UNIVERSAL':
if scn.arp_bake_anim and check_id_root(action):
if len(action.keys()):
if "arp_baked_action" in action.keys():
return True
return False
def is_action_exportable(action):
# check if the action is marked as exportable
if len(action.keys()):
if 'arp_export' in action.keys():
return action['arp_export']
return True
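# Usage sketch (not part of the addon): listing the actions that would be exported,
# skipping the ones explicitly marked with 'arp_export' set to False.
def _example_exportable_actions():
    return [act.name for act in bpy.data.actions if is_action_exportable(act)]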
@@ -0,0 +1,424 @@
import bpy
from math import *
from mathutils import *
import numpy as np
def compare_transform(transf1, transf2):
for i, j in enumerate(transf1):
if j == transf2[i]:
continue
else:
return False
return True
def resample_curve(coords, length=1.0, amount=5, symmetrical=True, generate_normals=False):
# resample a given set of points belonging to a curve
# only works by reduction
resampled_coords = []
dist_sum = 0.0
dist = length/amount
for i, coord in enumerate(coords):
# special case, since we need symmetrical positioning,
# the first coord must be positioned half-distance
if len(resampled_coords) == 0 and symmetrical:
if coord == coords[0]:
continue
p_prev = coords[i-1]
cur_dist = (coord-p_prev).magnitude
dist_sum += cur_dist
if dist_sum >= dist/2:
dist_sum = 0.0
resampled_coords.append(coord.copy())
else:
p_prev = coords[i-1]
cur_dist = (coord-p_prev).magnitude
dist_sum += cur_dist
if dist_sum < dist:
continue
else:
dist_sum = 0.0
resampled_coords.append(coord.copy())
# In case of precision error, the last coord did not fit in.
# Make sure to include it
#print('resampled_coords', len(resampled_coords), 'amount', amount)
if len(resampled_coords) == amount - 1:
#print('Curve resampling error, add last coord as tip coord')
tip_coord = coords[len(coords)-2]
resampled_coords.append(tip_coord)
# Normals generation
# only works for closed curves for now
if generate_normals:
normals = []
# get curve center to correct invalid normals
curve_center = Vector((0,0,0))
for loc in resampled_coords:
curve_center += loc
curve_center = curve_center / len(resampled_coords)
# evaluate normals from neighbours points
for i in range(0, len(resampled_coords)):
p_curr = resampled_coords[i]
if i == 0:#start
p_prev = resampled_coords[len(resampled_coords)-1]
p_next = resampled_coords[i+1]
elif i == len(resampled_coords)-1:#tip
p_prev = resampled_coords[i-1]
p_next = resampled_coords[0]
else:
p_prev = resampled_coords[i-1]
p_next = resampled_coords[i+1]
p1 = p_curr + (p_prev-p_curr).normalized()
p2 = p_curr + (p_next-p_curr).normalized()
mid = (p1 + p2) * 0.5
norm = p_curr - mid
# invalid normal due to straight points
# apply artificial offset from curve center
if norm.magnitude <= 0.00001:
offset = (p_prev-p_curr).magnitude
norm = (p_curr + (p_curr - curve_center).normalized() * offset) - mid
# check if inverted normal by evaluating point_on_normal-curve_center distance
p_norm = p_curr + (norm)
if (curve_center-p_norm).magnitude < (curve_center-p_curr).magnitude:
norm *= -1
normals.append(norm.normalized())
return resampled_coords, normals
else:
return resampled_coords
def get_curve_length(coords):
length = 0.0
p_last = None
for coord in coords:
if p_last == None:
p_last = coord.copy()
else:
length += (coord-p_last).magnitude
p_last = coord.copy()
print("Nurbs length:", length)
return length
def nurbs_basis(i, degree, u, knots):
if degree == 0:
return 1.0 if knots[i] <= u < knots[i + 1] else 0.0
if knots[i + degree] == knots[i]:
left = 0.0
else:
left = (u - knots[i]) / (knots[i + degree] - knots[i]) * nurbs_basis(i, degree - 1, u, knots)
if knots[i + degree + 1] == knots[i + 1]:
right = 0.0
else:
right = (knots[i + degree + 1] - u) / (knots[i + degree + 1] - knots[i + 1]) * nurbs_basis(i + 1, degree - 1, u, knots)
return left + right
def generate_nurbs_curve(points, degree=3, num_points=100):
if len(points) < degree + 1:
raise ValueError("Number of points should be at least degree + 1.")
# Convert control points to numpy array
control_points = np.array(points)
# Calculate the number of knots needed for a closed curve
num_knots = len(control_points) + degree + 1
# Create a list of equally spaced parameter values for the control points
parameter_values = np.linspace(0, 1, len(control_points))
# Compute the knot vector (closed curve)
knots = np.zeros(num_knots)
knots[degree:-degree] = np.linspace(0, 1, num_knots - 2*degree)
knots[-degree:] = 1
# Evaluate the NURBS curve at 'num_points' points
u_new = np.linspace(0, 1, num_points)
x = np.zeros(num_points)
y = np.zeros(num_points)
z = np.zeros(num_points)
for i in range(len(u_new)):
if i == len(u_new)-1:# the last one must be set manually, sigh
x[i] += control_points[len(points)-1, 0]
y[i] += control_points[len(points)-1, 1]
z[i] += control_points[len(points)-1, 2]
break
for j in range(len(control_points)):
basis = nurbs_basis(j, degree, u_new[i], knots)
x[i] += control_points[j, 0] * basis
y[i] += control_points[j, 1] * basis
z[i] += control_points[j, 2] * basis
coords = []
for _x, _y, _z in zip(x, y, z):
coord = Vector((_x, _y, _z))
coords.append(coord)
return coords#x, y, z
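# Usage sketch (not part of the addon): evaluating a NURBS curve through four hypothetical
# control points, then resampling it into a handful of evenly spaced coords with the
# helpers above.
def _example_nurbs_resample():
    ctrl = [Vector((0,0,0)), Vector((1,0,1)), Vector((2,0,1)), Vector((3,0,0))]
    coords = generate_nurbs_curve(ctrl, degree=3, num_points=100)
    length = get_curve_length(coords)
    return resample_curve(coords, length=length, amount=5, symmetrical=True)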
def signed_angle(vector_u, vector_v, normal):
normal = normal.normalized()
a = vector_u.angle(vector_v)
if vector_u.cross(vector_v).angle(normal) < 1:
a = -a
return a
def mat3_to_vec_roll(mat, ret_vec=False):
vec = mat.col[1]
vecmat = vec_roll_to_mat3(mat.col[1], 0)
vecmatinv = vecmat.inverted()
rollmat = vecmatinv @ mat
roll = atan2(rollmat[0][2], rollmat[2][2])
if ret_vec:
return vec, roll
else:
return roll
def vec_roll_to_mat3(vec, roll):
epsi = 1e-10
target = Vector((0, 0.1, 0))
nor = vec.normalized()
axis = target.cross(nor)
if axis.dot(axis) > epsi:
axis.normalize()
theta = target.angle(nor)
bMatrix = Matrix.Rotation(theta, 3, axis)
else:
updown = 1 if target.dot(nor) > 0 else -1
bMatrix = Matrix.Scale(updown, 3)
bMatrix[2][2] = 1.0
rMatrix = Matrix.Rotation(roll, 3, nor)
mat = rMatrix @ bMatrix
return mat
def align_bone_x_axis(edit_bone, new_x_axis):
new_x_axis = new_x_axis.cross(edit_bone.y_axis)
new_x_axis.normalize()
dot = max(-1.0, min(1.0, edit_bone.z_axis.dot(new_x_axis)))
angle = acos(dot)
edit_bone.roll += angle
dot1 = edit_bone.z_axis.dot(new_x_axis)
edit_bone.roll -= angle * 2.0
dot2 = edit_bone.z_axis.dot(new_x_axis)
if dot1 > dot2:
edit_bone.roll += angle * 2.0
def align_bone_z_axis(edit_bone, new_z_axis):
new_z_axis = -(new_z_axis.cross(edit_bone.y_axis))
new_z_axis.normalize()
dot = max(-1.0, min(1.0, edit_bone.x_axis.dot(new_z_axis)))
angle = acos(dot)
edit_bone.roll += angle
dot1 = edit_bone.x_axis.dot(new_z_axis)
edit_bone.roll -= angle * 2.0
dot2 = edit_bone.x_axis.dot(new_z_axis)
if dot1 > dot2:
edit_bone.roll += angle * 2.0
def project_point_onto_plane(q, p, n):
# q = point
# p = point belonging to the plane
# n = plane normal
n = n.normalized()
return q - ((q - p).dot(n)) * n
def project_vec_onto_plane(x, n):
# x: Vector
# n: plane normal vector
d = x.dot(n) / n.magnitude
p = [d * n.normalized()[i] for i in range(len(n))]
return Vector([x[i] - p[i] for i in range(len(x))])
def get_pole_angle(base_bone, ik_bone, pole_location):
pole_normal = (ik_bone.tail - base_bone.head).cross(pole_location - base_bone.head)
projected_pole_axis = pole_normal.cross(base_bone.tail - base_bone.head)
return signed_angle(base_bone.x_axis, projected_pole_axis, base_bone.tail - base_bone.head)
def smooth_interpolate(value, linear=0.0, repeat=1):
# value: float belonging to [0, 1]
# return the smooth interpolated value using a cosine function
base_value = value
value_smooth = value
for i in range(0, repeat):
value_smooth = (cos((value_smooth*pi + pi )) + 1) /2
result = (value_smooth*(1-linear)) + (base_value*linear)
if linear >= 0.0:
return result
else:# when linear is negative, smooth even more: blend between the base smoothed value and an extra-smoothed one
smooth_x6 = smooth_interpolate(base_value, linear=0.0, repeat=4)
fac = abs(linear)
return (result*(1-fac)) + (smooth_x6*fac)
def round_interpolate(value, linear=0.0, repeat=1):
# value: float belonging to [0, 1]
# return the smooth-rounded interpolated value using a cosine function
value = abs(value)
base_value = value
result = None
for i in range(0, repeat):
smooth_value1 = (cos((value/2*pi + pi)) + 1)
smooth_value2 = (cos((smooth_value1/2*pi + pi)) + 1)
value = (smooth_value1+smooth_value2)*0.5
result = (value*(1-linear)) + (base_value*linear)
if linear >= 0.0:
return result
else:# when linear is negative, smooth even more: blend between the base smoothed value and an extra-smoothed one
smooth_x6 = round_interpolate(base_value, linear=0.0, repeat=4)
fac = abs(linear)
return (result*(1-fac)) + (smooth_x6*fac)
def get_point_projection_onto_line_factor(a, b, p):
# return the factor of the projected point 'p' onto the line 'a,b'
# if below a, factor[0] < 0
# if above b, factor[1] < 0
return ((p - a).dot(b - a), (p - b).dot(b - a))
def project_point_onto_line(a, b, p):
# project the point p onto the line a,b
ap = p - a
ab = b - a
result_pos = a + ap.dot(ab) / ab.dot(ab) * ab
return result_pos
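# Quick sanity check (not part of the addon): projecting a point onto a line.
def _example_project_point():
    a, b = Vector((0,0,0)), Vector((2,0,0))
    p = Vector((1,1,0))
    assert project_point_onto_line(a, b, p) == Vector((1,0,0))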
def project_vector_onto_vector(a, b):
abdot = (a[0] * b[0]) + (a[1] * b[1]) + (a[2] * b[2])
blensq = (b[0] ** 2) + (b[1] ** 2) + (b[2] ** 2)
temp = abdot / blensq
c = Vector((b[0] * temp, b[1] * temp, b[2] * temp))
return c
def cross(a, b):
c = Vector((a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0]))
return c
def get_line_plane_intersection(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-6):
ndotu = planeNormal.dot(rayDirection)
if abs(ndotu) < epsilon:
raise RuntimeError("no intersection or line is within plane")
w = rayPoint - planePoint
si = -planeNormal.dot(w) / ndotu
Psi = w + si * rayDirection + planePoint
return Psi
def translate_object(obj, dist, dir):
# move an object for a given distance "dist" (float)
# along a given direction "dir" (vector 3)
obj_rot_euler = obj.rotation_euler.copy()
obj_rot_quat = obj.rotation_quaternion.copy()
obj_scale = obj.scale.copy()
loc, rot, scale = obj.matrix_world.decompose()
tar_loc = loc + (dir*dist)
tar_mat = Matrix.Translation(tar_loc).to_4x4()
obj.matrix_world = tar_mat
# restore rot and scale
obj.rotation_euler = obj_rot_euler
obj.rotation_quaternion = obj_rot_quat
obj.scale = obj_scale
def rotate_object(obj, angle, axis, origin):
# rotate an object around a given axis "axis" (vector 3)
# for the angle value "angle" (radians)
# around the origin point "origin" (vector 3)
rot_mat = Matrix.Rotation(angle, 4, axis.normalized())
loc, rot, scale = obj.matrix_world.decompose()
loc = loc - origin
obj_mat = Matrix.Translation(loc) @ rot.to_matrix().to_4x4()
obj_mat_rotated = rot_mat @ obj_mat
loc, rot, scale = obj_mat_rotated.decompose()
loc = loc + origin
obj.location = loc.copy()
obj.rotation_euler = rot.to_euler()
# fix numerical imprecisions
for i in range(0,3):
rot = obj.rotation_euler[i]
obj.rotation_euler[i] = round(rot, 4)
def rotate_point(point_loc, angle, axis, origin):
# rotate the point_loc (vector 3) around the "axis" (vector 3)
# for the angle value (radians)
# around the origin (vector 3)
rot_mat = Matrix.Rotation(angle, 4, axis.normalized())
loc = point_loc.copy()
loc = loc - origin
point_mat = Matrix.Translation(loc).to_4x4()
point_mat_rotated = rot_mat @ point_mat
loc, rot, scale = point_mat_rotated.decompose()
loc = loc + origin
return loc
def matrix_loc_rot(mat_full):
# returns a loc + rot matrix from a global transformation matrix (loc, rot, scale)
mat_loc = Matrix.Translation(mat_full.to_translation())
mat_rot = matrix_rot(mat_full)
return mat_loc @ mat_rot
def matrix_rot(mat_full):
# return a rotation matrix only from a global transformation matrix (loc, rot, scale)
return mat_full.to_quaternion().to_matrix().to_4x4()
def compare_mat(mat1, mat2, prec):
for i in range(0,4):
for j in range(0,4):
if round(mat1[i][j], prec) != round(mat2[i][j], prec):
return False
return True
@@ -0,0 +1,498 @@
import bpy, bmesh
from .objects import *
from .bone_data import *
from .version import *
def find_armature(mesh_obj):
# the built-in function object.find_armature() is flawed:
# it returns None when there are multiple armature modifiers and one of them has no object set.
# Use this as a working alternative
for mod in mesh_obj.modifiers:
if mod.type == 'ARMATURE':
if mod.object:
return mod.object
def get_skinned_objects(armature):
deformed_objects = []
for ob in bpy.data.objects:
if ob.type != 'MESH':
continue
if find_armature(ob) == None:
continue
if find_armature(ob).name == armature.name:
deformed_objects.append(ob.name)
return deformed_objects
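# Usage sketch (not part of the addon): listing every mesh skinned to the active armature,
# relying on find_armature() above rather than the flawed built-in lookup.
def _example_skinned_meshes():
    arm = bpy.context.active_object
    if arm and arm.type == 'ARMATURE':
        print(get_skinned_objects(arm))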
def overwrite_vgroup(obj, vgroup, new_vgname):
new_vgrp = obj.vertex_groups.get(new_vgname)
if new_vgrp:
obj.vertex_groups.remove(new_vgrp)
vgroup.name = new_vgname
def create_mesh_data(mesh_name, verts, edges, faces):
# create a new mesh data-block from the given verts, edges and faces
new_mesh = bpy.data.meshes.new(name=mesh_name)
new_mesh.from_pydata(verts, edges, faces)
return new_mesh
def create_object_mesh(obj_name, verts, edges, faces):
shape_mesh = create_mesh_data(obj_name, verts, edges, faces)
# create object
shape = bpy.data.objects.new(obj_name, shape_mesh)
return shape
def transfer_shape_keys(source_obj, target_obj):
if source_obj == None or target_obj == None:
return
if source_obj.data.shape_keys == None:
return
source_shape_keys = source_obj.data.shape_keys.key_blocks
for sk_index, sk in enumerate(source_shape_keys):
if sk_index == 0:# basis
continue
source_obj.active_shape_key_index = sk_index
bpy.ops.object.shape_key_transfer()
target_sk = target_obj.data.shape_keys.key_blocks.get(sk.name)
target_sk.value = sk.value
target_sk.slider_min, target_sk.slider_max = sk.slider_min, sk.slider_max
source_obj.show_only_shape_key = False
target_obj.show_only_shape_key = False
# copy drivers
anim_data = source_obj.data.shape_keys.animation_data
if anim_data and anim_data.drivers:
if target_obj.data.shape_keys:# If None, shape keys couldn't transfer previously, invalid modifier
obj_anim_data = target_obj.data.shape_keys.animation_data_create()
for fcurve in anim_data.drivers:
new_fc = obj_anim_data.drivers.from_existing(src_driver=fcurve)
new_fc.driver.is_valid = True
for dvar in new_fc.driver.variables:
for dtar in dvar.targets:
if dtar.id == source_obj:
dtar.id = target_obj
def transfer_shape_keys_deformed(source_obj, target_obj, apply_mods=False):
if source_obj == None or target_obj == None:
return
failed_sk = []
# disable all non-armature modifiers to solve issues when baking the mesh
disabled_mod = {}
if apply_mods == False:
for obj in [source_obj, target_obj]:
for mod in obj.modifiers:
if mod.type != "ARMATURE" and mod.show_viewport:
mod.show_viewport = False
if not obj.name in disabled_mod:
disabled_mod[obj.name] = {}
disabled_mod[obj.name][mod.name] = mod.name
if apply_mods:
# enable viewport modifier level if render is enabled
# necessary to bake mesh properly
for mod in source_obj.modifiers:
if mod.show_render:
mod.show_viewport = True
if source_obj.data.shape_keys == None:
return
source_shape_keys = source_obj.data.shape_keys.key_blocks
basis_index = 0
# pin the Basis key
source_obj.active_shape_key_index = basis_index
source_obj.show_only_shape_key = True
bpy.context.evaluated_depsgraph_get().update()
# store the vert coords of the baked basis shape key
mesh_baked = bmesh.new()
if bpy.app.version < (2,93,0):
mesh_baked.from_object(source_obj, bpy.context.evaluated_depsgraph_get(), deform=True, face_normals=False)
elif bpy.app.version >= (2,93,0) and bpy.app.version < (3,0,2):
mesh_baked.from_object(source_obj, bpy.context.evaluated_depsgraph_get(), face_normals=False)
elif bpy.app.version >= (3,0,2):
mesh_baked.from_object(source_obj, bpy.context.evaluated_depsgraph_get())#, cage=False, face_normals=False, vertex_normals=False)
mesh_baked.verts.ensure_lookup_table()
base_verts_coords = [i.co.copy() for i in mesh_baked.verts]
if 'mesh_baked' in locals():
del mesh_baked
# bake each remaining shape key and compare its vert coords against the basis
for sk_index, sk in enumerate(source_shape_keys):
if sk_index == basis_index:
continue
source_obj.active_shape_key_index = sk_index
bpy.context.evaluated_depsgraph_get().update()
# get the verts moved in shape key
mesh_baked1 = bmesh.new()
if bpy.app.version < (2,93,0):
mesh_baked1.from_object(source_obj, bpy.context.evaluated_depsgraph_get(), deform=True, face_normals=False)
elif bpy.app.version >= (2,93,0) and bpy.app.version < (3,0,2):
mesh_baked1.from_object(source_obj, bpy.context.evaluated_depsgraph_get(), face_normals=False)
elif bpy.app.version >= (3,0,2):
mesh_baked1.from_object(source_obj, bpy.context.evaluated_depsgraph_get())#, cage=False, face_normals=False, vertex_normals=False)
mesh_baked1.verts.ensure_lookup_table()
if len(mesh_baked1.verts) == len(base_verts_coords):
deformed_verts_coords = [i.co.copy() for i in mesh_baked1.verts]
deformed_verts_index_list = []
for vi, v in enumerate(mesh_baked1.verts):
if v.co != base_verts_coords[vi]:
deformed_verts_index_list.append(vi)
# transfer the shape key
#bpy.ops.object.shape_key_transfer()
create_basis = False
if target_obj.data.shape_keys == None:
create_basis = True
elif len(target_obj.data.shape_keys.key_blocks) == 0:
create_basis = True
if create_basis:# add basis
target_obj.shape_key_add(name='Basis')
target_sk = target_obj.shape_key_add(name=sk.name, from_mix=False)
target_sk.value = sk.value
target_sk.slider_min, target_sk.slider_max = sk.slider_min, sk.slider_max
#print('target_sk vert count:', len(target_sk.data))
#print('mesh_baked1 vert count:', len(mesh_baked1.verts))
# correct the deformed vert coordinates
for deformed_vert_index in deformed_verts_index_list:
#print("set vertex", deformed_vert_index, "from", target_sk.data[deformed_vert_index].co, "TO", mesh_baked1.verts[deformed_vert_index].co)
if deformed_vert_index < len(target_sk.data):
target_sk.data[deformed_vert_index].co = mesh_baked1.verts[deformed_vert_index].co
else:
if not sk.name in failed_sk:
failed_sk.append(sk.name)
print('Cannot transfer shape key, different amount of vertices. ShapeKey:', sk.name, 'Object:', source_obj.name, '> Aborting')
else:# looks like a modifier is adding or removing verts from one shape key to another... not supported! (e.g. Bevel, angle-based, Decimate...)
if not sk.name in failed_sk:
failed_sk.append(sk.name)
print('Cannot transfer shape key, different amount of vertices. ShapeKey:', sk.name, 'Object:', source_obj.name, '> Aborting')
if 'mesh_baked1' in locals():
del mesh_baked1
source_obj.show_only_shape_key = False
target_obj.show_only_shape_key = False
# copy drivers
anim_data = source_obj.data.shape_keys.animation_data
if anim_data and anim_data.drivers:
if target_obj.data.shape_keys:# If None, shape keys couldn't transfer previously, invalid modifier
obj_anim_data = target_obj.data.shape_keys.animation_data_create()
for fcurve in anim_data.drivers:
new_fc = obj_anim_data.drivers.from_existing(src_driver=fcurve)
new_fc.driver.is_valid = True
for dvar in new_fc.driver.variables:
for dtar in dvar.targets:
if dtar.id == source_obj:
dtar.id = target_obj
# restore disabled modifiers
for objname in disabled_mod:
ob = get_object(objname)
for modname in disabled_mod[objname]:
ob.modifiers[modname].show_viewport = True
return failed_sk
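# Usage sketch (not part of the addon): transferring shape keys in their deformed state
# and reporting the ones that couldn't be matched. Object names are hypothetical.
def _example_transfer_deformed():
    src = bpy.data.objects.get('body')
    tar = bpy.data.objects.get('body_export')
    if src and tar:
        failed = transfer_shape_keys_deformed(src, tar, apply_mods=False)
        if failed:
            print('Shape keys not transferred:', failed)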
# Transfer weights with operators. Slower or faster than per vertex depending on the context
def transfer_weight_mod_operator(object=None, src_grp_name=None, tar_grp_name=None, replace=False):
mix_mod = object.modifiers.new(type='VERTEX_WEIGHT_MIX', name='ARP_VWM')
mix_mod.vertex_group_a = tar_grp_name
mix_mod.vertex_group_b = src_grp_name
mix_mod.mix_set = 'ALL'
if replace:
mix_mod.mix_mode = 'SET'
else:
mix_mod.mix_mode = 'ADD'
# set in first position
if bpy.app.version < (3,5,0):# backward-compatibility
i_test = 0# safety check, some modifiers can't be moved, limit maximum trials
while object.modifiers[0] != mix_mod and i_test < 50:
i_test += 1
bpy.ops.object.modifier_move_up(modifier=mix_mod.name)
else:
object.modifiers.move(len(object.modifiers)-1, 0)
bpy.ops.object.modifier_apply(modifier=mix_mod.name)
def transfer_weight_mod(object=None, dict=None, list=None, replace=False, tar_grp_name=""):
#vgroups_copy = [i for i in object.vertex_groups]
vgroups_names_copy = [i.name for i in object.vertex_groups]
for vgroup_name in vgroups_names_copy:
v_group = object.vertex_groups.get(vgroup_name)
grp_name_base = get_bone_base_name(v_group.name)
side = get_bone_side(v_group.name)
if dict:
if grp_name_base in dict:
for tar_grp_base_name in dict[grp_name_base]:
if tar_grp_base_name.endswith('.x'):
side = side[:-2]
tar_grp_name = tar_grp_base_name+side
if object.vertex_groups.get(tar_grp_name) == None:
object.vertex_groups.new(name=tar_grp_name)
transfer_weight_mod_operator(object=object, src_grp_name=v_group.name, tar_grp_name=tar_grp_name, replace=replace)
if list:
if grp_name_base in list:
if object.vertex_groups.get(tar_grp_name) == None:
object.vertex_groups.new(name=tar_grp_name)
transfer_weight_mod_operator(object=object, src_grp_name=v_group.name, tar_grp_name=tar_grp_name, replace=replace)
def transfer_weight_prefix_mod(object=None, prefix=None, tar_grp_base_name=None):
vgroups_names_copy = [i.name for i in object.vertex_groups]# iterating over a copy fixes a bug: UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb0 in position 1: invalid start byte
for vgr_n in vgroups_names_copy:
vgr = object.vertex_groups.get(vgr_n)
if vgr.index == -1:
continue
if vgr.name.startswith(prefix):
side = vgr.name[-2:]
tar_grp_name = tar_grp_base_name+side
if object.vertex_groups.get(tar_grp_name):#if exists
transfer_weight_mod_operator(object=object, src_grp_name=vgr.name, tar_grp_name=tar_grp_name)
def clamp_weight_mod(object=None, dict=None, list=None):
for vg in object.vertex_groups:
grp_name_base = get_bone_base_name(vg.name)
side = get_bone_side(vg.name)
if dict:
if grp_name_base in dict:
tar_grp_base_name = dict[grp_name_base]
if tar_grp_base_name.endswith('.x'):
side = side[:-2]
tar_grp_name = tar_grp_base_name+side
tar_grp = object.vertex_groups.get(tar_grp_name)
if tar_grp:
clamp_weight_mod_operator(object=object, src_grp_name=vg.name, tar_grp_name=tar_grp_name)
def clamp_weight_mod_operator(object=None, src_grp_name=None, tar_grp_name=None):
mix_mod = object.modifiers.new(type='VERTEX_WEIGHT_MIX', name='ARP_VWM')
mix_mod.vertex_group_a = tar_grp_name
mix_mod.vertex_group_b = src_grp_name
mix_mod.mix_set = 'A'
mix_mod.mix_mode = 'SET'
# set in first position
i_test = 0# safety check, some modifiers can't be moved
while object.modifiers[0] != mix_mod and i_test < 50:
i_test += 1
bpy.ops.object.modifier_move_up(modifier="ARP_VWM")
# apply
bpy.ops.object.modifier_apply(modifier='ARP_VWM')
def multiply_weight_mod_operator(object=None, tar_grp_name='', fac=0.5):
mix_mod = object.modifiers.new(type='VERTEX_WEIGHT_MIX', name='ARP_VWM')
mix_mod.vertex_group_a = tar_grp_name
mix_mod.mix_set = 'ALL'
mix_mod.mix_mode = 'SET'
mix_mod.mask_constant = 1 - fac
# set in first position
i_test = 0# safety check, some modifiers can't be moved
while object.modifiers[0] != mix_mod and i_test < 50:
i_test += 1
bpy.ops.object.modifier_move_up(modifier="ARP_VWM")
# apply
bpy.ops.object.modifier_apply(modifier='ARP_VWM')
def multiply_weight_mod(object=None, dict=None):
for _vg in object.vertex_groups:
grp_name_base = get_bone_base_name(_vg.name)
side = get_bone_side(_vg.name)
if grp_name_base in dict:
fac = dict[grp_name_base]
multiply_weight_mod_operator(object=object, tar_grp_name=_vg.name, fac=fac)
# Transfer weights functions per vertex. Slower or faster than operator depending on the context
def transfer_weight_verts(object=None, dict=None, list=None, target_group_name=None, use_side=False):
for vert in object.data.vertices:
if len(vert.groups) == 0:
continue
for grp in vert.groups:
try:
grp_name = object.vertex_groups[grp.group].name
except:
continue
transfer_weight(object=object, vertice=vert, vertex_weight=grp.weight, group_name=grp_name, dict=dict, list=list, target_group_name=target_group_name, use_side=use_side)
def transfer_weight(object=None, vertice=None, vertex_weight=None, group_name=None, dict=None, list=None, target_group_name=None, use_side=False):
grp_name_base = get_bone_base_name(group_name) if not use_side else group_name
side = get_bone_side(group_name) if not use_side else ''
# Dict mode
if dict:
if grp_name_base in dict:
for target_grp in dict[grp_name_base]:
if target_grp.endswith('.x') and not use_side:
side = side[:-2]
target_group_name = target_grp+side
target_group = object.vertex_groups.get(target_group_name)
if target_group == None:
target_group = object.vertex_groups.new(name=target_group_name)
target_group.add([vertice.index], vertex_weight, 'ADD')
# List mode
if list:
if grp_name_base in list:
target_group = object.vertex_groups.get(target_group_name)
if target_group == None:
target_group = object.vertex_groups.new(name=target_group_name)
target_group.add([vertice.index], vertex_weight, 'ADD')
def transfer_weight_prefix_verts(object=None, prefix='', tar_grp_base_name=''):
for vert in object.data.vertices:
if len(vert.groups) == 0:
continue
for grp in vert.groups:
try:
grp_name = object.vertex_groups[grp.group].name
except:
continue
transfer_weight_prefix(object=object, vertice=vert, vertex_weight=grp.weight, group_name=grp_name, prefix=prefix, target_group=tar_grp_base_name)
def transfer_weight_prefix(object=None, vertice=None, vertex_weight=None, group_name=None, prefix='', target_group=''):
if group_name.startswith(prefix):
side = group_name[-2:]
tar_group_name = target_group+side
if object.vertex_groups.get(tar_group_name):# if exists
object.vertex_groups[tar_group_name].add([vertice.index], vertex_weight, 'ADD')
def copy_vgroup(object=None, dict=None, use_side=False):
# dict = {'arm_stretch': ['c_arm_twist_offset'],...}
vgroups_names_copy = [i.name for i in object.vertex_groups]
for vg_name in vgroups_names_copy:
vg = object.vertex_groups.get(vg_name)
#print(vg.index)
#print('vg.name', vg.name)
grp_name_base = get_bone_base_name(vg.name) if not use_side else vg.name
side = get_bone_side(vg.name) if not use_side else ''
if grp_name_base in dict:
tar_grp_names = dict[grp_name_base]
for tar_grp_name in tar_grp_names:
tar_grp = object.vertex_groups.get(tar_grp_name+side)
if tar_grp == None:
continue
# remove current tar group
object.vertex_groups.remove(tar_grp)
# copy source group
object.vertex_groups.active_index = vg.index
bpy.ops.object.vertex_group_copy()
copy_idx = object.vertex_groups.active_index
copy_grp = object.vertex_groups[copy_idx]
copy_grp.name = tar_grp_name+side
def multiply_weight(object=None, vertice=None, vertex_weight=None, group_name=None, dict=None):
grp_name_base = get_bone_base_name(group_name)
side = get_bone_side(group_name)
if grp_name_base in dict:
if object.vertex_groups.get(group_name) != None:#if exists
object.vertex_groups[group_name].add([vertice.index], vertex_weight * dict[grp_name_base], 'REPLACE')
def clamp_weights(object=None, vertice=None, vertex_weight=None, group_name=None, dict=None):
grp_name_base = group_name[:-2]
side = group_name[-2:]
if "_dupli_" in group_name:
grp_name_base = group_name[:-12]
side = "_" + group_name[-11:]
if grp_name_base in dict:
if dict[grp_name_base][-2:] == '.x':
side = ''
_target_group = dict[grp_name_base]+side
target_weight = 0.0
if object.vertex_groups.get(_target_group) != None:
for grp in vertice.groups:
if object.vertex_groups[grp.group].name == _target_group:
target_weight = grp.weight
def_weight = min(vertex_weight, target_weight)
object.vertex_groups[group_name].add([vertice.index], def_weight, 'REPLACE')
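# A minimal usage sketch of the per-vertex transfer functions above, assuming a
# hypothetical mesh object named 'Body'; the dict maps a source group base name
# to a list of target group base names, same format as in copy_vgroup() above:
#
#   body = bpy.data.objects.get('Body')
#   if body:
#       transfer_weight_verts(object=body, dict={'arm_stretch': ['c_arm_twist_offset']})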
@@ -0,0 +1,13 @@
import bpy
from .version import *
def apply_modifier(mod_name):
    try:# may raise an error if the modifier is disabled in the viewport
if bpy.app.version >= (2,90,0):
bpy.ops.object.modifier_apply(modifier=mod_name)
else:
bpy.ops.object.modifier_apply(apply_as="DATA", modifier=mod_name)
    except:
        print('Modifier could not be applied: '+mod_name)
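# Usage sketch (assumes the active object has a modifier named 'ARP_VWM'):
#
#   apply_modifier('ARP_VWM')# prints a message instead of raising if it cannot be applied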
@@ -0,0 +1,17 @@
def trim_dupli_name(name):
# trim last digits of duplicated names e.g. myobj.002
split_names = name.split('.')
if len(split_names) > 1:
last_digits = split_names[len(split_names)-1]
        for i in last_digits:# make sure they're all digits, otherwise it's not a duplicated name
try:
int(i)
#print('int i', int(i))
except:
return name
len_to_trim = len(last_digits) + 1
return name[:-len_to_trim]
else:
return name
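# Examples:
#   trim_dupli_name('myobj.002') -> 'myobj'
#   trim_dupli_name('myobj.v2')  -> 'myobj.v2' (suffix is not all digits, left as is)
#   trim_dupli_name('myobj')     -> 'myobj'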
@@ -0,0 +1,359 @@
import bpy, os
from .bone_edit import *
from .context import *
from .armature import *
def get_object_boundaries(obj):
bound_box_world = []
for coord in obj.bound_box:
vec = Vector((coord[0], coord[1], coord[2]))
global_coord = obj.matrix_world @ vec
bound_box_world.append(global_coord)
front = 1000000000
back = -front
left = -front
right = front
top = -front
bottom = front
# get bound box front/back/left/right bounds
for coord in bound_box_world:
if coord[1] < front:
front = coord[1]
if coord[1] > back:
back = coord[1]
if coord[0] > left:
left = coord[0]
if coord[0] < right:
right = coord[0]
if coord[2] > top:
top = coord[2]
if coord[2] < bottom:
bottom = coord[2]
return {'front':front, 'back':back, 'left':left, 'right':right, 'top':top, 'bottom':bottom}
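# Usage sketch: the returned bounds are in world space. Note the convention used
# above: 'left' is the greatest X and 'right' the smallest X (character facing -Y),
# so for instance:
#
#   bounds = get_object_boundaries(bpy.context.active_object)
#   width  = bounds['left'] - bounds['right']
#   height = bounds['top'] - bounds['bottom']
#   depth  = bounds['back'] - bounds['front']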
def append_from_arp(nodes=None, type=None):
context = bpy.context
scene = context.scene
addon_directory = os.path.dirname(os.path.abspath(__file__))
addon_directory = os.path.dirname(addon_directory)
addon_directory = os.path.dirname(addon_directory)
filepath = addon_directory + "/armature_presets/" + "master.blend"
if type == "object":
# Clean the cs_ materials names (avoid .001, .002...)
for mat in bpy.data.materials:
if mat.name[:3] == "cs_":
if mat.name[-3:].isdigit() and bpy.data.materials.get(mat.name[:-4]) == None:
mat.name = mat.name[:-4]
# make a list of current custom shapes objects in the scene for removal later
cs_objects = [obj.name for obj in bpy.data.objects if obj.name.startswith('cs_')]
# Load the objects data in the file
with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to):
data_to.objects = [name for name in data_from.objects if name in nodes]
# get cs_grp
cs_grp = None
for _ob in bpy.context.scene.objects:
if _ob.name.startswith('cs_grp') and _ob.type == 'EMPTY':
cs_grp = _ob
print('FOUND cs_grp', cs_grp.name)
break
# Add the objects in the scene
for obj in data_to.objects:
if obj:
# link
bpy.context.scene.collection.objects.link(obj)
# apply existing scene material if exists
if len(obj.material_slots):
mat_name = obj.material_slots[0].name
found_mat = None
for mat in bpy.data.materials:
                    if mat.name == mat_name[:-4]: # strip the .001, .002... suffix
found_mat = mat.name
break
# assign existing material if already in file and delete the imported one
if found_mat:
obj.material_slots[0].material = bpy.data.materials[found_mat]
bpy.data.materials.remove(bpy.data.materials[mat_name], do_unlink=True)
# If we append a custom shape
if obj.name.startswith('cs_') or 'c_sphere' in obj.name:
if cs_grp:
# parent the custom shape
obj.parent = cs_grp
# assign to new collection
assigned_collections = []
for collec in cs_grp.users_collection:
collec.objects.link(obj)
assigned_collections.append(collec)
if len(assigned_collections):
# remove previous collections
for i in obj.users_collection:
if not i in assigned_collections:
i.objects.unlink(obj)
# and the scene collection
try:
bpy.context.scene.collection.objects.unlink(obj)
except:
pass
# If we append other objects,
# find added/useless custom shapes and delete them
else:
for obj in bpy.data.objects:
if obj.name.startswith('cs_'):
if not obj.name in cs_objects:
bpy.data.objects.remove(obj, do_unlink=True)
if 'obj' in locals():
del obj
if type == "text":
# Load the objects data in the file
with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to):
data_to.texts = [name for name in data_from.texts if name in nodes]
print("Loading text file:", data_to.texts)
bpy.context.evaluated_depsgraph_get().update()
if type == "font":
# Load the data in the file
with bpy.data.libraries.load(filepath, link=False) as (data_from, data_to):
data_to.fonts = [name for name in data_from.fonts if name in nodes]
print("Loading font file:", data_to.fonts)
bpy.context.evaluated_depsgraph_get().update()
def get_object(name, view_layer_change=False):
ob = bpy.data.objects.get(name)
if ob:
if view_layer_change:
found = False
for v_o in bpy.context.view_layer.objects:
if v_o == ob:
found = True
if not found:# object not in view layer, add to the base collection
bpy.context.collection.objects.link(ob)
return ob
def is_obj_in_current_view_layer(obj):
for v_o in bpy.context.view_layer.objects:
if v_o == obj:
return True
return False
def get_object_id(arp_id):
for _ob in bpy.data.objects:
if len(_ob.keys()):
if 'arp_id' in _ob.keys():
if _ob['arp_id'] == arp_id:
return _ob
return None
def is_object_id(_ob, arp_id, suffix_only=False):
object_has_id = False
if len(_ob.keys()):
if 'arp_id' in _ob.keys():
if suffix_only:
if _ob['arp_id'].endswith(arp_id):
object_has_id = True
else:
if _ob['arp_id'] == arp_id:
object_has_id = True
return object_has_id
def delete_object(obj):
bpy.data.objects.remove(obj, do_unlink=True)
def set_active_object(object_name):
bpy.context.view_layer.objects.active = bpy.data.objects[object_name]
bpy.data.objects[object_name].select_set(state=True)
def hide_object(obj_to_set):
try:# object may not be in current view layer
obj_to_set.hide_set(True)
obj_to_set.hide_viewport = True
except:
pass
def hide_object_visual(obj_to_set):
obj_to_set.hide_set(True)
def is_object_hidden(obj_to_get):
    try:
        return obj_to_get.hide_get() or obj_to_get.hide_viewport
    except:# the object must be in another view layer, it can't be accessed
        return True
def unhide_object(obj_to_set):
# we can only operate on the object if it's in the active view layer...
try:
obj_to_set.hide_set(False)
obj_to_set.hide_viewport = False
except:
print("Could not reveal object:", obj_to_set.name)
def duplicate_object(new_name="", method='operator', obj=None):
if method == 'operator':
try:
bpy.ops.object.duplicate(linked=False, mode='TRANSLATION')
except:
bpy.ops.object.duplicate('TRANSLATION', False)
if new_name != "":
bpy.context.active_object.name = new_name
elif method == 'data':
if obj:
obj_dupli = obj.copy()
for col in obj.users_collection:
col.objects.link(obj_dupli)
obj_dupli.data = obj_dupli.data.copy()
obj_dupli.name = new_name
return obj_dupli
else:
print('Cannot duplicate object, not found')
def delete_children(passed_node, type):
if passed_node:
if type == "OBJECT":
parent_obj = passed_node
children = []
for obj in bpy.data.objects:
if obj.parent:
if obj.parent == parent_obj:
children.append(obj)
for _obj in children:
for obj_1 in bpy.data.objects:
if obj_1.parent:
if obj_1.parent == _obj:
children.append(obj_1)
meshes_data = []
for child in children:
# store the mesh data for removal afterward
try:
if child.data:
if not child.data.name in meshes_data:
meshes_data.append(child.data.name)
except:
continue
bpy.data.objects.remove(child, do_unlink=True, do_id_user=True, do_ui_user=True)
for data_name in meshes_data:
current_mesh = bpy.data.meshes.get(data_name)
if current_mesh:
bpy.data.meshes.remove(current_mesh, do_unlink=True, do_id_user=True, do_ui_user=True)
bpy.data.objects.remove(passed_node, do_unlink=True)
elif type == "EDIT_BONE":
current_mode = bpy.context.mode
bpy.ops.object.mode_set(mode='EDIT')
if bpy.context.active_object.data.edit_bones.get(passed_node.name):
# Save displayed layers
_layers = enable_all_armature_layers()
bpy.ops.armature.select_all(action='DESELECT')
bpy.context.evaluated_depsgraph_get().update()
bpy.context.active_object.data.edit_bones.active = get_edit_bone(passed_node.name)
bpy.ops.armature.select_similar(type='CHILDREN')
bpy.ops.armature.delete()
restore_armature_layers(_layers)
# restore saved mode
restore_current_mode(current_mode)
def parent_objects(_obj_list, target, mesh_only=True):
for obj in _obj_list:
if mesh_only:
if obj.type != "MESH":
continue
#print("parenting", obj.name)
obj_mat = obj.matrix_world.copy()
obj.parent = target
obj.matrix_world = obj_mat
def select_children(obname, ob_type=None):
_ob = bpy.data.objects.get(obname)
for child in _ob.children:
if ob_type:
if ob_type != child.type:
continue
set_active_object(child.name)
bpy.ops.object.mode_set(mode='OBJECT')
if len(child.children):
select_children(child.name)
def get_children(obname, ob_type=None):
children_list = []
children_list = get_children_recursive(obname, obtype=ob_type, list=children_list)
return children_list
def get_children_recursive(obname, obtype=None, list=None):
_ob = get_object(obname)
if _ob.children:
for child in _ob.children:
if obtype:
if obtype != child.type:
continue
list.append(child.name)
get_children_recursive(child.name, obtype=obtype, list=list)
return list
def has_delta_transforms(obj):
for i in obj.delta_location:
if i != 0.0:
return True
for i in obj.delta_rotation_euler:
if i != 0.0:
return True
for j, i in enumerate(obj.delta_rotation_quaternion):
if (i != 0.0 and j != 0) or (i != 1.0 and j == 0):
return True
for i in obj.delta_scale:
if i != 1.0:
return True
return False
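# A minimal usage sketch combining a few helpers above; the object name 'Cube'
# is hypothetical:
#
#   ob = get_object('Cube', view_layer_change=True)# links to the base collection if needed
#   if ob and not is_object_hidden(ob):
#       set_active_object(ob.name)
#       dupli = duplicate_object(new_name='Cube_copy', method='data', obj=ob)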
@@ -0,0 +1,67 @@
import bpy
from .version import blender_version
def get_prop_setting(node, prop_name, setting):
if bpy.app.version >= (3,0,0):
return node.id_properties_ui(prop_name).as_dict()[setting]
else:
return node['_RNA_UI'][prop_name][setting]
def set_prop_setting(node, prop_name, setting, value):
if bpy.app.version >= (3,0,0):
ui_data = node.id_properties_ui(prop_name)
if setting == 'default':
ui_data.update(default=value)
elif setting == 'min':
ui_data.update(min=value)
elif setting == 'max':
ui_data.update(max=value)
elif setting == 'soft_min':
ui_data.update(soft_min=value)
elif setting == 'soft_max':
ui_data.update(soft_max=value)
elif setting == 'description':
ui_data.update(description=value)
else:
if not "_RNA_UI" in node.keys():
node["_RNA_UI"] = {}
node['_RNA_UI'][prop_name][setting] = value
def create_custom_prop(node=None, prop_name="", prop_val=1.0, prop_min=0.0, prop_max=1.0, prop_description="", soft_min=None, soft_max=None, default=None):
if soft_min == None:
soft_min = prop_min
if soft_max == None:
soft_max = prop_max
if bpy.app.version < (3,0,0):
if not "_RNA_UI" in node.keys():
node["_RNA_UI"] = {}
node[prop_name] = prop_val
if default == None:
default = prop_val
if bpy.app.version < (3,0,0):
node["_RNA_UI"][prop_name] = {'use_soft_limits':True, 'min': prop_min, 'max': prop_max, 'description': prop_description, 'soft_min':soft_min, 'soft_max':soft_max, 'default':default}
else:
if type(prop_val) != str and type(prop_val) != bool:#string props have no min, max, soft min, soft max
set_prop_setting(node, prop_name, 'min', prop_min)
set_prop_setting(node, prop_name, 'max', prop_max)
set_prop_setting(node, prop_name, 'soft_min', soft_min)
set_prop_setting(node, prop_name, 'soft_max', soft_max)
set_prop_setting(node, prop_name, 'description', prop_description)
set_prop_setting(node, prop_name, 'default', default)
# set as overridable
node.property_overridable_library_set('["'+prop_name+'"]', True)
def is_rna_prop(prop_name):
if prop_name in ['bl_rna', 'rna_type'] or prop_name.startswith('__'):
return True
return False
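# Usage sketch, creating an IK-FK switch style property on a pose bone; the bone
# name 'c_hand_ik.l' is hypothetical:
#
#   pb = bpy.context.active_object.pose.bones.get('c_hand_ik.l')
#   if pb:
#       create_custom_prop(node=pb, prop_name='ik_fk_switch', prop_val=0.0,
#           prop_min=0.0, prop_max=1.0, prop_description='Switch between IK and FK')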
@@ -0,0 +1,9 @@
import sys
def print_progress_bar(job_title, progress, length):
if length != 0:
progress = int((progress * 100) / length)
else:
progress = 100
sys.stdout.write("\r " + job_title + " %d%%" % progress)
sys.stdout.flush()
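# Usage sketch: writes an updating percentage on a single console line; the job
# title is arbitrary:
#
#   for i in range(100):
#       print_progress_bar('Binding', i + 1, 100)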
@@ -0,0 +1,55 @@
from math import *
from mathutils import *
def vectorize3(list):
return Vector((list[0], list[1], list[2]))
def vector_to_list(vector):
return [i for i in vector]
def dict_to_string(dico):
if type(dico) != dict:# custom properties in Blender 3.0 are not dict type, must be converted
dico = dico.to_dict()
dict_str = {}
for i in dico:
dict_str[str(i)] = str(dico[i])
return dict_str
def dict_to_int(dico):
if type(dico) != dict:# custom properties in Blender 3.0 are not dict type, must be converted
dico = dico.to_dict()
dict_int = {}
for i in dico:
dict_int[int(i)] = int(dico[i])
return dict_int
def str_list_to_fl_list(list):
new_list = []
for i in list:
new_list.append(float(i))
return new_list
def vec_to_string(vec):
string_var = str(vec[0])+','+str(vec[1])+','+str(vec[2])
return string_var
def string_to_bool(string):
if string.lower() == 'true':
return True
elif string.lower() == 'false':
return False
else:
return None
def clamp_max(value, max):
if value > max:
return max
else:
return value
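# Examples:
#   vectorize3([1, 2, 3])            -> Vector((1.0, 2.0, 3.0))
#   vec_to_string(Vector((1, 2, 3))) -> '1.0,2.0,3.0'
#   string_to_bool('True')           -> True
#   str_list_to_fl_list(['1', '2'])  -> [1.0, 2.0]
#   clamp_max(1.5, 1.0)              -> 1.0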
@@ -0,0 +1,305 @@
import bpy
import addon_utils
from .. import auto_rig_datas as ard
from .collections import *
from .armature import *
class ARP_blender_version:
_string = bpy.app.version_string
blender_v = bpy.app.version
_float = 0.0
def __init__(self):
        ver_str = ''.join([i for i in self._string if i in '0123456789'])# keep digits only, e.g. '2.93.9' -> '2939'
        self._float = float(ver_str)
        if len(ver_str) > 3:# some versions are formatted as '3.00', others as '2.93.9'
            self._float = float(ver_str)/10
blender_version = ARP_blender_version()
def is_proxy(obj):
    # proxy attribute removed in Blender 3.3
if 'proxy' in dir(obj):
if obj.proxy:
return True
return False
def get_autorigpro_version():
addons = None
if bpy.app.version >= (4,2,0):
addons = addon_utils.modules()
else:
addons = addon_utils.modules()[:]
for addon in addons:
addon_name = addon.__name__ if bpy.app.version >= (4,2,0) else addon.bl_info['name']
arp_name = 'bl_ext.user_default.auto_rig_pro' if bpy.app.version >= (4,2,0) else 'Auto-Rig Pro'
quickr_name = '_quick_rig' if bpy.app.version >= (4,2,0) else 'Quick Rig'
if addon_name.startswith(arp_name) and not quickr_name in addon_name:
#print(addon)
#print()
ver_list = addon.bl_info.get('version')
ver_string = str(ver_list[0]) + str(ver_list[1]) + str(ver_list[2])
ver_int = int(ver_string)
return ver_int
def ver_int_to_str(version_int):
to_str = str(version_int)
return to_str[0] + '.' + to_str[1] + to_str[2] + '.' + to_str[3] + to_str[4]
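# Example (hypothetical version numbers): a bl_info version of (3, 62, 12) is
# packed by get_autorigpro_version() as the int 36212, and ver_int_to_str(36212)
# returns '3.62.12'.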
def convert_drivers_cs_to_xyz(armature):
# Blender 3.0 requires Vector3 custom_shape_scale values
# convert single uniform driver to vector3 array drivers
drivers_armature = [i for i in armature.animation_data.drivers]
for dr in drivers_armature:
if 'custom_shape_scale' in dr.data_path:
if not 'custom_shape_scale_xyz' in dr.data_path:
for i in range(0, 3):
new_dr = armature.animation_data.drivers.from_existing(src_driver=dr)
new_dr.data_path = new_dr.data_path.replace('custom_shape_scale', 'custom_shape_scale_xyz')
new_dr.array_index = i
new_dr.driver.expression += ''# update hack
armature.driver_remove(dr.data_path, dr.array_index)
# tag in prop
armature.data["arp_updated_3.0"] = True
print("Converted custom shape scale drivers to xyz")
def convert_armature_layers_to_collection(armature):
# convert old armature layers and bone colors groups
# from pre Blender 4.0 versions, to collections
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.transform.translate(value=(0, 0, 0))# update hack
bpy.context.evaluated_depsgraph_get().update()
get_armature_collections(armature).update()
col_names = [col.name for col in get_armature_collections(armature)]# make a copy of current collections, necessary for proper removal
for col_name in col_names:#armature.data.collections_all:
_col = get_armature_collections(armature).get(col_name)
        if _col == None:# should not happen, guard anyway
            print(" Collection is None, error when removing collection! Skipping")
continue
# remove deprecated bones color groups
if _col.name in ard.bones_groups_to_remove:
armature.data.collections.remove(_col)
continue
# rename color collections
if _col.name in ard.bones_groups:
_col.name = 'color_'+_col.name
# rename bones collections
print('Rename collections...')
get_armature_collections(armature).update()
for i, _col in enumerate(get_armature_collections(armature)):
if _col.name.startswith('Layer '):
lidx = int(_col.name.split(' ')[1])-1
            # special case: both kilt bones and feather bones share layer index 24 ('Layer 25')
            # split them into two dedicated collections
if lidx == 24:
for bone in armature.data.bones:
if is_bone_in_layer(bone.name, _col.name):
if 'arp_kilt' in bone.keys():
set_bone_layer(bone, 'mch_kilt_masters')
if 'feather' in bone.name:
set_bone_layer(bone, 'mch_feathers')
                # the 'Layer 25' collection itself is removed below;
                # in case other unexpected bones remain in it, move them out first
for bone in armature.data.bones:
if is_bone_in_layer(bone.name, _col.name):
set_bone_layer(bone, 'Misc')
continue
for col_name in ard.layer_col_map:
if ard.layer_col_map[col_name] == lidx:
_col.name = col_name
break
    # remove the remaining 'Layer 25' collection (layer index 24), if any
    col_25 = get_armature_collections(armature).get('Layer 25')
    if col_25:
        armature.data.collections.remove(col_25)
# ensure all ARP collections are created
for col_name in ard.layer_col_map:
if col_name[0].isupper():# only main collections with capital letters
if get_armature_collections(armature).get(col_name) == None:
print('Create collection', col_name)
armature.data.collections.new(col_name)
sort_armature_collections(armature)
# update tag as prop
armature.data["arp_updated_4.0"] = True
def convert_picker_layers_to_collection(armature):
converted = False
for pb in armature.pose.bones:
if 'layer' in pb.keys() and not 'collec' in pb.keys():
layer_idx = pb['layer']
layer_name = None
for col_name in ard.layer_col_map:
if ard.layer_col_map[col_name] == layer_idx:
layer_name = col_name
break
            if layer_name:# only assign if a matching collection name was found
                pb['collec'] = layer_name
                converted = True
return converted
def is_fc_bb_param(fc, param):
# is the fcurve a bendy-bones parameter?
# bendy-bones params data path depends on the Blender version
    # scale params are stored as arrays in Blender 3.0+
# scale in
if param == 'bbone_scaleinx':
if get_bbone_param_name(param) in fc.data_path:
if (bpy.app.version >= (3,0,0) and fc.array_index == 0) or (bpy.app.version < (3,0,0)):
return True
elif param == 'bbone_scaleiny':
if get_bbone_param_name(param) in fc.data_path:
if (bpy.app.version >= (3,0,0) and fc.array_index == 1) or (bpy.app.version < (3,0,0)):
return True
elif param == 'bbone_scaleinz':
if 'bbone_scalein' in fc.data_path:# only in Blender 3.0 and after
if (bpy.app.version >= (3,0,0) and fc.array_index == 2):
return True
# scale out
elif param == 'bbone_scaleoutx':
if get_bbone_param_name(param) in fc.data_path:
if (bpy.app.version >= (3,0,0) and fc.array_index == 0) or (bpy.app.version < (3,0,0)):
return True
elif param == 'bbone_scaleouty':
if get_bbone_param_name(param) in fc.data_path:
if (bpy.app.version >= (3,0,0) and fc.array_index == 1) or (bpy.app.version < (3,0,0)):
return True
elif param == 'bbone_scaleoutz':
if 'bbone_scaleout' in fc.data_path:# only in Blender 3.0 and after
if (bpy.app.version >= (3,0,0) and fc.array_index == 2):
return True
def get_bbone_param_name(setting):
# bendy-bones setting name depending on the Blender version
# curve out
if setting == 'bbone_curveoutz':
if bpy.app.version < (3,0,0):
return 'bbone_curveouty'
else:
return 'bbone_curveoutz'
# curve in
elif setting == 'bbone_curveinz':
if bpy.app.version < (3,0,0):
return 'bbone_curveiny'
else:
return 'bbone_curveinz'
# scale in X
elif setting == 'bbone_scaleinx':
if bpy.app.version < (3,0,0):
return 'bbone_scaleinx'
else:
return 'bbone_scalein'
# scale in Y
elif setting == 'bbone_scaleiny':
if bpy.app.version < (3,0,0):
return 'bbone_scaleiny'
else:
return 'bbone_scalein'
# scale out X
elif setting == 'bbone_scaleoutx':
if bpy.app.version < (3,0,0):
return 'bbone_scaleoutx'
else:
return 'bbone_scaleout'
    # scale out Y
elif setting == 'bbone_scaleouty':
if bpy.app.version < (3,0,0):
return 'bbone_scaleouty'
else:
return 'bbone_scaleout'
def check_id_root(action):
if bpy.app.version >= (2,90,1):
if getattr(action, 'id_root', None) == 'OBJECT':
return True
elif getattr(action, 'id_root', None) == 'KEY':# shape keys actions are not exportable armature actions in that case
return False
        else:# sometimes there is no tag, reason unknown; keep the action in that case
return True
else:
return True
def invert_angle_with_blender_versions(angle=None, bone=False, axis=None):
# Deprecated!
# Use rotate_edit_bone() and rotate_object() instead
#
# bpy.ops.transform.rotate has inverted angle value depending on the Blender version
# this function is necessary to support these version specificities
invert = False
if bone == False:
if (bpy.app.version >= (2,83,0) and bpy.app.version < (2,90,0)) or (bpy.app.version >= (2,90,1) and bpy.app.version < (2,90,2)):
invert = True
elif bone == True:
# bone rotation support
# the rotation direction is inverted in Blender 2.83 only for Z axis
if axis == "Z":
if bpy.app.version >= (2,83,0) and bpy.app.version < (2,90,0):
invert = True
# the rotation direction is inverted for all but Z axis in Blender 2.90 and higher
if axis != "Z":
if bpy.app.version >= (2,90,0):
invert = True
if invert:
angle = -angle
return angle
def disable_bone_inherit_scale(editbone):
if bpy.app.version >= (2,81,0):
editbone.inherit_scale = 'NONE'
else:# backward-compatibility
editbone.use_inherit_scale = False
def enable_bone_inherit_scale(editbone):
if bpy.app.version >= (2,81,0):
editbone.inherit_scale = 'FULL'
else:# backward-compatibility
editbone.use_inherit_scale = True
def get_prefs():
if bpy.app.version >= (4,2,0):
return bpy.context.preferences.addons[__package__[:-8]].preferences
else:
return bpy.context.preferences.addons[__package__.split('.')[0]].preferences
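# Usage sketch: resolve a version-dependent bendy-bone data path before creating
# an fcurve or driver on it:
#
#   path = get_bbone_param_name('bbone_scaleinx')
#   # 'bbone_scalein' (array) in Blender 3.0+, 'bbone_scaleinx' in older versions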
@@ -0,0 +1,8 @@
import bpy
def get_armature_collections(_arm):
arm_data = _arm.data if 'type' in dir(_arm) else _arm
if bpy.app.version >= (4,1,0):
return arm_data.collections_all
else:
return arm_data.collections
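# Usage sketch (assumes Blender 4.x, where armatures have bone collections);
# works with either the armature object or its data:
#
#   for col in get_armature_collections(bpy.context.active_object):
#       print(col.name, col.is_visible)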
File diff suppressed because one or more lines are too long
@@ -0,0 +1,3 @@
from mathutils import Vector, Euler, Matrix
coords={'pelvis': Vector((1.1648752433757181e-06, -0.03546452522277832, 9.705429077148438)), 'spine_01': Vector((2.2931205876375316e-06, -0.6347151398658752, 19.094528198242188)), 'spine_02': Vector((1.598883045517141e-06, 0.6980798244476318, 13.395744323730469)), 'spine_03': Vector((1.6606641111138742e-06, 1.0556304454803467, 13.992294311523438)), 'clavicle_l': Vector((14.954872131347656, 4.510254859924316, -2.2741546630859375)), 'upperarm_l': Vector((15.369140625, -0.2687036991119385, -1.2218170166015625)), 'lowerarm_l': Vector((20.141151428222656, -3.486912250518799, -1.384307861328125)), 'hand_l': Vector((10.576087951660156, -1.987648606300354, -0.7958221435546875)), 'index_01_l': Vector((4.158714294433594, -0.7536659240722656, -0.720947265625)), 'index_02_l': Vector((3.249267578125, -0.3236865997314453, -0.9248504638671875)), 'index_03_l': Vector((3.1864013671875, -0.4611630439758301, -1.073333740234375)), 'middle_01_l': Vector((4.579429626464844, -0.4014453887939453, -0.6331024169921875)), 'middle_02_l': Vector((3.5030288696289062, -0.391385555267334, -0.943206787109375)), 'middle_03_l': Vector((3.4647369384765625, -0.42128419876098633, -1.06402587890625)), 'pinky_01_l': Vector((3.498687744140625, 0.6987745761871338, -0.15118408203125)), 'pinky_02_l': Vector((2.8414535522460938, 0.5827560424804688, -0.70745849609375)), 'pinky_03_l': Vector((2.8664474487304688, 0.37882447242736816, -0.744293212890625)), 'ring_01_l': Vector((4.408515930175781, 0.2745676636695862, -0.3408203125)), 'ring_02_l': Vector((3.3953475952148438, 0.27488279342651367, -0.695098876953125)), 'ring_03_l': Vector((3.3896865844726562, 0.14387929439544678, -0.75921630859375)), 'thumb_01_l': Vector((2.0802688598632812, -2.6089048385620117, -1.9597320556640625)), 'thumb_02_l': Vector((2.58538818359375, -1.9712257385253906, -2.4354095458984375)), 'thumb_03_l': Vector((2.8783111572265625, -1.8077516555786133, -2.2245330810546875)), 'clavicle_r': Vector((-14.95479679107666, 4.510228633880615, -2.274139404296875)), 'upperarm_r': Vector((-15.369194030761719, -0.26871156692504883, -1.2218170166015625)), 'lowerarm_r': Vector((-20.141193389892578, -3.486935615539551, -1.384307861328125)), 'hand_r': Vector((-10.576080322265625, -1.987655758857727, -0.7958221435546875)), 'index_01_r': Vector((-4.158897399902344, -0.7537021636962891, -0.720977783203125)), 'index_02_r': Vector((-3.249267578125, -0.32368993759155273, -0.9248504638671875)), 'index_03_r': Vector((-3.1864089965820312, -0.4611630439758301, -1.0733184814453125)), 'middle_01_r': Vector((-4.579620361328125, -0.40146565437316895, -0.6331329345703125)), 'middle_02_r': Vector((-3.5030975341796875, -0.39139294624328613, -0.9432220458984375)), 'middle_03_r': Vector((-3.4648056030273438, -0.42128705978393555, -1.06402587890625)), 'pinky_01_r': Vector((-3.4987640380859375, 0.6987893581390381, -0.1511993408203125)), 'pinky_02_r': Vector((-2.84124755859375, 0.5827126502990723, -0.7073974609375)), 'pinky_03_r': Vector((-2.8662338256835938, 0.37879395484924316, -0.744232177734375)), 'ring_01_r': Vector((-4.408203125, 0.2745439410209656, -0.3408050537109375)), 'ring_02_r': Vector((-3.3953475952148438, 0.27488067746162415, -0.695098876953125)), 'ring_03_r': Vector((-3.3897171020507812, 0.14389744400978088, -0.7591552734375)), 'thumb_01_r': Vector((-2.0802078247070312, -2.608829975128174, -1.9596710205078125)), 'thumb_02_r': Vector((-2.58538818359375, -1.9712257385253906, -2.4354248046875)), 'thumb_03_r': Vector((-2.878326416015625, -1.8077640533447266, -2.2245025634765625)), 'neck_01': Vector((1.083126335288398e-06, -2.8042922019958496, 8.857421875)), 'head': Vector((1.1140700735268183e-06, 0.1489097774028778, 16.57891845703125)), 'thigh_l': Vector((3.5867919921875, -0.9371733665466309, -32.11991882324219)), 'calf_l': Vector((1.8110084533691406, 1.1467314958572388, -30.26090431213379)), 'foot_l': Vector((0.8927364349365234, -19.544645309448242, -1.1506776809692383)), 'thigh_r': Vector((-3.586806297302246, -0.9371740818023682, -32.120018005371094)), 'calf_r': Vector((-1.8110074996948242, 1.1467384099960327, -30.261043548583984)), 'foot_r': Vector((-0.8925895690917969, -19.54458999633789, -1.1506767272949219))}
File diff suppressed because one or more lines are too long
@@ -0,0 +1,134 @@
#############################################################
## "Reset All" functions, used to reset the bone controller positions
## when posing or animating the character.
## Accessed from the picker "Reset" button
## and from the "Reset All" buttons from the N-key panel,
## Rig Main Properties tab
#############################################################
import bpy
# FUNCTIONS ------------------------------------
def get_armature_collections(_arm):
arm_data = _arm.data if 'type' in dir(_arm) else _arm
if bpy.app.version >= (4,1,0):
return arm_data.collections_all
else:
return arm_data.collections
def get_blender_version():
ver = bpy.app.version
return ver[0]*100+ver[1]+ver[2]*0.01
def get_prop_setting(node, prop_name, setting):
if bpy.app.version >= (3,0,0):
return node.id_properties_ui(prop_name).as_dict()[setting]
else:
return node['_RNA_UI'][prop_name][setting]
def set_inverse_child(b, cns):
# direct inverse matrix method
if cns.subtarget != "":
if bpy.context.active_object.data.bones.get(cns.subtarget):
cns.inverse_matrix = bpy.context.active_object.pose.bones[cns.subtarget].matrix.inverted()
else:
print("Child Of constraint could not be reset, bone does not exist:", '"'+cns.subtarget+'" from', cns.name)
def is_reset_bone(bone_name):
reset_bones_parent = ["c_foot_ik", "c_hand_ik"]
for n in reset_bones_parent:
        if n in bone_name:
            return True
    return False
def reset_all_controllers():
    # the function is run at startup; exit on error or if there is no active object
    try:
        if bpy.context.active_object is None:
            return
    except:
        return
# display all collections
layers_select = None
if bpy.app.version >= (4,0,0):
layers_select = {}
for col in get_armature_collections(bpy.context.active_object):
layers_select[col.name] = col.is_visible
col.is_visible = True
else:
layers_select = [layer_bool for layer_bool in bpy.context.active_object.data.layers]
for i in range(0, 32):
bpy.context.active_object.data.layers[i] = True
bones_data = bpy.context.active_object.data.bones
# reset properties
for bone in bpy.context.object.pose.bones:
        bone_parent = bone.parent.name if bone.parent else ""
if (bone.name.startswith('c_') or bone.name.startswith("cc_") or 'cc' in bone.keys()) and bone_parent != "Picker":
bone.location = [0.0,0.0,0.0]
bone.rotation_euler = [0.0,0.0,0.0]
bone.rotation_quaternion = [1.0,0.0,0.0,0.0]
bone.scale = [1.0,1.0,1.0]
if len(bone.keys()):
            try:# guards against a rare error: RuntimeError: IDPropertyGroup changed size during iteration
for key in bone.keys():
if key == 'ik_fk_switch':
try:
bone['ik_fk_switch'] = get_prop_setting(bone, 'ik_fk_switch', 'default')
except:
if 'hand' in bone.name:
bone['ik_fk_switch'] = 1.0
else:
bone['ik_fk_switch'] = 0.0
if key == 'stretch_length':
bone[key] = 1.0
# don't set auto-stretch to 1 for now, it's not compatible with Fbx export
if key == 'leg_pin':
bone[key] = 0.0
if key == 'elbow_min':
bone[key] = 0.0
if key == 'bend_all':
bone[key] = 0.0
if key == 'fingers_grasp':
bone[key] = 0.0
if key == 'thigh_twist':
bone[key] = 0.0
if key == 'arm_twist':
bone[key] = 0.0
except:
pass
# restore collections
if bpy.app.version >= (4,0,0):
for col_name in layers_select:
get_armature_collections(bpy.context.active_object).get(col_name).is_visible = layers_select[col_name]
    else:
        # at least one layer must be enabled at all times
        bpy.context.active_object.data.layers[0] = True
# restore the armature layers visibility
for i in range(0, 32):
bpy.context.active_object.data.layers[i] = layers_select[i]
bpy.ops.pose.select_all(action='DESELECT')
# necessary, since the picker executes scripts instead of calling functions
reset_all_controllers()
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,27 @@
import bpy, os
from math import *
from mathutils import *
from bpy.types import Panel, UIList
from .lib.animation import *
from .lib.armature import *
from .lib.bone_data import *
from .lib.bone_edit import *
from .lib.bone_pose import *
from .lib.collections import *
from .lib.constraints import *
from .lib.context import *
from .lib.drivers import *
from .lib.export import *
from .lib.maths_geo import *
from .lib.mesh import *
from .lib.modifiers import *
from .lib.names_func import *
from .lib.objects import *
from .lib.sys_print import *
from .lib.types_convert import *
from .lib.properties import *
from .lib.version_arm_collec import *
from .lib.version import *