2025-07-01

This commit is contained in:
2026-03-17 14:30:01 -06:00
parent f9a22056dd
commit 62b5978595
4579 changed files with 1257472 additions and 0 deletions
@@ -0,0 +1,165 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import logging
logger = logging.getLogger(f"polygoniq.{__name__}")
# NOTE(review): this module runs both on first import and on Blender's
# "Reload Scripts"; the locals() check distinguishes the two cases so
# submodules are either imported fresh or reloaded in place.
if "asset_pack_bpy" not in locals():
    from . import asset_pack
    from . import bl_info_utils

    # polib is used outside of Blender as well, we have to support
    # a usecase where bpy is not available and can't be imported
    try:
        import bpy

        from . import asset_pack_bpy
        from . import color_utils
        from . import geonodes_mod_utils_bpy
        from . import installation_utils_bpy
        from . import linalg_bpy
        from . import log_helpers_bpy
        from . import material_utils_bpy
        from . import module_install_utils_bpy
        from . import node_utils_bpy
        from . import preview_manager_bpy
        from . import remove_duplicates_bpy
        from . import render_bpy
        from . import rigs_shared_bpy
        from . import snap_to_ground_bpy
        from . import spline_utils_bpy
        from . import split_file_reader
        from . import telemetry_module_bpy as telemetry_native_module
        from . import ui_bpy
        from . import utils_bpy

        # singleton instance
        module_provider = module_install_utils_bpy.ModuleProvider()

        def init_polygoniq_global():
            # Registers (or reuses) a telemetry instance shared between all
            # polygoniq addons via an attribute stored on the bpy module.
            global telemetry_module_bpy
            if not hasattr(bpy, "polygoniq_global"):
                bpy.polygoniq_global = {"telemetry": {}, "telemetry_module_bpy": {}}  # deprecated!

            if "telemetry_module_bpy" not in bpy.polygoniq_global:
                bpy.polygoniq_global["telemetry_module_bpy"] = {}

            # another polygoniq addon might have already initialized telemetry!
            # we want to use just one instance unless it's a different API version
            if telemetry_native_module.API_VERSION in bpy.polygoniq_global["telemetry_module_bpy"]:
                telemetry_module_bpy = bpy.polygoniq_global["telemetry_module_bpy"][
                    telemetry_native_module.API_VERSION
                ]
            else:
                # first addon with this API version -> register and bootstrap
                telemetry_module_bpy = telemetry_native_module
                bpy.polygoniq_global["telemetry_module_bpy"][
                    telemetry_native_module.API_VERSION
                ] = telemetry_module_bpy
                telemetry_module_bpy.bootstrap_telemetry()

        init_polygoniq_global()

        def get_telemetry(product: str):
            # access point for the shared telemetry instance registered above
            return telemetry_module_bpy.get_telemetry(product)

    except ImportError as e:
        # only swallow the missing-bpy case; other import errors are real bugs
        if e.name != "bpy":
            raise

        logger.info(
            f"polib has been initialized without bpy, all polib modules that use bpy are imported as dummies only."
        )
        import types

        # dummy placeholder modules so attribute access on the bpy-dependent
        # submodule names does not raise outside of Blender
        asset_pack_bpy = types.ModuleType("asset_pack_bpy")
        color_utils = types.ModuleType("color_utils")
        geonodes_mod_utils_bpy = types.ModuleType("geonodes_mod_utils_bpy")
        installation_utils_bpy = types.ModuleType("installation_utils_bpy")
        linalg_bpy = types.ModuleType("linalg_bpy")
        log_helpers_bpy = types.ModuleType("log_helpers_bpy")
        material_utils_bpy = types.ModuleType("material_utils_bpy")
        module_install_utils_bpy = types.ModuleType("module_install_utils_bpy")
        node_utils_bpy = types.ModuleType("node_utils_bpy")
        preview_manager_bpy = types.ModuleType("preview_manager_bpy")
        remove_duplicates_bpy = types.ModuleType("remove_duplicates_bpy")
        render_bpy = types.ModuleType("render_bpy")
        rigs_shared_bpy = types.ModuleType("rigs_shared_bpy")
        snap_to_ground_bpy = types.ModuleType("snap_to_ground_bpy")
        spline_utils_bpy = types.ModuleType("spline_utils_bpy")
        split_file_reader = types.ModuleType("split_file_reader")
        telemetry_native_module = types.ModuleType("telemetry_native_module")
        ui_bpy = types.ModuleType("ui_bpy")
        utils_bpy = types.ModuleType("utils_bpy")
else:
    # "Reload Scripts" path: reload already-imported submodules in place
    import importlib

    try:
        asset_pack = importlib.reload(asset_pack)
        asset_pack_bpy = importlib.reload(asset_pack_bpy)
        color_utils = importlib.reload(color_utils)
        bl_info_utils = importlib.reload(bl_info_utils)
        geonodes_mod_utils_bpy = importlib.reload(geonodes_mod_utils_bpy)
        installation_utils_bpy = importlib.reload(installation_utils_bpy)
        linalg_bpy = importlib.reload(linalg_bpy)
        log_helpers_bpy = importlib.reload(log_helpers_bpy)
        material_utils_bpy = importlib.reload(material_utils_bpy)
        module_install_utils_bpy = importlib.reload(module_install_utils_bpy)
        node_utils_bpy = importlib.reload(node_utils_bpy)
        remove_duplicates_bpy = importlib.reload(remove_duplicates_bpy)
        render_bpy = importlib.reload(render_bpy)
        rigs_shared_bpy = importlib.reload(rigs_shared_bpy)
        snap_to_ground_bpy = importlib.reload(snap_to_ground_bpy)
        spline_utils_bpy = importlib.reload(spline_utils_bpy)
        split_file_reader = importlib.reload(split_file_reader)
        telemetry_native_module = importlib.reload(telemetry_native_module)
        ui_bpy = importlib.reload(ui_bpy)
        utils_bpy = importlib.reload(utils_bpy)
    except ImportError:
        # in case these are fake modules created with types.ModuleType (when bpy is not available)
        pass
# fake bl_info so that this gets picked up by vscode blender integration
bl_info = {
    "name": "polib",
    "description": "",
}


def register():  # stub just to avoid an AttributeError when using blender_vscode extension
    pass


def unregister():  # stub just to avoid an AttributeError when using blender_vscode extension
    pass


# public API of the polib package
__all__ = [
    "asset_pack_bpy",
    "asset_pack",
    "color_utils",
    "bl_info_utils",
    "geonodes_mod_utils_bpy",
    "get_telemetry",
    "installation_utils_bpy",
    "linalg_bpy",
    "log_helpers_bpy",
    "material_utils_bpy",
    "module_install_utils_bpy",
    "node_utils_bpy",
    "preview_manager_bpy",
    "remove_duplicates_bpy",
    "render_bpy",
    "rigs_shared_bpy",
    "snap_to_ground_bpy",
    "spline_utils_bpy",
    "split_file_reader",
    # telemetry_module_bpy intentionally missing, you should interact with it via get_telemetry
    "ui_bpy",
    "utils_bpy",
]
@@ -0,0 +1,12 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import os
import re
def is_library_blend(path: str) -> bool:
    """Returns True when 'path' points to a polygoniq library .blend file.

    Library blends are named with a lowercase alphanumeric prefix followed by
    "_Library_", e.g. "mq_Library_NodeGroups.blend", "am154_Library_Materials.blend".
    """
    filename = os.path.basename(path)
    return bool(re.match(r"^[a-z0-9]+_Library_.+\.blend$", filename))
@@ -0,0 +1,803 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import bpy.utils.previews
import typing
import collections
import enum
import logging
# hatchery is importable either as a top-level module or from the
# blender_addons package, depending on how polib is deployed
try:
    import hatchery
except ImportError:
    from blender_addons import hatchery

logger = logging.getLogger(f"polygoniq.{__name__}")

# first import vs Blender "Reload Scripts": reload submodules in place when
# they are already present in locals()
if "linalg_bpy" not in locals():
    from . import linalg_bpy
    from . import utils_bpy
    from . import rigs_shared_bpy
else:
    import importlib

    linalg_bpy = importlib.reload(linalg_bpy)
    utils_bpy = importlib.reload(utils_bpy)
    rigs_shared_bpy = importlib.reload(rigs_shared_bpy)
# value types accepted for object-level custom properties
CustomAttributeValueType = typing.Union[
    str,
    int,
    float,
    typing.Tuple[int, ...],
    typing.Tuple[float, ...],
    typing.List[int],
    typing.List[float],
]

# Maps asset pack names to blender Collection color_tags
ASSET_PACK_COLLECTION_COLOR_MAP = {
    "botaniq": 'COLOR_04',  # green
    "traffiq": 'COLOR_02',  # orange
    "aquatiq": 'COLOR_05',  # blue
}

# name token marking botaniq particle system modifiers/collections (see is_pps)
PARTICLE_SYSTEM_TOKEN = "pps"
# preview name used when an asset preview cannot be found
PREVIEW_NOT_FOUND = "No-Asset-Found"

BOTANIQ_SEASONS = {"spring", "summer", "autumn", "winter"}
# order matters, assets often have multiple seasons, color is set according to the first
# matched season
BOTANIQ_SEASONS_WITH_COLOR_CHANNEL = (
    ("summer", 1.0),
    ("spring", 0.75),
    ("winter", 0.5),
    ("autumn", 0.25),
)

# botaniq asset categories considered animated
# NOTE(review): presumably these ship with wind animation - confirm
BOTANIQ_ANIMATED_CATEGORIES = {
    "coniferous",
    "deciduous",
    "shrubs",
    "flowers",
    "grass",
    "ivy",
    "plants",
    "sapling",
    "tropical",
    "vine",
    "weed",
}
class CustomPropertyNames:
    """Names of polygoniq custom properties stored on asset objects."""

    # traffiq specific custom property names
    TQ_DIRT = "tq_dirt"
    TQ_SCRATCHES = "tq_scratches"
    TQ_BUMPS = "tq_bumps"
    TQ_PRIMARY_COLOR = "tq_primary_color"
    TQ_FLAKES_AMOUNT = "tq_flakes_amount"
    TQ_CLEARCOAT = "tq_clearcoat"
    TQ_LIGHTS = "tq_main_lights"
    # botaniq specific custom property names
    BQ_BRIGHTNESS = "bq_brightness"
    BQ_RANDOM_PER_BRANCH = "bq_random_per_branch"
    BQ_RANDOM_PER_LEAF = "bq_random_per_leaf"
    BQ_SEASON_OFFSET = "bq_season_offset"
def get_all_object_ancestors(obj: bpy.types.Object) -> typing.Iterable[bpy.types.Object]:
    """Yields the chain of parents of 'obj', starting with its direct parent."""
    ancestor = obj.parent
    while ancestor is not None:
        yield ancestor
        ancestor = ancestor.parent
def filter_out_descendants_from_objects(
    objects: typing.Iterable[bpy.types.Object],
) -> typing.Set[bpy.types.Object]:
    """Given a list of objects (i.e. selected objects) this function will return only the
    roots. By roots we mean included objects that have no ancestor that is also contained
    in 'objects'.

    Example of use of this is when figuring out which objects to snap to ground. If you have
    a complicated selection of cars, their wheels, etc... you only want to snap the parent car
    body, not all objects.
    """
    # Materialize once and iterate the set afterwards: 'objects' may be a
    # one-shot generator, and iterating the original iterable a second time
    # (as the previous implementation did) would silently yield nothing.
    all_objects = set(objects)
    ret = set()
    for obj in all_objects:
        # a root has no ancestor that is itself part of the input set
        if all_objects.isdisjoint(get_all_object_ancestors(obj)):
            ret.add(obj)
    return ret
def is_polygoniq_object(
    obj: bpy.types.Object,
    addon_name_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    include_editable: bool = True,
    include_linked: bool = True,
) -> bool:
    """Returns True if 'obj' is a polygoniq asset (carries the "polygoniq_addon" property).

    'addon_name_filter' optionally restricts the match to a specific addon name.
    'include_editable' considers real (non-instanced) objects, 'include_linked'
    considers collection-instance empties whose linked objects carry the property.
    """
    if include_editable and obj.instance_type == 'NONE' and obj.get("polygoniq_addon", None):
        # only non-'EMPTY' objects can be considered editable
        return addon_name_filter is None or addon_name_filter(obj.get("polygoniq_addon", None))
    elif include_linked and obj.instance_collection is not None:
        # the object is linked and the custom properties are in the linked collection
        # in most cases there will be exactly one linked object but we want to play it
        # safe and will check all of them. if any linked object is a polygoniq object
        # we assume the whole instance collection is
        # NOTE(review): the recursive call uses default include_* flags for the
        # objects inside the linked collection - confirm this is intentional
        for linked_obj in obj.instance_collection.objects:
            if is_polygoniq_object(linked_obj, addon_name_filter):
                return True
    return False
def find_polygoniq_root_objects(
    objects: typing.Iterable[bpy.types.Object], addon_name: typing.Optional[str] = None
) -> typing.Set[bpy.types.Object]:
    """Finds and returns polygoniq root objects in 'objects'.

    Returned objects are either root or their parent isn't polygoniq object.
    E. g. for 'objects' selected from hierarchy:
    Users_Empty -> Audi_R8 -> [Lights, Wheel1..N -> [Brakes]], this returns Audi_R8.
    """
    traversed_objects = set()
    root_objects = set()
    addon_name_filter = None if addon_name is None else lambda x: x == addon_name
    for obj in objects:
        if obj in traversed_objects:
            continue
        current_obj = obj
        # walk up the parent chain until we hit an already traversed object,
        # the absolute root, or a polygoniq/non-polygoniq boundary
        while True:
            if current_obj in traversed_objects:
                break
            if current_obj.parent is None:
                # absolute root of the hierarchy
                if is_polygoniq_object(current_obj, addon_name_filter):
                    root_objects.add(current_obj)
                break
            if is_polygoniq_object(current_obj, addon_name_filter) and not is_polygoniq_object(
                current_obj.parent, addon_name_filter
            ):
                # polygoniq object whose parent is not polygoniq -> treat as root
                root_objects.add(current_obj)
                break
            traversed_objects.add(current_obj)
            current_obj = current_obj.parent
    return root_objects
def get_polygoniq_objects(
    objects: typing.Iterable[bpy.types.Object],
    addon_name: typing.Optional[str] = None,
    include_editable: bool = True,
    include_linked: bool = True,
) -> typing.Iterable[bpy.types.Object]:
    """Yields only those of 'objects' that contain the polygoniq_addon property"""
    if addon_name is None:
        name_filter = None
    else:
        name_filter = lambda candidate: candidate == addon_name
    yield from (
        obj
        for obj in objects
        if is_polygoniq_object(obj, name_filter, include_editable, include_linked)
    )
class TraffiqAssetPart(enum.Enum):
    """Part of a traffiq vehicle asset; values match the suffix tokens used in
    the asset's object names (e.g. "..._Body", "..._Wheel_FL_1").
    """

    Body = 'Body'
    Lights = 'Lights'
    Wheel = 'Wheel'
    Brake = 'Brake'
def is_traffiq_asset_part(obj: bpy.types.Object, part: TraffiqAssetPart) -> bool:
    """Returns True if 'obj' is the given 'part' of a traffiq asset, judging by
    its naming convention.

    Body/Lights names end with "_Body"/"_Lights". Wheel/Brake names end with
    "_<Part>_<Position>_<Number>" where position is one of FL/FR/BL/BR/F/B.
    """
    addon_name = obj.get("polygoniq_addon", "")
    if addon_name != "traffiq":
        return False
    # compare against the name with Blender's ".001"-style suffix stripped
    obj_name = utils_bpy.remove_object_duplicate_suffix(obj.name)
    if part in {TraffiqAssetPart.Body, TraffiqAssetPart.Lights}:
        splitted_name = obj_name.rsplit("_", 1)
        if len(splitted_name) != 2:
            return False
        _, obj_part_name = splitted_name
        if obj_part_name != part.name:
            return False
        return True
    elif part in {TraffiqAssetPart.Wheel, TraffiqAssetPart.Brake}:
        splitted_name = obj_name.rsplit("_", 3)
        if len(splitted_name) != 4:
            return False
        _, obj_part_name, position, wheel_number = splitted_name
        if obj_part_name != part.name:
            return False
        if position not in {"FL", "FR", "BL", "BR", "F", "B"}:
            return False
        if not wheel_number.isdigit():
            return False
        return True
    return False
# (root_object, body, lights, wheels, brakes) as returned by decompose_traffiq_vehicle
DecomposedCarType = typing.Tuple[
    bpy.types.Object,
    bpy.types.Object,
    bpy.types.Object,
    typing.List[bpy.types.Object],
    typing.List[bpy.types.Object],
]
def get_root_object_of_asset(asset: bpy.types.Object) -> typing.Optional[bpy.types.Object]:
    """Returns the root linked object if given a linked asset (instanced collection empty).

    Returns the object itself if given an editable asset. In case there are multiple roots
    or no roots at all it returns None and logs a warning.
    """
    if asset.instance_type == 'COLLECTION':
        # we have to iterate through objects in the collection and return the one
        # that has no parent.
        root_obj = None
        for obj in asset.instance_collection.objects:
            if obj.parent is None:
                if root_obj is not None:
                    # second parentless object found -> ambiguous, bail out
                    logger.warning(
                        f"Found multiple root objects in the given collection instance "
                        f"empty (name='{asset.name}')"
                    )
                    return None
                root_obj = obj
        if root_obj is None:
            logger.warning(
                f"Failed to find the root object of a given collection instance empty "
                f"(name='{asset.name}')"
            )
        return root_obj
    else:
        # given object is editable
        return asset
def get_entire_object_hierarchy(obj: bpy.types.Object) -> typing.Iterable[bpy.types.Object]:
    """List entire hierarchy of an instanced or editable object

    Returns object hierarchy (the object itself and all descendants) in case the object is
    editable. In case the object is instanced it looks through the instance_collection.objects
    and returns all descendants from there.

    Example: If you pass a traffiq car object it will return body, wheels and lights.
    """
    for child in obj.children:
        yield from get_entire_object_hierarchy(child)

    if obj.instance_type == 'COLLECTION':
        # instanced: the real objects live inside the instance collection;
        # the empty itself is not yielded
        yield from obj.instance_collection.objects
    else:
        yield obj
def decompose_traffiq_vehicle(obj: bpy.types.Object) -> DecomposedCarType:
    """Splits a traffiq vehicle into (root, body, lights, wheels, brakes).

    Works for both editable and collection-instanced vehicles. Returns
    (None, None, None, [], []) when 'obj' is None; parts that are not present
    stay None / empty.
    """
    if obj is None:
        return None, None, None, [], []
    root_object = get_root_object_of_asset(obj)
    body = None
    lights = None
    wheels = []
    brakes = []
    hierarchy_objects = get_entire_object_hierarchy(obj)
    for hierarchy_obj in hierarchy_objects:
        if is_traffiq_asset_part(hierarchy_obj, TraffiqAssetPart.Body):
            # there should be only one body
            # NOTE(review): assert is stripped under -O; relies on well-formed assets
            assert body is None
            body = hierarchy_obj
        elif is_traffiq_asset_part(hierarchy_obj, TraffiqAssetPart.Lights):
            # there should be only one lights
            assert lights is None
            lights = hierarchy_obj
        elif is_traffiq_asset_part(hierarchy_obj, TraffiqAssetPart.Wheel):
            wheels.append(hierarchy_obj)
        elif is_traffiq_asset_part(hierarchy_obj, TraffiqAssetPart.Brake):
            brakes.append(hierarchy_obj)
    return root_object, body, lights, wheels, brakes
def find_traffiq_asset_parts(
    obj: bpy.types.Object, part: TraffiqAssetPart
) -> typing.Iterable[bpy.types.Object]:
    """Yields every object in 'obj's hierarchy that is a traffiq part of type 'part'."""
    yield from (
        candidate
        for candidate in get_entire_object_hierarchy(obj)
        if is_traffiq_asset_part(candidate, part)
    )
def is_pps(name: str) -> bool:
    """Returns True when 'name' denotes a botaniq particle system.

    Such names have PARTICLE_SYSTEM_TOKEN ("pps") as the second
    underscore-separated token, e.g. "Grass_pps_Field".
    """
    tokens = name.split("_")
    return len(tokens) >= 3 and tokens[1] == PARTICLE_SYSTEM_TOKEN
def make_selection_editable(
    context: bpy.types.Context,
    delete_base_empty: bool,
    keep_selection: bool = True,
    keep_active: bool = True,
) -> typing.List[str]:
    """Converts selected linked (collection-instanced) assets into editable objects.

    Realizes collection instances via bpy.ops.object.duplicates_make_real,
    applies botaniq particle systems, re-parents the realized hierarchies,
    makes meshes/materials/armature data unique per object and re-creates rig
    drivers. Returns the names of objects that remain selected afterwards.

    'delete_base_empty' removes the base parent empty created by
    duplicates_make_real where possible; 'keep_selection'/'keep_active'
    restore the selection and active object at the end.
    """

    def apply_botaniq_particle_system_modifiers(obj: bpy.types.Object):
        # Realizes (and removes) all non-pps particle system modifiers in
        # obj's hierarchy so the scattered instances become real objects.
        for child in obj.children:
            apply_botaniq_particle_system_modifiers(child)
        for modifier in obj.modifiers:
            if modifier.type != 'PARTICLE_SYSTEM' or is_pps(modifier.name):
                continue
            clear_selection(context)
            obj.select_set(True)
            bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True)
            obj.select_set(False)
            # Remove collection with unused origin objects previously used for particle system
            if modifier.name in bpy.data.collections:
                collection = bpy.data.collections[modifier.name]
                particle_origins = [obj for obj in collection.objects if obj.users == 1]
                bpy.data.batch_remove(particle_origins)
                if len(collection.objects) == 0:
                    bpy.data.collections.remove(collection)
            obj.modifiers.remove(modifier)

    # (object, its instance collection, parent name or None, viewport color)
    InstancedObjectInfo = typing.Tuple[
        bpy.types.Object, bpy.types.Collection, str, typing.Tuple[float, float, float, float]
    ]

    def find_instanced_collection_objects(
        obj: bpy.types.Object, instanced_collection_objects: typing.Dict[str, InstancedObjectInfo]
    ):
        # recursively collects all collection-instance empties in obj's hierarchy
        for child in obj.children:
            find_instanced_collection_objects(child, instanced_collection_objects)
        if obj.instance_type == 'COLLECTION':
            if obj.name not in instanced_collection_objects:
                instanced_collection_objects[obj.name] = (
                    obj,
                    obj.instance_collection,
                    obj.parent.name if obj.parent else None,
                    obj.color,
                )

    def copy_polygoniq_custom_props_from_children(obj: bpy.types.Object) -> None:
        """Tries to copy Polygoniq custom properties from children to 'obj'.

        Tries to find child with all polygoniq custom properties
        if such a child exists, values of its properties are copied to 'obj'.
        """
        for child in obj.children:
            copyright = child.get("copyright", None)
            polygoniq_addon = child.get("polygoniq_addon", None)
            polygoniq_blend_path = child.get("polygoniq_addon_blend_path", None)
            if all(prop is not None for prop in [copyright, polygoniq_addon, polygoniq_blend_path]):
                obj["copyright"] = copyright
                obj["polygoniq_addon"] = polygoniq_addon
                obj["polygoniq_addon_blend_path"] = polygoniq_blend_path
                # mapr ids are optional, copy them only when present
                mapr_id = child.get("mapr_asset_id", None)
                mapr_data_id = child.get("mapr_asset_data_id", None)
                if mapr_id is not None:
                    obj["mapr_asset_id"] = mapr_id
                if mapr_data_id is not None:
                    obj["mapr_asset_data_id"] = mapr_data_id
                return

    def get_mesh_to_objects_map(
        obj: bpy.types.Object, result: typing.DefaultDict[str, typing.List[bpy.types.ID]]
    ) -> None:
        # groups objects in obj's hierarchy by mesh name (duplicate suffix stripped)
        for child in obj.children:
            get_mesh_to_objects_map(child, result)
        if obj.type == 'MESH' and obj.data is not None:
            original_mesh_name = utils_bpy.remove_object_duplicate_suffix(obj.data.name)
            result[original_mesh_name].append(obj)

    def get_material_to_slots_map(
        obj: bpy.types.Object, result: typing.DefaultDict[str, typing.List[bpy.types.ID]]
    ) -> None:
        # groups material slots in obj's hierarchy by material name (duplicate suffix stripped)
        for child in obj.children:
            get_material_to_slots_map(child, result)
        if obj.type == 'MESH':
            for material_slot in obj.material_slots:
                if material_slot.material is None:
                    continue
                original_material_name = utils_bpy.remove_object_duplicate_suffix(
                    material_slot.material.name
                )
                result[original_material_name].append(material_slot)

    def get_armatures_to_objects_map(
        obj: bpy.types.Object, result: typing.DefaultDict[str, typing.List[bpy.types.ID]]
    ) -> None:
        # groups objects in obj's hierarchy by armature data name (duplicate suffix stripped)
        for child in obj.children:
            get_armatures_to_objects_map(child, result)
        if obj.type == 'ARMATURE' and obj.data is not None:
            original_armature_name = utils_bpy.remove_object_duplicate_suffix(obj.data.name)
            result[original_armature_name].append(obj)

    GetNameToUsersMapCallable = typing.Callable[
        [bpy.types.Object, typing.DefaultDict[str, typing.List[bpy.types.ID]]], None
    ]

    def make_datablocks_unique_per_object(
        obj: bpy.types.Object,
        get_data_to_struct_map: GetNameToUsersMapCallable,
        datablock_name: str,
    ):
        # Copies datablocks (the 'datablock_name' attribute of the mapped
        # structs) that are linked from a library or shared outside obj's
        # hierarchy, so this object owns its data exclusively.
        datablocks_to_owner_structs: typing.DefaultDict[str, typing.List[bpy.types.ID]] = (
            collections.defaultdict(list)
        )
        get_data_to_struct_map(obj, datablocks_to_owner_structs)
        for owner_structs in datablocks_to_owner_structs.values():
            if len(owner_structs) == 0:
                continue
            first_datablock = getattr(owner_structs[0], datablock_name)
            if first_datablock.library is None and first_datablock.users == len(owner_structs):
                continue
            # data block is linked from library or it is used outside of object 'obj' -> create copy
            datablock_duplicate = first_datablock.copy()
            for owner_struct in owner_structs:
                setattr(owner_struct, datablock_name, datablock_duplicate)

    selected_objects_names = [obj.name for obj in context.selected_objects]
    prev_active_object_name = context.active_object.name if context.active_object else None
    instanced_collection_objects: typing.Dict[str, InstancedObjectInfo] = {}
    for obj in context.selected_objects:
        find_instanced_collection_objects(obj, instanced_collection_objects)
    for obj_name in selected_objects_names:
        if obj_name in bpy.data.objects:
            apply_botaniq_particle_system_modifiers(bpy.data.objects[obj_name])
    # origin objects from particle systems were removed from scene
    selected_objects_names = [
        obj_name for obj_name in selected_objects_names if obj_name in bpy.data.objects
    ]
    clear_selection(context)
    for instance_object, _, _, _ in instanced_collection_objects.values():
        # Operator duplicates_make_real converts each instance collection to empty (base parent) and its contents,
        # we change the name of the instance collection object (which becomes the empty) so it doesn't clash
        # with the naming of the actual objects (and doesn't increment duplicate suffix).
        # To keep track of what was converted and to not mess up names of objects
        # we use the '[0-9]+bp_' prefix for the base parent
        i = 0
        name = f"{i}bp_" + instance_object.name
        while name in bpy.data.objects:
            i += 1
            name = f"{i}bp_" + instance_object.name
        instance_object.name = name
        instance_object.select_set(True)
        bpy.ops.object.duplicates_make_real(use_base_parent=True, use_hierarchy=True)
        instance_object.select_set(False)
    for obj, instance_collection, parent_name, prev_color in instanced_collection_objects.values():
        assert obj is not None
        for child in obj.children:
            # restore the viewport color the instance empty had
            child.color = prev_color
            # Create mapr_asset_id and mapr_data_asset_id custom properties on the child if they
            # don't exist already. Otherwise the properties would not get copied because we use
            # only_existing=True with copy_custom_props.
            if child.get("mapr_asset_id", None) is None:
                child["mapr_asset_id"] = ""
            if child.get("mapr_asset_data_id", None) is None:
                child["mapr_asset_data_id"] = ""
            # Copy custom property values from each instanced obj to all children recursively
            # only if the property exists on the target object
            hatchery.utils.copy_custom_props(obj, child, only_existing=True, recursive=True)
        # reorder the hierarchy in following way (car example):
        # base_parent_CAR -> [CAR, base_parent_CAR_Lights, WHEEL1..N -> [CAR_Lights]] to CAR -> [CAR_Lights, WHEEL1..N]
        if parent_name is not None and parent_name in bpy.data.objects:
            parent = bpy.data.objects[parent_name]
            for child in obj.children:
                # after setting parent object here, child.parent_type is always set to 'OBJECT'
                child.parent = parent
                child_source_name = utils_bpy.remove_object_duplicate_suffix(child.name)
                if (
                    child_source_name in instance_collection.objects
                    and instance_collection.objects[child_source_name].parent is not None
                ):
                    # set parent_type from source blend, for example our _Lights need to have parent_type = 'BONE'
                    child.parent_type = instance_collection.objects[child_source_name].parent_type
                    child.matrix_local = instance_collection.objects[child_source_name].matrix_local
            bpy.data.objects.remove(obj)
            continue
        if delete_base_empty:
            if len(obj.children) > 1:
                # instanced collection contained multiple top-level objects, keep base empty as container
                splitted_name = obj.name.split("_", 1)
                if len(splitted_name) == 2 and splitted_name[0].endswith("bp"):
                    obj.name = splitted_name[1]
                # empty parent newly created in duplicates_make_real does not have polygoniq custom properties
                copy_polygoniq_custom_props_from_children(obj)
            else:
                # remove the parent from children which were not reparented above
                # if they were reparented they are no longer in obj.children and we can
                # safely delete the base parent
                for child in obj.children:
                    child_matrix = child.matrix_world.copy()
                    child.parent = None
                    # Original child_matrix took account also for transforms of the parent, apply
                    # the original matrix, otherwise child's pos/rot/scale would change after parent
                    # was removed
                    child.matrix_world = child_matrix
                bpy.data.objects.remove(obj)
    selected_objects = []
    for obj_name in selected_objects_names:
        if obj_name not in bpy.data.objects:
            logger.error(f"Previously selected object: {obj_name} is no longer in bpy.data")
            continue
        obj = bpy.data.objects[obj_name]
        # Create copy of meshes shared with other objects or linked from library
        make_datablocks_unique_per_object(obj, get_mesh_to_objects_map, "data")
        # Create copy of materials shared with other objects or linked from library
        make_datablocks_unique_per_object(obj, get_material_to_slots_map, "material")
        # Create copy of armature data shared with other objects or linked from library
        make_datablocks_unique_per_object(obj, get_armatures_to_objects_map, "data")
        # Blender operator duplicates_make_real doesn't append animation data with drivers.
        # Thus we have to create those drivers dynamically based on bone names.
        if rigs_shared_bpy.is_object_rigged(obj):
            # set object as active to be able to go into POSE mode
            context.view_layer.objects.active = obj
            bpy.ops.object.mode_set(mode='POSE')
            driver_creator = rigs_shared_bpy.RigDrivers(obj)
            driver_creator.create_all_drivers()
            bpy.ops.object.mode_set(mode='OBJECT')
        if keep_selection:
            selected_objects.append(obj_name)
            obj.select_set(True)
    if keep_active and prev_active_object_name is not None:
        if prev_active_object_name in bpy.data.objects:
            context.view_layer.objects.active = bpy.data.objects[prev_active_object_name]
    return selected_objects
# Predicate deciding whether a visited object (first arg) is the searched one;
# the second arg is the hierarchy root the search started from.
HierarchyNameComparator = typing.Callable[
    [bpy.types.Object, typing.Optional[bpy.types.Object]], bool
]


def find_object_in_hierarchy(
    root_obj: bpy.types.Object,
    name_comparator: HierarchyNameComparator,
) -> typing.Optional[bpy.types.Object]:
    """Returns the first object in 'root_obj's hierarchy (depth-first, starting
    with 'root_obj' itself) satisfying 'name_comparator', or None.
    """

    # We don't use get_hierarchy function, because here we can return the desired
    # object before going through the whole hierarchy
    def search_hierarchy(parent_obj: bpy.types.Object) -> typing.Optional[bpy.types.Object]:
        if name_comparator(parent_obj, root_obj):
            return parent_obj
        for obj in parent_obj.children:
            candidate = search_hierarchy(obj)
            if candidate is not None:
                return candidate
        return None

    return search_hierarchy(root_obj)
def get_root_objects_with_matched_child(
    objects: typing.Iterable[bpy.types.Object], comparator: HierarchyNameComparator
) -> typing.Iterable[typing.Tuple[bpy.types.Object, bpy.types.Object]]:
    """Searches hierarchies of objects and returns objects that satisfy the 'comparator', and their root objects"""
    for root in find_polygoniq_root_objects(objects):
        match = find_object_in_hierarchy(root, comparator)
        if match is None:
            continue
        yield (root, match)
def get_hierarchy(root: bpy.types.ID) -> typing.List[bpy.types.ID]:
    """Gathers 'root' and all of its children recursively"""
    assert hasattr(root, "children")
    result = [root]
    for direct_child in root.children:
        result += get_hierarchy(direct_child)
    return result
def collection_get(
    context: bpy.types.Context, name: str, parent: typing.Optional[bpy.types.Collection] = None
) -> bpy.types.Collection:
    """Returns the scene collection called 'name' (ignoring duplicate suffixes),
    creating and linking it under 'parent' (or the scene root) when missing.

    Newly created collections are colored by ASSET_PACK_COLLECTION_COLOR_MAP,
    falling back to the parent's mapped color.
    """
    scene_collections = get_hierarchy(context.scene.collection)
    for coll in scene_collections:
        if utils_bpy.remove_object_duplicate_suffix(coll.name) == name:
            return coll
    # not found -> create and link
    coll = bpy.data.collections.new(name)
    if parent is None:
        context.scene.collection.children.link(coll)
    else:
        parent.children.link(coll)
    if hasattr(coll, "color_tag"):  # coloring is only supported if this attribute is present
        coll_color = ASSET_PACK_COLLECTION_COLOR_MAP.get(name, None)
        if coll_color is not None:
            coll.color_tag = coll_color
        elif (
            parent is not None
        ):  # color direct descendants by their parent color - e.g. botaniq/weed
            parent_name = utils_bpy.remove_object_duplicate_suffix(parent.name)
            parent_color = ASSET_PACK_COLLECTION_COLOR_MAP.get(parent_name, None)
            if parent_color is not None:
                coll.color_tag = parent_color
    return coll
def collection_add_object(collection: bpy.types.Collection, obj: bpy.types.Object) -> None:
    """Unlinks 'obj' from all collections and links it into 'collection'"""
    previous_collections = list(obj.users_collection)
    for previous in previous_collections:
        previous.objects.unlink(obj)
    collection.objects.link(obj)
def copy_object_hierarchy(root_obj: bpy.types.Object) -> bpy.types.Object:
    """Copies 'root_obj' and its hierarchy while preserving parenting, returns the root copy"""

    def duplicate_subtree(original: bpy.types.Object, parent_copy: bpy.types.Object) -> None:
        # copy the object itself, attach it to the copied parent, then recurse
        duplicate = original.copy()
        duplicate.parent = parent_copy
        for descendant in original.children:
            duplicate_subtree(descendant, duplicate)

    root_copy = root_obj.copy()
    for child in root_obj.children:
        duplicate_subtree(child, root_copy)
    return root_copy
def collection_link_hierarchy(collection: bpy.types.Collection, root_obj: bpy.types.Object) -> None:
    """Links 'root_obj' and its hierarchy to 'collection' and unlinks it from all other collections"""
    for member in get_hierarchy(root_obj):
        for owning_collection in member.users_collection:
            owning_collection.objects.unlink(member)
        collection.objects.link(member)
def collection_unlink_hierarchy(
    collection: bpy.types.Collection, root_obj: bpy.types.Object
) -> None:
    """Unlinks 'root_obj' and its whole hierarchy from 'collection'"""
    for member in get_hierarchy(root_obj):
        collection.objects.unlink(member)
def find_layer_collection(
    view_layer_root: bpy.types.LayerCollection, target: bpy.types.Collection
) -> typing.Optional[bpy.types.LayerCollection]:
    """Finds the LayerCollection in 'view_layer_root's hierarchy that wraps the
    'target' collection, or None when 'target' is not present.
    """
    if view_layer_root.collection == target:
        return view_layer_root
    # depth-first over the children, first hit wins
    child_results = (
        find_layer_collection(layer_child, target) for layer_child in view_layer_root.children
    )
    return next((found for found in child_results if found is not None), None)
def clear_selection(context: bpy.types.Context) -> None:
    """Deselects every currently selected object in 'context'."""
    for selected in list(context.selected_objects):
        selected.select_set(False)
def append_modifiers_from_library(
    modifier_container_name: str, library_path: str, target_objs: typing.Iterable[bpy.types.Object]
) -> None:
    """Add all modifiers from object with given name in given .blend library to 'target_objects'.

    It doesn't copy complex and readonly properties, e.g. properties that are driven by FCurve.
    """
    # append the container object from the library only once
    if modifier_container_name not in bpy.data.objects:
        with bpy.data.libraries.load(library_path) as (data_from, data_to):
            assert modifier_container_name in data_from.objects
            data_to.objects = [modifier_container_name]
    assert modifier_container_name in bpy.data.objects
    modifier_container = bpy.data.objects[modifier_container_name]
    for obj in target_objs:
        for src_modifier in modifier_container.modifiers:
            # NOTE(review): assumes targets don't already carry these modifiers
            assert src_modifier.name not in obj.modifiers
            dest_modifier = obj.modifiers.new(src_modifier.name, src_modifier.type)
            # collect names of writable properties
            properties = [p.identifier for p in src_modifier.bl_rna.properties if not p.is_readonly]
            # copy those properties
            for prop in properties:
                setattr(dest_modifier, prop, getattr(src_modifier, prop))
def update_custom_prop(
    context: bpy.types.Context,
    objs: typing.Iterable[bpy.types.Object],
    prop_name: str,
    value: CustomAttributeValueType,
    # NOTE: mutable default; safe only because it is never mutated in this function
    update_tag_refresh: typing.Set[str] = {'OBJECT'},
) -> None:
    """Update custom properties of given objects and force 3D view to redraw

    When we set values of custom properties from code, affected objects don't get updated in 3D View
    automatically. We need to call obj.update_tag() and then refresh 3D view areas manually.

    'update_tag_refresh' set of enums {'OBJECT', 'DATA', 'TIME'}, updating DATA is really slow
    as it forces Blender to recompute the whole mesh, we should use 'OBJECT' wherever it's enough.
    """
    for obj in objs:
        # only update objects that already carry the property
        if prop_name in obj:
            obj[prop_name] = value
            obj.update_tag(refresh=update_tag_refresh)
    for area in context.screen.areas:
        if area.type == 'VIEW_3D':
            area.tag_redraw()
@@ -0,0 +1,71 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
import ast
import re
import typing
import zipfile
import pathlib
# matches a top-level "bl_info = {...}" assignment; group 1 is the dict literal
BL_INFO_REGEX = r"^bl_info[\s]*=[\s]*(\{[^\}]*\})"


def find_bl_info_in_string(input: str) -> typing.Optional[typing.Dict[str, typing.Any]]:
    """Returns the bl_info dictionary found in 'input' source text, or None."""
    found = re.search(BL_INFO_REGEX, input, flags=re.MULTILINE)
    if found is None:
        return None
    # Use ast.literal_eval as it restricts evaluation only to literal structures
    # https://docs.python.org/3/library/ast.html#ast.literal_eval
    return ast.literal_eval(found.group(1))
def get_bl_info_from_init_py(init_py_path: str) -> typing.Optional[typing.Dict[str, typing.Any]]:
    """Retrieves the bl_info dictionary of given __init__.py file without running it. It only
    evaluates the bl_info dictionary itself. Assumes that bl_info is self-contained. This is
    the same assumption that Blender itself requires.

    Returns None when no bl_info assignment is found.
    """
    # Python sources default to UTF-8 (PEP 3120); without an explicit encoding
    # open() uses the locale's preferred encoding and can fail to decode
    # non-ASCII addon sources on e.g. Windows cp1252.
    with open(init_py_path, encoding="utf-8") as f:
        src = f.read()
    return find_bl_info_in_string(src)
def infer_version_from_bl_info(init_py_path: str) -> typing.Optional[typing.Tuple[int, int, int]]:
    """Figures out the version of given __init__.py file without running the whole thing.

    Returns None in case of failure (no bl_info found, or no "version" key in it).
    """
    bl_info = get_bl_info_from_init_py(init_py_path)
    return bl_info.get("version") if bl_info is not None else None
def infer_version_from_bl_info_from_zip_file(
    zip_file_path: str,
) -> typing.Optional[typing.Tuple[int, int, int]]:
    """Infers the addon version from the root __init__.py inside an addon ZIP file.

    Returns None when the path is not a valid ZIP or bl_info has no version.
    Raises RuntimeError when the archive contains no root __init__.py.
    """
    if not zipfile.is_zipfile(zip_file_path):
        return None
    # Use a context manager so the archive's file handle is closed deterministically
    # (the original leaked the open ZipFile).
    with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
        # Find the root __init__.py file
        root_init_py_path = None
        for file_ in zip_file.namelist():
            path = pathlib.Path(file_)
            # one part for the root folder, second for the __init__.py itself
            if len(path.parts) == 2 and path.name == "__init__.py":
                root_init_py_path = file_
                break
        if root_init_py_path is None:
            # explicit raise instead of 'assert', which is stripped under 'python -O'
            raise RuntimeError(f"No root __init__.py found in {zip_file_path}")
        with zip_file.open(root_init_py_path) as zf:
            src = zf.read().decode()

    bl_info = find_bl_info_in_string(src)
    if bl_info is None:
        return None
    return bl_info.get("version")
@@ -0,0 +1,148 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
# adapted code from http://www.easyrgb.com/en/math.php
import math
import numpy
def RGB_to_XYZ(rgb: tuple[float, float, float]) -> tuple[float, float, float]:
    """Converts RGB coordinates to XYZ coordinates

    Expects RGB values between 0 and 1.
    Returns XYZ values between (0 to 0.9505, 0 to 1.0000, 0 to 1.0888).
    For use with Observer = 2°, Illuminant = D65.
    """

    def linearize(channel: float) -> float:
        # sRGB inverse companding: piecewise gamma curve
        if channel > 0.04045:
            return ((channel + 0.055) / 1.055) ** 2.4
        return channel / 12.92

    r, g, b = (linearize(channel) for channel in rgb)
    # Observer = 2°, Illuminant = D65
    return (
        r * 0.4124 + g * 0.3576 + b * 0.1805,
        r * 0.2126 + g * 0.7152 + b * 0.0722,
        r * 0.0193 + g * 0.1192 + b * 0.9505,
    )
def XYZ_to_LAB(xyz: tuple[float, float, float]) -> tuple[float, float, float]:
    """Converts XYZ coordinates to CIELAB coordinates

    Expects XYZ values between (0 to 0.9505, 0 to 1.0000, 0 to 1.0888).
    Returns LAB values as (0 to 100, -128 to 128, -128 to 128).
    For use with Observer = 2°, Illuminant = D65.
    """

    def transfer(value: float) -> float:
        # CIELAB piecewise cube-root transfer function
        if value > 0.008856:
            return value ** (1 / 3)
        return 7.787 * value + 16 / 116

    x, y, z = xyz
    # Normalize by the D65 reference white (Observer = 2°) and apply the transfer
    fx = transfer(x / 0.95047)
    fy = transfer(y)
    fz = transfer(z / 1.08883)
    return (116 * fy - 16, 500 * (fx - fy), 200 * (fy - fz))
def perceptual_color_distance(
    rgb_1: tuple[float, float, float],
    rgb_2: tuple[float, float, float],
    weight_luminosity: float = 1.0,
    weight_chroma: float = 1.0,
    weight_hue: float = 1.0,
) -> float:
    """Implements CIEDE2000 formula for perceptual color distance.

    Expects RGB values between 0 and 1.
    Returns a value between 0 and 1, where 0 represents identical color and 1 an opposite color.
    The weights scale the luminosity / chroma / hue contributions of the formula.
    """
    lab_1 = XYZ_to_LAB(RGB_to_XYZ(rgb_1))
    lab_2 = XYZ_to_LAB(RGB_to_XYZ(rgb_2))
    # Implementation borrowed from: https://github.com/gtaylor/python-colormath
    lab_color_vector = numpy.array([lab_1[0], lab_1[1], lab_1[2]])
    L, a, b = lab_color_vector
    lab_color_matrix = numpy.array([(lab_2[0], lab_2[1], lab_2[2])])
    avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0
    C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2)))
    C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1))
    avg_C1_C2 = (C1 + C2) / 2.0
    G = 0.5 * (
        1
        - numpy.sqrt(
            numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0))
        )
    )
    a1p = (1.0 + G) * a
    a2p = (1.0 + G) * lab_color_matrix[:, 1]
    C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2))
    C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2))
    avg_C1p_C2p = (C1p + C2p) / 2.0
    h1p = numpy.degrees(numpy.arctan2(b, a1p))
    h1p += (h1p < 0) * 360
    h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p))
    h2p += (h2p < 0) * 360
    avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0
    T = (
        1
        - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30))
        + 0.24 * numpy.cos(numpy.radians(2 * avg_Hp))
        + 0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6))
        - 0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63))
    )
    diff_h2p_h1p = h2p - h1p
    delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360
    delta_hp -= (h2p > h1p) * 720
    delta_Lp = lab_color_matrix[:, 0] - L
    delta_Cp = C2p - C1p
    delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0)
    S_L = 1 + (
        (0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0))
    )
    S_C = 1 + 0.045 * avg_C1p_C2p
    S_H = 1 + 0.015 * avg_C1p_C2p * T
    delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0)))
    R_C = numpy.sqrt(
        (numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0))
    )
    R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro))
    distance = numpy.sqrt(
        numpy.power(delta_Lp / (S_L * weight_luminosity), 2)
        + numpy.power(delta_Cp / (S_C * weight_chroma), 2)
        + numpy.power(delta_Hp / (S_H * weight_hue), 2)
        + R_T * (delta_Cp / (S_C * weight_chroma)) * (delta_Hp / (S_H * weight_hue))
    )[0]
    # distance can be theoretically uncapped, but values above 100 are considered extremely different
    # BUG FIX: the original had 'distance == cap' (a no-op comparison), so the cap was
    # never applied and results could exceed 1.0.
    cap = 100
    if distance > cap:
        distance = cap
    return distance / cap
@@ -0,0 +1,178 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
# Functionalities to work with geometry nodes modifiers
import bpy
import typing
from . import node_utils_bpy
# Mapping of input.identifier to (input.name, input.value)
NodeGroupInputs = typing.Dict[
str, typing.Tuple[node_utils_bpy.NodeSocketInterfaceCompat, typing.Any]
]
class NodesModifierInput:
    """Mapping of one node group and its inputs"""

    def __init__(self, modifier: bpy.types.NodesModifier) -> None:
        assert modifier.node_group is not None
        self.node_group = modifier.node_group
        self.original_inputs = node_utils_bpy.get_node_tree_inputs_map(modifier.node_group)
        # keep only the inputs that are actually exposed on the modifier itself
        self.inputs: NodeGroupInputs = {
            input_.identifier: (input_, modifier[input_.identifier])
            for input_ in self.original_inputs.values()
            if input_.identifier in modifier
        }
def get_modifiers_inputs_map(
    modifiers: typing.Iterable[bpy.types.Modifier],
) -> typing.Dict[str, NodesModifierInput]:
    """Returns mapping of geometry nodes modifiers to their respective inputs"""
    # only 'NODES' modifiers with an assigned node group are considered
    return {
        mod.name: NodesModifierInput(typing.cast(bpy.types.NodesModifier, mod))
        for mod in modifiers
        if mod.type == 'NODES' and mod.node_group is not None
    }
class NodesModifierInputsNameView:
    """View of Geometry Nodes modifier that allows changing inputs by input name

    Unknown input names raise KeyError from the name lookup, so typos fail loudly
    instead of silently writing under a None key (the original used .get(), which
    returned None and produced confusing downstream errors).
    """

    def __init__(self, mod: bpy.types.Modifier):
        assert mod.type == 'NODES'
        self.mod = mod
        self.name_to_identifier_map: typing.Dict[str, str] = {}
        self.node_tree_inputs = node_utils_bpy.get_node_tree_inputs_map(mod.node_group)
        for input_ in self.node_tree_inputs.values():
            # Is the input exposed in the modifier -> modifiers["RG_"]
            if input_.identifier in mod:
                self.name_to_identifier_map[input_.name] = input_.identifier

    def set_input_value(self, input_name: str, value: typing.Any) -> None:
        # Direct indexing raises KeyError for unknown input names. We expect setting
        # of the inputs to throw errors if the input doesn't exist to not fail silently.
        identifier = self.name_to_identifier_map[input_name]
        input_ = self.node_tree_inputs.get(identifier, None)
        # Input cannot be None, this would fail on the identifier already.
        assert input_ is not None
        socket_type = node_utils_bpy.get_socket_type(input_)
        # bool needs special handling, as through versions it became statically typed
        # boolean from an integer value of 0 or 1
        if socket_type == "NodeSocketBool":
            self.mod[identifier] = bool(value)
        else:
            self.mod[identifier] = value

    def set_obj_input_value(self, input_name: str, obj_name: str) -> None:
        identifier = self.name_to_identifier_map[input_name]
        # Object reference has to be set directly from bpy.data.objects
        self.mod[identifier] = bpy.data.objects[obj_name]

    def set_material_input_value(self, input_name: str, mat_name: str) -> None:
        identifier = self.name_to_identifier_map[input_name]
        # Materials reference has to be set directly from bpy.data.materials
        self.mod[identifier] = bpy.data.materials[mat_name]

    def set_collection_input_value(self, input_name: str, collection_name: str) -> None:
        identifier = self.name_to_identifier_map[input_name]
        # Collections reference has to be set directly from bpy.data.collections
        self.mod[identifier] = bpy.data.collections[collection_name]

    def set_array_input_value(self, input_name: str, value: typing.List[typing.Any]) -> None:
        identifier = self.name_to_identifier_map[input_name]
        # assign element-wise: bpy array properties don't accept whole-list assignment
        for i, v in enumerate(value):
            self.mod[identifier][i] = v

    def get_input_value(self, input_name: str) -> typing.Any:
        identifier = self.name_to_identifier_map[input_name]
        return self.mod[identifier]

    def __contains__(self, input_name: str) -> bool:
        return input_name in self.name_to_identifier_map
class GeoNodesModifierInputsPanelMixin:
    """Mixin for displaying Geometry Nodes modifier inputs.

    Adds functionality to draw inputs of Geometry Nodes modifiers of active objects
    using a provided template.
    """

    # Sentinel for 'max_occurrences' meaning: draw every matching modifier
    DRAW_ALL = -1

    def draw_object_modifiers_node_group_inputs_template(
        self,
        obj: bpy.types.Object,
        layout: bpy.types.UILayout,
        inputs: node_utils_bpy.NodeSocketsDrawTemplate,
        draw_modifier_header: bool = False,
        max_occurrences: int = 1,
    ) -> None:
        # Draws inputs for every modifier on 'obj' whose node group name matches
        # 'inputs.name', up to 'max_occurrences' modifiers (or all with DRAW_ALL).
        mods = get_geometry_nodes_modifiers_by_node_group(obj, inputs.name)
        if len(mods) == 0:
            return
        # remember the outer layout; the per-modifier box replaces 'layout' in the loop
        root_layout = layout
        for i, mod in enumerate(mods):
            if (
                max_occurrences != GeoNodesModifierInputsPanelMixin.DRAW_ALL
                and i >= max_occurrences
            ):
                break
            if draw_modifier_header:
                layout = self.draw_geonodes_modifier_ui_box(root_layout, mod)
                # collapsed header -> skip drawing this modifier's inputs
                if not mod.show_expanded:
                    continue
            col = layout.column(align=True)
            inputs.draw_from_geonodes_modifier(col, mods[i])

    def draw_active_object_modifiers_node_group_inputs_template(
        self,
        layout: bpy.types.UILayout,
        context: bpy.types.Context,
        inputs: node_utils_bpy.NodeSocketsDrawTemplate,
        draw_modifier_header: bool = False,
        max_occurrences: int = 1,
    ) -> None:
        # Convenience wrapper: same as above but for the active object (no-op if none)
        obj = context.active_object
        if obj is None:
            return
        self.draw_object_modifiers_node_group_inputs_template(
            obj, layout, inputs, draw_modifier_header, max_occurrences
        )

    def draw_show_viewport_and_render(
        self, layout: bpy.types.UILayout, mod: bpy.types.NodesModifier
    ) -> None:
        # icon-only toggles for viewport and render visibility of the modifier
        layout.prop(mod, "show_viewport", text="")
        layout.prop(mod, "show_render", text="")

    def draw_geonodes_modifier_ui_box(
        self, layout: bpy.types.UILayout, mod: bpy.types.NodesModifier
    ) -> bpy.types.UILayout:
        # Draws a header box (expand toggle, name, visibility toggles, copy/remove
        # operators) for 'mod' and returns the box so callers can draw into it.
        box = layout.box()
        row = box.row(align=True)
        row.prop(mod, "show_expanded", text="", emboss=False)
        row.prop(mod, "name", text="")
        row.prop(mod, "show_in_editmode", text="")
        self.draw_show_viewport_and_render(row, mod)
        row.operator("object.modifier_copy", text="", icon='DUPLICATE').modifier = mod.name
        row.operator("object.modifier_remove", text="", icon='X', emboss=False).modifier = mod.name
        return box
def get_geometry_nodes_modifiers_by_node_group(
obj: bpy.types.Object, node_group_name: str
) -> typing.List[bpy.types.NodesModifier]:
output: typing.List[bpy.types.NodesModifier] = []
for mod in obj.modifiers:
if mod.type == 'NODES' and mod.node_group is not None:
if mod.node_group.name == node_group_name:
output.append(mod)
return output
@@ -0,0 +1,76 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
import os
import zipfile
import bpy
import typing
import addon_utils
def get_addon_version_in_blender(full_name: str) -> typing.Optional[typing.Iterable[int]]:
    """Retrieves the version of given addon by full name

    The given name of the addon is the actual full / implementation name.
    For example "botaniq_lite" or "traffiq_starter".
    Returns None when no enabled addon with that name and a bl_info version exists.
    """
    for module in addon_utils.modules():
        module_name = getattr(module, "__name__")
        if module_name != full_name:
            continue
        # skip modules that are present but not enabled
        is_enabled, _ = addon_utils.check(module_name)
        if not is_enabled:
            continue
        info = getattr(module, "bl_info", None)
        if info is None:
            continue
        return info.get("version")
    return None
def install_addon_zip(zip_file_path: str, module_name: str) -> None:
    """From zip file in 'zip_file_path' installs module named 'module_name'

    Removes any previously installed files of the addon, extracts the archive into
    the user addons directory and schedules a refresh + enable on Blender's event loop.
    Raises RuntimeError when 'zip_file_path' is not a valid ZIP.
    """
    import shutil  # local import to keep this module's top-level imports unchanged

    if not zipfile.is_zipfile(zip_file_path):
        raise RuntimeError(f"{zip_file_path} is not a valid ZIP file!")

    path_addons = bpy.utils.user_resource('SCRIPTS', path="addons", create=True)
    os.makedirs(path_addons, exist_ok=True)

    def module_filesystem_remove(path_base: str, module_name: str) -> None:
        # ported from bl_operators/userpref.py
        module_name = os.path.splitext(module_name)[0]
        for f in os.listdir(path_base):
            f_base = os.path.splitext(f)[0]
            if f_base == module_name:
                f_full = os.path.join(path_base, f)
                if os.path.isdir(f_full):
                    # BUG FIX: os.rmdir only removes EMPTY directories; installed addon
                    # directories contain files, so removal always failed with OSError.
                    shutil.rmtree(f_full)
                else:
                    os.remove(f_full)

    # context manager so the archive handle is closed even if extraction fails
    with zipfile.ZipFile(zip_file_path, 'r') as file_to_extract:
        # remove existing addon files
        for f in file_to_extract.namelist():
            module_filesystem_remove(path_addons, f)
        file_to_extract.extractall(path_addons)

    def refresh_and_enable(module_name: str):
        addon_utils.modules_refresh()
        bpy.ops.preferences.addon_enable(module=module_name)

    # we do the actual update in the blender event loop to avoid crashes in case
    # grumpy_cat is updating itself
    bpy.app.timers.register(
        lambda: refresh_and_enable(module_name), first_interval=0, persistent=True
    )
def uninstall_addon_module_name(module_name: str) -> None:
    """Disables and removes the addon given by its module name from this Blender installation."""
    # disable first so Blender unregisters the addon before its files are deleted
    bpy.ops.preferences.addon_disable(module=module_name)
    bpy.ops.preferences.addon_remove(module=module_name)
@@ -0,0 +1,103 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import math
import mathutils
import numpy
import unittest
import typing
def plane_from_points(points):
    """Construct the exact plane through three points.

    Returns (unit_normal, offset, centroid) where offset = dot(unit_normal, p3),
    i.e. the plane satisfies dot(normal, x) == offset.
    """
    assert len(points) == 3
    first, second, third = points
    # normal of the plane spanned by (p3 - p1) and (p2 - p1)
    normal = numpy.cross(third - first, second - first)
    normal = normal / numpy.linalg.norm(normal)
    offset = numpy.dot(normal, third)
    centroid = numpy.sum(points, 0) / len(points)
    return (normal, offset, centroid)
def fit_plane_to_points(points):
    """Fit a plane to 'points', returning (normal, offset, centroid).

    Currently only the FIRST THREE points are used (the exact plane through them);
    the least-squares SVD code below the return is intentionally disabled — see TODO.
    """
    assert len(points) >= 3
    return plane_from_points(points[:3])

    # TODO: This is borked :-(
    # NOTE(review): dead code below. 'svd[0][2]' takes the third ROW of U; the
    # least-squares plane normal should presumably be the third COLUMN (U[:, 2]) —
    # confirm, and handle the singular-vector sign ambiguity, before re-enabling.
    centroid = numpy.sum(points, 0) / len(points)
    centered_points = points - centroid
    svd = numpy.linalg.svd(numpy.transpose(centered_points))
    plane_normal = svd[0][2]
    # now that we have the normal let's fit the centroid to the plane to find the offset
    offset = numpy.dot(plane_normal, centroid)
    return (plane_normal, offset, centroid)
def is_obj_flat(obj: bpy.types.Object) -> bool:
return any(math.isclose(d, 0.0) for d in obj.dimensions)
def mean_position(vs: typing.Iterable[mathutils.Vector]) -> mathutils.Vector:
    """Returns the arithmetic mean of the given vectors.

    Note: an empty iterable results in a division by zero, same as the original behavior.
    """
    total = mathutils.Vector()
    count = 0
    for vector in vs:
        total += vector
        count += 1
    return total / count
class PlaneFittingTest(unittest.TestCase):
    """Unit tests for fit_plane_to_points (exact 3-point and over-determined cases)."""

    def test_3pts(self):
        # unit plane - (0, 0, 1), 0
        normal, offset, _ = fit_plane_to_points([(1, -1, 0), (-1, 0, 0), (0, 1, 0)])
        self.assertAlmostEqual(normal[0], 0)
        self.assertAlmostEqual(normal[1], 0)
        self.assertAlmostEqual(normal[2], 1)
        self.assertAlmostEqual(offset, 0)
        # same plane from differently scaled points
        normal, offset, _ = fit_plane_to_points([(2, -2, 0), (-1, 0, 0), (0, 1, 0)])
        self.assertAlmostEqual(normal[0], 0)
        self.assertAlmostEqual(normal[1], 0)
        self.assertAlmostEqual(normal[2], 1)
        self.assertAlmostEqual(offset, 0)
        # offset unit plane - (0, 0, 1), 1
        normal, offset, _ = fit_plane_to_points([(2, -2, 1), (-1, 0, 1), (0, 1, 1)])
        self.assertAlmostEqual(normal[0], 0)
        self.assertAlmostEqual(normal[1], 0)
        self.assertAlmostEqual(normal[2], 1)
        self.assertAlmostEqual(offset, 1)

    def test_4pts(self):
        # unit plane - (0, 0, 1), 0
        normal, offset, _ = fit_plane_to_points([(1, -1, 0), (-1, 0, 0), (0, 1, 0), (1, 1, 0)])
        self.assertAlmostEqual(normal[0], 0)
        self.assertAlmostEqual(normal[1], 0)
        self.assertAlmostEqual(normal[2], 1)
        self.assertAlmostEqual(offset, 0)
        # can't fit precisely! unit plane - (0, 0, 1), 0
        # huge XY extents with tiny Z noise: the fitted normal stays ~(0, 0, 1)
        large = 100000000000
        normal, offset, _ = fit_plane_to_points(
            [
                (-large, -large, 0.1),
                (-large, large, -0.1),
                (large, -large, 0.1),
                (large, large, -0.1),
            ]
        )
        self.assertAlmostEqual(normal[0], 0)
        self.assertAlmostEqual(normal[1], 0)
        self.assertAlmostEqual(normal[2], 1)
        self.assertAlmostEqual(offset, 0)
if __name__ == "__main__":
    # Allow running the plane-fitting unit tests directly from the command line
    unittest.main()
@@ -0,0 +1,155 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import datetime
import tempfile
import typing
import time
import logging
import os
import shutil
from . import telemetry_module_bpy
def logged_operator(cls: typing.Type[bpy.types.Operator]):
    """Class decorator wrapping an Operator's draw/modal/execute/invoke in exception logging.

    Uncaught exceptions are logged and swallowed so a faulty operator can't break
    Blender's UI; execute/invoke additionally log start, finish and timing.
    Returns the decorated class.
    """
    assert issubclass(
        cls, bpy.types.Operator
    ), "logged_operator only accepts classes inheriting bpy.types.Operator"
    logger = logging.getLogger(f"polygoniq.{cls.__module__}")

    if hasattr(cls, "draw"):
        cls._original_draw = cls.draw

        def new_draw(self, context: bpy.types.Context):
            try:
                return cls._original_draw(self, context)
            # 'except Exception' instead of a bare 'except' so KeyboardInterrupt and
            # SystemExit still propagate instead of being silently swallowed
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.draw")

        cls.draw = new_draw

    if hasattr(cls, "modal"):
        cls._original_modal = cls.modal

        def new_modal(self, context: bpy.types.Context, event: bpy.types.Event):
            try:
                return cls._original_modal(self, context, event)
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.modal")
                # If exception is thrown out of the modal we want to exit it. If there are possible
                # exceptions that can occur, they should be handled in the modal itself.
                return {'FINISHED'}

        cls.modal = new_modal

    if hasattr(cls, "execute"):
        cls._original_execute = cls.execute

        def new_execute(self, context: bpy.types.Context):
            logger.info(
                f"{cls.__name__} operator execute started with arguments {self.as_keywords()}"
            )
            start_time = time.time()
            try:
                ret = cls._original_execute(self, context)
                logger.info(
                    f"{cls.__name__} operator execute finished in {time.time() - start_time:.3f} "
                    f"seconds with result {ret}"
                )
                return ret
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.execute")
                # We return finished even in case an error happened, that way the user will be able
                # to undo any changes the operator has made up until the error happened
                return {'FINISHED'}

        cls.execute = new_execute

    if hasattr(cls, "invoke"):
        cls._original_invoke = cls.invoke

        def new_invoke(self, context: bpy.types.Context, event: bpy.types.Event):
            logger.debug(f"{cls.__name__} operator invoke started")
            try:
                ret = cls._original_invoke(self, context, event)
                logger.debug(f"{cls.__name__} operator invoke finished")
                return ret
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.invoke")
                # We return finished even in case an error happened, that way the user will be able
                # to undo any changes the operator has made up until the error happened
                return {'FINISHED'}

        cls.invoke = new_invoke

    return cls
def logged_panel(cls: typing.Type[bpy.types.Panel]):
    """Class decorator wrapping a Panel's draw_header/draw in exception logging.

    Uncaught exceptions are logged and swallowed so a faulty panel can't break
    Blender's UI. Returns the decorated class.
    """
    assert issubclass(
        cls, bpy.types.Panel
    ), "logged_panel only accepts classes inheriting bpy.types.Panel"
    logger = logging.getLogger(f"polygoniq.{cls.__module__}")

    if hasattr(cls, "draw_header"):
        cls._original_draw_header = cls.draw_header

        def new_draw_header(self, context: bpy.types.Context):
            try:
                return cls._original_draw_header(self, context)
            # 'except Exception' instead of a bare 'except' so KeyboardInterrupt and
            # SystemExit still propagate instead of being silently swallowed
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.draw_header")

        cls.draw_header = new_draw_header

    if hasattr(cls, "draw"):
        cls._original_draw = cls.draw

        def new_draw(self, context: bpy.types.Context):
            try:
                return cls._original_draw(self, context)
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.draw")

        cls.draw = new_draw

    return cls
def logged_preferences(cls: typing.Type[bpy.types.AddonPreferences]):
    """Class decorator wrapping AddonPreferences.draw in exception logging.

    Uncaught exceptions are logged and swallowed so faulty preferences UI can't
    break Blender. Returns the decorated class.
    """
    assert issubclass(
        cls, bpy.types.AddonPreferences
    ), "logged_preferences only accepts classes inheriting bpy.types.AddonPreferences"
    logger = logging.getLogger(f"polygoniq.{cls.__module__}")

    if hasattr(cls, "draw"):
        cls._original_draw = cls.draw

        def new_draw(self, context: bpy.types.Context):
            try:
                return cls._original_draw(self, context)
            # 'except Exception' instead of a bare 'except' so KeyboardInterrupt and
            # SystemExit still propagate instead of being silently swallowed
            except Exception:
                logger.exception(f"Uncaught exception raised in {cls}.draw")

        cls.draw = new_draw

    return cls
def pack_logs(telemetry: telemetry_module_bpy.TelemetryWrapper) -> str:
    """Pack all logs into zip, create new timestamped directory in tempdir and save the zip there.

    Returns the path of the created timestamped directory that contains the ZIP.
    """
    temp_folder = tempfile.gettempdir()
    log_path = os.path.join(temp_folder, "polygoniq_logs")
    os.makedirs(log_path, exist_ok=True)
    # Explicit UTF-8: the telemetry dump may contain non-ASCII characters and the
    # platform default encoding (e.g. cp1252 on Windows) would raise on them.
    with open(os.path.join(log_path, "latest_telemetry.txt"), "w", encoding="utf-8") as f:
        f.write(telemetry.dump())

    now = datetime.datetime.now()
    output_folder_name = f"polygoniq_logs--{now.year:04d}-{now.month:02d}-{now.day:02d}T{now.hour:02d}-{now.minute:02d}-{now.second:02d}"
    output_folder_path = os.path.join(temp_folder, output_folder_name)
    os.mkdir(output_folder_path)
    shutil.make_archive(os.path.join(output_folder_path, "polygoniq_logs"), "zip", log_path)
    return output_folder_path
@@ -0,0 +1,132 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import numpy
import typing
from . import node_utils_bpy
def safe_get_active_material(
obj: typing.Optional[bpy.types.Object],
) -> typing.Optional[bpy.types.Material]:
"""Returns active material of object. Returns None if object is None"""
if obj is None:
return None
return obj.active_material
def is_material_slot_used_on_geometry(
obj: bpy.types.Object,
material_index: int,
used_indices: typing.Optional[typing.FrozenSet[int]] = None,
) -> bool:
"""Returns whether a material slot on given index contains a material that is used
by a given Object's geometry.
Pass used_indices if this function is used in a loop for performance reasons.
"""
try:
slot = obj.material_slots[material_index]
except IndexError:
raise Exception(f"Invalid material index {material_index} on {obj}")
if slot.material is None:
return False
if used_indices is None:
used_indices = get_material_slots_used_by_mesh(obj)
used_indices |= get_material_slots_used_by_spline(obj)
used_indices |= get_material_slots_used_by_text(obj)
return material_index in used_indices
def is_material_used_on_geonodes(
obj: bpy.types.Object,
material_index: int,
geonode_materials: typing.Optional[typing.FrozenSet[bpy.types.Material]] = None,
) -> bool:
"""Returns whether a material slot on given index contains a material that is used
by a given Object's geometry nodes modifiers.
Pass geonode_materials if this function is used in a loop for performance reasons.
"""
try:
slot = obj.material_slots[material_index]
except IndexError:
raise Exception(f"Invalid material index {material_index} on {obj}")
if slot.material is None:
return False
if geonode_materials is None:
geonode_materials = get_materials_used_by_geonodes(obj)
obj_mat_name = slot.material.name
geonode_mats_names = [material.name for material in geonode_materials]
return obj_mat_name in geonode_mats_names
def get_material_slots_used_by_mesh(obj: bpy.types.Object) -> typing.FrozenSet[int]:
"""Return a FrozenSet[material_index] used by a given Object's mesh"""
if not hasattr(obj.data, "polygons"):
return frozenset()
material_indices = numpy.zeros(len(obj.data.polygons), dtype=numpy.int32)
obj.data.polygons.foreach_get('material_index', material_indices)
unique_indices = numpy.unique(material_indices)
return frozenset(unique_indices)
def get_material_slots_used_by_spline(obj: bpy.types.Object) -> typing.FrozenSet[int]:
"""Return a FrozenSet[material_index] used by a given Object's splines"""
if not hasattr(obj.data, "splines"):
return frozenset()
seen_indices = set()
for spline in obj.data.splines:
seen_indices.add(spline.material_index)
return frozenset(seen_indices)
def get_material_slots_used_by_text(obj: bpy.types.Object) -> typing.FrozenSet[int]:
"""Return a FrozenSet[material_index] used by a given Object's texts"""
if not hasattr(obj.data, "body_format"):
return frozenset()
seen_indices = set()
for character in obj.data.body_format:
seen_indices.add(character.material_index)
return frozenset(seen_indices)
def get_materials_used_by_geonodes(obj: bpy.types.Object) -> typing.FrozenSet[bpy.types.Material]:
    """Returns a FrozenSet[Material] used by a given Object's geometry nodes modifiers."""
    found: typing.Set[bpy.types.Material] = set()
    for modifier in obj.modifiers:
        if modifier.type != 'NODES' or modifier.node_group is None:
            continue
        # materials assigned through the modifier's exposed node group inputs
        for input_ in node_utils_bpy.get_node_tree_inputs_map(modifier.node_group).values():
            if node_utils_bpy.get_socket_type(input_) != 'NodeSocketMaterial':
                continue
            material = modifier[input_.identifier]
            if material is not None:
                found.add(material)
        # materials referenced inside the node tree itself (sockets and node properties)
        for node in node_utils_bpy.find_nodes_in_tree(modifier.node_group):
            for socket in node.inputs:
                if socket.type == 'MATERIAL' and socket.default_value is not None:
                    found.add(socket.default_value)
            if getattr(node, 'material', None) is not None:
                found.add(node.material)
    return frozenset(found)
@@ -0,0 +1,158 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
import typing
import importlib
import importlib.util
import dataclasses
import sys
import os
import bpy
import logging
logger = logging.getLogger(__name__)
if "utils_bpy" not in locals():
from . import utils_bpy
else:
import importlib
utils_bpy = importlib.reload(utils_bpy)
@dataclasses.dataclass
class RequiredModule:
    """Container class for defining required module names

    The import and pip-install names of a package can differ, hence both fields.
    Example: RequiredModule("PIL.Image", "Pillow")
    """

    import_name: str  # Name used to import the module in source code
    install_name: str  # Name used to install the module with pip
class ModuleProvider:
    """Class that encapsulates installation of additional Python modules.

    It is supposed to be used as singleton with only one instance and one 'install_path'. Because
    all addons using ModuleProvider should install their dependencies to the same place.
    Otherwise they could install potentially incompatible modules.

    TODO: Adjust this after transition to engon
    Currently 'install_path' is stored in preferences in each addon using ModuleProvider and nothing
    enforces they store the same path. This will be inherently resolved after transition to one
    common addon - engon which would define only one 'install_path' property in preferences.
    It shouldn't be problem till that as we won't release multiple addons that needs additional
    modules before full transition to engon.
    """

    def __init__(self) -> None:
        # Where pip installs additional modules; must be set before install/enable use.
        self._install_path: typing.Optional[str] = None
        # Cache which allows fast query if module given by name is installed.
        # If module is not in the cache, we try to import it (which is slow) and then store boolean
        # indicating whether the module can be imported or not.
        self._installed_modules_cache: typing.Dict[str, bool] = {}

    def is_initialized(self) -> bool:
        """Returns True once a valid 'install_path' has been assigned."""
        return self._install_path is not None

    @property
    def install_path(self) -> str:
        """Directory where additional modules are installed; raises RuntimeError if unset."""
        if not self.is_initialized():
            raise RuntimeError("Accessing uninitialized install path in ModuleProvider!")
        assert self._install_path is not None
        return self._install_path

    @install_path.setter
    def install_path(self, value: str) -> None:
        if not os.path.isdir(value):
            raise ValueError("Provided install_path is not a valid, existing directory!")
        self._install_path = value
        # install path changed, clear the cache of available modules
        self._installed_modules_cache.clear()

    def is_module_installed(self, module_name: str) -> bool:
        """Returns True if module is installed either in sys.path or in self.install_path"""
        if module_name in self._installed_modules_cache:
            return self._installed_modules_cache[module_name]
        module_found = self._get_module_spec(module_name) is not None
        logger.debug(f"Module '{module_name}' was {'found' if module_found else 'not found'}")
        self._installed_modules_cache[module_name] = module_found
        return module_found

    def install_modules(self, module_install_names: typing.Iterable[str]) -> None:
        """Installs the given pip package names into 'install_path' using Blender's python."""
        # Toggle console to show progress to users.
        # Console is available only on Windows :( and we can't check if it's already opened,
        # so we expect users don't have it usually opened.
        if sys.platform == "win32":
            bpy.ops.wm.console_toggle()
        python_exe = sys.executable
        logger.info(f"Preparing to install modules '{module_install_names}'")
        try:
            args = [python_exe, "-m", "ensurepip", "--default-pip"]
            logger.info("Running ensurepip")
            if utils_bpy.run_logging_subprocess(args) != 0:
                # fixed message grammar (was "Couldn't ensured pip ...")
                logger.error("Couldn't ensure pip in Blender's python!")
            for module_install_name in module_install_names:
                args = [
                    python_exe,
                    "-m",
                    "pip",
                    "install",
                    "--upgrade",
                    module_install_name,
                    "--target",
                    self.install_path,
                ]
                logger.info(f"Installing '{module_install_name}'")
                if utils_bpy.run_logging_subprocess(args) == 0:
                    logger.info(f"Modules '{module_install_name}' successfully installed")
                    # NOTE(review): this caches the pip *install* name while lookups use
                    # the *import* name (see RequiredModule) — confirm callers only pass
                    # names where both are identical.
                    self._installed_modules_cache[module_install_name] = True
                else:
                    logger.error(f"Error occurred while installing '{module_install_name}' module!")
        finally:
            if sys.platform == "win32":
                bpy.ops.wm.console_toggle()

    def enable_module(self, module_name: str) -> None:
        """Stores module into sys.modules, so we can import it later from any other place"""
        if module_name in sys.modules:
            return
        module_spec = self._get_module_spec(module_name)
        if module_spec is None:
            raise RuntimeError(f"Module {module_name} is not installed, can't enable it!")
        # Resolve install_path and was_in_path BEFORE the try block: if 'install_path'
        # raised inside the try, the finally clause would hit a NameError on
        # 'was_in_path', masking the original error.
        install_path = self.install_path
        was_in_path = install_path in sys.path
        try:
            if not was_in_path:
                sys.path.insert(0, install_path)
            importlib.import_module(module_name)
            logger.debug(f"Module '{module_name}' successfully enabled, it can be imported now!")
        finally:
            # restore sys.path to its previous state
            if not was_in_path and install_path in sys.path:
                sys.path.remove(install_path)

    def _get_module_spec(self, module_name: str) -> typing.Optional[importlib.machinery.ModuleSpec]:
        """Finds the module spec for 'module_name', temporarily searching 'install_path' too."""
        was_in_path = self.install_path in sys.path
        try:
            if not was_in_path:
                sys.path.insert(0, self.install_path)
            return importlib.util.find_spec(module_name)
        except ModuleNotFoundError:
            # Module was found but it's not valid (doesn't contain __path__), we need to re-install
            return None
        finally:
            if not was_in_path and self.install_path in sys.path:
                sys.path.remove(self.install_path)
@@ -0,0 +1,590 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import typing
import itertools
import collections
import dataclasses
if "utils_bpy" not in locals():
from . import utils_bpy
else:
import importlib
utils_bpy = importlib.reload(utils_bpy)
# Type that's compatible with both old and new node tree interfaces
if bpy.app.version < (4, 0, 0):
NodeSocketInterfaceCompat = bpy.types.NodeSocketInterfaceStandard
else:
NodeSocketInterfaceCompat = bpy.types.NodeTreeInterfaceSocket
def get_node_tree_inputs_map(
    node_tree: bpy.types.NodeTree,
) -> typing.Dict[str, NodeSocketInterfaceCompat]:
    """Returns map of {identifier: input} of given 'node_tree' reassuring compatibility pre and post Blender 4.0"""
    assert isinstance(node_tree, bpy.types.NodeTree)
    if bpy.app.version >= (4, 0, 0):
        # 4.0+: inputs live in the unified interface tree; keep only INPUT sockets
        sockets = (
            item
            for item in node_tree.interface.items_tree
            if item.item_type == 'SOCKET' and item.in_out == 'INPUT'
        )
    else:
        sockets = iter(node_tree.inputs)
    return {socket.identifier: socket for socket in sockets}
def get_socket_type(socket: NodeSocketInterfaceCompat) -> str:
    """Returns the Blender 4.0 version of socket type from a NodeTree.

    Note 1: This accepts either `bpy.types.NodeSocketInterfaceStandard` or
    `bpy.types.NodeTreeInterfaceSocket` based on Blender version, but basically it is what
    the `.inputs` or `.interface.items_tree` gives you.
    Note 2: NodeTree is different from a NodeGroup!
    """
    # Inspired by a post on the 'bpy' discord by a user named 'Reigen'
    if bpy.app.version < (4, 0, 0):
        # pre-4.0: sockets expose a short enum string like 'BOOLEAN' via .type
        assert isinstance(socket, bpy.types.NodeSocketInterfaceStandard)
        socket_type = socket.type
    else:
        # 4.0+: sockets expose the full idname like 'NodeSocketBool'
        assert isinstance(
            socket, bpy.types.NodeTreeInterfaceSocket
        ), "Given socket is not a Node Tree interface! Isn't it a node group?"
        socket_type = socket.bl_socket_idname
    # We remap the values to their newer versions, in 4.0 the values changed
    # from 'key' to the 'value' in MAP
    key = socket_type.upper()
    MAP = {
        'STRING': 'NodeSocketString',
        'BOOLEAN': 'NodeSocketBool',
        'MATERIAL': 'NodeSocketMaterial',
        'VECTOR': 'NodeSocketVector',
        'INT': 'NodeSocketInt',
        'GEOMETRY': 'NodeSocketGeometry',
        'COLLECTION': 'NodeSocketCollection',
        'TEXTURE': 'NodeSocketTexture',
        'VALUE': 'NodeSocketFloat',
        'RGBA': 'NodeSocketColor',
        'OBJECT': 'NodeSocketObject',
        'IMAGE': 'NodeSocketImage',
        'ROTATION': 'NodeSocketRotation',
    }
    new_value = MAP.get(key, None)
    if bpy.app.version >= (4, 0, 0):
        # Sanity check: a 4.0 idname like 'NodeSocketBool' upper-cases to a key that
        # is NOT in MAP, so new_value must be None here; the idname is already the
        # desired return value.
        assert new_value is None
        return socket_type
    else:
        # pre-4.0 enum values map directly to the 4.0 idname
        return new_value
def find_nodes_in_tree(
node_tree: typing.Optional[bpy.types.NodeTree],
filter_: typing.Optional[typing.Callable[[bpy.types.Node], bool]] = None,
local_only: bool = False,
) -> typing.Set[bpy.types.Node]:
"""Returns a set of nodes from a given node tree that comply with the filter"""
ret = set()
if node_tree is None:
return ret
for node in node_tree.nodes:
if getattr(node, "node_tree", None) is not None:
if node.node_tree.library is None or not local_only:
ret.update(find_nodes_in_tree(node.node_tree, filter_, local_only))
if filter_ is not None and not filter_(node):
continue
ret.add(node)
return ret
def get_top_level_material_nodes_with_name(
    obj: bpy.types.Object,
    node_names: typing.Set[str],
) -> typing.Iterable[bpy.types.Node]:
    """Searches for top level nodes or node groups = not nodes nested in other node groups.

    Raise exception if 'obj' is instanced collection. If linked object links materials from another
    blend then Blender API doesn't allow us easily access these materials. We would be able only
    to access materials that are local inside blend of linked object. This could be confusing
    behavior of this function, so this function doesn't search for any nodes in linked objects.
    """
    # BUGFIX: the original asserted 'obj.instance_collection != 'COLLECTION'', comparing a
    # Collection datablock (or None) against a string - that can never be equal, so the
    # check never fired. The docstring promises to reject instanced collections, which is
    # what 'instance_type' expresses (the same check used elsewhere in this module).
    assert obj.instance_type != 'COLLECTION'
    for material_slot in obj.material_slots:
        material = material_slot.material
        if material is None:
            continue
        if material.node_tree is None:
            continue  # material is not using nodes or the node_tree is invalid
        for node in material.node_tree.nodes:
            # for node groups compare the group's tree name, for plain nodes the node's name
            name = node.node_tree.name if node.type == 'GROUP' else node.name
            if utils_bpy.remove_object_duplicate_suffix(name) in node_names:
                yield node
def find_nodes_by_bl_idname(
    nodes: typing.Iterable[bpy.types.Node], bl_idname: str, recursive: bool = False
) -> typing.Iterable[bpy.types.Node]:
    """Yields nodes from 'nodes' whose bl_idname equals 'bl_idname'.

    If 'recursive' is True, node groups are descended into at any depth.
    """
    for node in nodes:
        if node.bl_idname == bl_idname:
            yield node
        # BUGFIX: plain (non-group) nodes have no 'node_tree' attribute at all, so the
        # original 'node.node_tree is not None' raised AttributeError whenever
        # recursive=True met a regular node. Mirror find_nodes_in_tree and use getattr.
        node_tree = getattr(node, "node_tree", None)
        if recursive and node_tree is not None:
            # BUGFIX: propagate 'recursive' - the original recursion defaulted it back to
            # False, so groups nested more than one level deep were never searched
            yield from find_nodes_by_bl_idname(node_tree.nodes, bl_idname, recursive)
def find_nodes_by_name(node_tree: bpy.types.NodeTree, name: str) -> typing.Set[bpy.types.Node]:
    """Returns set of nodes from 'node_tree' which name without duplicate suffix is 'name'"""

    def name_matches(node: bpy.types.Node) -> bool:
        return utils_bpy.remove_object_duplicate_suffix(node.name) == name

    return find_nodes_in_tree(node_tree, name_matches)
def find_nodegroups_by_name(
    node_tree: typing.Optional[bpy.types.NodeTree], name: str, use_node_tree_name: bool = True
) -> typing.Set[bpy.types.NodeGroup]:
    """Returns set of node groups from 'node_tree' which name without duplicate suffix is 'name'

    Nodegroups have node.label, node.name and node.node_tree.name. If node.label is empty,
    Blender UI displays node_tree.name in the nodegroup header, so node.name is often not
    renamed to anything reasonable and most of the time we want to search by node_tree.name.
    If use_node_tree_name is True and the nodegroup has no node_tree, it is skipped.
    """

    def is_matching_nodegroup(node: bpy.types.Node) -> bool:
        if node.type != 'GROUP':
            return False
        if use_node_tree_name:
            if node.node_tree is None:
                return False
            candidate_name = node.node_tree.name
        else:
            candidate_name = node.name
        return utils_bpy.remove_object_duplicate_suffix(candidate_name) == name

    return find_nodes_in_tree(node_tree, is_matching_nodegroup)
def find_incoming_nodes(node: bpy.types.Node) -> typing.Set[bpy.types.Node]:
"""Finds and returns all nodes connecting to 'node'"""
ret: typing.Set[bpy.types.Node] = set()
for input_ in node.inputs:
for link in input_.links:
ret.add(link.from_node)
return ret
def find_link_connected_to(
    links: typing.Iterable[bpy.types.NodeLink],
    to_node: bpy.types.Node,
    to_socket_name: str,
    skip_reroutes: bool = False,
) -> typing.Optional[bpy.types.NodeLink]:
    """Find the link connected to given target node (to_node) to given socket name (to_socket_name)

    There can be at most 1 such link. In Blender it is not allowed to connect more than one link
    to a socket. It is allowed to connect multiple links *from* one socket, but not *to* one socket.

    If 'skip_reroutes' is True, reroute nodes are followed upstream, so the returned link is
    the one feeding the whole chain of reroutes.
    """
    ret: typing.List[bpy.types.NodeLink] = []
    for link in links:
        if to_node != link.to_node:
            continue
        if to_socket_name != link.to_socket.name:
            continue
        if skip_reroutes and isinstance(link.from_node, bpy.types.NodeReroute):
            # BUGFIX: propagate 'skip_reroutes' into the recursion - previously it defaulted
            # back to False, so a chain of reroutes was only skipped one level deep and the
            # link between two reroutes was returned instead of the real source link
            return find_link_connected_to(
                links, link.from_node, link.from_node.inputs[0].name, skip_reroutes
            )
        ret.append(link)
    if len(ret) > 1:
        raise RuntimeError(
            "Found multiple nodes connected to given node and socket. This is not valid!"
        )
    elif len(ret) == 0:
        return None
    return ret[0]
def find_links_connected_from(
links: typing.Iterable[bpy.types.NodeLink], from_node: bpy.types.Node, from_socket_name: str
) -> typing.Iterable[bpy.types.NodeLink]:
"""Find links connected from given node (from_node) from given socket name (from_socket_name)
There can be any number of such links.
"""
for link in links:
if from_node != link.from_node:
continue
if from_socket_name != link.from_socket.name:
continue
yield link
def is_node_socket_connected_to(
    links: typing.Iterable[bpy.types.NodeLink],
    from_node: bpy.types.Node,
    from_socket_name: str,
    to_nodes: typing.List[bpy.types.Node],
    to_socket_name: typing.Optional[str],
    recursive: bool = True,
) -> bool:
    """Returns True if the given output socket of 'from_node' connects to one of 'to_nodes'.

    If 'to_socket_name' is None, any socket of the target nodes counts as a match. With
    'recursive' the connection is also followed through intermediate nodes.
    """
    for link in find_links_connected_from(links, from_node, from_socket_name):
        if link.to_node in to_nodes and (
            to_socket_name is None or to_socket_name == link.to_socket.name
        ):
            return True
        if not recursive:
            continue
        if is_node_socket_connected_to(
            links, link.to_node, link.to_socket.name, to_nodes, to_socket_name, True
        ):
            return True
    return False
def get_node_input_socket(
    node: bpy.types.Node, socket_name: str
) -> typing.Optional[bpy.types.NodeSocket]:
    """Returns the input socket of 'node' named 'socket_name', or None if there is none.

    Raises RuntimeError if more than one input socket carries that name.
    """
    matches = [socket for socket in node.inputs if socket.name == socket_name]
    if len(matches) > 1:
        raise RuntimeError("Multiple matches!")
    return matches[0] if matches else None
def get_node_output_socket(
    node: bpy.types.Node, socket_name: str
) -> typing.Optional[bpy.types.NodeSocket]:
    """Returns the output socket of 'node' named 'socket_name', or None if there is none.

    Raises RuntimeError if more than one output socket carries that name.
    """
    matches = [socket for socket in node.outputs if socket.name == socket_name]
    if len(matches) > 1:
        raise RuntimeError("Multiple matches!")
    return matches[0] if matches else None
def find_nodegroup_users(
    nodegroup_name: str,
) -> typing.Iterable[typing.Tuple[bpy.types.Object, typing.Iterable[bpy.types.Object]]]:
    """Returns iterable of (obj, user_objs) that use nodegroup with name 'nodegroup_name'

    In case of instanced object this checks the instanced collection and the nested
    objects in order to find the mesh object that can be potentional user of 'nodegroup_name'.
    In this case this returns the original instanced object and list of non-empty objects that are
    instanced.
    In case of editable objects this returns the object itself and list with the object in it.
    """

    def find_origin_objects(instancer_obj: bpy.types.Object) -> typing.Iterable[bpy.types.Object]:
        # BUGFIX: this is a generator function, so the original 'return [instancer_obj]' for
        # non-EMPTY objects returned WITHOUT yielding anything (the list became the unused
        # StopIteration value). 'yield' has to be used instead.
        if instancer_obj.type != 'EMPTY':
            yield instancer_obj
            return
        # breadth-first expansion of nested instanced collections
        objects = {instancer_obj}
        while len(objects) > 0:
            obj = objects.pop()
            if (
                obj.type == 'EMPTY'
                and obj.instance_type == 'COLLECTION'
                and obj.instance_collection is not None
            ):
                objects.update(obj.instance_collection.all_objects)
            else:
                yield obj

    # Firstly gather all the materials that use the nodegroup with given name
    materials_using_nodegroup = set()
    for material in bpy.data.materials:
        if material.node_tree is None:
            continue
        nodes = find_nodes_in_tree(
            material.node_tree,
            # BUGFIX: guard against group nodes with no node_tree assigned - 'x.node_tree.name'
            # raised AttributeError for such nodes
            lambda x: isinstance(x, bpy.types.ShaderNodeGroup)
            and x.node_tree is not None
            and x.node_tree.name == nodegroup_name,
        )
        if len(nodes) > 0:
            materials_using_nodegroup.add(material)
    if len(materials_using_nodegroup) == 0:
        return []

    # Go through all objects and yield ones that have one of the found materials
    for obj in bpy.data.objects:
        # We skip objects with library here as they will be gathered by 'find_origin_objects'
        if obj.library is not None:
            continue
        # In case of instanced collection we find the actual instanced objects and gather all
        # used materials.
        if (
            obj.type == 'EMPTY'
            and obj.instance_type == 'COLLECTION'
            and obj.instance_collection is not None
        ):
            instanced_objs = set(find_origin_objects(obj))
            instance_materials = {
                slot.material
                for instanced_obj in instanced_objs
                for slot in instanced_obj.material_slots
                if slot.material is not None
            }
            if len(instance_materials.intersection(materials_using_nodegroup)) > 0:
                yield obj, instanced_objs
        else:
            # non-mesh datablocks (e.g. lights) have no material slots
            if not hasattr(obj, "material_slots"):
                continue
            obj_materials = {
                slot.material for slot in obj.material_slots if slot.material is not None
            }
            if len(obj_materials & materials_using_nodegroup) > 0:
                yield obj, [obj]
def get_channel_nodes_map(
    node_tree: bpy.types.NodeTree,
) -> typing.DefaultDict[str, typing.List[bpy.types.ShaderNodeTexImage]]:
    """Returns all image nodes from given nodegroup mapping to filepath"""
    channel_nodes_map: typing.DefaultDict[str, typing.List[bpy.types.ShaderNodeTexImage]] = (
        collections.defaultdict(list)
    )
    image_nodes = find_nodes_in_tree(
        node_tree, lambda x: isinstance(x, bpy.types.ShaderNodeTexImage)
    )
    for node in image_nodes:
        name_wo_suffix = utils_bpy.remove_object_duplicate_suffix(node.name)
        # channel name = {"mq_Diffuse", "mq_Normal", "mq_Height", ...}
        channel_name, separator, _ = name_wo_suffix.rpartition("_")
        if separator == "":
            # fallback channel name to display the information about texture node anyways
            channel_name = "unknown"
        channel_nodes_map[channel_name].append(node)
    for channel_nodes in channel_nodes_map.values():
        channel_nodes.sort(key=lambda x: x.name)
    return channel_nodes_map
def filter_node_socket_name(
    socket: bpy.types.NodeSocket | NodeSocketInterfaceCompat,
    *names: str,
    case_sensitive: bool = False,
) -> bool:
    """Returns True if any of 'names' is a substring of the socket's name.

    The comparison is case-insensitive unless 'case_sensitive' is True.
    """
    if case_sensitive:
        socket_name = socket.name
        candidates = names
    else:
        socket_name = socket.name.lower()
        candidates = tuple(name.lower() for name in names)
    return any(candidate in socket_name for candidate in candidates)
@dataclasses.dataclass
class NodeSocketsDrawTemplate:
    """Template for drawing node sockets from a nodegroup in a material or geonodes modifier.

    The 'filter_' and 'socket_names_drawn_first' are optional and they are mutually exclusive if provided.
    If 'socket_names_drawn_first' is not None, their relative inputs are drawn first if they exist
    and 'filter_' is applied to the rest.
    """

    # name of the nodegroup whose inputs should be drawn
    name: str
    # predicate selecting which sockets get drawn; defaults to drawing all of them
    filter_: typing.Callable[[bpy.types.NodeSocket | NodeSocketInterfaceCompat], bool] = (
        lambda _: True
    )
    # socket names (matched case-insensitively) to draw before the filtered rest
    socket_names_drawn_first: typing.Optional[typing.List[str]] = None

    def draw_from_material(
        self,
        mat: bpy.types.Material,
        layout: bpy.types.UILayout,
        draw_max_first_occurrences: int = 1,
    ) -> None:
        """Draws inputs of up to 'draw_max_first_occurrences' matching nodegroups found in 'mat'."""
        if draw_max_first_occurrences < 1:
            return
        # search both by node name and by node tree name, see find_nodegroups_by_name
        nodegroups = list(
            itertools.chain(
                find_nodes_by_name(mat.node_tree, self.name),
                find_nodegroups_by_name(mat.node_tree, self.name),
            )
        )
        if len(nodegroups) == 0:
            layout.label(text=f"No '{self.name}' nodegroup found", icon='INFO')
            return
        for i, group in enumerate(nodegroups):
            if i >= draw_max_first_occurrences:
                break
            inputs = list(filter(is_drawable_node_input, group.inputs))
            self._draw_template(
                inputs, lambda input_: layout.row().prop(input_, "default_value", text=input_.name)
            )

    def draw_from_geonodes_modifier(
        self,
        layout: bpy.types.UILayout,
        mod: bpy.types.NodesModifier,
    ) -> None:
        """Draws drawable interface inputs of the modifier's node group if it matches 'self.name'."""
        assert mod.type == 'NODES'
        if mod.node_group is None or mod.node_group.name != self.name:
            layout.label(text=f"No '{self.name}' nodegroup found", icon='INFO')
            return
        inputs = list(
            filter(is_drawable_node_tree_input, get_node_tree_inputs_map(mod.node_group).values())
        )
        self._draw_template(inputs, lambda input_: draw_modifier_input(layout, mod, input_))

    def _draw_template(
        self,
        inputs: typing.List[NodeSocketInterfaceCompat] | typing.List[bpy.types.NodeSocket],
        draw_function: typing.Callable[[NodeSocketInterfaceCompat | bpy.types.NodeSocket], None],
    ) -> None:
        """Draws 'socket_names_drawn_first' inputs first, then the remaining ones passing 'filter_'."""
        already_drawn = set()
        if self.socket_names_drawn_first is not None:
            # match the requested names case-insensitively against the available inputs
            socket_name_to_input_map = {input_.name.lower(): input_ for input_ in inputs}
            for name in self.socket_names_drawn_first:
                input_ = socket_name_to_input_map.get(name.lower(), None)
                if input_ is None:
                    continue
                already_drawn.add(input_)
                draw_function(input_)
        for input_ in inputs:
            if input_ not in already_drawn and self.filter_(input_):
                draw_function(input_)
def is_drawable_node_input(input_: bpy.types.NodeSocket) -> bool:
    """Returns True if the socket holds an editable value that makes sense to show in the UI."""
    if not hasattr(input_, "default_value"):
        return False
    return input_.enabled and not (input_.hide_value or input_.is_linked)
def is_drawable_node_tree_input(input_: NodeSocketInterfaceCompat) -> bool:
    """Returns True for interface sockets that should be exposed in the UI (non-geometry, value shown)."""
    if input_.hide_value:
        return False
    return get_socket_type(input_) != 'NodeSocketGeometry'
def draw_node_inputs_filtered(
    layout: bpy.types.UILayout,
    node: bpy.types.Node,
    filter_: typing.Callable[[bpy.types.NodeSocket], bool] = lambda _: True,
) -> None:
    """Draws every drawable input of 'node' that passes 'filter_' into 'layout'."""
    for input_ in node.inputs:
        if is_drawable_node_input(input_) and filter_(input_):
            layout.row().prop(input_, "default_value", text=input_.name)
def draw_modifier_input(
    layout: bpy.types.UILayout, mod: bpy.types.NodesModifier, input_: NodeSocketInterfaceCompat
):
    """Draws a single geometry nodes modifier input into 'layout'.

    ID sockets (object / material / collection) are drawn as search fields into the matching
    bpy.data collection, everything else as a plain property of the modifier.
    """
    # socket type -> (bpy.data collection name, icon) for ID sockets drawn via prop_search
    ID_SOCKET_SEARCH_TARGETS = {
        'NodeSocketObject': ("objects", 'OBJECT_DATA'),
        'NodeSocketMaterial': ("materials", 'MATERIAL_DATA'),
        'NodeSocketCollection': ("collections", 'OUTLINER_COLLECTION'),
    }
    search_target = ID_SOCKET_SEARCH_TARGETS.get(get_socket_type(input_), None)
    if search_target is not None:
        collection_name, icon = search_target
        layout.row().prop_search(
            mod,
            f"[\"{input_.identifier}\"]",
            bpy.data,
            collection_name,
            text=input_.name,
            icon=icon,
        )
    else:
        layout.row().prop(mod, f"[\"{input_.identifier}\"]", text=input_.name)
def draw_node_tree(
    layout: bpy.types.UILayout,
    node_tree: bpy.types.NodeTree,
    depth_limit: int = 5,
) -> None:
    """Draws collapsible boxes for the material output node and every node feeding into it.

    Traversal starts at the single ShaderNodeOutputMaterial of 'node_tree' and follows
    incoming links up to 'depth_limit' levels. Draws nothing if there isn't exactly one
    material output node.
    """

    def draw_node_and_recurse(
        layout: bpy.types.UILayout,
        node: bpy.types.Node,
        parent_node: typing.Optional[bpy.types.Node],
        depth: int,
    ) -> None:
        # stop recursion once the configured depth is reached
        if depth == depth_limit:
            return
        box = layout.box()
        row = box.row()
        # the node's 'hide' flag doubles as the collapsed/expanded state of the box
        row.prop(
            node, "hide", text="", emboss=False, icon='TRIA_RIGHT' if node.hide else 'TRIA_DOWN'
        )
        row.label(text=node.name)
        if parent_node is not None:
            # grayed-out hint showing which downstream node this one feeds
            right = row.row()
            right.enabled = False
            right.label(text=f"(from {parent_node.name} node)")
        if not node.hide:
            col = box.column(align=True)
            node.draw_buttons(bpy.context, col)
            draw_node_inputs_filtered(col, node)
        for incoming_node in find_incoming_nodes(node):
            draw_node_and_recurse(layout, incoming_node, node, depth + 1)

    material_output_nodes = find_nodes_in_tree(
        node_tree, lambda x: isinstance(x, bpy.types.ShaderNodeOutputMaterial)
    )
    # only draw when the output node is unambiguous
    if len(material_output_nodes) != 1:
        return
    draw_node_and_recurse(layout, material_output_nodes.pop(), None, 0)
@@ -0,0 +1,125 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import bpy.utils.previews
import os
import logging
import typing
import threading
logger = logging.getLogger(f"polygoniq.{__name__}")
class PreviewManager:
    """Loads previews from provided paths on demand based on basenames or custom ids."""

    # icon_id 1 is Blender's built-in question mark icon, returned for unknown preview IDs
    QUESTION_MARK_ICON_ID = 1

    def __init__(self):
        self.preview_collection = bpy.utils.previews.new()
        # guards lazy loading of previews from multiple threads
        self.lock = threading.Lock()
        # preview ID -> filesystem path the preview can be loaded from
        self.id_path_map: typing.Dict[str, str] = {}
        self.allowed_extensions = {".png", ".jpg"}

    def add_preview_path(self, path: str, id_override: typing.Optional[str] = None) -> None:
        """Adds 'path' as a possible place from where preview can be loaded if requested.

        By default the ID of the preview is the basename of the file without extension. If 'path'
        is a single file, then 'id_override' can be used to override the default behavior.
        If 'path' is a directory, then all files with allowed extension are considered.
        The preview is then loaded on demand when requested by its ID using 'get_icon_id'.
        """
        self._update_path_map_entry(path, id_override)

    def get_icon_id(self, id_: str) -> int:
        """Return icon_id for preview with id 'id_'

        Returns question mark icon id if 'id_' is not found.
        """
        if id_ in self.preview_collection:
            return self.preview_collection[id_].icon_id

        path = self.id_path_map.get(id_, None)
        if path is None:
            # Unknown preview ID
            return self.QUESTION_MARK_ICON_ID
        # There might be paths that weren't removed from the map, but the file was already
        # deleted on the filesystem. In that case we remove the id_ from the path map.
        if os.path.isfile(path):
            # BUGFIX: the original message contained '{id_}' twice
            logger.debug(f"Preview: {id_} loaded on demand")
            self._load_preview(path, id_)
            assert id_ in self.preview_collection
            return self.preview_collection[id_].icon_id
        else:
            del self.id_path_map[id_]
            return self.QUESTION_MARK_ICON_ID

    def get_polygoniq_addon_icon_id(self, addon_name: str) -> int:
        """Returns icon_id of the 'logo_{addon_name}' preview."""
        return self.get_icon_id(f"logo_{addon_name}")

    def get_engon_feature_icon_id(self, feature_name: str) -> int:
        """Returns icon_id of the 'logo_{feature_name}_features' preview."""
        return self.get_icon_id(f"logo_{feature_name}_features")

    def clear(self, ids: typing.Optional[typing.Set[str]] = None) -> None:
        """Clears the whole preview collection or only 'ids' if provided.

        This doesn't clear the paths where previews can be found. If some path became
        invalid, it is cleared lazily when loading a preview from it fails.
        """
        if ids is None:
            self.preview_collection.clear()
        else:
            for id_ in ids:
                if id_ in self.preview_collection:
                    del self.preview_collection[id_]

    def _update_path_map_entry(self, path: str, id_override: typing.Optional[str] = None) -> None:
        """Registers file(s) at 'path' into 'id_path_map' and invalidates stale loaded previews."""
        if os.path.isdir(path):
            if id_override is not None:
                raise RuntimeError("id_override is not allowed for directories!")
            for file in os.listdir(path):
                filename, ext = os.path.splitext(file)
                basename = os.path.basename(filename)
                if ext.lower() in self.allowed_extensions:
                    self.id_path_map[basename] = os.path.join(path, file)
                    # drop an already loaded preview so the new file is picked up on next access
                    if basename in self.preview_collection:
                        del self.preview_collection[basename]
        elif os.path.isfile(path):
            filename, ext = os.path.splitext(path)
            basename = os.path.basename(filename)
            key = id_override if id_override is not None else basename
            if ext.lower() in self.allowed_extensions:
                self.id_path_map[key] = path
                if key in self.preview_collection:
                    del self.preview_collection[key]

    def _load_preview(self, full_path: str, id_: str) -> None:
        """Loads previews from 'full_path' and saves on key 'id_'

        Assumes 'full_path' is already existing file in the filesystem.
        """
        with self.lock:
            # another thread might have loaded the preview while we waited for the lock
            if id_ in self.preview_collection:
                return
            assert os.path.isfile(full_path)
            try:
                self.preview_collection.load(id_, full_path, 'IMAGE', True)
            except KeyError:
                # unused 'as e' binding removed; logger.exception records the traceback itself
                logger.exception(f"Preview {id_} already loaded!")

    def __del__(self):
        self.preview_collection.close()

    def __contains__(self, id_: str) -> bool:
        return id_ in self.preview_collection

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}: Loaded {len(self.preview_collection)} previews."
@@ -0,0 +1,96 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import re
import os
import typing
from . import utils_bpy
import logging
logger = logging.getLogger(f"polygoniq.{__name__}")
def polygoniq_duplicate_data_filter(
    data: bpy.types.ID, data_filepaths: typing.Optional[typing.Set[str]] = None
) -> bool:
    """Filters polygoniq duplicate data based on addon prefix and duplicate suffix.

    If 'data_filepaths' argument is provided, images with path common to paths provided are also
    considered duplicates.
    """
    KNOWN_PREFIXES = ("aq_", "bq_", "mq_", "tq_", "iq_", "eq_", "st_", "am154_", "am176_")
    # Pattern to check if the object contains a duplicate suffix - .001 - .999 after the name
    duplicate_suffix_re = re.compile(r"^\.[0-9]{3}$")
    if duplicate_suffix_re.match(data.name[-4:]) is None:
        return False

    orig_name = utils_bpy.remove_object_duplicate_suffix(data.name)
    if isinstance(data, (bpy.types.NodeTree, bpy.types.Material)):
        return orig_name.startswith(KNOWN_PREFIXES)
    if isinstance(data, bpy.types.Image):
        img_path = os.path.abspath(bpy.path.abspath(data.filepath, library=data.library))
        for path in data_filepaths if data_filepaths is not None else ():
            try:
                if os.path.commonpath([img_path, path]) == path:
                    return True
            except ValueError:
                # paths on different drives / mixed absolute-relative cannot share a prefix
                continue

    # TODO: log warning or raise exception?
    return False
# Predicate deciding whether a datablock should be kept (True) when looking for duplicates;
# the second argument is an optional set of filesystem paths (used for image datablocks).
DuplicateFilter = typing.Callable[[bpy.types.ID, typing.Optional[typing.Set[str]]], bool]
def _is_duplicate_filtered(
data: bpy.types.ID,
filters: typing.Iterable[DuplicateFilter],
install_paths: typing.Optional[typing.Set[str]] = None,
) -> bool:
filtered = False
for filter_ in filters:
if not filter_(data, install_paths):
filtered = True
break
return filtered
def remove_duplicate_datablocks(
    datablocks: bpy.types.bpy_prop_collection,
    filters: typing.Optional[typing.Iterable[DuplicateFilter]] = None,
    install_paths: typing.Optional[typing.Set[str]] = None,
) -> typing.List[str]:
    """Remaps users of duplicate datablocks to their originals and removes the unused duplicates.

    Datablocks rejected by 'filters' are left untouched. If the original datablock is gone,
    the duplicate is renamed to take its place instead of being removed. Returns the names
    of the removed datablocks.
    """
    to_remove = []
    for datablock in datablocks:
        if filters is not None and _is_duplicate_filtered(datablock, filters, install_paths):
            continue
        # ok, so it's a duplicate, let's figure out the "proper" datablock
        orig_name = utils_bpy.remove_object_duplicate_suffix(datablock.name)
        if orig_name not in datablocks:
            # the original datablock is gone, we should rename this one
            datablock.name = orig_name
            continue
        datablock.user_remap(datablocks[orig_name])
        if datablock.users == 0:
            to_remove.append(datablock)

    removed_names = []
    for datablock in to_remove:
        removed_names.append(datablock.name)
        datablocks.remove(datablock)
    return removed_names
@@ -0,0 +1,150 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
# Code is inspired by the 'MeasureIt' addon by Antonio Vazquez that is shipped natively in Blender
import bpy
import bpy_extras
import blf
import dataclasses
import gpu
import gpu_extras.batch
import gpu_extras.presets
import mathutils
import logging
import typing
logger = logging.getLogger(f"polygoniq.{__name__}")
if not bpy.app.background:
    # Blender 4.0 dropped the 3D_ and 2D_ prefixes from the shader names
    SHADER_LINE_BUILTIN = (
        gpu.shader.from_builtin('POLYLINE_UNIFORM_COLOR')
        if bpy.app.version >= (4, 0, 0)
        else gpu.shader.from_builtin('3D_POLYLINE_UNIFORM_COLOR')
    )
    SHADER_2D_UNIFORM_COLOR_BUILTIN = (
        gpu.shader.from_builtin('UNIFORM_COLOR')
        if bpy.app.version >= (4, 0, 0)
        else gpu.shader.from_builtin('2D_UNIFORM_COLOR')
    )
else:
    # shaders are not created in background (headless) mode - drawing helpers are unusable then
    logger.info(f"'{__name__}' module is not available in background mode!")

# (width, height) of the active viewport region, refreshed via set_context() before drawing
VIEWPORT_SIZE = (0, 0)

# RGBA color tuple with components expected in [0.0, 1.0]
Color = typing.Tuple[float, float, float, float]
def set_context(context: bpy.types.Context) -> None:
    """Sets viewport size from context, to be further used as native bpy uniform in shaders"""
    global VIEWPORT_SIZE
    region = context.region
    VIEWPORT_SIZE = (region.width, region.height)
def line(v1: mathutils.Vector, v2: mathutils.Vector, color: Color, width: float):
    """Draws a line from 'v1' to 'v2' of desired 'color' and 'width'"""
    shader = SHADER_LINE_BUILTIN
    batch = gpu_extras.batch.batch_for_shader(shader, 'LINES', {"pos": [v1, v2]})
    shader.bind()
    shader.uniform_float("color", color)
    shader.uniform_float("lineWidth", width)
    # the polyline shader needs the viewport size to compute pixel-space line width
    shader.uniform_float("viewportSize", VIEWPORT_SIZE)
    batch.draw(shader)
def rectangle(pos: typing.Tuple[float, float], size: typing.Tuple[float, float], color: Color):
    """Draws rectangle starting at 'pos' of width and height from 'size' of desired 'color'"""
    x, y = pos[0], pos[1]
    width, height = size[0], size[1]
    # corners in triangle-fan order: bottom-left, bottom-right, top-right, top-left
    corners = [(x, y), (x + width, y), (x + width, y + height), (x, y + height)]
    batch = gpu_extras.batch.batch_for_shader(
        SHADER_2D_UNIFORM_COLOR_BUILTIN, 'TRI_FAN', {"pos": corners}
    )
    SHADER_2D_UNIFORM_COLOR_BUILTIN.bind()
    SHADER_2D_UNIFORM_COLOR_BUILTIN.uniform_float("color", color)
    batch.draw(SHADER_2D_UNIFORM_COLOR_BUILTIN)
def circle(center: mathutils.Vector, radius: float, color: Color, segments: int):
    # thin wrapper; NOTE the preset's positional order is (center, color, radius)
    gpu_extras.presets.draw_circle_2d(center, color, radius, segments=segments)
@dataclasses.dataclass
class TextStyle:
    """Style of rendered text

    If 'consider_ui_scale' is True, then actual 'font_size' is constructed
    on initialization based on preferences user interface scale
    """

    font_id: int = 0
    # annotated as float: multiplying by ui_scale in __post_init__ produces a float
    font_size: float = 15
    color: Color = (1.0, 1.0, 1.0, 1.0)
    dpi: int = 72
    consider_ui_scale: bool = True

    def __post_init__(self):
        if self.consider_ui_scale:
            # scale by the user's UI scale so text matches the rest of the interface
            self.font_size *= bpy.context.preferences.system.ui_scale
def text(pos: mathutils.Vector, string: str, style: TextStyle) -> None:
    """Draws 'string' at 2D position 'pos' using the given 'style'."""
    font = style.font_id
    blf.position(font, pos[0], pos[1], 0)
    # dpi argument has been dropped in Blender 4.0
    if bpy.app.version < (4, 0, 0):
        blf.size(font, style.font_size, style.dpi)
    else:
        blf.size(font, style.font_size)
    blf.color(font, *style.color)
    blf.draw(font, str(string))
def text_3d(
    world_pos: mathutils.Vector,
    string: str,
    style: TextStyle,
    region: bpy.types.Region,
    rv3d: bpy.types.RegionView3D,
) -> None:
    """Projects 'world_pos' into region (screen) space and draws 'string' there."""
    screen_pos = bpy_extras.view3d_utils.location_3d_to_region_2d(region, rv3d, world_pos)
    text(screen_pos, string, style)
def text_box(
    pos: mathutils.Vector,
    width: int,
    padding: int,
    text_margin: float,
    background: typing.Optional[Color],
    texts: typing.List[typing.Tuple[str, TextStyle]],
) -> None:
    """Draws a column of styled texts starting at 'pos', optionally over a 'background' rectangle."""
    total_font_height = sum(style.font_size for _, style in texts)
    height = total_font_height + (len(texts) - 1) * text_margin
    if background is not None:
        rectangle(pos, (width, height + 2 * padding), background)
    x_pos = pos.x + padding
    y_pos = pos.y + height
    # draw top to bottom, stepping down by each line's font size plus the margin
    for string, style in texts:
        text((x_pos, y_pos), string, style)
        y_pos -= style.font_size + text_margin
def text_box_3d(
    world_pos: mathutils.Vector,
    width: int,
    padding: int,
    text_margin: float,
    background: typing.Optional[Color],
    texts: typing.List[typing.Tuple[str, TextStyle]],
    region: bpy.types.Region,
    rv3d: bpy.types.RegionView3D,
) -> None:
    """Draws text box based on world position aligned to view"""
    screen_pos = bpy_extras.view3d_utils.location_3d_to_region_2d(region, rv3d, world_pos)
    text_box(screen_pos, width, padding, text_margin, background, texts)
@@ -0,0 +1,138 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import typing
class TraffiqRigProperties:
    """Names of the custom properties that drive the traffiq car rig."""

    CAR_RIG = "tq_Car_Rig"
    WHEELS_Y_ROLLING = "tq_WheelsYRolling"
    STEERING = "tq_SteeringRotation"
    WHEEL_ROTATION = "tq_WheelRotation"
    SUSPENSION_FACTOR = "tq_SuspensionFactor"
    SUSPENSION_ROLLING_FACTOR = "tq_SuspensionRollingFactor"

    @classmethod
    def is_rig_property(cls, prop: str) -> bool:
        """Returns True if 'prop' is one of the rig properties (suffixed per-wheel rotations included)."""
        # per-wheel rotation properties are suffixed, e.g. "tq_WheelRotation_F_0"
        if prop.startswith(TraffiqRigProperties.WHEEL_ROTATION):
            return True
        known_properties = {
            cls.CAR_RIG,
            cls.WHEELS_Y_ROLLING,
            cls.STEERING,
            cls.WHEEL_ROTATION,
            cls.SUSPENSION_FACTOR,
            cls.SUSPENSION_ROLLING_FACTOR,
        }
        return prop in known_properties
class RigDrivers:
    """Class used to generate back drivers for rig variables

    Unfortunately blender operator duplicates_make_real doesn't
    append animation data, where drivers are stored https://developer.blender.org/T81577
    Our version of rigacar generates the drivers in the source files, but they aren't
    available after duplicates_make_real is called in converted to editable, thus this class
    exists and is used to create those drivers dynamically based on bone names.
    """

    # names of the driver variables created by the methods below
    INFLUENCE_VAR_NAME = "influence"
    ROTATION_EULER_X_VAR_NAME = "rotationAngle"

    def __init__(self, obj: bpy.types.Object):
        # 'obj' has to carry the traffiq car rig marker property on its data
        assert "tq_Car_Rig" in obj.data
        self.target_obj = obj
        self.pose = obj.pose

    def create_all_drivers(self):
        """Creates drivers for all known rig bones, dispatching on bone name."""
        for bone in self.pose.bones.values():
            if bone.name.startswith("MCH_WheelRotation"):
                # bone name format: "MCH_WheelRotation_{suffix}" - drive by the matching
                # per-wheel custom property, e.g. tq_WheelRotation_FL
                _, _, suffix = bone.name.split("_", 2)
                data_path = f'["{TraffiqRigProperties.WHEEL_ROTATION}_{suffix}"]'
                self.__create_rotation_euler_x_driver(bone, data_path)
            elif bone.name == "MCH_SteeringRotation":
                self.__create_translation_x_driver(bone, f'["{TraffiqRigProperties.STEERING}"]')
            elif bone.name == "MCH_Axis":
                # front axis reacts fully, rear axis with half influence
                front_constraint = bone.constraints.get("Rotation from MCH_Axis_F", None)
                if front_constraint is not None:
                    self.__create_constraint_influence_driver(
                        front_constraint,
                        f'["{TraffiqRigProperties.SUSPENSION_ROLLING_FACTOR}"]',
                        1.0,
                    )
                rear_constraint = bone.constraints.get("Rotation from MCH_Axis_B", None)
                if rear_constraint is not None:
                    self.__create_constraint_influence_driver(
                        rear_constraint,
                        f'["{TraffiqRigProperties.SUSPENSION_ROLLING_FACTOR}"]',
                        0.5,
                    )

    def __create_constraint_influence_driver(
        self,
        constraint: bpy.types.CopyLocationConstraint,
        driver_data_path: str,
        # annotation narrowed from Optional[float]: None would break the '!= 1.0' check and
        # the polynomial coefficients below - callers always pass a float
        base_influence: float = 1.0,
    ) -> None:
        """Drives 'constraint.influence' by the property at 'driver_data_path' scaled by 'base_influence'."""
        fcurve = constraint.driver_add("influence")
        drv = fcurve.driver
        drv.type = 'AVERAGE'
        # reuse an existing variable if the driver was created before
        var = drv.variables.get(RigDrivers.INFLUENCE_VAR_NAME, None)
        if var is None:
            var = drv.variables.new()
        var.name = RigDrivers.INFLUENCE_VAR_NAME
        var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'OBJECT'
        targ.id = self.target_obj
        targ.data_path = driver_data_path
        if base_influence != 1.0:
            # scale the driver output linearly: influence = base_influence * driven_value
            fmod = fcurve.modifiers[0]
            fmod.mode = 'POLYNOMIAL'
            fmod.poly_order = 1
            fmod.coefficients = (0, base_influence)

    def __create_translation_x_driver(self, bone: bpy.types.PoseBone, driver_data_path: str):
        """Drives the bone's X location by the property at 'driver_data_path'."""
        fcurve = bone.driver_add("location", 0)
        drv = fcurve.driver
        drv.type = 'AVERAGE'
        var = drv.variables.get(RigDrivers.ROTATION_EULER_X_VAR_NAME, None)
        if var is None:
            var = drv.variables.new()
        var.name = RigDrivers.ROTATION_EULER_X_VAR_NAME
        var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'OBJECT'
        targ.id = self.target_obj
        targ.data_path = driver_data_path

    def __create_rotation_euler_x_driver(self, bone: bpy.types.PoseBone, driver_data_path: str):
        """Drives the bone's X euler rotation by the property at 'driver_data_path'."""
        fcurve = bone.driver_add("rotation_euler", 0)
        drv = fcurve.driver
        drv.type = 'AVERAGE'
        var = drv.variables.get(RigDrivers.ROTATION_EULER_X_VAR_NAME, None)
        if var is None:
            var = drv.variables.new()
        var.name = RigDrivers.ROTATION_EULER_X_VAR_NAME
        var.type = 'SINGLE_PROP'
        targ = var.targets[0]
        targ.id_type = 'OBJECT'
        targ.id = self.target_obj
        targ.data_path = driver_data_path
def is_object_rigged(obj: bpy.types.Object) -> bool:
    """Returns True if 'obj' has data carrying an enabled traffiq car rig marker property."""
    if obj is None or obj.data is None:
        return False
    data = obj.data
    return TraffiqRigProperties.CAR_RIG in data and data[TraffiqRigProperties.CAR_RIG] == 1
@@ -0,0 +1,318 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import mathutils
import typing
import math
import copy
import logging
logger = logging.getLogger(f"polygoniq.{__name__}")
# Support Blender's "Reload Scripts": on first import pull in our submodules normally,
# on subsequent reloads re-import them so edits are picked up.
if "linalg_bpy" not in locals():
    from . import linalg_bpy
    from . import utils_bpy
else:
    import importlib

    linalg_bpy = importlib.reload(linalg_bpy)
    utils_bpy = importlib.reload(utils_bpy)
def find_bounding_wheels(wheels: typing.List[bpy.types.Object]) -> typing.List[bpy.types.Object]:
    """Returns the frontmost wheels plus the rearmost wheel of each non-front position.

    Wheel objects are expected to be named '..._{position}_{number}'; position ends with "F"
    for front wheels and the number grows towards the rear - assumption inferred from the
    parsing below, TODO confirm against the asset naming convention.
    """
    # we take first front wheels and then find maximum index of rear wheels and return it as a list
    assert len(wheels) > 4
    frontmost_wheels = []
    # position -> (wheel object, wheel number) of the highest-numbered wheel seen so far
    rearmost_wheels: typing.Dict[str, typing.Tuple[bpy.types.Object, int]] = {}
    for wheel_obj in wheels:
        _, position, wheel_number_str = utils_bpy.remove_object_duplicate_suffix(
            wheel_obj.name
        ).split("_")[-3:]
        # BUGFIX: compare wheel numbers numerically - the original compared the raw strings,
        # which orders "9" after "10" (the front-wheel branch already used int())
        wheel_number = int(wheel_number_str)
        if position.endswith("F"):
            if wheel_number == 0:
                frontmost_wheels.append(wheel_obj)
        else:
            rearmost = rearmost_wheels.get(position, None)
            if rearmost is None or wheel_number > rearmost[1]:
                rearmost_wheels[position] = (wheel_obj, wheel_number)
    return frontmost_wheels + [wheel for wheel, _ in rearmost_wheels.values()]
def get_wheel_contact_points(
    wheels: typing.List[bpy.types.Object], instance: bpy.types.Object, debug: bool = False
) -> typing.List[mathutils.Vector]:
    """Returns world-space contact points (lowest point of each wheel) of a vehicle.

    For vehicles with more than 4 wheels only the bounding (outermost) wheels are used.
    One-track vehicles (2 wheels) get an extra artificial contact point per wheel so a plane
    can still be fitted. With 'debug' an empty is spawned at each contact point.
    """
    wheel_contact_points = []
    # idiom fix: 'True if cond else False' is just the condition itself
    one_track_vehicle = len(wheels) == 2
    # when vehicle has more than 4 wheels take only the outer ones
    if len(wheels) > 4:
        wheels = find_bounding_wheels(wheels)
    for wheel_obj in wheels:
        wheel_center = wheel_obj.location
        # wheel radius approximated from the object's Y dimension
        radius = wheel_obj.dimensions.y / 2
        contact_point = wheel_center - mathutils.Vector((0, 0, radius))
        contact_point_world_space = instance.matrix_world @ contact_point
        wheel_contact_points.append(contact_point_world_space)
        # hack-fix for one track vehicles, just pretend it has another 2
        # wheels nearby so we can raycast the plane
        if one_track_vehicle:
            fixture_contact_point_ws = (
                instance.matrix_world @ mathutils.Matrix.Translation((0.1, 0, 0)) @ contact_point
            )
            wheel_contact_points.append(fixture_contact_point_ws)
        if debug:
            bpy.ops.object.empty_add(location=contact_point_world_space)
            obj = bpy.context.object
            obj.name = "B: " + wheel_obj.name
            obj.show_name = True
    return wheel_contact_points
# Callable returning (original bottom corners, raycast-adjusted bottom corners);
# the second element is None when raycasting any of the corners failed.
GetRayCastedPlaneCallable = typing.Callable[
    [], typing.Tuple[typing.List[mathutils.Vector], typing.Optional[typing.List[mathutils.Vector]]]
]
def snap_to_ground_iterate(
    instance: bpy.types.Object,
    obj: bpy.types.Object,
    instance_old_matrix_world: mathutils.Matrix,
    get_ray_casted_plane: GetRayCastedPlaneCallable,
    debug: bool = False,
) -> None:
    """Snap to ground iteratively, we first estimate final rotation until angular delta
    is lower than our tolerance. Only then we can get an accurate raycast position delta.

    'instance' is the object whose matrix_world is adjusted, 'obj' is only used for log
    messages here (the raycast geometry is captured inside 'get_ray_casted_plane').
    On raycast failure 'instance' is restored to 'instance_old_matrix_world'.
    """
    ANGULAR_DELTA_TOLERANCE = math.radians(1)
    # Safety cap so a non-converging plane fit cannot loop forever
    MAXIMUM_ITERATIONS = 10
    iteration = 1
    while True:
        bottom_corners, altered_bottom_corners = get_ray_casted_plane()
        if altered_bottom_corners is None:
            if debug:
                logger.debug(
                    f"Failed to raycast all corners while estimating rotation "
                    f"for {obj.name}, instance={instance.name}. Skipping..."
                )
            # Roll back any partial rotation applied in previous iterations
            instance.matrix_world = instance_old_matrix_world
            return
        # Fit a plane both through the current support points and through their
        # ground projections; the rotation between the two planes is our correction
        orig_plane_normal, _, orig_plane_centroid = linalg_bpy.fit_plane_to_points(bottom_corners)
        altered_plane_normal, _, altered_plane_centroid = linalg_bpy.fit_plane_to_points(
            altered_bottom_corners
        )
        if debug:
            # Visualize both fitted planes in the scene
            orig_plane_rotation = mathutils.Vector([0, 0, 1]).rotation_difference(orig_plane_normal)
            altered_plane_rotation = mathutils.Vector([0, 0, 1]).rotation_difference(
                altered_plane_normal
            )
            bpy.ops.mesh.primitive_plane_add(
                location=orig_plane_centroid, rotation=orig_plane_rotation.to_euler(), size=3
            )
            bpy.ops.mesh.primitive_plane_add(
                location=altered_plane_centroid, rotation=altered_plane_rotation.to_euler(), size=3
            )
        delta_rotation = mathutils.Vector(orig_plane_normal).rotation_difference(
            altered_plane_normal
        )
        # Since matrix_world is composed as location @ rotation @ scale, we need to decompose it
        # into separate matrices, multiply only rotation and then compose it back.
        # See https://blender.stackexchange.com/a/44783
        # We could also use e.g. instance.rotation_quaternion but we would need to call
        # bpy.context.view_layer.update() after each change to update matrix_world, which is slower.
        orig_loc, orig_rot, orig_scale = instance.matrix_world.decompose()
        orig_loc_mat = mathutils.Matrix.Translation(orig_loc)
        orig_rot_mat = orig_rot.to_matrix().to_4x4()
        delta_rot_mat = delta_rotation.to_matrix().to_4x4()
        orig_scale_mat = mathutils.Matrix.Diagonal(orig_scale).to_4x4()
        # assemble the new matrix
        instance.matrix_world = orig_loc_mat @ delta_rot_mat @ orig_rot_mat @ orig_scale_mat
        if debug:
            logger.debug(f"iteration: {iteration}, angular error: {delta_rotation.angle}")
        # Converged: the remaining angular correction is below tolerance
        if abs(delta_rotation.angle) < ANGULAR_DELTA_TOLERANCE:
            break
        iteration += 1
        if iteration > MAXIMUM_ITERATIONS:
            break
    # Rotation settled; raycast once more to get an accurate translation delta
    bottom_corners, altered_bottom_corners = get_ray_casted_plane()
    if altered_bottom_corners is None:
        if debug:
            logger.debug(
                f"Failed to raycast all corners while estimating position "
                f"for {obj.name}, instance={instance.name}. Skipping..."
            )
        instance.matrix_world = instance_old_matrix_world
        return
    orig_plane_normal, _, orig_plane_centroid = linalg_bpy.fit_plane_to_points(bottom_corners)
    altered_plane_normal, _, altered_plane_centroid = linalg_bpy.fit_plane_to_points(
        altered_bottom_corners
    )
    # Move the instance so its support plane centroid lands on the ground centroid
    delta_location = altered_plane_centroid - orig_plane_centroid
    instance.matrix_world = mathutils.Matrix.Translation(delta_location) @ instance.matrix_world
def ray_cast_plane(
    ground_objects: typing.Iterable[bpy.types.Object],
    bottom_corners: typing.List[mathutils.Vector],
    grace_padding: float = 0.1,
    debug: bool = False,
) -> typing.Tuple[typing.List[mathutils.Vector], typing.Optional[typing.List[mathutils.Vector]]]:
    """Raycast from 'bottom_corners' points downwards to 'ground_objects'.

    Return 'bottom_corners' and list of intersection points closest to each bottom_corner point.
    If any corner fails to hit any ground object, None is returned in place of the list.
    'grace_padding' lifts each ray origin slightly above the corner so corners already
    touching (or slightly below) the ground still register a hit.
    """
    altered_bottom_corners = copy.deepcopy(bottom_corners)
    # Distance to the closest hit found so far per corner; math.inf == no hit yet
    altered_bottom_distances = [math.inf for _ in bottom_corners]
    for ground_object in ground_objects:
        # Object.ray_cast works in object space; the world->object transform is the
        # same for every corner, so invert the matrix once per ground object
        world_to_obj = ground_object.matrix_world.inverted()
        for i, bottom_corner in enumerate(bottom_corners):
            if debug:
                logger.debug("Raycast from: " + str(bottom_corner))
            bottom_corner_obj_space = world_to_obj @ (
                bottom_corner + mathutils.Vector([0, 0, grace_padding])
            )
            bottom_corner2_obj_space = world_to_obj @ (
                bottom_corner + mathutils.Vector([0, 0, grace_padding - 1])
            )
            direction_obj_space = bottom_corner2_obj_space - bottom_corner_obj_space
            try:
                result, new_bottom_corner_obj_space, _, _ = ground_object.ray_cast(
                    bottom_corner_obj_space, direction_obj_space
                )
            except Exception:
                # Don't let one broken ground object abort the whole snap; log and
                # treat it as a miss for this corner (was a bare 'except:' before)
                logger.exception("Uncaught exception while raycasting to the ground")
                result = None
                new_bottom_corner_obj_space = bottom_corner_obj_space
            if not result:
                continue
            new_bottom_corner = ground_object.matrix_world @ new_bottom_corner_obj_space
            distance = (bottom_corners[i] - new_bottom_corner).length
            # Keep only the closest hit across all ground objects
            if distance < altered_bottom_distances[i]:
                altered_bottom_corners[i] = new_bottom_corner
                if debug:
                    bpy.ops.object.empty_add(type="SINGLE_ARROW", location=new_bottom_corner)
                altered_bottom_distances[i] = distance
    if math.inf in altered_bottom_distances:
        return bottom_corners, None
    else:
        return bottom_corners, altered_bottom_corners
def snap_to_ground_separate_wheels(
    instance: bpy.types.Object,
    obj: bpy.types.Object,
    wheels: typing.List[bpy.types.Object],
    ground_objects: typing.List[bpy.types.Object],
    debug: bool = False,
) -> None:
    """Snap 'instance' to the ground using the contact points of its 'wheels'."""
    # Remember the original transform so the iteration can restore it on failure
    saved_matrix_world = copy.deepcopy(instance.matrix_world)

    def raycast_wheel_plane() -> (
        typing.Tuple[typing.List[mathutils.Vector], typing.Optional[typing.List[mathutils.Vector]]]
    ):
        # Raycast downwards from the points where the wheels touch the ground
        contact_points = get_wheel_contact_points(wheels, instance, debug)
        return ray_cast_plane(ground_objects, contact_points)

    snap_to_ground_iterate(instance, obj, saved_matrix_world, raycast_wheel_plane, debug)
def snap_to_ground_adjust_rotation(
    instance: bpy.types.Object,
    obj: bpy.types.Object,
    ground_objects: typing.List[bpy.types.Object],
    debug: bool = False,
) -> None:
    """Snap 'instance' to the ground, tilting it to match the terrain under its bounding box."""
    # Remember the original transform so the iteration can restore it on failure
    saved_matrix_world = copy.deepcopy(instance.matrix_world)

    def raycast_bbox_plane() -> (
        typing.Tuple[typing.List[mathutils.Vector], typing.Optional[typing.List[mathutils.Vector]]]
    ):
        # get bounding box corners in world space
        bbox_corners = [
            instance.matrix_world @ mathutils.Vector(corner) for corner in obj.bound_box
        ]
        # I hope Blender never changes this, it's quite difficult to autodetect
        # (these indices are the four bottom corners of bound_box)
        bottom_corners = [bbox_corners[0], bbox_corners[3], bbox_corners[4], bbox_corners[7]]
        return ray_cast_plane(ground_objects, bottom_corners)

    snap_to_ground_iterate(instance, obj, saved_matrix_world, raycast_bbox_plane, debug)
def snap_to_ground_no_rotation(
    instance: bpy.types.Object,
    obj: bpy.types.Object,
    ground_objects: typing.List[bpy.types.Object],
    debug: bool = False,
) -> None:
    """Snap 'instance' to the ground by translation only, keeping its rotation.

    Raycasts straight down from the lowest world-space vertex of 'obj' and moves
    'instance' so that this vertex lands on the closest ground hit. Does nothing
    when 'obj' has no mesh data or no ground object is hit.
    """

    def get_ray_casted_point(
        grace_padding: float = 0.1,
    ) -> typing.Tuple[typing.Optional[mathutils.Vector], typing.Optional[mathutils.Vector]]:
        # Returns (lowest point of obj in world space, closest ground hit) --
        # either element may be None when unavailable
        if obj.data is None:
            # obj is not 'MESH', it can be 'EMPTY' for example, don't do anything with it
            return None, None
        # get lowest point in world space
        obj_lowest_vertex = min(obj.data.vertices, key=lambda v: (instance.matrix_world @ v.co).z)
        obj_lowest_point = instance.matrix_world @ obj_lowest_vertex.co
        altered_highest_point = None
        # Distance to the closest hit found so far; math.inf == no hit yet
        altered_highest_point_distance = math.inf
        for ground_object in ground_objects:
            if debug:
                logger.debug("Raycast from: " + str(obj_lowest_point))
            # Object.ray_cast works in object space; invert the matrix only once
            world_to_obj = ground_object.matrix_world.inverted()
            lowest_point_obj_space = world_to_obj @ (
                obj_lowest_point + mathutils.Vector([0, 0, grace_padding])
            )
            lowest_point2_obj_space = world_to_obj @ (
                obj_lowest_point + mathutils.Vector([0, 0, grace_padding - 1])
            )
            direction_obj_space = lowest_point2_obj_space - lowest_point_obj_space
            try:
                result, altered_point_obj_space, _, _ = ground_object.ray_cast(
                    lowest_point_obj_space, direction_obj_space
                )
            except Exception:
                # Treat a broken ground object as a miss instead of aborting the snap
                logger.exception("Uncaught exception while raycasting to the ground")
                result = None
                altered_point_obj_space = lowest_point_obj_space
            if not result:
                continue
            altered_point = ground_object.matrix_world @ altered_point_obj_space
            distance = (obj_lowest_point - altered_point).length
            if distance < altered_highest_point_distance:
                altered_highest_point = altered_point
                if debug:
                    bpy.ops.object.empty_add(location=altered_point)
                altered_highest_point_distance = distance
        if math.isinf(altered_highest_point_distance):
            return obj_lowest_point, None
        else:
            return obj_lowest_point, altered_highest_point

    obj_lowest_point, altered_highest_point = get_ray_casted_point()
    if altered_highest_point is None:
        if debug:
            logger.debug(
                f"Failed to raycast the highest altered point while estimating position "
                f"for {obj.name}, instance={instance.name}. Skipping..."
            )
        return
    delta_location = altered_highest_point - obj_lowest_point
    instance.matrix_world = mathutils.Matrix.Translation(delta_location) @ instance.matrix_world
@@ -0,0 +1,122 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
# Module containing various utilities and wrappers around bpy to ease work with bpy bezier splines
import bpy
import mathutils
import typing
def copy_bezier_point(src: bpy.types.BezierSplinePoint, dst: bpy.types.BezierSplinePoint) -> None:
    """Copy all relevant properties of bezier point 'src' onto bezier point 'dst'."""
    for attribute in (
        "co",
        "handle_left",
        "handle_left_type",
        "handle_right",
        "handle_right_type",
        "tilt",
        "radius",
    ):
        setattr(dst, attribute, getattr(src, attribute))
def add_bezier_point_to_spline(
    spline: bpy.types.Spline,
    position: mathutils.Vector,
    prepend: bool = False,
    handle_type: str = 'VECTOR',
) -> bpy.types.BezierSplinePoint:
    """Append (or prepend) a bezier point at 'position' to 'spline' and return it."""
    assert spline.type == 'BEZIER'
    spline.bezier_points.add(1)
    if prepend:
        # Blender cannot insert points at the front, so shift every point one slot
        # towards the end and reuse slot 0 for the new point. Extruding via operators
        # would also work but would introduce bpy.ops overhead here.
        points = spline.bezier_points
        for dst_idx in range(len(points) - 1, 0, -1):
            copy_bezier_point(points[dst_idx - 1], points[dst_idx])
        new_point = points[0]
    else:
        new_point = spline.bezier_points[-1]
    new_point.co = position
    new_point.handle_left_type = handle_type
    new_point.handle_right_type = handle_type
    return new_point
def remove_bezier_point(
    curve_obj: bpy.types.Object, spline: bpy.types.Spline, remove_idx: int
) -> None:
    """Removes bezier point from a spline by changing selection and calling bpy.ops.curve.delete"""
    # The delete operator works on the active object's selection, so select
    # exactly the one point we want gone and deselect everything else
    bpy.context.view_layer.objects.active = curve_obj
    bpy.ops.curve.select_all(action='DESELECT')
    for idx, point in enumerate(spline.bezier_points):
        point.select_control_point = idx == remove_idx
    result = bpy.ops.curve.delete(type='VERT')
    if 'FINISHED' not in result:
        raise RuntimeError(f"Failed to remove bezier point from '{repr(spline)}' on {remove_idx}")
def join_splines(
    curve: bpy.types.Curve,
    spline1: bpy.types.Spline,
    spline2: bpy.types.Spline,
    reverse: bool = False,
    prepend: bool = False,
) -> bpy.types.Spline:
    """Joins splines 'spline1' and 'spline2' into a new spline in 'curve', new spline is returned.

    The result has len(spline1) + len(spline2) - 1 points: the junction point is shared
    and its data comes from 'spline2' (it overwrites the adjacent 'spline1' point).
    'reverse' iterates spline2 back to front; 'prepend' puts spline2's points first.
    The original splines are left in 'curve', removing them is up to the caller.
    """
    points = reversed(spline2.bezier_points) if reverse else spline2.bezier_points
    l1 = len(spline1.bezier_points)
    l2 = len(spline2.bezier_points)
    # A fresh bezier spline already contains one point, so add one slot less
    new_spline = curve.splines.new(type='BEZIER')
    new_spline.bezier_points.add(l1 + l2 - 2)
    # Where spline2's points start in the new spline
    dst_start_idx = 0 if prepend else l1 - 1
    if prepend:
        # Copy the original point after the prepended point, so it is not lost
        for i, point in reversed(list(enumerate(spline1.bezier_points[:l1]))):
            copy_bezier_point(point, new_spline.bezier_points[l2 - 1 + i])
    else:
        # Copy the original values before the appended ones
        for i, point in enumerate(spline1.bezier_points):
            copy_bezier_point(point, new_spline.bezier_points[i])
    # Copy spline2's points; at the junction index this overwrites spline1's point
    for i, bezier_point in enumerate(points):
        copy_bezier_point(bezier_point, new_spline.bezier_points[dst_start_idx + i])
    return new_spline
def split_spline(
    curve: bpy.types.Curve, spline: bpy.types.Spline, split_idx: int
) -> typing.Tuple[bpy.types.Spline, bpy.types.Spline]:
    """Splits bezier spline 'spline' into two splines inside 'curve' on 'split_idx'
    Point on 'split_idx' will become present on both splines. Original spline has to be removed
    by caller.
    """
    point_count = len(spline.bezier_points)
    left = curve.splines.new(type='BEZIER')
    right = curve.splines.new(type='BEZIER')
    # New splines already contain one point; size them so the point at
    # 'split_idx' ends up in both halves
    left.bezier_points.add(split_idx)
    right.bezier_points.add(point_count - split_idx - 1)
    for i, point in enumerate(spline.bezier_points):
        if i <= split_idx:
            copy_bezier_point(point, left.bezier_points[i])
        if i >= split_idx:
            copy_bezier_point(point, right.bezier_points[i - split_idx])
    return left, right
def new_bezier_spline(
    curve_obj: bpy.types.Object, position: mathutils.Vector, handle_type: str
) -> typing.Tuple[bpy.types.Spline, bpy.types.BezierSplinePoint]:
    """Creates new spline on 'curve_obj', returns new spline and its first bezier point
    Arguments 'position' and 'handle_type' apply to the 0th created point that's in each
    new created spline in Blender.
    """
    # NOTE(review): 'curve_obj' is annotated as bpy.types.Object, but '.splines' is an
    # attribute of curve data (bpy.types.Curve) as used in join_splines/split_spline above --
    # callers presumably pass curve data here; confirm and fix either the annotation or the call.
    spline = curve_obj.splines.new(type='BEZIER')
    # A freshly created bezier spline already contains one point -- configure it
    bezier_point = spline.bezier_points[0]
    bezier_point.co = position
    bezier_point.handle_left_type = handle_type
    bezier_point.handle_right_type = handle_type
    return spline, bezier_point
@@ -0,0 +1,553 @@
# copyright (c) 2018- polygoniq xyz s.r.o.
# original author: Xavier Halloran
# https://gitlab.com/Reivax
"""
split_file_reader
Copyright (C) 2022 Xavier Halloran, United States
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import io
import logging
import os
import typing
logger = logging.getLogger(f"polygoniq.{__name__}")
# The generator has a need to know file direction to set the pointer correctly.
# The values are also added directly to the current file index when advancing.
_BACKWARD = -1
_STATIONARY = 0
_FORWARD = 1
class SplitFileReader(io.RawIOBase):
"""Acts file-like for a list of files opened readably in binary mode.
Provides `readable`, `writable`, `seekable`, `tellable`.
Implements `read`, `readinto`, `seek`, `tell`
Prohibits `write`, `writelines`, readline`, `readlines`, `truncate`
Also implements `open`, `close`, `closed`, `__repr__`, `__iter__` and `__next__`., `__enter__`, and `__exit__`
This library makes use of multiple file objects. Unlike the io.IOBase specs, this class can make multiple system
calls for any given `seek` or `read` call; therefore it does not extend the io.IOBase Abstract Base Class. This
class cannot write.
This class can be used directly with ZipFile or TarFile, as follows:
with SplitFileReader([zip_files]) as sfr:
with zipfile.ZipFile(sfr, mode="r") as zf:
with SplitFileReader([tar_files]) as sfr:
with tarfile.open(fileobj=sfr, mode="r") as tf:
There is no actual enforcement of integrity of the `files` list, one could swap out names other than the
currently open file. The list order is important, and it must be indexable. The entries in `files` do not
need to be unique.
`close` must be called just like any other file, or a single file descriptor may be left open. Making use of the
context managed approach will take care of that as well. If str or path-like values are passed, they will be closed
automatically; but if file-like objects are passed, they will not be closed, manage those objects externally with
their own context managers.
This class is not thread-safe; no method is idempotent, all of them affect the object state. However, since the
underlying files are all read-only, multiple concurrent instances of this class, attached to the same underlying
files, is allowed.
"""
def __init__( # noqa: PLR0913
self,
files: typing.List[typing.Union[str, os.PathLike, typing.Any]],
mode: str = "rb",
stream_only: bool = False,
validate_all_readable: bool = False,
iter_size: int = 1,
) -> None:
"""Creates the file-like object around a series of files. At return, there will be a single open file descriptor,
on the first file in the list.
`files` may be any of os.PathLike, a `str` or any `file-like` object available. If it is a `str` or `PathLike`
a new `open()` will be called with that value as a parameter in a context manager. Otherwise, the file-like
will have `seek`, `tell`, and `read` called on it directly. Any mix of types is allowed in the list.
"""
if mode not in ["rb", "br", "r"]:
# On Unix, "r" and "rb" are the same. On windows, "r" will alter line endings.
raise ValueError(f"mode must be 'rb', was {mode}")
if stream_only and validate_all_readable:
raise ValueError("`stream_only` and `validate_all_readable` cannot both be set.")
# Need to track a list of files, in order, to concat. Must be random-accessible.
self._files = files
# When using this class as an iterable, or if attached to some sort of streaming output systems, set this to
# prevent seeking. tarfile stream reading blocks seek on its own, this is not a requirement for that.
self._stream_only = stream_only
# Only applicable to using this object as an iterable. On next(), this is the length applied to the read()
# function. This can be set at any time between read/__next__ calls.
self._iter_size = iter_size
# index of where in the `files` list to currently process. Starts at -1, to allow the generator to advance
# into the first file immediately.
self._current_file_desc_idx = 0
# Create the generator function to move through the list.
self._file_desc_generator = self._generate_next_file(_STATIONARY)
# Init the file pointer, and open a true file pointer to an underlying file-like object.
self._current_file_desc = next(self._file_desc_generator)
# Value that `tell()` responds with.
self._told = 0
if validate_all_readable:
self.test_all_readable()
def readable(self) -> bool:
return True
def readall(self) -> bytes:
return self._read(-1, read_once=False)
def read1(self, size: typing.Optional[int] = None) -> typing.AnyStr:
"""Read the specified amount, making underlying file boundaries invisible to the caller.
If the current file pointer has been set to None, indicating an earlier call to `close()`, raises IOError.
May make multiple system calls, but will only make a single `read` system call in total. May close and open
a file pointer and attempt a `seek`.
"""
if size is None or size < 0:
return self._read(-1, read_once=True)
else:
return self._read(size, read_once=True)
def read(self, size: typing.Optional[int] = None) -> typing.AnyStr:
"""
Read the specified amount, making underlying file boundaries invisible to the caller.
If the current file pointer has been set to None, indicating an earlier call to `close()`, raises IOError.
May make multiple system calls, but only one to each File Descriptor.
"""
if size is None or size < 0:
return self.readall()
else:
return self._read(size)
def _read(self, target_size: int, read_once=False):
if not self._current_file_desc:
raise OSError("SplitFileReader is closed.")
if target_size >= 0:
# file.read() may return zero-length data, even if only 1 byte is requested and there is actually more data.
# This is because the end of a single file may have been reached, and more files need to be opened.
ret = self._current_file_desc.read(target_size)
remaining = target_size - len(ret)
# Reads less than the total size are indicative that the end of a file has been reached, and the next one
# should be cycled in.
while remaining > 0:
if not self._safe_advance_file_desc(_FORWARD):
# More requested to be read, but there are no more files to open.
break
if read_once:
# read1 calls only do a single filestream read, but file pointers still need to advance.
break
read = self._current_file_desc.read(remaining)
remaining -= len(read)
ret += read
self._told += len(ret)
else:
# Read -1/None behaves differently.
ret = self._current_file_desc.read(target_size)
while self._safe_advance_file_desc(_FORWARD):
read = self._current_file_desc.read(target_size)
ret += read
self._told += len(ret)
return ret
def readinto(self, buffer: bytearray) -> typing.Optional[int]:
"""This is the copy/paste implementation of `io.FileIO.readinto()`"""
data = self._read(len(buffer), read_once=False)
n = len(data)
buffer[:n] = data
return n
def readinto1(self, buffer: bytearray) -> typing.Optional[int]:
"""This is the copy/paste implementation of `io.FileIO.readinto()`"""
data = self._read(len(buffer), read_once=True)
n = len(data)
buffer[:n] = data
return n
def seekable(self) -> bool:
return not self._stream_only
def seek(self, offset: int, whence: int = 0) -> int: # noqa: PLR0912
"""Move the file pointer along a file. May advance over zero or more actual files.
POSIX allows to seek before or after the end of a file, even in read-only mode. `seek()` before the start of
the first file will fail; `seek()` beyond the end of the last file is fine.
If the current file pointer has been set to None, indicating an earlier call to `close()`, raises IOError.
If the net action of a seek() will move nowhere, no seek call is passed to the underlying file descriptor.
Some tools, like `zipfile.ZipFile` after a `read()`, will cause a `seek()` to the end of the read location.
This is a redundant call, with no net movement, but can make network disk based seeks very expensive, especially
for large numbers of large files, so does nothing here.
`os.SEEK_SET`, `os.SEEK_CUR`, and `os.SEEK_END` are whence 0, 1, and 2, respectively. `os.SEEK_HOLE` and
`os.SEEK_DATA` are not supported.
"""
if self._stream_only:
raise OSError("Seek performed on a streaming file.")
if not self._current_file_desc:
raise OSError("SplitFileReader is closed.")
if whence == 0:
# From the start
# Do not always immediately `_seek_to_head`, and then scan forward the offset. There are many libraries
# out there that make excessive use of `seek(x, 0)` when either `seek(x, 1) or even `seek(0, 1)` would be
# more reasonable. zipfile is one such library, making use of one `seek(x, 2)`, and then exclusively
# `seek(x, 0)`
how_far_to_go = -(self._told - offset)
if offset == 0:
# A `seek(0, 0)` is just a shortcut to `_seek_to_head`
self._seek_to_head()
elif how_far_to_go == 0:
pass
elif how_far_to_go > 0:
self._scan_forward(how_far_to_go)
elif how_far_to_go < 0:
self._scan_backward(how_far_to_go)
elif whence == 1:
# From the current position
how_far_to_go = offset
if how_far_to_go > 0:
self._scan_forward(how_far_to_go)
elif how_far_to_go < 0:
self._scan_backward(how_far_to_go)
elif whence == 2: # noqa: PLR2004
# From the end.
how_far_to_go = offset
# Without a-priori knowledge of the total file sizes, we can't really calculate the offset to go.
# So, zip all thw way to the end, then navigate as appropriate.
self._seek_to_tail()
if how_far_to_go < 0:
self._scan_backward(offset)
elif how_far_to_go > 0:
self._scan_forward(how_far_to_go)
else:
raise IndexError("Whence must be 0, 1, or 2")
return self.tell()
def _scan_forward(self, offset: int) -> None:
# Forward seeking is tricky; it is possible to seek beyond the end of a file, even in read-only mode. So seek
# immediately to the end of the current file descriptor, and check the distance moved. If moved too far, back up
# to the correct position. If moved not far enough, go to the next file descriptor, and try again.
remaining = offset
while remaining > 0:
start_pos = self._current_file_desc.tell()
# Go all the way to the _end_ of file explicitly, because it is allowed to seek() beyond that and get
# misleading tell() information.
self._current_file_desc.seek(0, 2)
# Track the actual net movement.
end_pos = self._current_file_desc.tell()
moved = end_pos - start_pos
# Did the seek to the end of the file go too far?
if moved > remaining:
# Overshot the seek forward. Move backward again.
corrective_move = remaining - moved
self._current_file_desc.seek(corrective_move, 1)
end_pos = self._current_file_desc.tell()
moved = end_pos - start_pos
# It moved backwards, but `moved` is still positive, because it holds the net movement in this loop.
self._told += moved
remaining = 0
# Did the seek to the end of the file not go far enough?
else:
remaining -= moved
self._told += moved
# The generator for advancing file descriptors will ensure the pointer is at the start of the file part
if not self._safe_advance_file_desc(_FORWARD):
break
def _scan_backward(self, offset: int) -> None:
# The backward scan is implemented by moving the file pointer all the way to the front of the current file
# descriptor, and checking if the movement has gone far enough. If more to go, move to the previous file,
# and repeat. If too far, seek forward again to the correct position.
# This is a negative value.
remaining = offset
# Remaining is a negative amount, because the scan is going backwards.
while remaining < 0:
start_pos = self._current_file_desc.tell()
self._current_file_desc.seek(0, 0)
end_pos = self._current_file_desc.tell()
# This is a negative value
moved = end_pos - start_pos
if moved < remaining:
# Overshot the backup. Move forward again.
corrective_seek = -(moved - remaining)
self._current_file_desc.seek(corrective_seek, 1)
end_pos = self._current_file_desc.tell()
moved = end_pos - start_pos
# It moved forwards, but `moved` is still negative, because it holds the net movement in this loop.
self._told += moved
remaining = 0
else:
# It moved backwards, and `moved` is negative.
remaining -= moved
self._told += moved
# The generator for advancing file descriptors will ensure the pointer is at the tail of the file part
if not self._safe_advance_file_desc(_BACKWARD):
break
def _seek_to_head(self) -> None:
"""Set the position to zero, tell to zero, and at the head of the first file.
No need to traverse the list and move through the intermediaries, as the zero position is always known.
"""
self._current_file_desc_idx = 0
self._current_file_desc = self._file_desc_generator.send(_STATIONARY)
self._current_file_desc.seek(0, 0)
self._told = 0
def _seek_to_tail(self) -> None:
# Unlike a true file-like object, zipping to the end will omit some information. Must open each file, in order,
# and skip to their end, to count the individual file sizes, all the way to the end of that list.
# This may be slow if the disk is slow.
# This process could be accelerated by doing a one-time pass and counting the file sizes directly, but this may
# not be desirable. In practice, `seek(x, 2)` is rare, used by ZipFile, and even then, just at the start.
while True:
# Save starting position, might not be zero.
start = self._current_file_desc.tell()
# The generator for advancing file descriptors will ensure the pointer is at the start of the file part.
# Go to the end.
self._current_file_desc.seek(0, 2)
# Check position
end = self._current_file_desc.tell()
# Keep count of the file sizes.
self._told += end - start
# Don't roll off the last one.
if not self._safe_advance_file_desc(_FORWARD):
break
def _safe_advance_file_desc(self, direction: int) -> bool:
"""Advance the file descriptor, but do not advance off the end, either way.
Return true if advanced, False if stalled.
"""
if (direction == _FORWARD and self._current_file_desc_idx + 1 >= len(self._files)) or (
direction == _BACKWARD and self._current_file_desc_idx <= 0
):
return False
else:
self._advance_file_desc(direction)
return True
def _advance_file_desc(self, direction: int) -> None:
"""Move the file actual file descriptor around, to allow this class to continue to act as a single file-like reader
Fix the file pointer position to either the start or end of the underlying real file, depending on the
direction of movement.
To close, call `close()` directly on the generator.
"""
try:
self._current_file_desc = self._file_desc_generator.send(direction)
# Force the file pointer to the front ot the file. This should be a given.
if direction == _BACKWARD:
self._current_file_desc.seek(0, 2)
# If going backwards, need to make sure that the rollover put the file pointer at the _end_ of the file,
# not the start.
elif direction == _FORWARD:
self._current_file_desc.seek(0, 0)
except StopIteration:
self._current_file_desc = None
def _generate_next_file(
self, direction: int = _FORWARD
) -> typing.Generator[typing.BinaryIO, int, None]:
# Only call from `_advance_file_desc`, or, `_seek_to_head` for shortcut operation.
# Only create in `__init__`
# `send()` the direction of travel to this generator. Backward -1, Stationary 0, Forward 1, or Closing 2.
# This can raise a `FileNotFoundError`, and as such may propagate up through `seek` or `read`
# Using this generator allows the context manager to open and close the file pointers automatically when required.
while True:
self._current_file_desc_idx += direction
if self._current_file_desc_idx < 0 or self._current_file_desc_idx >= len(self._files):
logger.info("Moved off end of files list. No current fd.")
direction = yield None
else:
file = self._files[self._current_file_desc_idx]
# This logic is boosted right out of the zipfile.ZipFile.__init__, which takes either a filepath,
# path-like, or file-like for its `file` argument.
# Check if we were passed a path-like object
if isinstance(file, os.PathLike):
file = os.fspath(file)
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
with open(file, "rb") as self._current_file_desc:
logger.info(f"Opening new fd on {file}.")
direction = yield self._current_file_desc
logger.info(f"Closing fd on {file}.")
else:
# No, its (probably) already file-like.
self._current_file_desc = file
logger.info(f"Passthrough file-like yielding {file}.")
direction = yield self._current_file_desc
logger.info(f"Passthrough file-like done {file}.")
def test_all_readable(self):
"""Validate every file in the `files` parameter at `__init__` is actually readable.
Seeks to beginning of file list, then opens each file in read-only mode, seeks to the end of each of them, then
back to current position. Will raise `IOError` or `FileNotFoundError` or other, appropriate error for files
that cannot be opened for reading and seeking.
"""
if self._stream_only:
raise OSError("Seek performed on a streaming file.")
saved_tell = self._told
self._seek_to_head()
self._seek_to_tail()
self.seek(saved_tell, 0)
def tellable(self) -> bool:
return True
def tell(self) -> int:
"""Logically identical to tell() on any other file-like object.
Returns the offset as a sum of all previous file sizes, plus current file tell()
"""
if self.closed:
raise ValueError("tell on a closed file")
return self._told
@classmethod
def open(cls, *args, **kwargs):
"""Wraps the init constructor"""
return cls(*args, **kwargs)
def close(self) -> None:
"""Closes the existing file descriptor, sets the current file descriptor to None, and disables the ability to seek or read"""
logger.info("Closing last file descriptor.")
self._file_desc_generator.close()
self._current_file_desc = None
@property
def closed(self) -> bool:
"""Checks the status of the underlying streams.
If there is no open File Descriptor attached to any file in the
list, then this object is closed.
"""
return self._current_file_desc is None
def __iter__(self) -> typing.Iterable:
# Iterable operation implies streaming mode, logically, although there is not a technical reason why this
# module could not permit a seek() between calls to __iter__.
self._stream_only = True
return self
def set_iter_size(self, iter_size: int = 1) -> None:
self._iter_size = iter_size
def __next__(self) -> typing.AnyStr:
if not self._stream_only:
raise OSError("Not in streaming mode.")
read = self.read(self._iter_size)
if not read:
raise StopIteration
return read
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __repr__(self):
from json import dumps
try:
# Might be closed, or might be an empty list.
cfile = dumps(self._files[self._current_file_desc_idx])
ctell = self._current_file_desc.tell()
cdesc = self._current_file_desc.fileno()
except IndexError:
cfile = None
ctell = 0
cdesc = 0
return "<{cls}, {id}: Tell: {tell}, File Desc: {fdesc}, File Name: {fname}, File Tell: {ftell}>".format(
cls=self.__class__.__name__,
id=hex(id(self)),
tell=self.tell(),
fdesc=cdesc,
fname=cfile,
ftell=ctell,
)
# The following methods exist to support the io.RawIO behavior, and mostly disables their use.
# This permits the SplitFileReader to work within a context that expects the io.IOBase capabilities,
# such as TextIOWrapper
    def writable(self) -> bool:
        """Always False - this reader is strictly read-only."""
        return False
    def write(self, b: typing.Union[bytes, bytearray]) -> typing.Optional[int]:
        """Unsupported: always raises io.UnsupportedOperation."""
        # No writing allowed with this class.
        raise io.UnsupportedOperation(f"{self.__class__.__name__} cannot write.")
    def writelines(self, lines: typing.Iterable[typing.Union[bytes, bytearray]]) -> None:
        """Unsupported: always raises io.UnsupportedOperation."""
        # No writing allowed with this class.
        raise io.UnsupportedOperation(f"{self.__class__.__name__} cannot write.")
    def truncate(self, size: typing.Optional[int] = None) -> int:
        """Unsupported: always raises io.UnsupportedOperation."""
        # No writing allowed with this class.
        raise io.UnsupportedOperation(f"{self.__class__.__name__} cannot truncate.")
    def isatty(self) -> bool:
        """Always False - a multi-file reader can never be an interactive terminal."""
        # Definitely cannot be a TTY.
        return False
    def flush(self) -> None:
        """No-op: there is no write buffer to flush on a read-only stream."""
        # No writing allowed with this class.
        return
    def fileno(self) -> int:
        """Unsupported: a single OS-level descriptor cannot represent a file chain."""
        # It is usually used by os.stat to get a filesize, which is meaningless here.
        raise io.UnsupportedOperation(f"{self.__class__.__name__} should not return a fileno")
    def readline(self, size: int = 0) -> bytes:
        """Unsupported: line semantics require text decoding; wrap in io.TextIOWrapper instead."""
        raise io.UnsupportedOperation(
            f"{self.__class__.__name__} cannot decode text; use io.TextIOWrapper."
        )
    def readlines(self, hint: int = 0) -> typing.List[bytes]:
        """Unsupported: line semantics require text decoding; wrap in io.TextIOWrapper instead."""
        raise io.UnsupportedOperation(
            f"{self.__class__.__name__} cannot decode text; use io.TextIOWrapper."
        )
@@ -0,0 +1,435 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import addon_utils
import datetime
import functools
import hashlib
import json
import multiprocessing
import os
import platform
import socket
import traceback
import typing
import uuid
import enum
import threading
import logging
logger = logging.getLogger(f"polygoniq.{__name__}")
API_VERSION = 2
# useful for debugging
PRINT_MESSAGES = False
BOOTSTRAPPED = False
BOOTSTRAP_LOCK = threading.Lock()
SESSION = None
MACHINE = None
MESSAGES = []
class VerboseLevel(enum.IntEnum):
    """Determines what messages are printed to console when logging.

    Lower number -> fewer restrictions (DEBUG includes all categories);
    comparisons like `VERBOSE_LEVEL <= VerboseLevel.WARNING` rely on IntEnum ordering.
    """

    DEBUG = 0
    INFO = 1
    WARNING = 2
    ERROR = 3
    # NONE suppresses all console printing.
    NONE = 4
VERBOSE_LEVEL = getattr(VerboseLevel, os.environ.get("PQ_TELEMETRY", "WARNING"))
class Session:
    """One Blender run from the telemetry point of view."""

    def __init__(self):
        # Random id identifying this particular Blender session.
        self._uuid = uuid.uuid4().hex
        self.telemetry_api_version = API_VERSION
        # Which copy of this module produced the session (several add-ons bundle it).
        self.telemetry_implementation_path = os.path.abspath(__file__)
        # NOTE(review): utcnow() is naive (no tzinfo) and deprecated since Python 3.12;
        # kept as-is because switching to an aware datetime would change the
        # serialized timestamp format.
        self.start_timestamp = datetime.datetime.utcnow().isoformat()
class Machine:
    """Snapshot of the host (hardware, OS, network, Python, Blender) attached to telemetry."""

    def __init__(self):
        def safe_get(fn, default="N/A"):
            """Run given functor to retrieve data. Catch exceptions and provide
            a default value in case of failure.
            """
            try:
                return fn()
            except Exception:
                # 'except Exception' instead of a bare 'except' so that
                # KeyboardInterrupt/SystemExit still propagate.
                return default

        # Stable per-machine id derived from the MAC address (uuid.getnode()).
        self._uuid = uuid.UUID(int=uuid.getnode()).hex
        self.hardware = {
            "architecture": platform.machine(),
            "processor": platform.processor(),
            "cpu_count": multiprocessing.cpu_count(),
        }
        self.operating_system = (platform.system(), platform.release(), platform.version())
        self.networking = {
            # Hostname/IP resolution can fail (e.g. no network) -> "N/A" fallback.
            "hostname": safe_get(lambda: socket.gethostname()),
            "ip-address": safe_get(lambda: socket.gethostbyname(socket.gethostname())),
            "has-ipv6": socket.has_ipv6,
        }
        self.python = {
            "version": platform.python_version(),
            "build": platform.python_build(),
        }
        self.blender = {
            "version": bpy.app.version_string,
            "path": bpy.app.binary_path,
            "window_size": Machine.get_blender_window_size(),
        }

    @staticmethod
    def get_blender_window_size():
        """Returns (width, height) of the first Blender window, or (-1, -1) when
        no window is available (e.g. background mode).
        """
        width = -1
        height = -1
        try:
            width = int(bpy.context.window_manager.windows[0].width)
            height = int(bpy.context.window_manager.windows[0].height)
        except Exception:
            # Narrowed from a bare 'except'; any failure keeps the (-1, -1) fallback.
            pass
        return width, height

    @staticmethod
    def get_blender_addons() -> (
        typing.Dict[str, typing.Union[typing.List[str], typing.Dict[str, typing.Any]]]
    ):
        """Returns info about all add-on modules Blender can see, plus which of the
        enabled add-ons are loaded vs. missing.
        """
        addon_utils_modules: typing.Dict[str, typing.Dict[str, typing.Any]] = {}
        for module in addon_utils.modules():
            try:
                name = module.__name__
                assert name not in addon_utils_modules
                bl_info = getattr(module, "bl_info", {})
                path = str(module.__file__)
                addon_utils_modules[name] = {"path": path, "bl_info": bl_info}
            except Exception as e:
                # Record the failure under a random key so one broken module
                # does not hide the rest of the inventory.
                addon_utils_modules[uuid.uuid4().hex] = {
                    "error": f"Uncaught Exception while querying modules: {e}"
                }
        loaded_modules = []
        missing_modules = []
        for addon in bpy.context.preferences.addons:
            loaded_default, loaded_state = addon_utils.check(addon.module)
            if not loaded_default:
                continue
            if loaded_state:
                loaded_modules.append(str(addon.module))
            else:
                missing_modules.append(str(addon.module))
        return {
            "loaded": loaded_modules,
            "missing": missing_modules,
            "addon_utils_modules": addon_utils_modules,
        }
class MessageType:
    """String constants identifying the kind of each telemetry Message."""

    SESSION_STARTED = "session_started"
    MACHINE_REGISTERED = "machine_registered"
    # this is used by polygoniq addons to report version, etc...
    ADDON_REPORTED = "addon_reported"
    # this reports all registered addons, polygoniq or other vendors
    ALL_ADDONS_REPORTED = "all_addons_reported"
    UNCAUGHT_EXCEPTION = "uncaught_exception"
    WARNING_MESSAGE = "warning_message"
    ERROR_MESSAGE = "error_message"
    DEBUG_MESSAGE = "debug_message"
class Message:
    """A single telemetry record: a type, a payload and the reporting product."""

    def __init__(
        self,
        type: str,
        data: typing.Any = None,
        text: typing.Optional[str] = None,
        product: str = "unknown",
    ):
        # Filled in by _log() once a telemetry session exists.
        self._session_uuid: str = "unknown"
        self._timestamp = datetime.datetime.utcnow().isoformat()
        self._type = type
        if text is None:
            payload = data
        else:
            # 'text' is a convenience shortcut; it is mutually exclusive with 'data'.
            assert data is None
            payload = {"text": text}
        self.data: typing.Any = payload
        self.product = product
class PrivateWrapper:
    """Used to wrap private data such as object names in a way that can be recovered
    locally but is hidden when telemetry is sent remotely.

    This allows more information to be used in local debugging without leaking
    the user's scene.
    """

    def __init__(self, value: str):
        self.value = value

    @property
    def private_value(self):
        # Only the irreversible digest ever leaves the machine.
        digest = hashlib.sha256(self.value.encode("utf-8")).hexdigest()
        return f"private:{digest}"
class TelemetryJSONEncoder(json.JSONEncoder):
    """Local (on-machine) encoder: serializes telemetry objects including raw private values."""

    def default(self, obj: typing.Any) -> typing.Any:
        # Tag each known telemetry type with a "__class__" marker for later decoding.
        for telemetry_cls, marker in (
            (Machine, "telemetry.Machine"),
            (Session, "telemetry.Session"),
            (Message, "telemetry.Message"),
        ):
            if isinstance(obj, telemetry_cls):
                return {**obj.__dict__, "__class__": marker}
        if isinstance(obj, PrivateWrapper):
            # Local serialization keeps the recoverable raw value.
            return obj.value
        return json.JSONEncoder.default(self, obj)
class RemoteTelemetryJSONEncoder(TelemetryJSONEncoder):
    """Remote (off-machine) encoder: private values are replaced by their hashes."""

    def default(self, obj: typing.Any) -> typing.Any:
        if isinstance(obj, PrivateWrapper):
            # Never send the raw value off the machine, only its digest.
            return obj.private_value
        return super().default(obj)
def _log(msg: Message) -> None:
    """Attaches the current session id (when known) to 'msg' and records it."""
    # No 'global' declarations needed: SESSION/MESSAGES/PRINT_MESSAGES are only
    # read or mutated in place here, never rebound.
    if SESSION is not None:
        msg._session_uuid = SESSION._uuid
    MESSAGES.append(msg)
    if PRINT_MESSAGES:
        # Debugging aid - mirrors every recorded message to stdout.
        print(json.dumps(msg, indent=4, sort_keys=True, cls=TelemetryJSONEncoder))
def log_installed_addons() -> None:
    """Records the full Blender add-on inventory as an ALL_ADDONS_REPORTED message."""
    global MACHINE
    # MACHINE is created by bootstrap_telemetry(); calling this earlier is a programming error.
    assert MACHINE is not None, "logging before telemetry has been bootstrapped!"
    _log(
        Message(MessageType.ALL_ADDONS_REPORTED, data=Machine.get_blender_addons(), product="polib")
    )
def bootstrap_telemetry():
    """Initializes the global telemetry state (Session + Machine) exactly once.

    Safe to call from multiple add-ons; subsequent calls are no-ops.
    """
    global BOOTSTRAPPED
    global BOOTSTRAP_LOCK
    global MACHINE
    global SESSION
    # it is very unlikely but 2 addons might concurrently bootstrap telemetry which
    # would result in multiple machine definitions and overwrites
    with BOOTSTRAP_LOCK:
        if BOOTSTRAPPED:
            return
        # due to reloading of modules this can happen multiple times!
        # raise RuntimeError("Telemetry already bootstrapped!")
        SESSION = Session()
        _log(Message(MessageType.SESSION_STARTED, data=SESSION, product="polib"))
        MACHINE = Machine()
        _log(Message(MessageType.MACHINE_REGISTERED, data=MACHINE, product="polib"))
        # wait 5 seconds to give all addons time to register
        bpy.app.timers.register(lambda: log_installed_addons(), first_interval=5, persistent=True)
        BOOTSTRAPPED = True
class TelemetryWrapper:
    """Per-product facade over the module-level telemetry state.

    Obtained via get_telemetry(product); stamps every message with 'product'.
    """

    def __init__(self, product: str):
        self.product = product
        # Exposed so callers can wrap private data without reaching into this module.
        self.PrivateWrapper = PrivateWrapper

    def log(self, msg: Message) -> None:
        """Records 'msg' in the module-level message list."""
        _log(msg)

    def dump(self) -> str:
        """Returns all recorded messages as pretty-printed JSON (local, non-hashed encoder)."""
        global MESSAGES
        return json.dumps(MESSAGES, indent=4, sort_keys=True, cls=TelemetryJSONEncoder)

    def report_addon(self, bl_info, init_path: str) -> None:
        """Reports this add-on's name, version and install path (ADDON_REPORTED)."""
        data = {}
        data["__init__path"] = os.path.abspath(init_path)
        data["name"] = bl_info["name"]
        data["version"] = bl_info["version"]
        self.log(Message(MessageType.ADDON_REPORTED, data=data, product=self.product))

    def log_exception(self, e: Exception) -> None:
        """Deprecated! Use the python logging module (logger.exception) instead."""
        self.log(
            Message(
                MessageType.UNCAUGHT_EXCEPTION,
                data=traceback.format_exception(type(e), e, e.__traceback__),
                product=self.product,
            )
        )

    def exception(self, f):
        """A decorator that wraps the passed in function and logs
        exceptions in telemetry should they occur. The exception is re-raised.
        """

        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                logger.exception(f"Uncaught exception raised in {f}")
                raise e

        return wrapped

    def log_warning(self, message: str) -> None:
        """Deprecated! Use the python logging module (logger.warning) instead."""
        global VERBOSE_LEVEL
        self.log(
            Message(
                MessageType.WARNING_MESSAGE,
                # Include the current stack so the message can be traced to its origin.
                data=[message] + traceback.extract_stack().format(),
                product=self.product,
            )
        )
        if VERBOSE_LEVEL <= VerboseLevel.WARNING:
            print(f"WARNING[{self.product}]: {message}")

    def log_debug(self, message: str) -> None:
        """Deprecated! Use the python logging module (logger.debug) instead."""
        global VERBOSE_LEVEL
        self.log(
            Message(
                MessageType.DEBUG_MESSAGE,
                data=[message] + traceback.extract_stack().format(),
                product=self.product,
            )
        )
        if VERBOSE_LEVEL <= VerboseLevel.DEBUG:
            print(f"DEBUG[{self.product}]: {message}")

    def log_error(self, message: str) -> None:
        """Deprecated! Use the python logging module (logger.error) instead."""
        global VERBOSE_LEVEL
        self.log(
            Message(
                MessageType.ERROR_MESSAGE,
                data=[message] + traceback.extract_stack().format(),
                product=self.product,
            )
        )
        if VERBOSE_LEVEL <= VerboseLevel.ERROR:
            print(f"ERROR[{self.product}]: {message}")

    def wrap_blender_class(self, cls_):
        """Wraps __init__/draw/invoke/execute of a Blender class so that uncaught
        exceptions get logged before propagating.
        """
        if hasattr(cls_, "__init__"):
            cls_.__init__ = self.exception(cls_.__init__)

        # we have to use these wrappers because bpy doesn't accept decorators for some reason
        # shows up as "ValueError: expected Operator, ... class "draw" function to have 2 args, found 0"
        def draw_wrapper(self_, context):
            try:
                return self_._original_draw(context)
            except Exception as e:
                logger.exception(f"Uncaught exception raised in {cls_}.draw")
                raise e

        def invoke_wrapper(self_, context, event):
            try:
                return self_._original_invoke(context, event)
            except Exception as e:
                logger.exception(f"Uncaught exception raised in {cls_}.invoke")
                raise e

        def execute_wrapper(self_, context):
            try:
                return self_._original_execute(context)
            except Exception as e:
                logger.exception(f"Uncaught exception raised in {cls_}.execute")
                raise e

        # The _original_* guards make the wrapping idempotent across module reloads.
        if hasattr(cls_, "draw"):
            if not hasattr(cls_, "_original_draw"):
                cls_._original_draw = cls_.draw
            cls_.draw = draw_wrapper
        if hasattr(cls_, "invoke"):
            if not hasattr(cls_, "_original_invoke"):
                cls_._original_invoke = cls_.invoke
            cls_.invoke = invoke_wrapper
        if hasattr(cls_, "execute"):
            if not hasattr(cls_, "_original_execute"):
                cls_._original_execute = cls_.execute
            cls_.execute = execute_wrapper

    def Message(self, type: str, data: typing.Any = None, text: typing.Optional[str] = None):
        """Convenience factory: builds a Message pre-stamped with this wrapper's product."""
        return Message(type, data, text, self.product)
# Cache of one TelemetryWrapper per product name.
RETURNED_TELEMETRY_CLASSES: typing.Dict[str, TelemetryWrapper] = {}


def get_telemetry(product: str) -> TelemetryWrapper:
    """Returns the per-product TelemetryWrapper singleton, creating it on first use."""
    global RETURNED_TELEMETRY_CLASSES
    try:
        return RETURNED_TELEMETRY_CLASSES[product]
    except KeyError:
        wrapper = TelemetryWrapper(product)
        RETURNED_TELEMETRY_CLASSES[product] = wrapper
        return wrapper
__all__ = ["API_VERSION", "bootstrap_telemetry", "get_telemetry"]
@@ -0,0 +1,344 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import addon_utils
import sys
import typing
import re
import functools
import textwrap
import os
from . import utils_bpy
from . import preview_manager_bpy
import logging
logger = logging.getLogger(f"polygoniq.{__name__}")
# Global icon manager for polib icons, it NEEDS to be CLEARED from each addon module separately
# as we cannot detect from inside of polib, whether it is in use or not.
# This means the preview manager can be cleared even if it is already used, but the icons will
# be reloaded on demand on the next use.
ICON_DIR_NAME = "icons"
icon_manager = preview_manager_bpy.PreviewManager()
icon_manager.add_preview_path(os.path.join(os.path.dirname(__file__), ICON_DIR_NAME))
class SocialMediaURL:
    """URLs of polygoniq social media channels used by the UI buttons below."""

    DISCORD = "https://polygoniq.com/discord/"
    FACEBOOK = "https://www.facebook.com/polygoniq/"
    INSTAGRAM = "https://www.instagram.com/polygoniq.xyz/"
    BLENDERMARKET = "https://blendermarket.com/creators/polygoniq?ref=673"
    WEBPAGE = "https://polygoniq.com/"
    GUMROAD = "https://gumroad.com/polygoniq"
def get_asset_pack_icon_parameters(
    icon_id: typing.Optional[int], bpy_icon_name: str
) -> typing.Dict:
    """Returns dict of parameters that can be expanded in UILayout.label().

    Uses our icon with given 'icon_id' to populate 'icon_value', or falls back
    to Blender's builtin icon name 'bpy_icon_name' in 'icon'.
    """
    return {"icon": bpy_icon_name} if icon_id is None else {"icon_value": icon_id}
def draw_social_media_buttons(layout: bpy.types.UILayout, show_text: bool = False):
    """Draws one 'wm.url_open' button per polygoniq social media channel into 'layout'."""
    # (button label, icon name in our preview manager, target URL) - drawn in this order.
    buttons = (
        ("Discord", "logo_discord", SocialMediaURL.DISCORD),
        ("Facebook", "logo_facebook", SocialMediaURL.FACEBOOK),
        ("Instagram", "logo_instagram", SocialMediaURL.INSTAGRAM),
        ("BlenderMarket", "logo_blendermarket", SocialMediaURL.BLENDERMARKET),
        ("Gumroad", "logo_gumroad", SocialMediaURL.GUMROAD),
        ("Website", "logo_polygoniq", SocialMediaURL.WEBPAGE),
    )
    for label, icon_name, url in buttons:
        op = layout.operator(
            "wm.url_open",
            text=label if show_text else "",
            icon_value=icon_manager.get_icon_id(icon_name),
        )
        op.url = url
def draw_settings_footer(layout: bpy.types.UILayout):
    """Draws the centered social media button row plus copyright into 'layout'."""
    row = layout.row(align=True)
    row.alignment = 'CENTER'
    # Slightly enlarge the buttons so the footer stands out.
    row.scale_x = 1.27
    row.scale_y = 1.27
    draw_social_media_buttons(row, show_text=False)
    row.label(text="© polygoniq xyz s.r.o")
def show_message_box(message: str, title: str, icon: str = 'INFO') -> None:
    """Shows a popup menu with 'title' and 'message'; newlines become separate rows."""
    lines = message.split("\n")

    def draw(self, context):
        # One label per line - UILayout.label() does not render embedded newlines.
        for line in lines:
            row = self.layout.row()
            row.label(text=line)

    bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)
def multi_column(
    layout: bpy.types.UILayout, column_sizes: typing.List[float], align: bool = False
) -> typing.List[bpy.types.UILayout]:
    """Splits 'layout' into len(column_sizes) columns with the given relative widths."""
    columns = []
    for i, column_size in enumerate(column_sizes):
        # UILayout.split() factors are relative to the remaining space, so rescale
        # each absolute column size by whatever is left after the previous columns.
        remaining = 1.0 - sum(column_sizes[:i]) if i > 0 else 1.0
        split = layout.split(factor=column_size / remaining, align=align)
        columns.append(split.column(align=align))
        # Keep splitting inside the right-hand part for the next column.
        layout = split.column(align=align)
    return columns
def scaled_row(layout: bpy.types.UILayout, scale: float, align: bool = False) -> bpy.types.UILayout:
    """Returns a new row in 'layout' with both x and y scale set to 'scale'."""
    row = layout.row(align=align)
    row.scale_x = scale
    row.scale_y = scale
    return row
def row_with_label(
    layout: bpy.types.UILayout,
    text: str = "",
    align: bool = False,
    enabled: bool = False,
    icon: str = 'NONE',
) -> bpy.types.UILayout:
    """Creates a row with a label based on 'layout'.

    The extra parameters tune appearance: e.g. enabled=False renders the row
    grayed out, which is useful to visually separate sections of the UI.
    """
    labeled_row = layout.row(align=align)
    labeled_row.enabled = enabled
    labeled_row.label(text=text, icon=icon)
    return labeled_row
def collapsible_box(
    layout: bpy.types.UILayout,
    data: typing.Any,
    show_prop_name: str,
    title: str,
    content_draw: typing.Callable[[bpy.types.UILayout], None],
    docs_module: typing.Optional[str] = None,
    docs_rel_url: str = "",
) -> bpy.types.UILayout:
    """Creates a collapsible box with 'title' and content drawn by 'content_draw'.

    The box is expanded based on the 'show_prop_name' property of 'data'. Optionally,
    a button leading to a documentation page can be added based on 'docs_module' and
    'docs_rel_url'.

    Raises:
        ValueError: if 'data' has no usable 'show_prop_name' property.
    """
    # Pass a default so a missing property surfaces as the documented ValueError
    # below instead of getattr() raising AttributeError.
    show = getattr(data, show_prop_name, None)
    if show is None:
        raise ValueError(f"Property '{show_prop_name}' not found in data object!")
    box = layout.box()
    row = box.row()
    # The property itself is drawn as the expand/collapse triangle toggle.
    row.prop(
        data,
        show_prop_name,
        icon='DISCLOSURE_TRI_DOWN' if show else 'DISCLOSURE_TRI_RIGHT',
        text="",
        emboss=False,
    )
    row.label(text=title)
    if docs_module is not None:
        draw_doc_button(row, docs_module, docs_rel_url)
    if show:
        content_draw(box)
    return box
def center_mouse(context: bpy.types.Context) -> None:
    """Warps the mouse cursor to the center of the current region (window coordinates)."""
    region = context.region
    # region.x/y are window-relative offsets of the region's bottom-left corner.
    x = region.width // 2 + region.x
    y = region.height // 2 + region.y
    context.window.cursor_warp(x, y)
def get_mouseovered_region(
    context: bpy.types.Context, event: bpy.types.Event
) -> typing.Tuple[typing.Optional[bpy.types.Area], typing.Optional[bpy.types.Region]]:
    """Returns tuple (area, region) of underlying area and region in mouse event 'event'"""
    # Method taken from the 'Screencast Keys' addon
    # available at: https://github.com/nutti/Screencast-Keys
    mouse_x, mouse_y = event.mouse_x, event.mouse_y
    for area in context.screen.areas:
        for region in area.regions:
            # Skip placeholder regions that have no type assigned.
            if region.type == "":
                continue
            if (
                region.x <= mouse_x < region.x + region.width
                and region.y <= mouse_y < region.y + region.height
            ):
                return area, region
    return None, None
def get_all_space_types() -> typing.Dict[str, bpy.types.Space]:
    """Returns mapping of space type to its class - 'VIEW_3D -> bpy.types.SpaceView3D"""
    # Code taken and adjusted from ScreenCastKeys addon -> https://github.com/nutti/Screencast-Keys/
    # (bpy.types class name, space type enum value); classes that don't exist in
    # this Blender build are simply skipped. Later entries with the same enum value
    # overwrite earlier ones (SpacePreferences vs. legacy SpaceUserPreferences).
    candidates = [
        ("SpaceView3D", 'VIEW_3D'),
        ("SpaceClipEditor", 'CLIP_EDITOR'),
        ("SpaceConsole", 'CONSOLE'),
        ("SpaceDopeSheetEditor", 'DOPESHEET_EDITOR'),
        ("SpaceFileBrowser", 'FILE_BROWSER'),
        ("SpaceGraphEditor", 'GRAPH_EDITOR'),
        ("SpaceImageEditor", 'IMAGE_EDITOR'),
        ("SpaceInfo", 'INFO'),
        ("SpaceLogicEditor", 'LOGIC_EDITOR'),
        ("SpaceNLA", 'NLA_EDITOR'),
        ("SpaceNodeEditor", 'NODE_EDITOR'),
        ("SpaceOutliner", 'OUTLINER'),
        ("SpacePreferences", 'PREFERENCES'),
        ("SpaceUserPreferences", 'PREFERENCES'),
        ("SpaceProperties", 'PROPERTIES'),
        ("SpaceSequenceEditor", 'SEQUENCE_EDITOR'),
        ("SpaceSpreadsheet", 'SPREADSHEET'),
        ("SpaceTextEditor", 'TEXT_EDITOR'),
        ("SpaceTimeline", 'TIMELINE'),
    ]
    space_types: typing.Dict[str, bpy.types.Space] = {}
    bpy_types_module = sys.modules["bpy.types"]
    for cls_name, space_name in candidates:
        cls = getattr(bpy_types_module, cls_name, None)
        if cls is not None:
            space_types[space_name] = cls
    return space_types
def expand_addon_prefs(module_name: str) -> None:
    """Opens (expands) the preferences UI section of an add-on given its module name."""
    mod_info = utils_bpy.get_addon_mod_info(module_name)
    mod_info["show_expanded"] = True
def draw_doc_button(layout: bpy.types.UILayout, module: str, rel_url: str = "") -> None:
    """Draws a button leading to an add-on's docs URL based on its module name.

    Points to the docs homepage by default; pass 'rel_url' for a specific page.
    """
    url = f"{utils_bpy.get_addon_docs_page(module)}/{rel_url}"
    layout.operator("wm.url_open", text="", icon='HELP', emboss=False).url = url
def draw_markdown_text(layout: bpy.types.UILayout, text: str, max_length: int = 100) -> None:
    """Renders a simplified plain-text version of markdown 'text' into 'layout'.

    Lines are wrapped at 'max_length' characters and indented under headings.
    """
    col = layout.column(align=True)
    # Remove unicode characters from the text
    # We do this to remove emojis, because Blender does not support them
    text = text.encode("ascii", "ignore").decode()
    # Remove markdown images
    text = re.sub(r"!\[[^\]]*\]\([^)]*\)", "", text)
    # Convert markdown links to just the description
    text = re.sub(r"\[([^\]]*)\]\([^)]*\)", r"\1", text)
    # Convert bold and italic text to UPPERCASE
    text = re.sub(r"(\*\*|__)(.*?)\1", lambda match: match.group(2).upper(), text)
    text = re.sub(r"(\*|_)(.*?)\1", lambda match: match.group(2).upper(), text)
    # Replace bullet list markers with classic bullet character (•), respecting indentation
    text = re.sub(r"(^|\n)(\s*)([-*+])\s", r"\1\2• ", text)
    # Regex for matching markdown headings
    headings = re.compile(r"^#+")
    # NOTE(review): splits on CRLF only - fine for GitHub API release bodies, but
    # "\n"-separated input would end up as one long line; confirm against callers.
    lines = text.split("\r\n")
    # Let's offset the text based on the heading level to make it more readable
    offset = 0
    for line in lines:
        heading = headings.search(line)
        if heading:
            # Indent level = number of '#' characters minus one; heading text is uppercased.
            offset = len(heading.group()) - 1
            line = line.replace(heading.group(), "")
            line = line.strip().upper()
        # Let's do a separator for empty lines
        if len(line) == 0:
            col.separator()
            continue
        split_lines = textwrap.wrap(line, max_length)
        for split_line in split_lines:
            col.label(text=4 * offset * " " + split_line)
def show_release_notes_popup(
    context: bpy.types.Context, module_name: str, release_tag: str = ""
) -> None:
    """Fetches release notes for the add-on 'module_name' from GitHub and shows them in a popup.

    'release_tag' selects a specific release; the latest release is used when empty.
    Shows an error message box when the info cannot be retrieved.
    """
    # Blender 4.2+ has an explicit online-access toggle; bail out early when disabled.
    if bpy.app.version >= (4, 2, 0) and not bpy.app.online_access:
        show_message_box(
            "This requires online access. You have to \"Allow Online Access\" in "
            "\"Preferences -> System -> Network\" to proceed",
            "Online Access Disabled",
            icon='INTERNET',
        )
        return
    mod_info = utils_bpy.get_addon_mod_info(module_name)
    # Get only the name without suffix (_full, _lite, etc.)
    addon_name = mod_info["name"].split("_", 1)[0]
    release_info = utils_bpy.get_addon_release_info(addon_name, release_tag)
    error_msg = f"Cannot retrieve release info for {addon_name}!"
    if release_info is None:
        logger.error(error_msg)
        show_message_box(error_msg, "Error", icon='ERROR')
        return
    version = release_info.get("tag_name", None)
    if version is None:
        logger.error("Release info does not contain version!")
        show_message_box(error_msg, "Error", icon='ERROR')
        return
    body = release_info.get("body", None)
    if not body:
        logger.error("Release info does not contain body!")
        show_message_box(error_msg, "Error", icon='ERROR')
        return
    context.window_manager.popup_menu(
        lambda self, context: draw_markdown_text(self.layout, text=body, max_length=100),
        title=f"{addon_name} {version} Release Notes",
        icon='INFO',
    )
@@ -0,0 +1,408 @@
#!/usr/bin/python3
# copyright (c) 2018- polygoniq xyz s.r.o.
import bpy
import addon_utils
import sys
import os
import pathlib
import typing
import datetime
import functools
import urllib.request
import urllib.error
import ssl
import json
import subprocess
import math
import time
import re
import logging
logger = logging.getLogger(f"polygoniq.{__name__}")
POLYGONIQ_DOCS_URL = "https://docs.polygoniq.com"
POLYGONIQ_GITHUB_REPO_API_URL = "https://api.github.com/repos/polygoniq"
def autodetect_install_path(
    product: str, init_path: str, install_path_checker: typing.Callable[[str], bool]
) -> str:
    """Tries to find the asset install directory for 'product'.

    Checked in order: the blender_vscode development environment, the add-on's own
    directory ("big zip" layout), then a list of well-known per-OS locations.
    Returns "" when nothing matches; 'install_path_checker' decides validity.
    """
    # TODO: We should submit a patch to blender_vscode and deal with this from there in the future
    try:
        vscode_product_path = os.path.expanduser(
            os.path.join("~", "polygoniq", "blender_addons", product)
        )
        try:
            # Is init_path located inside the blender_vscode checkout for this product?
            if (
                os.path.commonpath(
                    [os.path.abspath(os.path.realpath(init_path)), vscode_product_path]
                )
                == vscode_product_path
            ):
                staging_path_base = os.path.expanduser(
                    os.path.join("~", "polygoniq", "bazel-bin", "blender_addons", product)
                )
                # Possible sources of built assets from bazel
                FLIP_OF_THE_COIN = [
                    os.path.join(staging_path_base, f"{product}_staging"),
                    os.path.join(staging_path_base, f"data_final"),
                ]
                for flip in FLIP_OF_THE_COIN:
                    if os.path.isdir(flip):
                        print(
                            f"Detected blender_vscode development environment. Going to use {flip} as "
                            f"the install path for {product}."
                        )
                        return flip
        except:
            # not on the same drive
            pass
    except ValueError:  # Paths don't have the same drive
        pass
    # "Big zip" layout: the assets live right next to the add-on's __init__.py.
    big_zip_path = os.path.abspath(os.path.dirname(init_path))
    if install_path_checker(big_zip_path):
        print(f"{product} install dir autodetected as {big_zip_path} (big zip embedded)")
        return big_zip_path
    # Fall back to probing well-known install locations for each OS.
    if sys.platform == "win32":
        SHOTS_IN_THE_DARK = [
            f"C:/{product}",
            f"D:/{product}",
            f"C:/polygoniq/{product}",
            f"D:/polygoniq/{product}",
        ]
        for shot in SHOTS_IN_THE_DARK:
            if install_path_checker(shot):
                print(f"{product} install dir autodetected as {shot}")
                return os.path.abspath(shot)
    elif sys.platform in ["linux", "darwin"]:
        SHOTS_IN_THE_DARK = [
            os.path.expanduser(f"~/{product}"),
            os.path.expanduser(f"~/Desktop/{product}"),
            os.path.expanduser(f"~/Documents/{product}"),
            os.path.expanduser(f"~/Downloads/{product}"),
            os.path.expanduser(f"~/polygoniq/{product}"),
            os.path.expanduser(f"~/Desktop/polygoniq/{product}"),
            os.path.expanduser(f"~/Documents/polygoniq/{product}"),
            os.path.expanduser(f"~/Downloads/polygoniq/{product}"),
            f"/var/lib/{product}",
            f"/usr/local/{product}",
            f"/opt/{product}",
        ]
        for shot in SHOTS_IN_THE_DARK:
            if install_path_checker(shot):
                print(f"{product} install dir autodetected as {shot}")
                return os.path.abspath(shot)
    print(
        f"{product} is not installed in one of the default locations, please make "
        f"sure the path is set in {product} addon preferences!",
        file=sys.stderr,
    )
    return ""
def absolutize_preferences_path(
    self: bpy.types.AddonPreferences, context: bpy.types.Context, path_property_name: str
) -> None:
    """Property 'update' callback: converts a path preference to an absolute path in place.

    Only writes back when the value actually changes, to avoid re-triggering the
    update callback recursively.
    """
    assert hasattr(self, path_property_name)
    abs_ = os.path.abspath(getattr(self, path_property_name))
    if abs_ != getattr(self, path_property_name):
        setattr(self, path_property_name, abs_)
def contains_object_duplicate_suffix(name: str) -> bool:
    """True when 'name' ends with a Blender duplicate suffix like ".001"."""
    # Only the last four characters can form the ".NNN" suffix.
    return re.fullmatch(r"\.[0-9]{3}", name[-4:]) is not None
def remove_object_duplicate_suffix(name: str) -> str:
    """Strips a trailing numeric ".NNN" duplicate suffix from 'name', if present."""
    base, separator, suffix = name.rpartition(".")
    if not separator:
        # No dot at all - nothing to strip.
        return name
    if suffix.isnumeric():
        return base
    return name
def generate_unique_name(old_name: str, container: typing.Iterable[typing.Any]) -> str:
    """Returns 'old_name' without its duplicate suffix, made unique against 'container'
    by appending .001, .002, ... until no collision remains.
    """
    # TODO: Unify this with renderset unique naming generation
    base = remove_object_duplicate_suffix(old_name)
    candidate = base
    counter = 1
    while candidate in container:
        candidate = f"{base}.{counter:03d}"
        counter += 1
    return candidate
def convert_size(size_bytes: int) -> str:
    """Formats a byte count as a human readable string, e.g. 1536 -> "1.5 KB"."""
    if size_bytes == 0:
        return "0 B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits into size_bytes.
    magnitude = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, magnitude), 2)
    return f"{scaled} {units[magnitude]}"
def blender_cursor(cursor_name: str = 'WAIT'):
    """Decorator that sets a modal cursor in Blender to whatever the caller desires,
    then sets it back when the function returns. This is useful for long running
    functions or operators. Showing a WAIT cursor makes it less likely that the user
    will think that Blender froze.

    Unfortunately this can only be used in cases we control and only when 'context' is
    available.

    TODO: Maybe we could use bpy.context and drop the context requirement?
    """

    def cursor_decorator(fn):
        # functools.wraps preserves the wrapped function's name/docstring, which
        # the plain wrapper previously discarded.
        @functools.wraps(fn)
        def wrapper(self, context: bpy.types.Context, *args, **kwargs):
            context.window.cursor_modal_set(cursor_name)
            try:
                return fn(self, context, *args, **kwargs)
            finally:
                # Restore even when fn raises, so Blender is not stuck with a wait cursor.
                context.window.cursor_modal_restore()

        return wrapper

    return cursor_decorator
def timeit(fn):
def timed(*args, **kw):
ts = time.time()
result = fn(*args, **kw)
te = time.time()
print(f"{fn.__name__!r} {(te - ts) * 1000:2.2f} ms")
return result
return timed
def timed_cache(**timedelta_kwargs):
    """Decorator factory: an lru_cache whose entire contents expire after the given period.

    Usage: @timed_cache(minutes=5). Note the whole cache is flushed at once when the
    period elapses - entries do not expire individually.
    """

    def _wrapper(f):
        update_delta = datetime.timedelta(**timedelta_kwargs)
        # NOTE(review): utcnow() is naive and deprecated since Python 3.12; only used
        # for relative comparisons here so behavior is unaffected, but worth migrating.
        next_update = datetime.datetime.utcnow() + update_delta
        # Unbounded cache between flushes.
        f = functools.lru_cache(None)(f)

        @functools.wraps(f)
        def _wrapped(*args, **kwargs):
            nonlocal next_update
            now = datetime.datetime.utcnow()
            if now >= next_update:
                # Period elapsed: drop everything and schedule the next flush.
                f.cache_clear()
                next_update = now + update_delta
            return f(*args, **kwargs)

        return _wrapped

    return _wrapper
def xdg_open_file(path):
    """Opens 'path' with the OS default application (Explorer/Finder/xdg-open)."""
    if sys.platform == "win32":
        os.startfile(path)
    elif sys.platform == "darwin":
        subprocess.call(["open", path])
    else:
        # Assume a freedesktop-compliant Linux/BSD environment.
        subprocess.call(["xdg-open", path])
def fork_running_blender(blend_path: typing.Optional[str] = None) -> None:
    """Opens new instance of Blender which keeps running even if the original instance is closed.

    Opens 'blend_path' if provided, otherwise Blender will open with an empty scene.

    Raises:
        RuntimeError: on an unrecognized operating system.
    """
    blender_executable = bpy.app.binary_path
    args = [blender_executable]
    if blend_path is not None:
        args += [blend_path]
    if sys.platform in ["win32", "cygwin"]:
        # Detach child process and close its stdin/stdout/stderr, so it can keep running
        # after parent Blender is closed.
        # https://stackoverflow.com/questions/52449997/how-to-detach-python-child-process-on-windows-without-setsid
        flags = 0
        flags |= subprocess.DETACHED_PROCESS
        flags |= subprocess.CREATE_NEW_PROCESS_GROUP
        flags |= subprocess.CREATE_NO_WINDOW
        subprocess.Popen(args, close_fds=True, creationflags=flags)
    elif sys.platform in ["darwin", "linux", "linux2"]:  # POSIX systems
        # start_new_session detaches the child from our process group (setsid).
        subprocess.Popen(args, start_new_session=True)
    else:
        raise RuntimeError(f"Unsupported OS: sys.platform={sys.platform}")
def run_logging_subprocess(
    subprocess_args: typing.List[str], logger_: typing.Optional[logging.Logger] = None
) -> int:
    """Runs 'subprocess_args' as a subprocess and logs its stdout and stderr.

    If 'logger_' is None, the polib module logger is used.
    Returns the subprocess return code; 0 means no errors occurred.
    """
    if logger_ is None:
        logger_ = logger
    # Using Popen as a context manager guarantees the stdout pipe is closed and
    # the process is waited on, even if logging raises mid-stream.
    with subprocess.Popen(
        subprocess_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    ) as process:
        # Stream the combined output line by line while the subprocess runs.
        for line in process.stdout:
            logger_.info(line.decode())
        process.wait()
    return process.returncode
def normalize_path(path: str) -> str:
    """Makes path OS independent by replacing backslashes with forward slashes."""
    return path.replace("\\", "/")
def get_case_sensitive_path(path: str) -> str:
    """Returns the path with capitalization as it appears on disk.

    Some OSes such as Windows do not consider capitalization while resolving paths,
    while others such as UNIX-based systems do.

    NOTE(review): for relative inputs the result is re-rooted at "." (os.path.join
    prefixes "./"), so the returned string may not be byte-equal to the input even
    when the case matches - confirm callers only compare via normalize_path().

    Raises:
        FileNotFoundError: when 'path' does not exist.
    """
    if not os.path.exists(path):
        raise FileNotFoundError(f"File not found: {path}")

    components = pathlib.Path(path).parts
    case_sensitive_path = "."
    if os.path.isabs(path):
        # First component of absolute path is either a drive letter or a `/`, no need to check case
        case_sensitive_path = components[0]
        components = components[1:]

    # Reconstruct the path with case-sensitive names
    for component in components:
        # Check the case of each entry in the directory
        # Using os.path.realpath is not reliable, as it does
        # not return case-sensitive paths for google drive files
        entries = os.listdir(case_sensitive_path)
        # Path may contain current directory or up one level notation, we don't use realpath,
        # because we don't want to change the format of the input path
        entries.extend([".", ".."])
        case_sensitive_entry = None
        for entry in entries:
            # pathlib makes sure correct case-sensitivity is used on every OS
            if pathlib.Path(entry) == pathlib.Path(component):
                case_sensitive_entry = entry
                break

        # The path exists (checked above), so some directory entry must have matched.
        assert case_sensitive_entry is not None
        case_sensitive_path = os.path.join(case_sensitive_path, case_sensitive_entry)

    return case_sensitive_path
def isfile_case_sensitive(path: str) -> bool:
    """Similar to os.path.isfile, but case sensitive.

    Case sensitive checks are needed on Windows and other case insensitive platforms.
    """
    # fast case insensitive check, filters out folders
    if not os.path.isfile(path):
        return False
    # Compare normalized forms so separators don't cause false negatives.
    case_sensitive_path = get_case_sensitive_path(path)
    return normalize_path(path) == normalize_path(case_sensitive_path)
def get_bpy_filepath_relative_to_dir(input_dir: str, filepath: str, library=None) -> str:
    """Resolves a possibly blend-relative 'filepath' and re-expresses it relative to 'input_dir'.

    'library' is forwarded to bpy.path.abspath so paths from linked libraries resolve
    against the library file. The leading "//" that bpy.path.relpath produces is stripped
    and the result uses forward slashes.
    """
    file_abspath = bpy.path.abspath(filepath, library=library)
    rel_path = bpy.path.relpath(file_abspath, start=input_dir)
    return normalize_path(rel_path.removeprefix("//"))
def get_first_existing_ancestor_directory(
    file_path: str, whitelist: typing.Optional[set[str]] = None
) -> typing.Optional[str]:
    """Returns the closest existing ancestor directory of 'file_path'.

    Returns None unless 'file_path' exists or is explicitly whitelisted.
    """
    if whitelist is None:
        whitelist = set()
    if not (file_path in whitelist or os.path.exists(file_path)):
        return None
    # Walk upwards from the parent directory until an existing one is found.
    ancestor = pathlib.Path(os.path.dirname(file_path)).resolve()
    while not os.path.exists(ancestor):
        ancestor = ancestor.parent.resolve()
    return str(ancestor)
def get_all_datablocks(data: bpy.types.BlendData) -> typing.List[typing.Tuple[bpy.types.ID, str]]:
    """returns all datablocks and their BlendData type in the currently loaded blend file"""
    # Materialize everything into a list up front; lazily yielding datablocks
    # may result in Blender crashing due to memory issues.
    found: typing.List[typing.Tuple[bpy.types.ID, str]] = []
    for attribute_name in dir(data):
        collection = getattr(data, attribute_name)
        if not isinstance(collection, bpy.types.bpy_prop_collection):
            continue
        found.extend(
            (datablock, attribute_name)
            for datablock in collection
            if isinstance(datablock, bpy.types.ID)
        )
    return found
def get_addon_mod_info(module_name: str) -> typing.Dict[str, typing.Any]:
    """Returns module bl_info based on its module name.

    Raises ValueError when no add-on module with that name is registered.
    """
    matching_module = next(
        (mod for mod in addon_utils.modules(refresh=False) if mod.__name__ == module_name),
        None,
    )
    if matching_module is None:
        raise ValueError(f"No module '{module_name}' was found!")
    return addon_utils.module_bl_info(matching_module)
def get_release_tag_from_version(version: typing.Tuple[int, int, int]) -> str:
    """Formats a version tuple like (1, 2, 3) as a release tag like 'v1.2.3'."""
    return "v" + ".".join(str(component) for component in version)
def get_addon_docs_page(module_name: str) -> str:
    """Returns url of add-on docs based on its module name."""
    info = get_addon_mod_info(module_name)
    # bl_info names may carry a variant suffix (_full, _lite, etc.);
    # the docs live under the base name only
    base_name = info["name"].partition("_")[0]
    version_string = ".".join(str(part) for part in info["version"])
    return f"{POLYGONIQ_DOCS_URL}/{base_name}/{version_string}"
def get_addon_release_info(
    addon_name: str, release_tag: str = ""
) -> typing.Optional[typing.Dict[str, typing.Any]]:
    """Fetches release info for 'addon_name' from the polygoniq GitHub API.

    Queries the release with 'release_tag' when given, otherwise the latest
    release. Returns the decoded JSON payload on success; on request or
    decoding failure the error is logged rather than raised.
    """
    # Choose the tag-specific endpoint only when a concrete tag was requested
    if release_tag != "":
        url = f"{POLYGONIQ_GITHUB_REPO_API_URL}/{addon_name}/releases/tags/{release_tag}"
    else:
        url = f"{POLYGONIQ_GITHUB_REPO_API_URL}/{addon_name}/releases/latest"
    request = urllib.request.Request(url)
    try:
        # NOTE(review): _create_unverified_context is a private ssl API and it
        # disables certificate verification for this request - presumably
        # deliberate for local network setups, but worth confirming the
        # trade-off is still intended.
        ssl_context = ssl._create_unverified_context()
    except:
        # NOTE(review): bare except - kept as-is; intended to survive Blender
        # Python builds where the private API is missing entirely.
        # Some blender packaged python versions don't have this, largely
        # useful for local network setups otherwise minimal impact.
        ssl_context = None
    try:
        if ssl_context is not None:
            response = urllib.request.urlopen(request, context=ssl_context)
        else:
            response = urllib.request.urlopen(request)
    except (urllib.error.HTTPError, urllib.error.URLError) as e:
        # Network / HTTP failure: log and fall through without a result
        logger.error(e)
    else:
        result_string = response.read()
        response.close()
        try:
            # GitHub API responds with a JSON document describing the release
            return json.JSONDecoder().decode(result_string.decode())
        except json.JSONDecodeError as e:
            logger.error("API response has invalid JSON format")