chore: snapshot workflow migration progress

2026-04-12 11:49:04 +02:00
parent 0cd02513d5
commit 3e810c74a3
163 changed files with 31774 additions and 2753 deletions
+16
View File
@@ -47,6 +47,13 @@ def parse_args() -> SimpleNamespace:
denoising_quality = _arg(23, "")
denoising_use_gpu = _arg(24, "")
if samples is None:
default_samples = os.environ.get("BLENDER_DEFAULT_SAMPLES", "").strip()
if default_samples:
try:
samples = int(default_samples)
except ValueError:
samples = None
if samples is None:
samples = 64 if engine == "eevee" else 256
@@ -78,6 +85,14 @@ def parse_args() -> SimpleNamespace:
_mo_idx = sys.argv.index("--material-override")
material_override = sys.argv[_mo_idx + 1] if _mo_idx + 1 < len(sys.argv) else None
template_inputs: dict = {}
if "--template-inputs" in sys.argv:
_ti_idx = sys.argv.index("--template-inputs")
try:
template_inputs = _json.loads(sys.argv[_ti_idx + 1]) if _ti_idx + 1 < len(sys.argv) else {}
except Exception:
template_inputs = {}
if template_path and not os.path.isfile(template_path):
print(f"[blender_render] ERROR: template not found: {template_path}")
sys.exit(1)
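A caller-side sketch of the new flag (not part of the commit): Blender forwards everything after "--" to the script's sys.argv, where the branch above parses the JSON payload. The script path and key names below are invented.

import json
import subprocess

payload = {"variant": "exploded"}  # hypothetical template-input keys
cmd = [
    "blender", "--background",
    "--python", "blender_render.py", "--",   # hypothetical script path
    "--template-inputs", json.dumps(payload),
]
subprocess.run(cmd, check=True)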
@@ -114,4 +129,5 @@ def parse_args() -> SimpleNamespace:
focal_length_mm=focal_length_mm,
sensor_width_mm=sensor_width_mm_override,
material_override=material_override,
template_inputs=template_inputs,
)
+163 -29
View File
@@ -37,6 +37,29 @@ def _find_material_with_nodes(base_name: str):
return None
def _iter_object_name_variants(raw_name: str):
"""Yield conservative object-name variants for direct material lookup."""
if not raw_name:
return
seen: set[str] = set()
def _emit(value: str):
value = (value or "").strip()
if value and value not in seen:
seen.add(value)
return value
return None
exact = _emit(raw_name)
if exact:
yield exact
no_blender_suffix = _emit(_re.sub(r'\.\d{3}$', '', raw_name))
if no_blender_suffix:
yield no_blender_suffix
def _batch_append_materials(mat_lib_path: str, names: set[str]) -> dict:
"""Append multiple materials from a .blend file in a single open.
@@ -46,7 +69,9 @@ def _batch_append_materials(mat_lib_path: str, names: set[str]) -> dict:
Handles empty material stubs left by Blender's USD importer: when a
stub exists with the target name, the library material gets renamed
with a .NNN suffix. We find it via _find_material_with_nodes().
with a .NNN suffix. Blender returns the actual loaded datablocks in
data_to.materials, so we can use those directly instead of re-scanning
bpy.data.materials after the library load.
"""
import bpy # type: ignore[import]
@@ -60,19 +85,22 @@ def _batch_append_materials(mat_lib_path: str, names: set[str]) -> dict:
available = set(data_from.materials)
to_load = [n for n in names if n in available]
not_found = names - available
data_to.materials = to_load
# After the context manager closes, materials are loaded into bpy.data.
# If a USD stub occupied the name, the real material gets a .NNN suffix.
for mat_name in to_load:
mat = _find_material_with_nodes(mat_name)
requested_names = [str(n) for n in to_load]
data_to.materials = list(requested_names)
loaded_materials = list(data_to.materials)
# After the context manager closes, data_to.materials contains the actual
# appended datablocks in the same order as to_load, including any .NNN
# renames Blender introduced to avoid collisions with USD stubs.
for mat_name, mat in zip(requested_names, loaded_materials):
if mat:
result[mat_name] = mat
if mat.name != mat_name:
print(f"[blender_render] batch-appended material: {mat_name} (as '{mat.name}', stub collision)")
else:
print(f"[blender_render] batch-appended material: {mat_name}")
else:
print(f"[blender_render] WARNING: material '{mat_name}' not found after batch append")
continue
print(f"[blender_render] WARNING: material '{mat_name}' not returned after batch append")
if not_found:
print(f"[blender_render] WARNING: materials not in library: {sorted(not_found)[:10]}")
except Exception as exc:
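For reference, a minimal standalone sketch of the bpy.data.libraries.load pattern this hunk relies on (Blender-embedded Python only; the library path and material names are invented):

import bpy

lib_path = "/assets/materials.blend"        # hypothetical library path
wanted = {"steel_brushed", "rubber_black"}  # hypothetical material names

with bpy.data.libraries.load(lib_path, link=False) as (data_from, data_to):
    # data_from lists the names available in the library; assigning to
    # data_to.materials inside the context manager declares what to append.
    data_to.materials = [n for n in wanted if n in set(data_from.materials)]

# After the context exits, data_to.materials holds the appended datablocks,
# already carrying any .NNN rename Blender applied on name collisions.
for mat in data_to.materials:
    print(mat.name)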
@@ -149,6 +177,126 @@ def build_mat_map_lower(material_map: dict) -> dict:
return mat_map_lower
def _common_prefix_len(left: str, right: str) -> int:
limit = min(len(left), len(right))
idx = 0
while idx < limit and left[idx] == right[idx]:
idx += 1
return idx
def _lookup_by_common_prefix(query: str, mat_map: dict) -> str | None:
"""Resolve near-matches when USD/source names omit trailing serial suffixes.
This is intentionally conservative: a material is returned only when all
of the strongest common-prefix matches point to the same material.
"""
if not query or not mat_map:
return None
scored: list[tuple[float, int, int, str]] = []
query_len = len(query)
for key, material in mat_map.items():
prefix_len = _common_prefix_len(query, key)
if prefix_len < 12:
continue
ratio = prefix_len / max(query_len, len(key))
if ratio < 0.68:
continue
scored.append((ratio, prefix_len, len(key), material))
if not scored:
return None
scored.sort(reverse=True)
top_ratio, top_prefix, _, top_material = scored[0]
contenders = [
material
for ratio, prefix_len, _, material in scored
if ratio >= top_ratio - 0.02 and prefix_len >= top_prefix - 2
]
unique_materials = set(contenders)
if len(unique_materials) == 1:
return top_material
return None
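A quick sketch of the fuzzy path in isolation, run in this module's context (entry invented; neither name is a plain prefix of the other, so only the common-prefix rule can match):

mat_map = {"spherical_bearing_ge360_p": "Steel_Polished"}  # hypothetical entry
# The names share a 24-character prefix; ratio 24/26 (about 0.92) clears the
# 0.68 floor and the prefix length clears the 12-character floor.
assert _lookup_by_common_prefix("spherical_bearing_ge360_hf", mat_map) == "Steel_Polished"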
def _lookup_by_prefix(query: str, mat_map: dict) -> str | None:
"""Resolve prefix-compatible matches when all contenders share one material."""
if not query or not mat_map:
return None
contenders: list[tuple[int, str]] = []
for key, material in mat_map.items():
if len(key) >= 5 and len(query) >= 5 and (
query.startswith(key) or key.startswith(query)
):
contenders.append((len(key), material))
if not contenders:
return None
contenders.sort(reverse=True)
top_len = contenders[0][0]
close_materials = {
material for key_len, material in contenders if key_len >= top_len - 2
}
if len(close_materials) == 1:
return contenders[0][1]
return None
def lookup_material_name(raw_name: str, mat_map: dict, *fallback_names: str) -> str | None:
"""Resolve a material name against normalized mat_map keys.
Lookup order:
1. exact normalized key
2. prefix-compatible key
3. conservative common-prefix fuzzy match
"""
candidates = [raw_name, *fallback_names]
seen: set[str] = set()
for candidate in candidates:
if not candidate:
continue
normalized = candidate.lower().strip()
variants = [normalized]
stripped = _re.sub(r'(_af\d+(_\d+)?)+$', '', normalized, flags=_re.IGNORECASE)
if stripped != normalized:
variants.append(stripped)
no_instance = _re.sub(r'_\d+$', '', stripped)
if no_instance and no_instance not in variants:
variants.append(no_instance)
for variant in list(variants):
slug = _re.sub(r'[^a-z0-9]+', '_', variant).strip('_')
if slug and slug not in variants:
variants.append(slug)
deduped_variants: list[str] = []
for variant in variants:
    if variant and variant not in seen:
        seen.add(variant)
        deduped_variants.append(variant)
for variant in deduped_variants:
mat_name = mat_map.get(variant)
if mat_name:
return mat_name
for variant in deduped_variants:
mat_name = _lookup_by_prefix(variant, mat_map)
if mat_name:
return mat_name
for variant in deduped_variants:
mat_name = _lookup_by_common_prefix(variant, mat_map)
if mat_name:
return mat_name
return None
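A usage sketch of the three-stage lookup, using the commit's own import path (map entries and part names invented):

from _blender_materials import lookup_material_name

mat_map = {
    "housing_cover": "Steel_Brushed",        # hypothetical entries
    "housing_cover_gasket": "Rubber_Black",
}
assert lookup_material_name("Housing_Cover_AF0_1", mat_map) == "Steel_Brushed"  # AF suffix stripped
assert lookup_material_name("housing-cover", mat_map) == "Steel_Brushed"        # slug variant
assert lookup_material_name("housing_cover_gask", mat_map) == "Rubber_Black"    # prefix stage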
def apply_material_library_direct(
parts: list,
mat_lib_path: str,
@@ -201,7 +349,11 @@ def apply_material_library_direct(
assigned_count = 0
unmatched_names = []
for part in parts:
mat_name = material_lookup.get(part.name)
mat_name = None
for candidate in _iter_object_name_variants(part.name):
mat_name = material_lookup.get(candidate)
if mat_name:
break
if mat_name and mat_name in appended:
if part.data.users > 1:
part.data = part.data.copy()
@@ -280,30 +432,12 @@ def apply_material_library(
_prev = base_name
base_name = _re.sub(r'_AF\d+$', '', base_name, flags=_re.IGNORECASE)
part_key = base_name.lower().strip()
mat_name = mat_map.get(part_key)
# Prefix fallback: if a mat_map key starts with our base name or
# vice-versa, use the longest matching key (most-specific wins).
if not mat_name:
for key, val in sorted(mat_map.items(), key=lambda x: len(x[0]), reverse=True):
if len(key) >= 5 and len(part_key) >= 5 and (
part_key.startswith(key) or key.startswith(part_key)
):
mat_name = val
break
mat_name = lookup_material_name(part_key, mat_map)
# Fall back to index-based matching via part_names_ordered
if not mat_name and part_names_ordered and i < len(part_names_ordered):
step_name = part_names_ordered[i]
step_key = step_name.lower().strip()
mat_name = mat_map.get(step_key)
# Also try stripping AF from part_names_ordered entry
if not mat_name:
_p2 = None
while _p2 != step_key:
_p2 = step_key
step_key = _re.sub(r'_af\d+$', '', step_key)
mat_name = mat_map.get(step_key)
mat_name = lookup_material_name(step_name, mat_map, part_key)
if mat_name and mat_name in appended:
part.data.materials.clear()
@@ -22,6 +22,7 @@ from _blender_scene import (
apply_sharp_edges_from_occ,
setup_shadow_catcher,
)
from _blender_template_inputs import apply_template_inputs
def setup_scene(args, lap_fn: Callable[[str], None]) -> None:
@@ -41,6 +42,7 @@ def _setup_mode_b(args, lap_fn: Callable[[str], None]) -> None:
"""MODE B: Template-based render — load .blend, import into collection."""
print(f"[blender_render] Opening template: {args.template_path}")
bpy.ops.wm.open_mainfile(filepath=args.template_path)
apply_template_inputs(getattr(args, "template_inputs", None))
lap_fn("template_load")
target_col = ensure_collection(args.target_collection)
@@ -0,0 +1,183 @@
"""Generic template-input application for Blender template scenes.
Template inputs are exposed to the scene via custom properties and optional
visibility/selection markers. This keeps legacy templates untouched while
allowing graph workflows to pass structured overrides into `.blend` scenes.
"""
from __future__ import annotations
import json
import re
from typing import Any
import bpy # type: ignore[import]
_MARKER_PROP_NAMES = (
"hartomat_template_input",
"hartomat.template_input",
"template_input",
"schaeffler_template_input",
)
_MARKER_KEY_PROP_NAMES = (
"hartomat_template_input_key",
"hartomat.template_input_key",
"template_input_key",
"schaeffler_template_input_key",
)
_MARKER_VALUE_PROP_NAMES = (
"hartomat_template_input_value",
"hartomat.template_input_value",
"template_input_value",
"schaeffler_template_input_value",
)
_NAME_PATTERNS = (
re.compile(r"template_input__(?P<key>[^_]+)__(?P<value>[^_]+)", re.IGNORECASE),
re.compile(r"template-input:(?P<key>[^=]+)=(?P<value>.+)", re.IGNORECASE),
re.compile(r"ti::(?P<key>[^:]+)::(?P<value>.+)", re.IGNORECASE),
)
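For example, the first name pattern lets a plain datablock name carry a marker with no custom properties at all (key and value below are invented):

import re

pattern = re.compile(r"template_input__(?P<key>[^_]+)__(?P<value>[^_]+)", re.IGNORECASE)
match = pattern.search("Backdrop.template_input__variant__exploded")
assert match and match.group("key") == "variant"
assert match.group("value") == "exploded"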
def _normalize_template_inputs(template_inputs: dict[str, Any] | None) -> dict[str, str]:
normalized: dict[str, str] = {}
for raw_key, raw_value in (template_inputs or {}).items():
key = str(raw_key or "").strip()
if not key or raw_value is None:
continue
if isinstance(raw_value, bool):
value = "true" if raw_value else "false"
else:
value = str(raw_value).strip()
if value:
normalized[key] = value
return normalized
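For instance (values invented, run inside this module), booleans are lowered and empty or None values are dropped:

assert _normalize_template_inputs({"variant": " Exploded ", "hdri": True, "skip": None}) == {
    "variant": "Exploded",
    "hdri": "true",
}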
def _scene_targets():
yield ("collection", bpy.data.collections)
yield ("object", bpy.data.objects)
yield ("world", bpy.data.worlds)
def _extract_marker(target) -> tuple[str, str] | None:
for prop_name in _MARKER_PROP_NAMES:
raw = target.get(prop_name)
if not raw:
continue
if isinstance(raw, str):
text = raw.strip()
if not text:
continue
if text.startswith("{"):
try:
payload = json.loads(text)
except Exception:
payload = None
if isinstance(payload, dict):
key = str(payload.get("key", "")).strip()
value = str(payload.get("value", "")).strip()
if key and value:
return key, value
if "=" in text:
key, value = text.split("=", 1)
key = key.strip()
value = value.strip()
if key and value:
return key, value
key = None
value = None
for prop_name in _MARKER_KEY_PROP_NAMES:
raw = target.get(prop_name)
if raw:
key = str(raw).strip()
if key:
break
for prop_name in _MARKER_VALUE_PROP_NAMES:
raw = target.get(prop_name)
if raw is not None:
value = str(raw).strip()
if value:
break
if key and value:
return key, value
name = getattr(target, "name", "") or ""
for pattern in _NAME_PATTERNS:
match = pattern.search(name)
if not match:
continue
key = match.group("key").strip()
value = match.group("value").strip()
if key and value:
return key, value
return None
def _find_layer_collection(layer_collection, collection_name: str):
if layer_collection.collection.name == collection_name:
return layer_collection
for child in layer_collection.children:
found = _find_layer_collection(child, collection_name)
if found is not None:
return found
return None
def _apply_collection_visibility(collection, *, enabled: bool) -> None:
collection.hide_render = not enabled
collection.hide_viewport = not enabled
for view_layer in bpy.context.scene.view_layers:
layer_collection = _find_layer_collection(view_layer.layer_collection, collection.name)
if layer_collection is not None:
layer_collection.exclude = not enabled
layer_collection.hide_viewport = not enabled
def _apply_object_visibility(obj, *, enabled: bool) -> None:
obj.hide_render = not enabled
obj.hide_viewport = not enabled
try:
obj.hide_set(not enabled)
except Exception:
pass
def _apply_world_selection(world, *, enabled: bool) -> None:
if enabled:
bpy.context.scene.world = world
def apply_template_inputs(template_inputs: dict[str, Any] | None) -> None:
normalized = _normalize_template_inputs(template_inputs)
if not normalized:
return
scene = bpy.context.scene
for key, value in normalized.items():
scene[f"template_input__{key}"] = value
scene[f"hartomat_template_input__{key}"] = value
matched_targets = 0
for kind, targets in _scene_targets():
for target in targets:
marker = _extract_marker(target)
if marker is None:
continue
key, expected_value = marker
if key not in normalized:
continue
enabled = normalized[key] == expected_value
matched_targets += 1
if kind == "collection":
_apply_collection_visibility(target, enabled=enabled)
elif kind == "object":
_apply_object_visibility(target, enabled=enabled)
elif kind == "world":
_apply_world_selection(target, enabled=enabled)
print(
f"[blender_render] applied template_inputs keys={sorted(normalized)} matched_targets={matched_targets}",
flush=True,
)
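A hedged end-to-end sketch of one call (the collection names and key are invented):

# Suppose the template contains collections named
# "template_input__variant__exploded" and "template_input__variant__assembled".
apply_template_inputs({"variant": "exploded"})
# Effects: the scene gains template_input__variant / hartomat_template_input__variant
# custom properties, the "exploded" collection is enabled for render and
# viewport, and the "assembled" collection is hidden and excluded from all
# view layers.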
+2
View File
@@ -39,6 +39,8 @@ print(f"[blender_render] part_names_ordered: {len(args.part_names_ordered)} entr
print(f"[blender_render] {'template='+args.template_path+', collection='+args.target_collection+', lighting_only='+str(args.lighting_only) if args.use_template else 'no template — Mode A'}")
if args.material_library_path:
print(f"[blender_render] material_library={args.material_library_path}, material_map keys={list(args.material_map.keys())}")
if args.template_inputs:
print(f"[blender_render] template_inputs={args.template_inputs}")
# ── Early GPU activation (must happen BEFORE open_mainfile / Cycles init) ─────
_early_gpu_type = activate_gpu(args.cycles_device)
+15
View File
@@ -29,6 +29,10 @@ import json
import math
from mathutils import Vector, Matrix
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from _blender_template_inputs import apply_template_inputs
# ── Colour palette (matches turntable_render.py / blender_render.py) ─────────
PALETTE_HEX = [
"#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
@@ -476,6 +480,14 @@ def main():
_idx = argv.index("--material-override")
_material_override = argv[_idx + 1] if _idx + 1 < len(argv) else None
template_inputs = {}
if "--template-inputs" in argv:
_idx = argv.index("--template-inputs")
try:
template_inputs = json.loads(argv[_idx + 1]) if _idx + 1 < len(argv) else {}
except Exception:
template_inputs = {}
# Cinematic always uses camera orbit (camera moves, model stays)
camera_orbit = True
@@ -535,6 +547,8 @@ def main():
print("[cinematic_render] no template -- using factory settings (Mode A)")
if material_library_path:
print(f"[cinematic_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
if template_inputs:
print(f"[cinematic_render] template_inputs={template_inputs}")
# ── SCENE SETUP ──────────────────────────────────────────────────────────
_usd_mat_lookup: dict = {}
@@ -543,6 +557,7 @@ def main():
# ── MODE B: Template-based render ────────────────────────────────────
print(f"[cinematic_render] Opening template: {template_path}")
bpy.ops.wm.open_mainfile(filepath=template_path)
apply_template_inputs(template_inputs)
target_col = _ensure_collection(target_collection)
+304 -77
View File
@@ -21,6 +21,7 @@ from __future__ import annotations
import argparse
import json
import os
import sys
import traceback
from pathlib import Path
@@ -436,31 +437,38 @@ def _tessellate_with_gmsh(shape, linear_deflection: float, angular_deflection: f
)
def _collect_part_key_map(shape_tool, free_labels) -> dict:
"""Return {normalized_source_name: part_key_slug} for all leaf parts in the XCAF hierarchy.
The normalized source name (XCAF label name without _AF\\d+ suffix) is what
Three.js sees after normalizeMeshName() strips the OCC assembly suffix from the
GLB mesh node name. The slug algorithm matches part_key_service.generate_part_key().
"""
import re as _re
def _collect_part_key_mappings(shape_tool, free_labels) -> tuple[dict[str, str], dict[str, list[str]]]:
"""Return canonical and occurrence-aware part-key mappings for leaf XCAF parts."""
import hashlib as _hashlib
import re as _re
from OCP.TDF import TDF_LabelSequence
from OCP.TDataStd import TDataStd_Name
from OCP.XCAFDoc import XCAFDoc_ShapeTool
_af_re = _re.compile(r'_AF\d+$', _re.IGNORECASE)
_af_re = _re.compile(r"_AF\d+$", _re.IGNORECASE)
def _slug(source_name: str, xcaf_path: str = "") -> str:
base = _af_re.sub('', source_name) if source_name else ''
# camelCase split — same as part_key_service.generate_part_key
base = _re.sub(r'([a-z])([A-Z])', r'\1_\2', base)
slug = _re.sub(r'[^a-z0-9]+', '_', base.lower()).strip('_')
def _slug(source_name: str, xcaf_path: str = "", existing_keys: set[str] | None = None) -> str:
base = _af_re.sub("", source_name) if source_name else ""
base = _re.sub(r"([a-z])([A-Z])", r"\1_\2", base)
slug = _re.sub(r"[^a-z0-9]+", "_", base.lower()).strip("_")
if not slug:
slug = f"part_{_hashlib.sha256(xcaf_path.encode()).hexdigest()[:8]}"
return slug[:50]
slug = slug[:50]
part_key_map: dict = {}
if existing_keys is None:
return slug
key = slug
n = 2
while key in existing_keys:
key = f"{slug}_{n}"
n += 1
existing_keys.add(key)
return key
part_key_map: dict[str, str] = {}
part_key_occurrences: dict[str, list[str]] = {}
existing_keys: set[str] = set()
def _collect(label, path: str = "") -> None:
name_attr = TDataStd_Name()
@@ -468,9 +476,8 @@ def _collect_part_key_map(shape_tool, free_labels) -> dict:
if label.FindAttribute(TDataStd_Name.GetID_s(), name_attr):
name = name_attr.Get().ToExtString()
# Dereference component references to their definition label
# (the definition may itself be an assembly with sub-components)
from OCP.TDF import TDF_Label as _TDF_Label
actual_label = label
if XCAFDoc_ShapeTool.IsReference_s(label):
ref_label = _TDF_Label()
@@ -481,45 +488,206 @@ def _collect_part_key_map(shape_tool, free_labels) -> dict:
XCAFDoc_ShapeTool.GetComponents_s(actual_label, components)
xcaf_path = f"{path}/{name}" if name else f"{path}/unnamed"
if components.Length() == 0:
# Leaf node — normalized source name (without _AF suffix) as key
normalized = _af_re.sub('', name) if name else ''
normalized = _af_re.sub("", name) if name else ""
if normalized:
part_key_map[normalized] = _slug(name, xcaf_path)
else:
for i in range(1, components.Length() + 1):
_collect(components.Value(i), xcaf_path)
part_key = _slug(name, xcaf_path, existing_keys)
part_key_map.setdefault(normalized, part_key)
part_key_occurrences.setdefault(normalized, []).append(part_key)
return
for i in range(1, components.Length() + 1):
_collect(components.Value(i), xcaf_path)
for i in range(1, free_labels.Length() + 1):
_collect(free_labels.Value(i))
return part_key_map, part_key_occurrences
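The occurrence suffixing inside _slug() can be mirrored in isolation (name invented): repeated leaf names produce bolt, bolt_2, bolt_3 in part_key_occurrences, while part_key_map keeps only the first key per source name.

existing: set[str] = set()

def dedup(slug: str) -> str:
    # Mirrors the collision loop in _slug(): append _2, _3, ... until unique.
    key, n = slug, 2
    while key in existing:
        key = f"{slug}_{n}"
        n += 1
    existing.add(key)
    return key

assert [dedup("bolt") for _ in range(3)] == ["bolt", "bolt_2", "bolt_3"]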
def _collect_part_key_map(shape_tool, free_labels) -> dict[str, str]:
part_key_map, _ = _collect_part_key_mappings(shape_tool, free_labels)
return part_key_map
def _inject_glb_extras(glb_path: Path, extras: dict, part_key_map: dict | None = None) -> None:
"""Patch a GLB binary to add/update scenes[0].extras JSON field.
Also stamps per-node extras.partKey on each GLB node whose name maps to an
entry in part_key_map (the dict returned by _collect_part_key_map). Three.js
GLTFLoader propagates node extras → object.userData, so every THREE.Mesh will
carry userData.partKey after load — no runtime lookup needed in the viewer.
The GLB format stores a JSON chunk immediately after the 12-byte header.
We re-serialize it with the new extras and update chunk + total lengths.
No external dependencies — pure stdlib struct/json.
"""
def _normalize_occ_node_name(raw_name: str) -> str:
"""Collapse exporter-only suffixes back to their semantic OCC source name."""
import re as _re
name = (raw_name or "").strip()
name = _re.sub(r"\.\d{3}$", "", name)
previous = None
while previous != name:
previous = name
name = _re.sub(r"_AF\d+(_ASM)?_?$", "", name, flags=_re.IGNORECASE)
return name
def _slugify_occ_name(raw_name: str) -> str:
import re as _re
base = _re.sub(r"([a-z])([A-Z])", r"\1_\2", raw_name or "")
return _re.sub(r"[^a-z0-9]+", "_", base.lower()).strip("_")
def _is_instance_variant_name(mesh_name: str, semantic_name: str) -> bool:
if not mesh_name or not semantic_name:
return False
if mesh_name == semantic_name:
return True
if mesh_name.startswith(f"{semantic_name}_"):
return True
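# e.g. "Bolt_003".rstrip("0123456789_") == "Bolt": trailing numeric
# instance counters count as variants of the same semantic name.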
return mesh_name.rstrip("0123456789_") == semantic_name
def _read_vec(node: dict, key: str, default: tuple[float, ...]) -> tuple[float, ...]:
value = node.get(key)
if not isinstance(value, list) or len(value) != len(default):
return default
result: list[float] = []
for idx, fallback in enumerate(default):
item = value[idx]
result.append(float(item) if isinstance(item, (int, float)) else fallback)
return tuple(result)
def _have_matching_local_transforms(left: dict, right: dict) -> bool:
position_left = _read_vec(left, "translation", (0.0, 0.0, 0.0))
position_right = _read_vec(right, "translation", (0.0, 0.0, 0.0))
scale_left = _read_vec(left, "scale", (1.0, 1.0, 1.0))
scale_right = _read_vec(right, "scale", (1.0, 1.0, 1.0))
rotation_left = _read_vec(left, "rotation", (0.0, 0.0, 0.0, 1.0))
rotation_right = _read_vec(right, "rotation", (0.0, 0.0, 0.0, 1.0))
epsilon = 1e-5
if any(abs(a - b) > epsilon for a, b in zip(position_left, position_right)):
return False
if any(abs(a - b) > epsilon for a, b in zip(scale_left, scale_right)):
return False
dot = sum(a * b for a, b in zip(rotation_left, rotation_right))
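# q and -q encode the same rotation, so compare |dot| rather than dot.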
return abs(1 - abs(dot)) <= 1e-4
def _part_key_priority(source_name: str, part_key: str) -> tuple[int, int, int]:
import re as _re
penalty = 0
if _re.search(r"_AF\d+(_ASM)?_?$", source_name, flags=_re.IGNORECASE):
penalty += 100
if _re.search(r"_af\d+$", part_key, flags=_re.IGNORECASE):
penalty += 100
if _re.search(r"_\d+_\d+$", source_name):
penalty += 25
return (penalty, len(source_name), len(part_key))
def _build_exact_part_key_lookup(part_key_map: dict[str, str]) -> dict[str, str]:
return {
_slugify_occ_name(raw_name): part_key
for raw_name, part_key in part_key_map.items()
if raw_name and part_key
}
def _build_canonical_part_key_lookup(part_key_map: dict[str, str]) -> dict[str, str]:
canonical: dict[str, tuple[tuple[int, int, int], str]] = {}
for raw_name, part_key in part_key_map.items():
normalized = _normalize_occ_node_name(raw_name)
if not normalized or not part_key:
continue
slug = _slugify_occ_name(normalized)
if not slug:
continue
candidate = (_part_key_priority(raw_name, part_key), part_key)
current = canonical.get(slug)
if current is None or candidate[0] < current[0]:
canonical[slug] = candidate
return {slug: part_key for slug, (_, part_key) in canonical.items()}
def _score_semantic_sibling(
mesh_name: str,
semantic_name: str,
part_key: str,
*,
transforms_match: bool,
) -> int:
import re as _re
transform_bonus = 10_000 if transforms_match else 0
canonical_bonus = 1_000 if not _re.search(r"_af\d+$", part_key, flags=_re.IGNORECASE) else 0
exact_bonus = 100 if mesh_name.rstrip("0123456789_") == semantic_name else 0
specificity_bonus = len(semantic_name)
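# Tiered magnitudes (10_000 > 1_000 > 100 > name length) make each bonus
# strictly dominate the ones below it, so ties only break on specificity.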
return transform_bonus + canonical_bonus + exact_bonus + specificity_bonus
def _find_semantic_sibling_part_key(
node_index: int,
nodes: list[dict],
parent_by_child: dict[int, int],
canonical_lookup: dict[str, str],
) -> str | None:
node = nodes[node_index]
if node.get("mesh") is None:
return None
parent_index = parent_by_child.get(node_index)
if parent_index is None:
return None
mesh_name = _normalize_occ_node_name(node.get("name", ""))
if not mesh_name:
return None
best_match: tuple[int, str] | None = None
parent = nodes[parent_index]
for sibling_index in parent.get("children") or []:
if sibling_index == node_index:
continue
sibling = nodes[sibling_index]
if sibling.get("mesh") is not None:
continue
semantic_name = _normalize_occ_node_name(sibling.get("name", ""))
if not _is_instance_variant_name(mesh_name, semantic_name):
continue
sibling_part_key = canonical_lookup.get(_slugify_occ_name(semantic_name))
if not sibling_part_key:
continue
score = _score_semantic_sibling(
mesh_name,
semantic_name,
sibling_part_key,
transforms_match=_have_matching_local_transforms(node, sibling),
)
candidate = (score, sibling_part_key)
if best_match is None or candidate[0] > best_match[0]:
best_match = candidate
return best_match[1] if best_match else None
def _inject_glb_extras(
glb_path: Path,
extras: dict,
part_key_map: dict | None = None,
part_key_occurrences: dict[str, list[str]] | None = None,
) -> None:
"""Patch a GLB binary to add/update scenes[0].extras JSON field."""
import struct as _struct
data = glb_path.read_bytes()
# GLB header: magic(4) + version(4) + total_length(4) = 12 bytes
# JSON chunk: chunk_length(4) + chunk_type(4) + chunk_data(chunk_length bytes)
json_len = _struct.unpack_from("<I", data, 12)[0]
json_type = _struct.unpack_from("<I", data, 16)[0]
if json_type != 0x4E4F534A: # "JSON"
print("WARNING: _inject_glb_extras: unexpected chunk type, skipping extras injection",
file=sys.stderr)
if json_type != 0x4E4F534A:
print(
"WARNING: _inject_glb_extras: unexpected chunk type, skipping extras injection",
file=sys.stderr,
)
return
j = json.loads(data[20: 20 + json_len])
@@ -530,47 +698,94 @@ def _inject_glb_extras(glb_path: Path, extras: dict, part_key_map: dict | None =
else:
j.setdefault("extras", {}).update(extras)
# Stamp per-node extras.partKey so Three.js maps it to mesh.userData.partKey.
# part_key_map keys are raw OCC names with _AF\d+ stripped but not slugified
# (e.g. "GE360-HF_000_P_ASM_1" → "ge360_hf_000_p_asm_1").
# GLB node names are raw OCC names (may or may not have _AF\d+ suffix).
# Normalize both sides to slugified form for the lookup.
if part_key_map:
_norm_re = _re.compile(r'_AF\d+$', _re.IGNORECASE)
exact_lookup = _build_exact_part_key_lookup(part_key_map)
canonical_lookup = _build_canonical_part_key_lookup(part_key_map)
occurrence_lookup: dict[str, list[str]] = {}
if part_key_occurrences:
for raw_name, part_keys in part_key_occurrences.items():
normalized = _normalize_occ_node_name(raw_name)
if not normalized:
continue
cleaned_keys = [part_key for part_key in part_keys if part_key]
if cleaned_keys:
occurrence_lookup[normalized] = cleaned_keys
def _slugify(s: str) -> str:
return _re.sub(r'[^a-z0-9]+', '_', _norm_re.sub('', s).lower()).strip('_')
# Build a slug→partKey lookup from the part_key_map
# part_key_map: {raw_name_no_af_suffix: part_key_slug}
slug_to_part_key: dict = {}
for raw_key, part_key in part_key_map.items():
slug_to_part_key[_slugify(raw_key)] = part_key
occurrence_indices: dict[str, int] = {}
parent_by_child: dict[int, int] = {}
nodes = j.get("nodes", [])
for parent_index, parent in enumerate(nodes):
for child_index in parent.get("children") or []:
if isinstance(child_index, int):
parent_by_child[child_index] = parent_index
n_stamped = 0
for node in j.get("nodes", []):
raw = node.get("name", "")
if not raw:
for node_index, node in enumerate(nodes):
raw_name = node.get("name", "")
if not raw_name:
continue
slug = _slugify(raw)
part_key = slug_to_part_key.get(slug)
normalized_raw = _normalize_occ_node_name(raw_name)
canonical_part_key = canonical_lookup.get(_slugify_occ_name(normalized_raw))
exact_part_key = exact_lookup.get(_slugify_occ_name(raw_name))
if node.get("mesh") is not None:
occurrence_part_key = None
occurrence_keys = occurrence_lookup.get(normalized_raw)
if occurrence_keys:
occurrence_index = occurrence_indices.get(normalized_raw, 0)
if occurrence_index < len(occurrence_keys):
occurrence_part_key = occurrence_keys[occurrence_index]
occurrence_indices[normalized_raw] = occurrence_index + 1
semantic_sibling_part_key = _find_semantic_sibling_part_key(
node_index,
nodes,
parent_by_child,
canonical_lookup,
)
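# Precedence for mesh nodes: per-occurrence key first, then exact raw-name
# match, then canonical normalized match, then the semantic-sibling fallback.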
part_key = (
occurrence_part_key
or exact_part_key
or canonical_part_key
or semantic_sibling_part_key
)
else:
part_key = canonical_part_key or exact_part_key
if part_key:
node.setdefault("extras", {})["partKey"] = part_key
n_stamped += 1
print(f"Stamped partKey extras on {n_stamped} GLB nodes")
new_json = json.dumps(j, separators=(",", ":"))
# Pad to 4-byte boundary with spaces (required by GLB spec)
pad = (4 - len(new_json) % 4) % 4
new_json_bytes = new_json.encode() + b" " * pad
rest = data[20 + json_len:] # BIN chunk and anything after
rest = data[20 + json_len:]
new_chunk = _struct.pack("<II", len(new_json_bytes), 0x4E4F534A) + new_json_bytes
new_total = 12 + len(new_chunk) + len(rest)
new_header = _struct.pack("<III", 0x46546C67, 2, new_total)
glb_path.write_bytes(new_header + new_chunk + rest)
def _prepare_atomic_export_path(output_path: Path) -> Path:
"""Return a non-existent temp file path adjacent to the final GLB target."""
output_path.parent.mkdir(parents=True, exist_ok=True)
temp_path = output_path.with_name(
f".{output_path.stem}.{os.getpid()}.tmp{output_path.suffix}"
)
temp_path.unlink(missing_ok=True)
return temp_path
def _finalize_atomic_export(temp_path: Path, output_path: Path) -> None:
"""Publish the validated temp GLB atomically to the canonical output path."""
output_path.parent.mkdir(parents=True, exist_ok=True)
temp_path.replace(output_path)
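A short usage sketch of the pair above (write_glb is a stand-in for the real writer.Perform call; paths are invented):

from pathlib import Path

def write_glb(path: Path) -> None:
    path.write_bytes(b"glTF")            # placeholder payload for the sketch

out = Path("/renders/part.glb")          # hypothetical target
tmp = _prepare_atomic_export_path(out)   # e.g. /renders/.part.12345.tmp.glb
write_glb(tmp)
if not tmp.exists() or tmp.stat().st_size == 0:
    tmp.unlink(missing_ok=True)
    raise RuntimeError("GLB export produced no data")
_finalize_atomic_export(tmp, out)        # atomic os-level replace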
def main() -> None:
args = parse_args()
color_map: dict = json.loads(args.color_map)
@@ -613,7 +828,7 @@ def main() -> None:
shape_tool.GetFreeShapes(free_labels)
# Collect partKeyMap before tessellation (XCAF names are stable at this point)
part_key_map = _collect_part_key_map(shape_tool, free_labels)
part_key_map, part_key_occurrences = _collect_part_key_mappings(shape_tool, free_labels)
print(f"partKeyMap: {len(part_key_map)} unique part names collected")
print(f"Found {free_labels.Length()} root shape(s), tessellating "
@@ -724,7 +939,10 @@ def main() -> None:
# --- Export GLB via RWGltf_CafWriter (in mm, Z-up → Y-up handled by writer) ---
from OCP.RWGltf import RWGltf_CafWriter
writer = RWGltf_CafWriter(TCollection_AsciiString(args.output_path), True) # True = binary GLB
out = Path(args.output_path)
temp_out = _prepare_atomic_export_path(out)
writer = RWGltf_CafWriter(TCollection_AsciiString(str(temp_out)), True) # True = binary GLB
# MergeFaces=True merges per-face triangulations into a single buffer per shape.
# Without this, RWGltf_CafWriter fails to find per-face Poly_Triangulation data
# from the XCAF component hierarchy and falls back to degenerate meshes (~2 verts/face).
@@ -739,13 +957,13 @@ def main() -> None:
# Older API without metadata dict
ok = writer.Perform(doc, Message_ProgressRange())
out = Path(args.output_path)
if not ok or not out.exists() or out.stat().st_size == 0:
print(f"ERROR: RWGltf_CafWriter.Perform returned ok={ok}, file exists={out.exists()}",
if not ok or not temp_out.exists() or temp_out.stat().st_size == 0:
temp_out.unlink(missing_ok=True)
print(f"ERROR: RWGltf_CafWriter.Perform returned ok={ok}, file exists={temp_out.exists()}",
file=sys.stderr)
sys.exit(1)
print(f"GLB exported: {out.name} ({out.stat().st_size // 1024} KB)")
print(f"GLB exported: {temp_out.name} ({temp_out.stat().st_size // 1024} KB)")
# --- Inject sharp edge pairs and partKeyMap into GLB extras ---
# Blender 5.0 reads scenes[0].extras as scene custom properties on import,
@@ -759,7 +977,12 @@ def main() -> None:
if part_key_map:
extras_payload["partKeyMap"] = part_key_map
if extras_payload:
_inject_glb_extras(out, extras_payload, part_key_map=part_key_map if part_key_map else None)
_inject_glb_extras(
temp_out,
extras_payload,
part_key_map=part_key_map if part_key_map else None,
part_key_occurrences=part_key_occurrences if part_key_occurrences else None,
)
if sharp_pairs:
print(f"Injected {len(sharp_pairs)} sharp edge segment pairs into GLB extras")
if part_key_map:
@@ -767,16 +990,20 @@ def main() -> None:
except Exception as _exc:
print(f"WARNING: GLB extras injection failed (non-fatal): {_exc}", file=sys.stderr)
_finalize_atomic_export(temp_out, out)
print(f"GLB finalized: {out.name} ({out.stat().st_size // 1024} KB)")
# NOTE: RWGltf_CafWriter reads unit metadata from the XDE document (set by
# STEPCAFControl_Reader from the STEP file's SI_UNIT declarations) and converts
# mm → m automatically. It also handles Z-up → Y-up coordinate transform.
# No additional scaling or BRepBuilderAPI_Transform is needed.
try:
main()
except SystemExit:
raise
except Exception:
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
try:
main()
except SystemExit:
raise
except Exception:
traceback.print_exc()
sys.exit(1)
+34 -55
View File
@@ -29,11 +29,14 @@ import argparse
import hashlib
import json
import math
import os
import re
import sys
import traceback
from pathlib import Path
from _blender_materials import build_mat_map_lower, lookup_material_name
# ── CLI ───────────────────────────────────────────────────────────────────────
@@ -574,9 +577,8 @@ def _author_xcaf_to_usd(
mat_usd_path = f"/Root/Looks/{mat_prim_name}"
if not stage.GetPrimAtPath(mat_usd_path):
UsdShade.Material.Define(stage, mat_usd_path)
UsdShade.MaterialBindingAPI(mesh.GetPrim()).Bind(
UsdShade.Material(stage.GetPrimAtPath(mat_usd_path))
)
binding_api = UsdShade.MaterialBindingAPI.Apply(mesh.GetPrim())
binding_api.Bind(UsdShade.Material(stage.GetPrimAtPath(mat_usd_path)))
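# Apply() records the MaterialBindingAPI schema on the prim (apiSchemas
# metadata) before binding; plain construction only wraps the prim.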
manifest_parts.append({
"part_key": part_key,
@@ -968,57 +970,15 @@ def _prim_name(name: str) -> str:
return safe or "unnamed"
# ── Material map lookup (mirrors _blender_materials.build_mat_map_lower) ─────
def _build_mat_map_lower(material_map: dict) -> dict:
"""Build a lowercased material_map with AF-stripped and slug variants.
Same normalization as _blender_materials.build_mat_map_lower() so that
source_name → canonical material name lookup works consistently.
"""
mat_map_lower: dict = {}
for k, v in material_map.items():
kl = k.lower().strip()
mat_map_lower[kl] = v
# Slug variant: replace non-alphanumeric with '_' (same as _generate_part_key)
slug_key = re.sub(r'[^a-z0-9]+', '_', kl).strip('_')
if slug_key and slug_key != kl:
mat_map_lower.setdefault(slug_key, v)
# Strip OCC assembly-frame suffixes: _AF0, _AF0_1, _AF0_1_AF0, etc.
stripped = re.sub(r'(_af\d+(_\d+)?)+$', '', kl)
if stripped != kl:
mat_map_lower.setdefault(stripped, v)
slug_stripped = re.sub(r'[^a-z0-9]+', '_', stripped).strip('_')
if slug_stripped and slug_stripped != stripped:
mat_map_lower.setdefault(slug_stripped, v)
return mat_map_lower
def _lookup_material(source_name: str, part_key: str, mat_map_lower: dict) -> str | None:
"""Look up canonical material name for a part, trying multiple key variants."""
if not mat_map_lower:
return None
# Try source_name (lowered)
sn = source_name.lower().strip()
if sn in mat_map_lower:
return mat_map_lower[sn]
# Try AF-stripped source_name
stripped = re.sub(r'(_af\d+(_\d+)?)+$', '', sn, flags=re.IGNORECASE)
if stripped != sn and stripped in mat_map_lower:
return mat_map_lower[stripped]
# Try slug of source_name (matches part_key generation logic)
slug = re.sub(r'[^a-z0-9]+', '_', sn).strip('_')
if slug and slug in mat_map_lower:
return mat_map_lower[slug]
# Try part_key directly
pk = part_key.lower().strip()
if pk in mat_map_lower:
return mat_map_lower[pk]
# Prefix fallback: longest key that starts with or is started by part_key
for key in sorted(mat_map_lower.keys(), key=len, reverse=True):
if len(key) >= 5 and len(pk) >= 5 and (pk.startswith(key) or key.startswith(pk)):
return mat_map_lower[key]
return None
return lookup_material_name(source_name, mat_map_lower, part_key)
def _atomic_output_path(output_path: Path) -> Path:
return output_path.with_name(
f".{output_path.stem}.{os.getpid()}.tmp{output_path.suffix}"
)
# ── Main ──────────────────────────────────────────────────────────────────────
@@ -1027,7 +987,7 @@ def main() -> None:
args = parse_args()
color_map: dict = json.loads(args.color_map)
raw_material_map: dict = json.loads(args.material_map)
mat_map_lower = _build_mat_map_lower(raw_material_map) if raw_material_map else {}
mat_map_lower = build_mat_map_lower(raw_material_map) if raw_material_map else {}
if mat_map_lower:
print(f"Material map: {len(raw_material_map)} entries ({len(mat_map_lower)} with variants)")
@@ -1165,7 +1125,14 @@ def main() -> None:
print(f"WARNING: palette colors failed (non-fatal): {exc}", file=sys.stderr)
# ── Create USD stage ──────────────────────────────────────────────────
stage = Usd.Stage.CreateNew(str(output_path))
temp_output_path = _atomic_output_path(output_path)
try:
if temp_output_path.exists():
temp_output_path.unlink()
except OSError:
pass
stage = Usd.Stage.CreateNew(str(temp_output_path))
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(stage, 0.001) # mm; Blender handles m conversion on import
@@ -1206,7 +1173,19 @@ def main() -> None:
n_parts = counters["n_parts"]
n_empty = counters["n_empty"]
stage.Save()
try:
stage.Save()
if temp_output_path.exists():
os.chmod(temp_output_path, 0o664)
os.replace(temp_output_path, output_path)
os.chmod(output_path, 0o664)
except Exception:
try:
if temp_output_path.exists():
temp_output_path.unlink()
except OSError:
pass
raise
sz = output_path.stat().st_size // 1024 if output_path.exists() else 0
n_mat_assigned = sum(1 for p in manifest_parts if p.get("canonical_material"))
+15
View File
@@ -13,6 +13,10 @@ import json
import math
from mathutils import Vector, Matrix
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from _blender_template_inputs import apply_template_inputs
# ── Colour palette (matches blender_render.py / Three.js renderer) ───────────
PALETTE_HEX = [
"#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
@@ -369,6 +373,14 @@ def main():
_idx = argv.index("--material-override")
_material_override = argv[_idx + 1] if _idx + 1 < len(argv) else None
template_inputs = {}
if "--template-inputs" in argv:
_idx = argv.index("--template-inputs")
try:
template_inputs = json.loads(argv[_idx + 1]) if _idx + 1 < len(argv) else {}
except Exception:
template_inputs = {}
# Ensure scripts dir is on path for shared module imports
_scripts_dir = os.path.dirname(os.path.abspath(__file__))
if _scripts_dir not in sys.path:
@@ -421,6 +433,8 @@ def main():
print("[turntable_render] no template — using factory settings (Mode A)")
if material_library_path:
print(f"[turntable_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
if template_inputs:
print(f"[turntable_render] template_inputs={template_inputs}")
# ── SCENE SETUP ──────────────────────────────────────────────────────────
_usd_mat_lookup: dict = {} # populated by import_usd_file when USD path is used
@@ -429,6 +443,7 @@ def main():
# ── MODE B: Template-based render ────────────────────────────────────
print(f"[turntable_render] Opening template: {template_path}")
bpy.ops.wm.open_mainfile(filepath=template_path)
apply_template_inputs(template_inputs)
# Find or create target collection
target_col = _ensure_collection(target_collection)