feat: performance optimizations + part-materials validation
- @timed_step decorator with wall-clock + RSS tracking (pipeline_logger) - Blender timing laps for sharp edges and material assignment - MeshRegistry pattern: eliminate 13 scene.traverse() calls across viewers - Lazy material cloning (clone-on-first-write in both viewers) - _pipeline_session context manager: 7 create_engine() → 2 in render_thumbnail - KD-tree spatial pre-filter for sharp edge marking (bbox-based pruning) - Batch material library append: N bpy.ops.wm.append → single bpy.data.libraries.load - GMSH single-session batching: compound all solids into one tessellation call - Validate part-materials save endpoints against parsed_objects (prevents bogus keys) - ROADMAP updated with completion status Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -3,10 +3,63 @@ from __future__ import annotations
|
||||
|
||||
import os
|
||||
import re as _re
|
||||
import time as _time
|
||||
|
||||
FAILED_MATERIAL_NAME = "SCHAEFFLER_059999_FailedMaterial"
|
||||
|
||||
|
||||
def _batch_append_materials(mat_lib_path: str, names: set[str]) -> dict:
    """Append multiple materials from a .blend file in a single open.

    Uses bpy.data.libraries.load() to open the .blend once instead of
    N separate bpy.ops.wm.append() calls (each reopens the file).
    Falls back to individual append for any materials that fail to load.

    Args:
        mat_lib_path: Filesystem path to the material-library ``.blend`` file.
        names: Material names to append from the library.

    Returns:
        Mapping of material name -> loaded material datablock, containing
        only the materials that were successfully appended. Never raises;
        failures are logged and simply omitted from the result.
    """
    import bpy  # type: ignore[import]

    result: dict = {}
    if not names:
        return result

    try:
        with bpy.data.libraries.load(mat_lib_path, link=False) as (data_from, data_to):
            # data_from.materials lists all material names in the .blend
            available = set(data_from.materials)
            to_load = [n for n in names if n in available]
            not_found = names - available
            data_to.materials = to_load
        # After the context manager closes, materials are loaded into bpy.data
        for mat_name in to_load:
            mat = bpy.data.materials.get(mat_name)
            if mat:
                result[mat_name] = mat
                print(f"[blender_render] batch-appended material: {mat_name}")
            else:
                print(f"[blender_render] WARNING: material '{mat_name}' not found after batch append")
        if not_found:
            # Cap the listing at 10 names to keep the log line bounded.
            print(f"[blender_render] WARNING: materials not in library: {sorted(not_found)[:10]}")
    except Exception as exc:
        print(f"[blender_render] WARNING: batch append failed ({exc}), falling back to individual append")
        # Fallback: individual append for each material
        for mat_name in names:
            if mat_name in result:
                continue
            try:
                bpy.ops.wm.append(
                    filepath=f"{mat_lib_path}/Material/{mat_name}",
                    directory=f"{mat_lib_path}/Material/",
                    filename=mat_name,
                    link=False,
                )
                mat = bpy.data.materials.get(mat_name)
                if mat:
                    result[mat_name] = mat
            except Exception as fallback_exc:
                # Best-effort by design, but log the failure so missing
                # materials are diagnosable instead of silently dropped.
                print(f"[blender_render] WARNING: individual append failed for '{mat_name}': {fallback_exc}")

    return result
|
||||
|
||||
|
||||
def assign_failed_material(part_obj) -> None:
|
||||
"""Assign the standard fallback material (magenta) when no library material matches.
|
||||
|
||||
@@ -78,32 +131,28 @@ def apply_material_library_direct(
|
||||
|
||||
import bpy # type: ignore[import]
|
||||
|
||||
_t0 = _time.monotonic()
|
||||
|
||||
# Collect unique material names needed
|
||||
needed = set(material_lookup.values())
|
||||
if not needed:
|
||||
return
|
||||
|
||||
# Append materials from library
|
||||
# Batch-append materials from library (single file open)
|
||||
appended: dict = {}
|
||||
_t_append = _time.monotonic()
|
||||
# Check already-loaded materials first
|
||||
still_needed = set()
|
||||
for mat_name in needed:
|
||||
if mat_name in bpy.data.materials:
|
||||
appended[mat_name] = bpy.data.materials[mat_name]
|
||||
continue
|
||||
inner_path = f"{mat_lib_path}/Material/{mat_name}"
|
||||
try:
|
||||
bpy.ops.wm.append(
|
||||
filepath=inner_path,
|
||||
directory=f"{mat_lib_path}/Material/",
|
||||
filename=mat_name,
|
||||
link=False,
|
||||
)
|
||||
if mat_name in bpy.data.materials:
|
||||
appended[mat_name] = bpy.data.materials[mat_name]
|
||||
print(f"[blender_render] appended material: {mat_name}")
|
||||
else:
|
||||
print(f"[blender_render] WARNING: material '{mat_name}' not found after append")
|
||||
except Exception as exc:
|
||||
print(f"[blender_render] WARNING: failed to append material '{mat_name}': {exc}")
|
||||
else:
|
||||
still_needed.add(mat_name)
|
||||
# Load remaining from .blend in one pass
|
||||
if still_needed:
|
||||
appended.update(_batch_append_materials(mat_lib_path, still_needed))
|
||||
_append_dur = _time.monotonic() - _t_append
|
||||
print(f"[blender_render] TIMING material_append_direct={_append_dur:.2f}s ({len(appended)}/{len(needed)} materials)", flush=True)
|
||||
|
||||
if not appended:
|
||||
return
|
||||
@@ -121,8 +170,11 @@ def apply_material_library_direct(
|
||||
else:
|
||||
unmatched_names.append(part.name)
|
||||
|
||||
print(f"[blender_render] direct material assignment (USD primvars): "
|
||||
f"{assigned_count}/{len(parts)} parts matched", flush=True)
|
||||
_assign_dur = _time.monotonic() - _t_append - _append_dur + (_time.monotonic() - _t0 - _append_dur)
|
||||
_total = _time.monotonic() - _t0
|
||||
print(f"[blender_render] TIMING material_assign_direct={_total:.2f}s "
|
||||
f"(append={_append_dur:.2f}s, assign={_total - _append_dur:.2f}s, "
|
||||
f"{assigned_count}/{len(parts)} matched)", flush=True)
|
||||
if unmatched_names:
|
||||
print(f"[blender_render] unmatched (no primvar): {unmatched_names[:10]}", flush=True)
|
||||
for part in parts:
|
||||
@@ -153,6 +205,8 @@ def apply_material_library(
|
||||
|
||||
import bpy # type: ignore[import]
|
||||
|
||||
_t0 = _time.monotonic()
|
||||
|
||||
if part_names_ordered is None:
|
||||
part_names_ordered = []
|
||||
|
||||
@@ -161,24 +215,12 @@ def apply_material_library(
|
||||
if not needed:
|
||||
return
|
||||
|
||||
# Append materials from library
|
||||
# Batch-append materials from library (single file open)
|
||||
appended: dict = {}
|
||||
for mat_name in needed:
|
||||
inner_path = f"{mat_lib_path}/Material/{mat_name}"
|
||||
try:
|
||||
bpy.ops.wm.append(
|
||||
filepath=inner_path,
|
||||
directory=f"{mat_lib_path}/Material/",
|
||||
filename=mat_name,
|
||||
link=False,
|
||||
)
|
||||
if mat_name in bpy.data.materials:
|
||||
appended[mat_name] = bpy.data.materials[mat_name]
|
||||
print(f"[blender_render] appended material: {mat_name}")
|
||||
else:
|
||||
print(f"[blender_render] WARNING: material '{mat_name}' not found after append")
|
||||
except Exception as exc:
|
||||
print(f"[blender_render] WARNING: failed to append material '{mat_name}': {exc}")
|
||||
_t_append = _time.monotonic()
|
||||
appended.update(_batch_append_materials(mat_lib_path, needed))
|
||||
_append_dur = _time.monotonic() - _t_append
|
||||
print(f"[blender_render] TIMING material_append={_append_dur:.2f}s ({len(appended)}/{len(needed)} materials)", flush=True)
|
||||
|
||||
if not appended:
|
||||
return
|
||||
@@ -229,7 +271,10 @@ def apply_material_library(
|
||||
else:
|
||||
unmatched_names.append(part.name)
|
||||
|
||||
print(f"[blender_render] material assignment: {assigned_count}/{len(parts)} parts matched", flush=True)
|
||||
_total = _time.monotonic() - _t0
|
||||
print(f"[blender_render] TIMING material_assign={_total:.2f}s "
|
||||
f"(append={_append_dur:.2f}s, match={_total - _append_dur:.2f}s, "
|
||||
f"{assigned_count}/{len(parts)} matched)", flush=True)
|
||||
if unmatched_names:
|
||||
print(f"[blender_render] unmatched parts → assigning {FAILED_MATERIAL_NAME}: {unmatched_names[:10]}", flush=True)
|
||||
unmatched_set = set(unmatched_names)
|
||||
|
||||
@@ -56,9 +56,12 @@ def apply_sharp_edges_from_occ(parts: list, sharp_edge_pairs: list) -> None:
|
||||
if not sharp_edge_pairs:
|
||||
return
|
||||
|
||||
import time as _time
|
||||
import bmesh # type: ignore[import]
|
||||
import mathutils # type: ignore[import]
|
||||
|
||||
_t0 = _time.monotonic()
|
||||
|
||||
SCALE = 0.001 # mm → m
|
||||
TOL = 0.0005 # 0.5 mm in metres
|
||||
|
||||
@@ -71,8 +74,33 @@ def apply_sharp_edges_from_occ(parts: list, sharp_edge_pairs: list) -> None:
|
||||
v1 = mathutils.Vector((pair[1][0] * SCALE, -pair[1][2] * SCALE, pair[1][1] * SCALE))
|
||||
occ_pairs.append((v0, v1))
|
||||
|
||||
_t_convert = _time.monotonic()
|
||||
print(f"[blender_render] TIMING sharp_edges_convert={_t_convert - _t0:.3f}s ({len(occ_pairs)} pairs)", flush=True)
|
||||
|
||||
# ── Spatial pre-filter: build a KD-tree over OCC pair midpoints ────────
|
||||
# For each part, query the midpoint KD-tree with the part's bbox radius
|
||||
# to get only nearby pairs instead of testing all N pairs × M parts.
|
||||
_t_spatial = _time.monotonic()
|
||||
pair_midpoints = []
|
||||
pair_radii = [] # half-length of each pair (max distance from midpoint to endpoint)
|
||||
for v0, v1 in occ_pairs:
|
||||
mid = (v0 + v1) * 0.5
|
||||
pair_midpoints.append(mid)
|
||||
pair_radii.append((v0 - mid).length)
|
||||
|
||||
pair_kd = mathutils.kdtree.KDTree(len(pair_midpoints))
|
||||
for i, mid in enumerate(pair_midpoints):
|
||||
pair_kd.insert(mid, i)
|
||||
pair_kd.balance()
|
||||
_t_spatial_done = _time.monotonic()
|
||||
print(f"[blender_render] TIMING sharp_edges_spatial_index={_t_spatial_done - _t_spatial:.3f}s", flush=True)
|
||||
|
||||
marked_total = 0
|
||||
kd_build_time = 0.0
|
||||
match_time = 0.0
|
||||
pairs_tested_total = 0
|
||||
for obj in parts:
|
||||
_t_kd = _time.monotonic()
|
||||
bm = bmesh.new()
|
||||
bm.from_mesh(obj.data)
|
||||
bm.verts.ensure_lookup_table()
|
||||
@@ -86,8 +114,28 @@ def apply_sharp_edges_from_occ(parts: list, sharp_edge_pairs: list) -> None:
|
||||
kd.insert(world_mat @ v.co, v.index)
|
||||
kd.balance()
|
||||
|
||||
# Compute part's world-space bounding box center and search radius
|
||||
from mathutils import Vector # type: ignore[import]
|
||||
corners = [world_mat @ Vector(c) for c in obj.bound_box]
|
||||
bbox_min = Vector((min(c.x for c in corners), min(c.y for c in corners), min(c.z for c in corners)))
|
||||
bbox_max = Vector((max(c.x for c in corners), max(c.y for c in corners), max(c.z for c in corners)))
|
||||
bbox_center = (bbox_min + bbox_max) * 0.5
|
||||
bbox_half_diag = (bbox_max - bbox_min).length * 0.5
|
||||
|
||||
kd_build_time += _time.monotonic() - _t_kd
|
||||
|
||||
_t_match = _time.monotonic()
|
||||
marked = 0
|
||||
for v0_occ, v1_occ in occ_pairs:
|
||||
|
||||
# Query pair midpoints within bbox_half_diag + max_pair_radius + tolerance
|
||||
# This guarantees we don't miss any pair whose endpoints could be inside the bbox
|
||||
max_pair_radius = max(pair_radii) if pair_radii else 0.0
|
||||
search_radius = bbox_half_diag + max_pair_radius + TOL
|
||||
nearby = pair_kd.find_range(bbox_center, search_radius)
|
||||
pairs_tested_total += len(nearby)
|
||||
|
||||
for _co, pair_idx, _dist in nearby:
|
||||
v0_occ, v1_occ = occ_pairs[pair_idx]
|
||||
_co0, idx0, dist0 = kd.find(v0_occ)
|
||||
_co1, idx1, dist1 = kd.find(v1_occ)
|
||||
if dist0 > TOL or dist1 > TOL:
|
||||
@@ -102,12 +150,18 @@ def apply_sharp_edges_from_occ(parts: list, sharp_edge_pairs: list) -> None:
|
||||
if edge is not None and edge.smooth:
|
||||
edge.smooth = False
|
||||
marked += 1
|
||||
match_time += _time.monotonic() - _t_match
|
||||
|
||||
bm.to_mesh(obj.data)
|
||||
bm.free()
|
||||
marked_total += marked
|
||||
|
||||
print(f"[blender_render] OCC sharp edges applied: {marked_total} edges marked across {len(parts)} parts", flush=True)
|
||||
_total = _time.monotonic() - _t0
|
||||
pairs_skipped = len(occ_pairs) * len(parts) - pairs_tested_total
|
||||
print(f"[blender_render] TIMING sharp_edges={_total:.2f}s "
|
||||
f"(kd_build={kd_build_time:.2f}s, matching={match_time:.2f}s, "
|
||||
f"pairs={len(occ_pairs)}, parts={len(parts)}, marked={marked_total}, "
|
||||
f"tested={pairs_tested_total}, skipped={pairs_skipped})", flush=True)
|
||||
|
||||
|
||||
def setup_shadow_catcher(parts: list) -> None:
|
||||
|
||||
@@ -647,6 +647,9 @@ def main() -> None:
|
||||
)
|
||||
|
||||
# Step 2: GMSH override for SOLID shapes (better seam topology)
|
||||
# Batch all eligible solids into a single compound and tessellate in one
|
||||
# GMSH session — avoids N × (gmsh init + brep write + brep read + finalize)
|
||||
# overhead. GMSH's internal OpenMP threading parallelizes across surfaces.
|
||||
_seen_shapes: list = [] # shapes already GMSH-tessellated; compared via IsSame()
|
||||
|
||||
solids = []
|
||||
@@ -661,6 +664,10 @@ def main() -> None:
|
||||
solids.append(exp.Current())
|
||||
exp.Next()
|
||||
|
||||
from OCP.TopoDS import TopoDS_Compound as _Compound
|
||||
from OCP.BRep import BRep_Builder as _BBuilder
|
||||
|
||||
eligible = []
|
||||
for solid in solids:
|
||||
# Skip REVERSED (mirrored) solids — keep BRepMesh tessellation.
|
||||
# GMSH produces inverted-Jacobian meshes for negative-scale shapes.
|
||||
@@ -673,9 +680,19 @@ def main() -> None:
|
||||
continue
|
||||
# Strip location: GMSH tessellates in definition space.
|
||||
# The XCAF writer applies instance transforms at GLB export time.
|
||||
solid_def = solid.Located(_TopLoc_Location())
|
||||
_tessellate_with_gmsh(solid_def, args.linear_deflection, args.angular_deflection)
|
||||
eligible.append(solid.Located(_TopLoc_Location()))
|
||||
_seen_shapes.append(solid)
|
||||
|
||||
if eligible:
|
||||
if len(eligible) == 1:
|
||||
_tessellate_with_gmsh(eligible[0], args.linear_deflection, args.angular_deflection)
|
||||
else:
|
||||
compound = _Compound()
|
||||
bb = _BBuilder()
|
||||
bb.MakeCompound(compound)
|
||||
for s in eligible:
|
||||
bb.Add(compound, s)
|
||||
_tessellate_with_gmsh(compound, args.linear_deflection, args.angular_deflection)
|
||||
else:
|
||||
for i in range(1, free_labels.Length() + 1):
|
||||
shape = shape_tool.GetShape_s(free_labels.Value(i))
|
||||
|
||||
Reference in New Issue
Block a user