HartOMat/render-worker/scripts/_blender_materials.py
"""Material assignment helpers for Blender headless renders."""
from __future__ import annotations

import os
import re as _re
import time as _time

FAILED_MATERIAL_NAME = "HARTOMAT_059999_FailedMaterial"


def _find_material_with_nodes(base_name: str):
    """Find a material by name that actually has shader nodes.

    Blender's USD importer creates empty stub materials (use_nodes=True,
    node_tree has 0 nodes) from USD material bindings. When we later
    append the real material from a .blend library, Blender renames it
    with a .001/.002 suffix to avoid the name collision.

    This helper searches bpy.data.materials for the version that has
    actual shader nodes, preferring exact name match, then .NNN suffixes.
    """
    import bpy  # type: ignore[import]

    # Exact name first
    exact = bpy.data.materials.get(base_name)
    if exact and exact.node_tree and len(exact.node_tree.nodes) > 0:
        return exact
    # Search for .NNN suffixed versions
    for mat in bpy.data.materials:
        if not mat.name.startswith(base_name):
            continue
        suffix = mat.name[len(base_name):]
        if suffix == "" or _re.match(r'^\.\d{3}$', suffix):
            if mat.node_tree and len(mat.node_tree.nodes) > 0:
                return mat
    return None
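
# Example (illustrative; the material name below is invented, not from the library):
#
#   After a USD import leaves a 0-node stub "HARTOMAT_010203_Steel" and the
#   real material is appended as "HARTOMAT_010203_Steel.001",
#   _find_material_with_nodes("HARTOMAT_010203_Steel") returns the ".001"
#   datablock, because only that one carries shader nodes.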


def _iter_object_name_variants(raw_name: str):
    """Yield conservative object-name variants for direct material lookup."""
    if not raw_name:
        return
    seen: set[str] = set()

    def _emit(value: str):
        value = (value or "").strip()
        if value and value not in seen:
            seen.add(value)
            return value
        return None

    exact = _emit(raw_name)
    if exact:
        yield exact
    no_blender_suffix = _emit(_re.sub(r'\.\d{3}$', '', raw_name))
    if no_blender_suffix:
        yield no_blender_suffix
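
# Example (illustrative; the object name is invented):
#
#   list(_iter_object_name_variants("Bracket_AF0.001"))
#   # → ["Bracket_AF0.001", "Bracket_AF0"]
#   # (exact name first, then with Blender's ".NNN" duplicate suffix removed)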


def _batch_append_materials(mat_lib_path: str, names: set[str]) -> dict:
    """Append multiple materials from a .blend file in a single open.

    Uses bpy.data.libraries.load() to open the .blend once instead of
    N separate bpy.ops.wm.append() calls (each reopens the file).
    Falls back to individual append for any materials that fail to load.

    Handles empty material stubs left by Blender's USD importer: when a
    stub exists with the target name, the library material gets renamed
    with a .NNN suffix. Blender returns the actual loaded datablocks in
    data_to.materials, so we can use those directly instead of re-scanning
    bpy.data.materials after the library load.
    """
    import bpy  # type: ignore[import]

    result: dict = {}
    if not names:
        return result
    try:
        with bpy.data.libraries.load(mat_lib_path, link=False) as (data_from, data_to):
            # data_from.materials lists all material names in the .blend
            available = set(data_from.materials)
            to_load = [n for n in names if n in available]
            not_found = names - available
            requested_names = [str(n) for n in to_load]
            data_to.materials = list(requested_names)
        # After the context manager closes, data_to.materials contains the actual
        # appended datablocks in the same order as to_load, including any .NNN
        # renames Blender introduced to avoid collisions with USD stubs.
        loaded_materials = list(data_to.materials)
        for mat_name, mat in zip(requested_names, loaded_materials):
            if mat:
                result[mat_name] = mat
                if mat.name != mat_name:
                    print(f"[blender_render] batch-appended material: {mat_name} (as '{mat.name}', stub collision)")
                else:
                    print(f"[blender_render] batch-appended material: {mat_name}")
                continue
            print(f"[blender_render] WARNING: material '{mat_name}' not returned after batch append")
        if not_found:
            print(f"[blender_render] WARNING: materials not in library: {sorted(not_found)[:10]}")
    except Exception as exc:
        print(f"[blender_render] WARNING: batch append failed ({exc}), falling back to individual append")
    # Fallback: individual append for each material still missing from result
    for mat_name in names:
        if mat_name in result:
            continue
        try:
            bpy.ops.wm.append(
                filepath=f"{mat_lib_path}/Material/{mat_name}",
                directory=f"{mat_lib_path}/Material/",
                filename=mat_name,
                link=False,
            )
            mat = _find_material_with_nodes(mat_name)
            if mat:
                result[mat_name] = mat
        except Exception:
            pass
    return result
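
# Example (illustrative; the library path and material names are made up):
#
#   needed = {"HARTOMAT_010203_Steel", "HARTOMAT_040506_Rubber"}
#   loaded = _batch_append_materials("/data/material_library.blend", needed)
#   # loaded maps each requested name to its appended Material datablock,
#   # even when a USD stub forced a rename such as "HARTOMAT_010203_Steel.001".
#   # Names missing from the library are simply absent from the result dict.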


def assign_failed_material(part_obj) -> None:
    """Assign the standard fallback material (magenta) when no library material matches.

    Reuses HARTOMAT_059999_FailedMaterial if already loaded; otherwise
    creates a simple magenta Principled BSDF node tree.
    """
    import bpy  # type: ignore[import]

    mat = bpy.data.materials.get(FAILED_MATERIAL_NAME)
    if mat is None:
        mat = bpy.data.materials.new(name=FAILED_MATERIAL_NAME)
        mat.use_nodes = True
        bsdf = mat.node_tree.nodes.get("Principled BSDF")
        if bsdf:
            bsdf.inputs["Base Color"].default_value = (1.0, 0.0, 1.0, 1.0)  # magenta
            bsdf.inputs["Roughness"].default_value = 0.6
    part_obj.data.materials.clear()
    part_obj.data.materials.append(mat)


def build_mat_map_lower(material_map: dict) -> dict:
    """Return a lowercased version of material_map with _AF\\d+ suffix variants added.

    Both the original key and the AF-stripped key are inserted so that GLB
    object names (which may lack the _AF suffixes that OCC adds to mat_map keys)
    can match in either direction.
    """
    mat_map_lower: dict = {}
    for k, v in material_map.items():
        kl = k.lower().strip()
        mat_map_lower[kl] = v
        # USD path: part_key slugs replace ALL non-alphanumeric chars with '_'
        # (same regex as generate_part_key in export_step_to_usd.py).
        # E.g. "F-802007_TR4-D1" → "f_802007_tr4_d1". Add slug variant so
        # hyphenated OCC names match USD-imported Blender objects.
        slug_key = _re.sub(r'[^a-z0-9]+', '_', kl).strip('_')
        if slug_key and slug_key != kl:
            mat_map_lower.setdefault(slug_key, v)
        # Strip OCC assembly-frame suffixes: _AF0, _AF0_1, _AF0_1_AF0, etc.
        # Pattern matches one or more groups of _AF<n> optionally followed by
        # an instance number _<n>, anchored at end of string.
        stripped = _re.sub(r'(_af\d+(_\d+)?)+$', '', kl)
        if stripped != kl:
            mat_map_lower.setdefault(stripped, v)
            # Also slug the AF-stripped key for USD path where part_key is
            # both AF-stripped AND slugified (e.g. "ge360-hf_..." → "ge360_hf_...")
            slug_stripped = _re.sub(r'[^a-z0-9]+', '_', stripped).strip('_')
            if slug_stripped and slug_stripped != stripped:
                mat_map_lower.setdefault(slug_stripped, v)
    return mat_map_lower
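
# Example (illustrative; the part and material names are invented):
#
#   build_mat_map_lower({"F-802007_TR4-D1_AF0": "HARTOMAT_010203_Steel"})
#   # → {
#   #     "f-802007_tr4-d1_af0": "HARTOMAT_010203_Steel",  # lowercased key
#   #     "f_802007_tr4_d1_af0": "HARTOMAT_010203_Steel",  # slug variant
#   #     "f-802007_tr4-d1":     "HARTOMAT_010203_Steel",  # _AF suffix stripped
#   #     "f_802007_tr4_d1":     "HARTOMAT_010203_Steel",  # stripped + slugged
#   #   }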


def _common_prefix_len(left: str, right: str) -> int:
    limit = min(len(left), len(right))
    idx = 0
    while idx < limit and left[idx] == right[idx]:
        idx += 1
    return idx


def _lookup_by_common_prefix(query: str, mat_map: dict) -> str | None:
    """Resolve near-matches when USD/source names omit trailing serial suffixes.

    This is intentionally conservative: only return a material when the
    strongest common-prefix matches all point to the same material.
    """
    if not query or not mat_map:
        return None
    scored: list[tuple[float, int, int, str]] = []
    query_len = len(query)
    for key, material in mat_map.items():
        prefix_len = _common_prefix_len(query, key)
        if prefix_len < 12:
            continue
        ratio = prefix_len / max(query_len, len(key))
        if ratio < 0.68:
            continue
        scored.append((ratio, prefix_len, len(key), material))
    if not scored:
        return None
    scored.sort(reverse=True)
    top_ratio, top_prefix, _, top_material = scored[0]
    contenders = [
        material
        for ratio, prefix_len, _, material in scored
        if ratio >= top_ratio - 0.02 and prefix_len >= top_prefix - 2
    ]
    unique_materials = set(contenders)
    if len(unique_materials) == 1:
        return top_material
    return None


def _lookup_by_prefix(query: str, mat_map: dict) -> str | None:
    """Resolve prefix-compatible matches when all contenders share one material."""
    if not query or not mat_map:
        return None
    contenders: list[tuple[int, str]] = []
    for key, material in mat_map.items():
        if len(key) >= 5 and len(query) >= 5 and (
            query.startswith(key) or key.startswith(query)
        ):
            contenders.append((len(key), material))
    if not contenders:
        return None
    contenders.sort(reverse=True)
    top_len = contenders[0][0]
    close_materials = {
        material for key_len, material in contenders if key_len >= top_len - 2
    }
    if len(close_materials) == 1:
        return contenders[0][1]
    return None
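
# Example (illustrative; the key and material name are invented):
#
#   mat_map = {"ge360_hf_bracket_left": "HARTOMAT_020000_Alu"}
#   _lookup_by_prefix("ge360_hf_bracket_left_01", mat_map)
#   # → "HARTOMAT_020000_Alu"  (query starts with the key; single contender)
#   _lookup_by_common_prefix("ge360_hf_bracket_lft", mat_map)
#   # → "HARTOMAT_020000_Alu"  (18-char common prefix, ratio ≈ 0.86; both above
#   #    the 12-char / 0.68 thresholds, and all contenders share one material)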


def lookup_material_name(raw_name: str, mat_map: dict, *fallback_names: str) -> str | None:
    """Resolve a material name against normalized mat_map keys.

    Lookup order:
    1. exact normalized key
    2. prefix-compatible key
    3. conservative common-prefix fuzzy match
    """
    candidates = [raw_name, *fallback_names]
    seen: set[str] = set()
    for candidate in candidates:
        if not candidate:
            continue
        normalized = candidate.lower().strip()
        variants = [normalized]
        stripped = _re.sub(r'(_af\d+(_\d+)?)+$', '', normalized, flags=_re.IGNORECASE)
        if stripped != normalized:
            variants.append(stripped)
        no_instance = _re.sub(r'_\d+$', '', stripped)
        if no_instance and no_instance not in variants:
            variants.append(no_instance)
        for variant in list(variants):
            slug = _re.sub(r'[^a-z0-9]+', '_', variant).strip('_')
            if slug and slug not in variants:
                variants.append(slug)
        deduped_variants = [variant for variant in variants if variant and not (variant in seen or seen.add(variant))]
        for variant in deduped_variants:
            mat_name = mat_map.get(variant)
            if mat_name:
                return mat_name
        for variant in deduped_variants:
            mat_name = _lookup_by_prefix(variant, mat_map)
            if mat_name:
                return mat_name
        for variant in deduped_variants:
            mat_name = _lookup_by_common_prefix(variant, mat_map)
            if mat_name:
                return mat_name
    return None
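
# Example (illustrative; the key and material name are invented):
#
#   mat_map = {"f_802007_tr4_d1": "HARTOMAT_010203_Steel"}
#   lookup_material_name("F-802007_TR4-D1_AF0", mat_map)
#   # → "HARTOMAT_010203_Steel": the _AF0 suffix is stripped and the
#   #   remaining name slugified before the exact-key lookup succeeds.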


def apply_material_library_direct(
    parts: list,
    mat_lib_path: str,
    material_lookup: dict[str, str],
) -> None:
    """Assign materials from library using a direct object_name → material_name mapping.

    This bypasses all name-matching heuristics — the mapping comes from USD
    customData (hartomat:canonicalMaterialName) read via pxr after Blender import.
    Parts not present in material_lookup receive FAILED_MATERIAL_NAME.

    material_lookup: {blender_object_name: canonical_material_name}
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        print(f"[blender_render] material library not found: {mat_lib_path}")
        return
    import bpy  # type: ignore[import]

    _t0 = _time.monotonic()
    # Collect unique material names needed
    needed = set(material_lookup.values())
    if not needed:
        return
    # Batch-append materials from library (single file open)
    appended: dict = {}
    _t_append = _time.monotonic()
    # Check already-loaded materials first — but skip empty stubs created by
    # Blender's USD importer (use_nodes=True but node_tree has 0 nodes).
    # Those stubs must be loaded from the library via _batch_append_materials,
    # which uses _find_material_with_nodes() to resolve stub collisions.
    still_needed = set()
    for mat_name in needed:
        existing = _find_material_with_nodes(mat_name)
        if existing:
            appended[mat_name] = existing
        else:
            still_needed.add(mat_name)
    # Load remaining from .blend in one pass
    if still_needed:
        appended.update(_batch_append_materials(mat_lib_path, still_needed))
    _append_dur = _time.monotonic() - _t_append
    print(f"[blender_render] TIMING material_append_direct={_append_dur:.2f}s ({len(appended)}/{len(needed)} materials)", flush=True)
    if not appended:
        return
    assigned_count = 0
    unmatched_names = []
    for part in parts:
        mat_name = None
        for candidate in _iter_object_name_variants(part.name):
            mat_name = material_lookup.get(candidate)
            if mat_name:
                break
        if mat_name and mat_name in appended:
            if part.data.users > 1:
                part.data = part.data.copy()
            part.data.materials.clear()
            part.data.materials.append(appended[mat_name])
            assigned_count += 1
        else:
            unmatched_names.append(part.name)
    _total = _time.monotonic() - _t0
    print(f"[blender_render] TIMING material_assign_direct={_total:.2f}s "
          f"(append={_append_dur:.2f}s, assign={_total - _append_dur:.2f}s, "
          f"{assigned_count}/{len(parts)} matched)", flush=True)
    if unmatched_names:
        print(f"[blender_render] unmatched (no primvar): {unmatched_names[:10]}", flush=True)
        unmatched_set = set(unmatched_names)
        for part in parts:
            if part.name in unmatched_set:
                if part.data.users > 1:
                    part.data = part.data.copy()
                assign_failed_material(part)
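
# Example (illustrative; object names, the mapping and the library path are made up):
#
#   import bpy
#   parts = [obj for obj in bpy.data.objects if obj.type == 'MESH']
#   lookup = {"Bracket_AF0": "HARTOMAT_010203_Steel"}  # read from USD customData
#   apply_material_library_direct(parts, "/data/material_library.blend", lookup)
#   # "Bracket_AF0" receives the library material; mesh parts without an entry
#   # in the mapping get the magenta HARTOMAT_059999_FailedMaterial sentinel.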


def apply_material_library(
    parts: list,
    mat_lib_path: str,
    mat_map: dict,
    part_names_ordered: list | None = None,
) -> None:
    """Append materials from library .blend and assign to parts via material_map.

    GLB-imported objects are named after STEP parts, so matching is by name
    (stripping the Blender .NNN suffix for duplicates). Falls back to
    part_names_ordered index-based matching.

    mat_map: {part_name_lower: material_name}
    Parts without a match receive the FAILED_MATERIAL_NAME sentinel.
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        print(f"[blender_render] material library not found: {mat_lib_path}")
        return
    import bpy  # type: ignore[import]

    _t0 = _time.monotonic()
    if part_names_ordered is None:
        part_names_ordered = []
    # Collect unique material names needed
    needed = set(mat_map.values())
    if not needed:
        return
    # Batch-append materials from library (single file open)
    appended: dict = {}
    _t_append = _time.monotonic()
    appended.update(_batch_append_materials(mat_lib_path, needed))
    _append_dur = _time.monotonic() - _t_append
    print(f"[blender_render] TIMING material_append={_append_dur:.2f}s ({len(appended)}/{len(needed)} materials)", flush=True)
    if not appended:
        return
    # Assign materials to parts — primary: name-based (GLB object names),
    # secondary: index-based via part_names_ordered
    assigned_count = 0
    unmatched_names = []
    for i, part in enumerate(parts):
        # Try name-based matching first (strip Blender .NNN suffix)
        base_name = _re.sub(r'\.\d{3}$', '', part.name)
        # Strip OCC assembly-instance suffix (_AF0, _AF1, …) — GLB object
        # names may or may not have them while mat_map keys might.
        _prev = None
        while _prev != base_name:
            _prev = base_name
            base_name = _re.sub(r'_AF\d+$', '', base_name, flags=_re.IGNORECASE)
        part_key = base_name.lower().strip()
        mat_name = lookup_material_name(part_key, mat_map)
        # Fall back to index-based matching via part_names_ordered
        if not mat_name and part_names_ordered and i < len(part_names_ordered):
            step_name = part_names_ordered[i]
            mat_name = lookup_material_name(step_name, mat_map, part_key)
        if mat_name and mat_name in appended:
            part.data.materials.clear()
            part.data.materials.append(appended[mat_name])
            assigned_count += 1
        else:
            unmatched_names.append(part.name)
    _total = _time.monotonic() - _t0
    print(f"[blender_render] TIMING material_assign={_total:.2f}s "
          f"(append={_append_dur:.2f}s, match={_total - _append_dur:.2f}s, "
          f"{assigned_count}/{len(parts)} matched)", flush=True)
    if unmatched_names:
        print(f"[blender_render] unmatched parts → assigning {FAILED_MATERIAL_NAME}: {unmatched_names[:10]}", flush=True)
        unmatched_set = set(unmatched_names)
        for part in parts:
            if part.name in unmatched_set:
                if part.data.users > 1:
                    part.data = part.data.copy()
                assign_failed_material(part)
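
# Example (illustrative; part names, map keys and the library path are invented):
#
#   import bpy
#   parts = [obj for obj in bpy.data.objects if obj.type == 'MESH']
#   mat_map = build_mat_map_lower({"F-802007_TR4-D1": "HARTOMAT_010203_Steel"})
#   apply_material_library(parts, "/data/material_library.blend", mat_map,
#                          part_names_ordered=["F-802007_TR4-D1"])
#   # A part named "F-802007_TR4-D1_AF0.001" matches after the .NNN and _AF
#   # suffixes are stripped; parts that match neither by name nor by index
#   # fall back to the magenta failed-material sentinel.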