feat: unify order-line render invocation paths

This commit is contained in:
2026-04-08 21:57:37 +02:00
parent 042f62fe55
commit dde04fcaa5
5 changed files with 3016 additions and 278 deletions
@@ -2,6 +2,7 @@ from __future__ import annotations
import logging
import time
import uuid
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
@@ -10,7 +11,9 @@ from typing import Any
from sqlalchemy import select
from sqlalchemy.orm import Session, selectinload
from app.config import settings
from app.core.process_steps import StepName
from app.domains.products.models import CadFile
from app.domains.rendering.models import WorkflowNodeResult, WorkflowRun
from app.domains.rendering.workflow_executor import STEP_TASK_MAP, WorkflowContext, WorkflowDispatchResult
from app.domains.rendering.workflow_node_registry import get_node_definition
@@ -21,10 +24,12 @@ from app.domains.rendering.workflow_runtime_services import (
OrderLineRenderSetupResult,
TemplateResolutionResult,
auto_populate_materials_for_cad,
build_order_line_render_invocation,
prepare_order_line_render_context,
resolve_cad_bbox,
resolve_order_line_material_map,
resolve_order_line_template_context,
resolve_render_position_context,
)
logger = logging.getLogger(__name__)
@@ -37,6 +42,7 @@ class WorkflowGraphRuntimeError(RuntimeError):
@dataclass(slots=True)
class WorkflowGraphState:
setup: OrderLineRenderSetupResult | None = None
cad_file: CadFile | None = None
template: TemplateResolutionResult | None = None
materials: MaterialResolutionResult | None = None
auto_populate: AutoPopulateMaterialsResult | None = None
@@ -52,6 +58,119 @@ _ORDER_LINE_RENDER_STEPS = {
StepName.NOTIFY,
}
# Kwargs accepted by the still-render Celery task; the merged
# defaults/overrides in _build_task_kwargs are filtered down to this set
# before dispatch.
_STILL_TASK_KEYS = {
    "width",
    "height",
    "engine",
    "samples",
    "smooth_angle",
    "cycles_device",
    "transparent_bg",
    "part_colors",
    "template_path",
    "target_collection",
    "material_library_path",
    "material_map",
    "part_names_ordered",
    "lighting_only",
    "shadow_catcher",
    "rotation_x",
    "rotation_y",
    "rotation_z",
    "noise_threshold",
    "denoiser",
    "denoising_input_passes",
    "denoising_prefilter",
    "denoising_quality",
    "denoising_use_gpu",
    "usd_path",
    "focal_length_mm",
    "sensor_width_mm",
    "material_override",
    "render_engine",
    "resolution",
}
# Kwargs accepted by the turntable (animated) render task.
_TURNTABLE_TASK_KEYS = {
    "output_name",
    "engine",
    "samples",
    "smooth_angle",
    "cycles_device",
    "transparent_bg",
    "width",
    "height",
    "frame_count",
    "fps",
    "turntable_degrees",
    "turntable_axis",
    "bg_color",
    "template_path",
    "target_collection",
    "material_library_path",
    "material_map",
    "part_names_ordered",
    "lighting_only",
    "shadow_catcher",
    "camera_orbit",
    "rotation_x",
    "rotation_y",
    "rotation_z",
    "focal_length_mm",
    "sensor_width_mm",
    "material_override",
}
# Kwargs accepted by the thumbnail-save task.
_THUMBNAIL_TASK_KEYS = {
    "renderer",
    "render_engine",
    "samples",
    "width",
    "height",
    "transparent_bg",
}
# Render-quality settings owned by the output type.  These are stripped from
# per-node params unless the node opts in via "use_custom_render_settings"
# (see _filter_graph_render_overrides).
_AUTHORITATIVE_RENDER_SETTING_KEYS = {
    "render_engine",
    "engine",
    "samples",
    "width",
    "height",
    "transparent_bg",
    "cycles_device",
    "noise_threshold",
    "denoiser",
    "denoising_input_passes",
    "denoising_prefilter",
    "denoising_quality",
    "denoising_use_gpu",
    "camera_orbit",
    "focal_length_mm",
    "sensor_width_mm",
    "bg_color",
}
def _filter_graph_render_overrides(step: StepName, params: dict[str, Any]) -> dict[str, Any]:
    """Strip render-quality overrides unless the node opted into custom settings.

    The ``use_custom_render_settings`` flag is consumed (removed) either way.
    When it is falsy, keys listed in ``_AUTHORITATIVE_RENDER_SETTING_KEYS``
    are dropped so the output type's settings stay authoritative; turntable
    timing keys are then re-added because they remain workflow-specific.
    """
    normalized = dict(params)
    use_custom_render_settings = bool(normalized.pop("use_custom_render_settings", False))
    if use_custom_render_settings:
        return normalized
    # One pass instead of a second full copy followed by guarded pops.
    filtered = {
        key: value
        for key, value in normalized.items()
        if key not in _AUTHORITATIVE_RENDER_SETTING_KEYS
    }
    if step == StepName.BLENDER_TURNTABLE:
        # Turntable timing remains workflow-specific even when render quality inherits from the output type.
        for key in ("fps", "duration_s", "frame_count", "turntable_degrees", "turntable_axis"):
            value = normalized.get(key)
            if value not in (None, ""):
                filtered[key] = value
    return filtered
def find_unsupported_graph_nodes(workflow_context: WorkflowContext) -> list[str]:
unsupported: list[str] = []
@@ -119,6 +238,7 @@ def execute_graph_workflow(
session=session,
workflow_context=workflow_context,
state=state,
node=node,
node_params=node.params,
)
except Exception as exc:
@@ -208,14 +328,12 @@ def execute_graph_workflow(
from app.tasks.celery_app import celery_app
task_kwargs = dict(node.params)
task_kwargs["workflow_run_id"] = str(workflow_context.workflow_run_id)
task_kwargs["workflow_node_id"] = node.id
if workflow_context.execution_mode == "shadow":
task_kwargs["publish_asset_enabled"] = False
task_kwargs["emit_events"] = False
task_kwargs["job_document_enabled"] = False
task_kwargs["output_name_suffix"] = f"shadow-{str(workflow_context.workflow_run_id)[:8]}"
task_kwargs = _build_task_kwargs(
session=session,
workflow_context=workflow_context,
state=state,
node=node,
)
result = celery_app.send_task(
task_name,
@@ -228,10 +346,19 @@ def execute_graph_workflow(
metadata["attempt_count"] = 1
metadata["max_attempts"] = retry_policy["max_attempts"]
metadata["execution_mode"] = workflow_context.execution_mode
predicted_output = _predict_task_output_metadata(
workflow_context=workflow_context,
state=state,
node=node,
task_kwargs=task_kwargs,
)
if predicted_output:
metadata.update(predicted_output)
node_result.status = "queued"
node_result.output = metadata
node_result.log = None
node_result.duration_s = None
state.node_outputs[node.id] = dict(metadata)
session.flush()
task_ids.append(result.id)
node_task_ids[node.id] = result.id
@@ -377,13 +504,330 @@ def _serialize_bbox_result(result: BBoxResolutionResult) -> dict[str, Any]:
}
def _serialize_cad_file_result(cad_file: CadFile) -> dict[str, Any]:
    """Build a JSON-safe summary of a CAD file for node output payloads."""
    parsed = cad_file.parsed_objects or {}
    object_list = parsed.get("objects")
    if isinstance(object_list, list):
        object_count = len(object_list)
    else:
        object_count = None
    status = getattr(cad_file, "processing_status", None)
    payload: dict[str, Any] = {
        "cad_file_id": str(cad_file.id),
        "step_path": cad_file.stored_path,
        "original_name": cad_file.original_name,
        "processing_status": status.value if status else None,
        "object_count": object_count,
        "has_parsed_objects": bool(parsed),
        "gltf_path": cad_file.gltf_path,
    }
    return payload
def _workflow_node_ids(workflow_context: WorkflowContext, step: StepName) -> list[str]:
    """Return the ids of every ordered node whose step equals *step*."""
    matching: list[str] = []
    for candidate in workflow_context.ordered_nodes:
        if candidate.step == step:
            matching.append(candidate.id)
    return matching
def _workflow_node_map(workflow_context: WorkflowContext) -> dict[str, Any]:
    """Index the workflow's ordered nodes by node id."""
    mapping: dict[str, Any] = {}
    for candidate in workflow_context.ordered_nodes:
        mapping[candidate.id] = candidate
    return mapping
def _upstream_node_ids(workflow_context: WorkflowContext, node_id: str) -> list[str]:
    """List ids of nodes that have an edge pointing into *node_id*."""
    sources: list[str] = []
    for edge in workflow_context.edges:
        if edge.to_node == node_id:
            sources.append(edge.from_node)
    return sources
def _downstream_node_ids(workflow_context: WorkflowContext, node_id: str) -> list[str]:
    """List ids of nodes that *node_id* has an edge pointing to."""
    targets: list[str] = []
    for edge in workflow_context.edges:
        if edge.from_node == node_id:
            targets.append(edge.to_node)
    return targets
def _connected_node_ids_by_step(
    workflow_context: WorkflowContext,
    *,
    node_id: str,
    step: StepName,
    direction: str,
) -> list[str]:
    """Return neighbour ids of *node_id* whose step matches *step*.

    *direction* must be ``"upstream"`` or ``"downstream"``; any other value
    raises ``ValueError``.
    """
    node_map = _workflow_node_map(workflow_context)
    if direction == "upstream":
        candidate_ids = _upstream_node_ids(workflow_context, node_id)
    elif direction == "downstream":
        candidate_ids = _downstream_node_ids(workflow_context, node_id)
    else:
        raise ValueError(f"Unsupported graph direction: {direction}")
    matched: list[str] = []
    for candidate_id in candidate_ids:
        # Single dict lookup instead of a membership guard plus indexing.
        candidate = node_map.get(candidate_id)
        if candidate is not None and candidate.step == step:
            matched.append(candidate_id)
    return matched
def _connected_upstream_artifacts(
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node_id: str,
) -> list[dict[str, Any]]:
    """Return recorded artifacts produced by direct upstream nodes of *node_id*."""
    preferred_upstream_ids = set(_upstream_node_ids(workflow_context, node_id))
    # With no upstream edges the result is always empty, so bail out before
    # scanning every recorded node output (the original collected first).
    if not preferred_upstream_ids:
        return []
    artifacts = _collect_upstream_artifacts(state)
    return [artifact for artifact in artifacts if artifact["node_id"] in preferred_upstream_ids]
def _predict_task_output_metadata(
    *,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    task_kwargs: dict[str, Any],
) -> dict[str, Any]:
    """Predict where a queued Celery task will write its artifact.

    Returns a metadata dict (artifact role, predicted output path and asset
    type, publication/notification flags) that is recorded on the node result
    before the task runs, or ``{}`` when no prediction is possible for this
    step.
    """
    if node.step == StepName.THUMBNAIL_SAVE:
        renderer = str(task_kwargs.get("renderer") or "blender")
        # three.js output and transparent backgrounds are saved as PNG; otherwise JPG.
        output_format = "png" if renderer == "threejs" or bool(task_kwargs.get("transparent_bg")) else "jpg"
        output_dir = Path(settings.upload_dir) / "thumbnails"
        return {
            "artifact_role": "thumbnail_output",
            "predicted_output_path": str(output_dir / f"{workflow_context.context_id}.{output_format}"),
            "predicted_asset_type": "thumbnail",
            "publish_asset_enabled": True,
            "graph_authoritative_output_enabled": True,
            "graph_output_node_ids": [node.id],
            "notify_handoff_enabled": False,
        }
    # All remaining predictions require a completed order-line setup.
    if state.setup is None or state.setup.order_line is None or state.setup.cad_file is None:
        return {}
    step_path = Path(state.setup.cad_file.stored_path)
    output_name_suffix = task_kwargs.get("output_name_suffix")
    order_line_id = str(state.setup.order_line.id)
    if node.step == StepName.BLENDER_STILL:
        # Stills land next to the CAD file under renders/, keyed by order line.
        output_dir = step_path.parent / "renders"
        output_filename = f"line_{order_line_id}.png"
        if output_name_suffix:
            output_filename = f"line_{order_line_id}_{output_name_suffix}.png"
        return {
            "artifact_role": "render_output",
            "predicted_output_path": str(output_dir / output_filename),
            "predicted_asset_type": "still",
            "publish_asset_enabled": bool(task_kwargs.get("publish_asset_enabled", True)),
            "graph_authoritative_output_enabled": bool(
                task_kwargs.get("graph_authoritative_output_enabled", False)
            ),
            "graph_output_node_ids": list(task_kwargs.get("graph_output_node_ids") or []),
            "notify_handoff_enabled": bool(task_kwargs.get("emit_legacy_notifications", False)),
            "graph_notify_node_ids": list(task_kwargs.get("graph_notify_node_ids") or []),
        }
    if node.step == StepName.EXPORT_BLEND:
        # Blend exports sit beside the CAD file, derived from its stem.
        output_filename = f"{step_path.stem}_production.blend"
        if output_name_suffix:
            output_filename = f"{step_path.stem}_production_{output_name_suffix}.blend"
        return {
            "artifact_role": "blend_export",
            "predicted_output_path": str(step_path.parent / output_filename),
            "predicted_asset_type": "blend_production",
            "publish_asset_enabled": bool(task_kwargs.get("publish_asset_enabled", True)),
            "graph_authoritative_output_enabled": bool(
                task_kwargs.get("graph_authoritative_output_enabled", False)
            ),
            "graph_output_node_ids": list(task_kwargs.get("graph_output_node_ids") or []),
            "notify_handoff_enabled": bool(task_kwargs.get("emit_legacy_notifications", False)),
            "graph_notify_node_ids": list(task_kwargs.get("graph_notify_node_ids") or []),
        }
    if node.step == StepName.BLENDER_TURNTABLE:
        output_name = str(task_kwargs.get("output_name") or "turntable")
        output_name_suffix = task_kwargs.get("output_name_suffix")
        if output_name_suffix:
            output_name = f"{output_name}_{output_name_suffix}"
        # An explicit non-blank output_dir wins; otherwise fall back to the
        # default renders/ directory next to the CAD file.
        output_dir = task_kwargs.get("output_dir")
        predicted_output_path = None
        if isinstance(output_dir, str) and output_dir.strip():
            predicted_output_path = str(Path(output_dir) / f"{output_name}.mp4")
        else:
            predicted_output_path = str(step_path.parent / "renders" / f"{output_name}.mp4")
        return {
            "artifact_role": "turntable_output",
            "predicted_output_path": predicted_output_path,
            "predicted_asset_type": "turntable",
            "publish_asset_enabled": bool(task_kwargs.get("publish_asset_enabled", True)),
            "graph_authoritative_output_enabled": bool(
                task_kwargs.get("graph_authoritative_output_enabled", False)
            ),
            "graph_output_node_ids": list(task_kwargs.get("graph_output_node_ids") or []),
            "notify_handoff_enabled": bool(task_kwargs.get("emit_legacy_notifications", False)),
            "graph_notify_node_ids": list(task_kwargs.get("graph_notify_node_ids") or []),
        }
    return {}
def _collect_upstream_artifacts(state: WorkflowGraphState) -> list[dict[str, Any]]:
    """Gather every node output that recorded an artifact role or a predicted path."""
    collected: list[dict[str, Any]] = []
    for node_id, output in state.node_outputs.items():
        predicted_output_path = output.get("predicted_output_path")
        artifact_role = output.get("artifact_role")
        if not artifact_role and not predicted_output_path:
            continue
        entry: dict[str, Any] = {
            "node_id": node_id,
            "artifact_role": artifact_role,
            "predicted_output_path": predicted_output_path,
            "predicted_asset_type": output.get("predicted_asset_type"),
            "publish_asset_enabled": bool(output.get("publish_asset_enabled", False)),
            "graph_authoritative_output_enabled": bool(
                output.get("graph_authoritative_output_enabled", False)
            ),
            "graph_output_node_ids": list(output.get("graph_output_node_ids") or []),
            "notify_handoff_enabled": bool(output.get("notify_handoff_enabled", False)),
            "task_id": output.get("task_id"),
        }
        notify_node_ids = output.get("graph_notify_node_ids")
        if notify_node_ids:
            entry["graph_notify_node_ids"] = list(notify_node_ids)
        collected.append(entry)
    return collected
def _resolve_cad_file_context(
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
) -> CadFile:
    """Load (and memoize on *state*) the CadFile addressed by the workflow context."""
    cached = state.cad_file
    if cached is not None:
        return cached
    try:
        raw_id = workflow_context.context_id
    except AttributeError as exc:
        raise WorkflowGraphRuntimeError("cad_file context_id is missing") from exc
    try:
        cad_file_uuid = uuid.UUID(raw_id)
    except ValueError as exc:
        raise WorkflowGraphRuntimeError(f"cad_file context is not a valid UUID: {raw_id}") from exc
    loaded = session.get(CadFile, cad_file_uuid)
    if loaded is None:
        raise WorkflowGraphRuntimeError(f"cad_file context not found: {raw_id}")
    state.cad_file = loaded
    return loaded
def _resolve_thumbnail_request(
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node_id: str,
) -> dict[str, Any] | None:
    """Find the most recent node output flagged as a thumbnail request.

    Directly connected upstream nodes are preferred (scanned newest-first in
    workflow order); otherwise any recorded output is considered, newest
    first.  Returns None when no request exists.
    """
    direct_upstream = set(_upstream_node_ids(workflow_context, node_id))
    if direct_upstream:
        for candidate in reversed(workflow_context.ordered_nodes):
            if candidate.id not in direct_upstream:
                continue
            recorded = state.node_outputs.get(candidate.id)
            if recorded and recorded.get("thumbnail_request") is True:
                return recorded
    for recorded in reversed(list(state.node_outputs.values())):
        if recorded.get("thumbnail_request") is True:
            return recorded
    return None
def _build_task_kwargs(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
) -> dict[str, Any]:
    """Assemble the Celery kwargs for a queued graph node.

    Order-line render defaults are merged *under* the node's own params (node
    params win), the merged dict is filtered to the task's accepted key set,
    and workflow bookkeeping ids plus graph/shadow execution flags are layered
    on top.
    """
    task_kwargs = dict(node.params)
    connected_output_node_ids: list[str] = []
    connected_notify_node_ids: list[str] = []
    render_defaults: dict[str, Any] = {}
    if state.setup is not None and state.setup.is_ready and state.setup.order_line is not None:
        render_invocation = build_order_line_render_invocation(
            state.setup,
            template_context=state.template,
            position_context=resolve_render_position_context(session, state.setup.order_line),
            material_context=state.materials,
        )
        render_defaults = render_invocation.task_defaults()
    if node.step == StepName.BLENDER_STILL:
        task_kwargs = _filter_graph_render_overrides(StepName.BLENDER_STILL, task_kwargs)
        # Node params override invocation defaults; unknown keys are dropped.
        task_kwargs = {
            key: value
            for key, value in {
                **render_defaults,
                **task_kwargs,
            }.items()
            if key in _STILL_TASK_KEYS
        }
    elif node.step == StepName.BLENDER_TURNTABLE:
        task_kwargs = _filter_graph_render_overrides(StepName.BLENDER_TURNTABLE, task_kwargs)
        task_kwargs = {
            key: value
            for key, value in {
                **render_defaults,
                **task_kwargs,
            }.items()
            if key in _TURNTABLE_TASK_KEYS
        }
    elif node.step == StepName.THUMBNAIL_SAVE:
        # Thumbnail parameters come from the upstream render-request node,
        # overridden by this node's own params.
        thumbnail_request = _resolve_thumbnail_request(workflow_context, state, node.id) or {}
        task_kwargs = {
            key: value
            for key, value in {
                **thumbnail_request,
                **task_kwargs,
            }.items()
            if key in _THUMBNAIL_TASK_KEYS
        }
    task_kwargs["workflow_run_id"] = str(workflow_context.workflow_run_id)
    task_kwargs["workflow_node_id"] = node.id
    if workflow_context.execution_mode == "graph" and node.step in {
        StepName.BLENDER_STILL,
        StepName.EXPORT_BLEND,
        StepName.BLENDER_TURNTABLE,
    }:
        connected_output_node_ids = _connected_node_ids_by_step(
            workflow_context,
            node_id=node.id,
            step=StepName.OUTPUT_SAVE,
            direction="downstream",
        )
        connected_notify_node_ids = _connected_node_ids_by_step(
            workflow_context,
            node_id=node.id,
            step=StepName.NOTIFY,
            direction="downstream",
        )
        if connected_output_node_ids:
            # A connected OUTPUT_SAVE node takes over publication from the task.
            task_kwargs["publish_asset_enabled"] = False
            task_kwargs["graph_authoritative_output_enabled"] = True
            task_kwargs["graph_output_node_ids"] = connected_output_node_ids
        if connected_notify_node_ids:
            task_kwargs["emit_legacy_notifications"] = True
            task_kwargs["graph_notify_node_ids"] = connected_notify_node_ids
    if workflow_context.execution_mode == "shadow":
        # Shadow runs must not publish assets, emit events, or overwrite real
        # outputs; the suffix keeps shadow files distinguishable.
        task_kwargs["publish_asset_enabled"] = False
        task_kwargs["emit_events"] = False
        task_kwargs["job_document_enabled"] = False
        task_kwargs["output_name_suffix"] = f"shadow-{str(workflow_context.workflow_run_id)[:8]}"
    return task_kwargs
def _execute_order_line_setup(
*,
session: Session,
workflow_context: WorkflowContext,
state: WorkflowGraphState,
node,
node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
del node
del node_params
shadow_mode = workflow_context.execution_mode == "shadow"
if shadow_mode:
@@ -409,8 +853,10 @@ def _execute_resolve_template(
session: Session,
workflow_context: WorkflowContext,
state: WorkflowGraphState,
node,
node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
del node
del workflow_context, node_params
if state.setup is None or not state.setup.is_ready:
if state.setup is not None and state.setup.status == "skip":
@@ -426,8 +872,10 @@ def _execute_material_map_resolve(
session: Session,
workflow_context: WorkflowContext,
state: WorkflowGraphState,
node,
node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
del node
del session, workflow_context, node_params
if state.setup is None or not state.setup.is_ready:
if state.setup is not None and state.setup.status == "skip":
@@ -457,8 +905,10 @@ def _execute_auto_populate_materials(
session: Session,
workflow_context: WorkflowContext,
state: WorkflowGraphState,
node,
node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
del node
del node_params
if state.setup is None or state.setup.cad_file is None:
if state.setup is not None and state.setup.status == "skip":
@@ -487,8 +937,10 @@ def _execute_glb_bbox(
session: Session,
workflow_context: WorkflowContext,
state: WorkflowGraphState,
node,
node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
del node
del session, workflow_context
if state.setup is None or state.setup.cad_file is None:
if state.setup is not None and state.setup.status == "skip":
@@ -510,10 +962,198 @@ def _execute_glb_bbox(
return _serialize_bbox_result(result), "completed", None
def _execute_resolve_step_path(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
    """Bridge executor: resolve the CAD file and return its serialized summary."""
    del node, node_params  # accepted only for executor-signature uniformity
    resolved = _resolve_cad_file_context(session, workflow_context, state)
    return _serialize_cad_file_result(resolved), "completed", None
def _execute_stl_cache_generate(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
    """Bridge executor: legacy STL-cache step kept as a documented no-op."""
    del node, node_params
    cad_file = _resolve_cad_file_context(session, workflow_context, state)
    cache_dir = Path(cad_file.stored_path).parent / "stl_cache"
    payload = _serialize_cad_file_result(cad_file)
    payload["cache_mode"] = "compatibility_noop"
    payload["cache_required"] = False
    payload["stl_cache_dir"] = str(cache_dir)
    payload["reason"] = "HartOMat CAD graph uses direct OCC/GLB export instead of legacy STL cache generation."
    return payload, "completed", None
def _execute_thumbnail_render_request(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
    renderer: str,
) -> tuple[dict[str, Any], str, str | None]:
    """Record a thumbnail render request payload for a downstream save node."""
    del node
    cad_file = _resolve_cad_file_context(session, workflow_context, state)
    request: dict[str, Any] = {
        "cad_file_id": str(cad_file.id),
        "step_path": cad_file.stored_path,
        "renderer": renderer,
        "thumbnail_request": True,
    }
    passthrough_keys = ("width", "height", "transparent_bg", "render_engine", "samples")
    for key in passthrough_keys:
        candidate = node_params.get(key)
        if candidate not in (None, ""):
            request[key] = candidate
    return request, "completed", None
def _execute_blender_thumbnail_render(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
    """Bridge executor: thumbnail request rendered via the Blender backend."""
    return _execute_thumbnail_render_request(
        renderer="blender",
        session=session,
        workflow_context=workflow_context,
        state=state,
        node=node,
        node_params=node_params,
    )
def _execute_threejs_thumbnail_render(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
    """Bridge executor: thumbnail request rendered via the three.js backend."""
    return _execute_thumbnail_render_request(
        renderer="threejs",
        session=session,
        workflow_context=workflow_context,
        state=state,
        node=node,
        node_params=node_params,
    )
def _execute_output_save(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
    """Bridge executor: report the publication mode for the order line's output.

    Requires a prior order_line_setup result; returns (payload, status,
    reason).  Publication itself is performed elsewhere — this node only
    records which mechanism is responsible.
    """
    del session, node_params
    if state.setup is None or state.setup.order_line is None:
        raise WorkflowGraphRuntimeError("output_save requires an order_line_setup result")
    if state.setup.status == "skip":
        return _serialize_setup_result(state.setup), "skipped", state.setup.reason
    if not state.setup.is_ready:
        return _serialize_setup_result(state.setup), "failed", state.setup.reason or "output_save_blocked"
    order_line = state.setup.order_line
    payload: dict[str, Any] = {
        "order_line_id": str(order_line.id),
        "authoritative_result_path": order_line.result_path,
        "shadow_mode": workflow_context.execution_mode == "shadow",
    }
    upstream_artifacts = _connected_upstream_artifacts(workflow_context, state, node.id)
    # Precedence: shadow runs never publish; otherwise a render task that has
    # publishing enabled owns publication; otherwise this node awaits the
    # graph-authoritative save.
    if workflow_context.execution_mode == "shadow":
        payload["publication_mode"] = "shadow_observer_only"
    elif any(artifact["publish_asset_enabled"] for artifact in upstream_artifacts):
        payload["publication_mode"] = "deferred_to_render_task"
    else:
        payload["publication_mode"] = "awaiting_graph_authoritative_save"
    if upstream_artifacts:
        payload["artifact_count"] = len(upstream_artifacts)
        payload["upstream_artifacts"] = upstream_artifacts
    if state.template is not None and state.template.template is not None:
        payload["template_name"] = state.template.template.name
    if state.materials is not None:
        payload["material_map_count"] = len(state.materials.material_map or {})
    return payload, "completed", None
def _execute_notify(
    *,
    session: Session,
    workflow_context: WorkflowContext,
    state: WorkflowGraphState,
    node,
    node_params: dict[str, Any],
) -> tuple[dict[str, Any], str, str | None]:
    """Bridge executor: decide whether a render task will hand off notification."""
    del session, node_params
    setup = state.setup
    if setup is None or setup.order_line is None:
        raise WorkflowGraphRuntimeError("notify requires an order_line_setup result")
    if setup.status == "skip":
        return _serialize_setup_result(setup), "skipped", setup.reason
    if not setup.is_ready:
        return _serialize_setup_result(setup), "failed", setup.reason or "notify_blocked"
    shadow = workflow_context.execution_mode == "shadow"
    payload: dict[str, Any] = {
        "order_line_id": str(setup.order_line.id),
        "shadow_mode": shadow,
        "channel": "audit_log",
    }
    if shadow:
        payload["notification_mode"] = "shadow_suppressed"
        return payload, "skipped", "shadow mode suppresses user notifications"
    connected_artifacts = _connected_upstream_artifacts(workflow_context, state, node.id)
    armed_node_ids: list[str] = []
    for artifact in connected_artifacts:
        if artifact["notify_handoff_enabled"]:
            armed_node_ids.append(artifact["node_id"])
    if not armed_node_ids:
        payload["notification_mode"] = "not_armed"
        return payload, "skipped", "No graph render task is configured for notification handoff"
    payload["notification_mode"] = "deferred_to_render_task"
    payload["armed_node_ids"] = armed_node_ids
    payload["armed_node_count"] = len(armed_node_ids)
    return payload, "completed", None
# Steps executed inline (in-process) by the graph runner rather than queued
# as Celery tasks.  Every value follows the shared executor signature:
# (*, session, workflow_context, state, node, node_params) -> (payload, status, reason).
_BRIDGE_EXECUTORS = {
    StepName.RESOLVE_STEP_PATH: _execute_resolve_step_path,
    StepName.BLENDER_RENDER: _execute_blender_thumbnail_render,
    StepName.THREEJS_RENDER: _execute_threejs_thumbnail_render,
    StepName.ORDER_LINE_SETUP: _execute_order_line_setup,
    StepName.RESOLVE_TEMPLATE: _execute_resolve_template,
    StepName.MATERIAL_MAP_RESOLVE: _execute_material_map_resolve,
    StepName.AUTO_POPULATE_MATERIALS: _execute_auto_populate_materials,
    StepName.GLB_BBOX: _execute_glb_bbox,
    StepName.STL_CACHE_GENERATE: _execute_stl_cache_generate,
    StepName.OUTPUT_SAVE: _execute_output_save,
    StepName.NOTIFY: _execute_notify,
}
@@ -1,7 +1,9 @@
from __future__ import annotations
import logging
import re
import shutil
import uuid
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
@@ -11,10 +13,17 @@ from sqlalchemy import select, update as sql_update
from sqlalchemy.orm import Session, joinedload
from app.config import settings as app_settings
from app.core.render_paths import resolve_result_path, result_path_to_storage_key
from app.domains.media.models import MediaAsset, MediaAssetType
from app.domains.orders.models import Order, OrderLine, OrderStatus
from app.domains.products.models import CadFile, Product
from app.domains.rendering.models import GlobalRenderPosition, ProductRenderPosition, RenderTemplate
from app.domains.rendering.output_type_contracts import merge_output_type_invocation_overrides
from app.domains.rendering.models import (
GlobalRenderPosition,
ProductRenderPosition,
RenderTemplate,
WorkflowRun,
)
from app.services.material_service import resolve_material_map
from app.services.step_processor import build_part_colors
from app.services.template_service import (
@@ -108,6 +117,216 @@ class OutputSaveResult:
asset_type: MediaAssetType | None = None
@dataclass(slots=True)
class OrderLineRenderInvocation:
    """Resolved render parameters for a single order line.

    Bundles everything the render path needs — output naming, quality,
    template/material context, camera and denoising settings — and exposes
    kwargs builders for the still, turntable, and cinematic renderers as
    well as defaults for Celery task dispatch.
    """

    # Output identity (required fields).
    product_name: str
    output_type_name: str
    output_extension: str
    output_filename: str
    output_path: str
    is_animation: bool
    is_cinematic: bool
    # Render quality; None means "use the renderer's default".
    width: int | None = None
    height: int | None = None
    engine: str | None = None
    samples: int | None = None
    # Animation timing / turntable behaviour.
    frame_count: int = 24
    fps: int = 25
    bg_color: str = ""
    turntable_axis: str = "world_z"
    # Denoising settings; empty string means "renderer default".
    noise_threshold: str = ""
    denoiser: str = ""
    denoising_input_passes: str = ""
    denoising_prefilter: str = ""
    denoising_quality: str = ""
    denoising_use_gpu: str = ""
    transparent_bg: bool = False
    cycles_device: str = "auto"
    # Scene composition: colors, templates, materials, camera transforms.
    part_colors: dict[str, str] = field(default_factory=dict)
    part_names_ordered: list[str] | None = None
    template_path: str | None = None
    target_collection: str = "Product"
    material_library_path: str | None = None
    material_map: dict[str, str] | None = None
    lighting_only: bool = False
    shadow_catcher: bool = False
    camera_orbit: bool = True
    rotation_x: float = 0.0
    rotation_y: float = 0.0
    rotation_z: float = 0.0
    focal_length_mm: float | None = None
    sensor_width_mm: float | None = None
    usd_path: str | None = None
    material_override: str | None = None

    def task_defaults(self) -> dict[str, Any]:
        """Return default kwargs for render task dispatch.

        Always-present settings are emitted unconditionally; optional
        settings are included only when set (neither None nor "").
        """
        payload: dict[str, Any] = {
            "transparent_bg": self.transparent_bg,
            "cycles_device": self.cycles_device,
            "part_colors": self.part_colors,
            "target_collection": self.target_collection,
            "lighting_only": self.lighting_only,
            "shadow_catcher": self.shadow_catcher,
            "camera_orbit": self.camera_orbit,
            "rotation_x": self.rotation_x,
            "rotation_y": self.rotation_y,
            "rotation_z": self.rotation_z,
            "frame_count": self.frame_count,
            "fps": self.fps,
            "bg_color": self.bg_color,
            "turntable_axis": self.turntable_axis,
            "noise_threshold": self.noise_threshold,
            "denoiser": self.denoiser,
            "denoising_input_passes": self.denoising_input_passes,
            "denoising_prefilter": self.denoising_prefilter,
            "denoising_quality": self.denoising_quality,
            "denoising_use_gpu": self.denoising_use_gpu,
        }
        optional_values = {
            "width": self.width,
            "height": self.height,
            "engine": self.engine,
            "samples": self.samples,
            "template_path": self.template_path,
            "material_library_path": self.material_library_path,
            "material_map": self.material_map,
            "part_names_ordered": self.part_names_ordered,
            "focal_length_mm": self.focal_length_mm,
            "sensor_width_mm": self.sensor_width_mm,
            "usd_path": self.usd_path,
            "material_override": self.material_override,
        }
        for key, value in optional_values.items():
            if value not in (None, ""):
                payload[key] = value
        return payload

    def as_still_renderer_kwargs(
        self,
        *,
        step_path: str,
        output_path: str,
        job_id: str | None = None,
        order_line_id: str | None = None,
    ) -> dict[str, Any]:
        """Build the kwargs dict for the still-image renderer."""
        return {
            "step_path": step_path,
            "output_path": output_path,
            # Empty part_colors collapses to None for the renderer.
            "part_colors": self.part_colors or None,
            "width": self.width,
            "height": self.height,
            "transparent_bg": self.transparent_bg,
            "engine": self.engine,
            "samples": self.samples,
            "template_path": self.template_path,
            "target_collection": self.target_collection,
            "material_library_path": self.material_library_path,
            "material_map": self.material_map,
            "part_names_ordered": self.part_names_ordered,
            "lighting_only": self.lighting_only,
            "shadow_catcher": self.shadow_catcher,
            "cycles_device": self.cycles_device,
            "rotation_x": self.rotation_x,
            "rotation_y": self.rotation_y,
            "rotation_z": self.rotation_z,
            "job_id": job_id,
            "noise_threshold": self.noise_threshold,
            "denoiser": self.denoiser,
            "denoising_input_passes": self.denoising_input_passes,
            "denoising_prefilter": self.denoising_prefilter,
            "denoising_quality": self.denoising_quality,
            "denoising_use_gpu": self.denoising_use_gpu,
            "order_line_id": order_line_id,
            "usd_path": self.usd_path,
            "focal_length_mm": self.focal_length_mm,
            "sensor_width_mm": self.sensor_width_mm,
            "material_override": self.material_override,
        }

    def as_turntable_renderer_kwargs(
        self,
        *,
        step_path: Path,
        output_path: Path,
        smooth_angle: int,
        default_width: int,
        default_height: int,
        default_engine: str,
        default_samples: int,
    ) -> dict[str, Any]:
        """Build the kwargs dict for the turntable renderer.

        The ``default_*`` parameters fill in quality settings the invocation
        left unset (falsy).
        """
        return {
            "step_path": step_path,
            "output_path": output_path,
            "frame_count": self.frame_count,
            "fps": self.fps,
            "width": self.width or default_width,
            "height": self.height or default_height,
            "engine": self.engine or default_engine,
            "samples": self.samples or default_samples,
            "smooth_angle": smooth_angle,
            "cycles_device": self.cycles_device,
            "transparent_bg": self.transparent_bg,
            "bg_color": self.bg_color,
            "turntable_axis": self.turntable_axis,
            "part_colors": self.part_colors or None,
            "template_path": self.template_path,
            "target_collection": self.target_collection,
            "material_library_path": self.material_library_path,
            "material_map": self.material_map,
            "part_names_ordered": self.part_names_ordered,
            "lighting_only": self.lighting_only,
            "shadow_catcher": self.shadow_catcher,
            "rotation_x": self.rotation_x,
            "rotation_y": self.rotation_y,
            "rotation_z": self.rotation_z,
            "camera_orbit": self.camera_orbit,
            "usd_path": self.usd_path,
            "focal_length_mm": self.focal_length_mm,
            "sensor_width_mm": self.sensor_width_mm,
            "material_override": self.material_override,
        }

    def as_cinematic_renderer_kwargs(
        self,
        *,
        step_path: Path,
        output_path: Path,
        smooth_angle: int,
        default_width: int,
        default_height: int,
        default_engine: str,
        default_samples: int,
        log_callback: Callable[[str], None] | None = None,
    ) -> dict[str, Any]:
        """Build the kwargs dict for the cinematic renderer.

        Like the turntable variant but without turntable timing keys; an
        optional ``log_callback`` is forwarded for progress logging.
        """
        return {
            "step_path": step_path,
            "output_path": output_path,
            "width": self.width or default_width,
            "height": self.height or default_height,
            "engine": self.engine or default_engine,
            "samples": self.samples or default_samples,
            "smooth_angle": smooth_angle,
            "cycles_device": self.cycles_device,
            "transparent_bg": self.transparent_bg,
            "part_colors": self.part_colors or None,
            "template_path": self.template_path,
            "target_collection": self.target_collection,
            "material_library_path": self.material_library_path,
            "material_map": self.material_map,
            "part_names_ordered": self.part_names_ordered,
            "lighting_only": self.lighting_only,
            "shadow_catcher": self.shadow_catcher,
            "rotation_x": self.rotation_x,
            "rotation_y": self.rotation_y,
            "rotation_z": self.rotation_z,
            "usd_path": self.usd_path,
            "focal_length_mm": self.focal_length_mm,
            "sensor_width_mm": self.sensor_width_mm,
            "material_override": self.material_override,
            "log_callback": log_callback,
        }
def _emit(emit: EmitFn, order_line_id: str, message: str, level: str | None = None) -> None:
if emit is None:
return
@@ -118,14 +337,42 @@ def _emit(emit: EmitFn, order_line_id: str, message: str, level: str | None = No
def _resolve_asset_path(storage_key: str | None) -> Path | None:
    """Map a storage key to a local file, falling back to the result-path resolver."""
    if not storage_key:
        return None
    local_candidate = Path(app_settings.upload_dir) / storage_key
    if local_candidate.exists():
        return local_candidate
    return resolve_result_path(storage_key)
def _usd_master_refresh_reason(cad_file: CadFile) -> str | None:
resolved = cad_file.resolved_material_assignments
if not isinstance(resolved, dict) or not resolved:
return "missing resolved material assignments"
canonical_materials: list[str] = []
for meta in resolved.values():
if not isinstance(meta, dict):
continue
canonical = meta.get("canonical_material")
if isinstance(canonical, str) and canonical.strip():
canonical_materials.append(canonical.strip())
if not canonical_materials:
return "missing canonical material metadata"
if any(material.upper().startswith("SCHAEFFLER_") for material in canonical_materials):
return "legacy Schaeffler material metadata"
return None
def _queue_usd_master_refresh(cad_file_id: str) -> bool:
    """Queue background regeneration of the USD master for a CAD file.

    Best-effort: returns True when the task was dispatched, False (after
    logging the exception) when the import or the dispatch failed.
    """
    try:
        # Imported lazily: the task module is only needed on this path.
        from app.tasks.step_tasks import generate_usd_master_task

        generate_usd_master_task.delay(cad_file_id)
    except Exception:
        logger.exception("render_order_line: failed to queue usd_master refresh for cad %s", cad_file_id)
        return False
    return True
def extract_bbox_from_glb(glb_path: str) -> dict[str, dict[str, float]] | None:
"""Extract a bounding box from a GLB file in meters and convert to mm."""
try:
@@ -207,8 +454,7 @@ def resolve_cad_bbox(
def _normalize_storage_key(output_path: str) -> str:
    """Convert an absolute output path into a storage key relative to the upload dir."""
    upload_prefix = str(app_settings.upload_dir).rstrip("/") + "/"
    # Strip the upload-dir prefix when present; otherwise return the path unchanged.
    return output_path[len(upload_prefix):] if output_path.startswith(upload_prefix) else output_path
    # NOTE(review): unreachable — the line above always returns. This looks
    # like a merge/diff leftover; confirm whether result_path_to_storage_key()
    # is meant to replace the prefix-stripping implementation above.
    return result_path_to_storage_key(output_path) or output_path
def _resolve_output_asset_type(output_path: str) -> MediaAssetType:
@@ -218,6 +464,8 @@ def _resolve_output_asset_type(output_path: str) -> MediaAssetType:
def _resolve_output_mime_type(output_path: str) -> str:
extension = output_path.rsplit(".", 1)[-1].lower() if "." in output_path else "bin"
if extension == "blend":
return "application/x-blender"
if extension in ("mp4", "webm"):
return "video/mp4"
if extension == "webp":
@@ -227,6 +475,333 @@ def _resolve_output_mime_type(output_path: str) -> str:
return "image/png"
def _sanitize_public_output_name(value: str) -> str:
sanitized = re.sub(r"[^\w\-.]", "_", value.strip())
return sanitized[:100] or "output"
def _coerce_int(value: Any) -> int | None:
if value in (None, ""):
return None
try:
return int(value)
except (TypeError, ValueError):
return None
def _coerce_bool(value: Any) -> bool:
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.strip().lower() in {"1", "true", "yes", "on"}
return bool(value)
def _resolve_render_output_extension(line: OrderLine) -> str:
output_type = line.output_type
output_extension = "jpg"
if output_type is not None and output_type.output_format:
fmt = str(output_type.output_format).lower()
if fmt == "mp4":
output_extension = "mp4"
elif fmt == "webp":
output_extension = "webp"
elif fmt in {"png", "jpg", "jpeg"}:
output_extension = "png" if fmt == "png" else "jpg"
render_overrides = getattr(line, "render_overrides", None)
if isinstance(render_overrides, dict) and render_overrides.get("output_format") not in (None, ""):
override = str(render_overrides["output_format"]).lower()
if override == "mp4":
return "mp4"
if override == "webp":
return "webp"
if override in {"png", "jpg", "jpeg"}:
return "png" if override == "png" else "jpg"
return output_extension
def _scale_render_samples_for_resolution(
samples: int | None,
width: int | None,
height: int | None,
) -> int | None:
if samples is None or width is None or height is None:
return samples
max_dim = max(width, height)
if max_dim > 1024:
return samples
scaled = max(32, int(samples * max_dim / 2048))
return scaled if scaled < samples else samples
def build_order_line_render_invocation(
    setup: OrderLineRenderSetupResult,
    *,
    template_context: TemplateResolutionResult | None = None,
    position_context: RenderPositionContext | None = None,
    material_context: MaterialResolutionResult | None = None,
    emit: EmitFn = None,
) -> OrderLineRenderInvocation:
    """Assemble the complete render invocation for a prepared order line.

    Settings are merged in increasing precedence: output-type render
    settings (with its invocation overrides merged in), then the per-line
    ``render_overrides`` dict. Material data from ``material_context`` wins
    over ``template_context`` when both are supplied.

    Raises:
        ValueError: if *setup* is not ready or is missing its order line or
            CAD file.
    """
    if not setup.is_ready or setup.order_line is None or setup.cad_file is None:
        raise ValueError("build_order_line_render_invocation requires a ready order-line setup")
    line = setup.order_line
    cad_file = setup.cad_file
    output_type = line.output_type
    # A missing position context means "no explicit rotation/camera values".
    position = position_context or RenderPositionContext()
    # Base layer: output-type settings with its invocation overrides merged in.
    render_settings = (
        merge_output_type_invocation_overrides(
            output_type.render_settings,
            getattr(output_type, "invocation_overrides", None),
        )
        if output_type is not None
        else {}
    )
    width = _coerce_int(render_settings.get("width"))
    height = _coerce_int(render_settings.get("height"))
    samples = _coerce_int(render_settings.get("samples"))
    # Animation defaults: 24 frames at 25 fps when not configured.
    frame_count = _coerce_int(render_settings.get("frame_count")) or 24
    fps = _coerce_int(render_settings.get("fps")) or 25
    engine = render_settings.get("engine")
    bg_color = str(render_settings.get("bg_color", ""))
    turntable_axis = str(render_settings.get("turntable_axis", "world_z"))
    noise_threshold = str(render_settings.get("noise_threshold", ""))
    denoiser = str(render_settings.get("denoiser", ""))
    denoising_input_passes = str(render_settings.get("denoising_input_passes", ""))
    denoising_prefilter = str(render_settings.get("denoising_prefilter", ""))
    denoising_quality = str(render_settings.get("denoising_quality", ""))
    denoising_use_gpu = str(render_settings.get("denoising_use_gpu", ""))
    transparent_bg = bool(output_type and output_type.transparent_bg)
    cycles_device = (output_type.cycles_device or "auto") if output_type is not None else "auto"
    # Override layer: per-line render_overrides beat the output-type settings.
    render_overrides = getattr(line, "render_overrides", None)
    if isinstance(render_overrides, dict):
        width = _coerce_int(render_overrides.get("width")) or width
        height = _coerce_int(render_overrides.get("height")) or height
        samples = _coerce_int(render_overrides.get("samples")) or samples
        frame_count = _coerce_int(render_overrides.get("frame_count")) or frame_count
        fps = _coerce_int(render_overrides.get("fps")) or fps
        engine = render_overrides.get("engine") or engine
        if render_overrides.get("bg_color") not in (None, ""):
            bg_color = str(render_overrides["bg_color"])
        if render_overrides.get("turntable_axis") not in (None, ""):
            turntable_axis = str(render_overrides["turntable_axis"])
        if render_overrides.get("noise_threshold") not in (None, ""):
            noise_threshold = str(render_overrides["noise_threshold"])
        if render_overrides.get("denoiser") not in (None, ""):
            denoiser = str(render_overrides["denoiser"])
        if render_overrides.get("denoising_input_passes") not in (None, ""):
            denoising_input_passes = str(render_overrides["denoising_input_passes"])
        if render_overrides.get("denoising_prefilter") not in (None, ""):
            denoising_prefilter = str(render_overrides["denoising_prefilter"])
        if render_overrides.get("denoising_quality") not in (None, ""):
            denoising_quality = str(render_overrides["denoising_quality"])
        if render_overrides.get("denoising_use_gpu") not in (None, ""):
            denoising_use_gpu = str(render_overrides["denoising_use_gpu"])
        # transparent_bg may be explicitly set to False, so presence is checked
        # with `in` rather than truthiness.
        if "transparent_bg" in render_overrides:
            transparent_bg = _coerce_bool(render_overrides["transparent_bg"])
        if render_overrides.get("cycles_device") not in (None, ""):
            cycles_device = str(render_overrides["cycles_device"])
        _emit(emit, str(line.id), f"Render overrides active: {render_overrides}")
    # Down-scale samples for small outputs; never scale up.
    scaled_samples = _scale_render_samples_for_resolution(samples, width, height)
    if (
        samples is not None
        and scaled_samples is not None
        and scaled_samples < samples
        and width is not None
        and height is not None
    ):
        _emit(
            emit,
            str(line.id),
            f"Auto-scaled samples {samples} -> {scaled_samples} for {width}x{height}",
        )
        samples = scaled_samples
    # Ordered part names come from the parsed CAD structure when available.
    part_names_ordered = None
    if cad_file.parsed_objects:
        part_names = cad_file.parsed_objects.get("objects", [])
        part_names_ordered = part_names or None
    # Output file naming: <product>_<output type>.<ext> under the line's render dir.
    product_name = line.product.name or line.product.pim_id or "product"
    output_type_name = output_type.name if output_type is not None else "render"
    output_extension = _resolve_render_output_extension(line)
    output_filename = (
        f"{_sanitize_public_output_name(product_name)}_"
        f"{_sanitize_public_output_name(output_type_name)}.{output_extension}"
    )
    output_dir = Path(app_settings.upload_dir) / "renders" / str(line.id)
    # Material resolution: material_context (when given) wins over template_context.
    material_map = None
    use_materials = False
    material_override = None
    if template_context is not None:
        material_map = template_context.material_map
        use_materials = template_context.use_materials
        material_override = template_context.override_material
    if material_context is not None:
        material_map = material_context.material_map
        use_materials = material_context.use_materials
        material_override = material_context.override_material
    return OrderLineRenderInvocation(
        product_name=product_name,
        output_type_name=output_type_name,
        output_extension=output_extension,
        output_filename=output_filename,
        output_path=str(output_dir / output_filename),
        is_animation=bool(output_type and output_type.is_animation),
        is_cinematic=bool(output_type and render_settings.get("cinematic")),
        width=width,
        height=height,
        engine=str(engine) if engine not in (None, "") else None,
        samples=samples,
        frame_count=frame_count,
        fps=fps,
        bg_color=bg_color,
        turntable_axis=turntable_axis,
        noise_threshold=noise_threshold,
        denoiser=denoiser,
        denoising_input_passes=denoising_input_passes,
        denoising_prefilter=denoising_prefilter,
        denoising_quality=denoising_quality,
        denoising_use_gpu=denoising_use_gpu,
        transparent_bg=transparent_bg,
        cycles_device=cycles_device,
        part_colors=dict(setup.part_colors or {}),
        part_names_ordered=part_names_ordered,
        template_path=template_context.template.blend_file_path if template_context and template_context.template else None,
        target_collection=(
            template_context.template.target_collection
            if template_context and template_context.template and template_context.template.target_collection
            else "Product"
        ),
        material_library_path=(
            template_context.material_library if template_context and use_materials else None
        ),
        material_map=material_map,
        lighting_only=bool(template_context.template.lighting_only) if template_context and template_context.template else False,
        shadow_catcher=(
            bool(template_context.template.shadow_catcher_enabled)
            if template_context and template_context.template
            else False
        ),
        camera_orbit=bool(template_context.template.camera_orbit) if template_context and template_context.template else True,
        rotation_x=position.rotation_x,
        rotation_y=position.rotation_y,
        rotation_z=position.rotation_z,
        focal_length_mm=position.focal_length_mm,
        sensor_width_mm=position.sensor_width_mm,
        usd_path=str(setup.usd_render_path) if setup.usd_render_path is not None else None,
        material_override=material_override,
    )
def _canonical_public_output_path(line: OrderLine, output_path: str) -> str:
    """Return the canonical public location for an order line's render output.

    Paths already under ``<upload_dir>/renders`` are kept as-is; anything
    else maps to ``<upload_dir>/renders/<line id>/<product>_<type><ext>``.
    """
    source = Path(output_path)
    renders_root = Path(app_settings.upload_dir) / "renders"
    try:
        source.relative_to(renders_root)
    except ValueError:
        pass
    else:
        # Already inside the public renders tree — keep the original path.
        return str(source)
    suffix = source.suffix or ".bin"
    product_label = None
    if line.product is not None:
        product_label = getattr(line.product, "name", None) or getattr(line.product, "pim_id", None)
    type_label = getattr(line.output_type, "name", None) if line.output_type is not None else None
    filename = (
        f"{_sanitize_public_output_name(product_label or 'product')}_"
        f"{_sanitize_public_output_name(type_label or 'render')}{suffix}"
    )
    return str(renders_root / str(line.id) / filename)
def _materialize_public_output(line: OrderLine, output_path: str) -> str:
    """Copy a render result to its canonical public path and return that path.

    No copy is performed when the output already lives at the canonical
    location; parent directories are created as needed.
    """
    source = Path(output_path)
    destination = Path(_canonical_public_output_path(line, output_path))
    destination.parent.mkdir(parents=True, exist_ok=True)
    if destination != source:
        shutil.copy2(source, destination)
    return str(destination)
def _resolve_existing_workflow_run_id(session: Session, workflow_run_id: str | None) -> uuid.UUID | None:
if workflow_run_id in (None, ""):
return None
try:
candidate = uuid.UUID(str(workflow_run_id))
except (TypeError, ValueError):
return None
existing = session.get(WorkflowRun, candidate)
return existing.id if existing is not None else None
def persist_order_line_media_asset(
    session: Session,
    line: OrderLine,
    *,
    success: bool,
    output_path: str,
    asset_type: MediaAssetType,
    render_log: dict[str, Any] | None = None,
    workflow_run_id: str | None = None,
) -> OutputSaveResult:
    """Persist a non-primary workflow artifact as a MediaAsset without mutating order-line result fields."""
    # Status mirrors the caller-reported success flag; failures still return
    # an OutputSaveResult (with no asset/storage info) after a commit.
    status: Literal["completed", "failed"] = "completed" if success else "failed"
    asset_id: str | None = None
    storage_key: str | None = None
    # Only link a workflow run that actually exists in the database.
    resolved_workflow_run_id = _resolve_existing_workflow_run_id(session, workflow_run_id)
    if success:
        storage_key = _normalize_storage_key(output_path)
        output_file = Path(output_path)
        # Upsert keyed on storage_key: reuse an existing asset row when the
        # same artifact path was persisted before.
        existing_asset = session.execute(
            select(MediaAsset).where(MediaAsset.storage_key == storage_key).limit(1)
        ).scalar_one_or_none()
        if existing_asset is None:
            asset = MediaAsset(
                tenant_id=line.product.cad_file.tenant_id if (line.product and line.product.cad_file) else None,
                product_id=line.product_id,
                cad_file_id=line.product.cad_file_id if line.product is not None else None,
                order_line_id=line.id,
                workflow_run_id=resolved_workflow_run_id,
                asset_type=asset_type,
                storage_key=storage_key,
                mime_type=_resolve_output_mime_type(output_path),
                file_size_bytes=output_file.stat().st_size if output_file.exists() else None,
                render_config=render_log if isinstance(render_log, dict) else None,
            )
            session.add(asset)
            # Flush so the generated primary key is available immediately.
            session.flush()
            asset_id = str(asset.id)
        else:
            # Re-point the existing row at this order line and refresh metadata.
            existing_asset.asset_type = asset_type
            existing_asset.order_line_id = line.id
            existing_asset.product_id = line.product_id
            existing_asset.cad_file_id = line.product.cad_file_id if line.product is not None else None
            existing_asset.mime_type = _resolve_output_mime_type(output_path)
            existing_asset.file_size_bytes = output_file.stat().st_size if output_file.exists() else None
            if isinstance(render_log, dict):
                existing_asset.render_config = render_log
            # Never clear an existing workflow-run link with None.
            if resolved_workflow_run_id is not None:
                existing_asset.workflow_run_id = resolved_workflow_run_id
            session.flush()
            asset_id = str(existing_asset.id)
    session.commit()
    return OutputSaveResult(
        status=status,
        result_path=output_path if success else None,
        asset_id=asset_id,
        storage_key=storage_key,
        asset_type=asset_type if success else None,
    )
def _extract_render_error(render_log: dict[str, Any] | None) -> str | None:
if not isinstance(render_log, dict):
return None
@@ -319,28 +894,43 @@ def persist_order_line_output(
output_path: str,
render_log: dict[str, Any] | None,
render_completed_at: datetime | None = None,
workflow_run_id: str | None = None,
) -> OutputSaveResult:
"""Persist the render result for an order line and publish the media asset if needed."""
status: Literal["completed", "failed"] = "completed" if success else "failed"
completed_at = render_completed_at or datetime.utcnow()
persisted_output_path = output_path
line.render_status = status
line.render_completed_at = completed_at
line.render_log = render_log
line.result_path = output_path if success else None
if success:
persisted_output_path = _materialize_public_output(line, output_path)
line.result_path = persisted_output_path if success else None
session.flush()
asset_id: str | None = None
storage_key: str | None = None
asset_type: MediaAssetType | None = None
resolved_workflow_run_id = _resolve_existing_workflow_run_id(session, workflow_run_id)
if success:
storage_key = _normalize_storage_key(output_path)
asset_type = _resolve_output_asset_type(output_path)
storage_key = _normalize_storage_key(persisted_output_path)
asset_type = _resolve_output_asset_type(persisted_output_path)
output_file = Path(persisted_output_path)
existing_asset = session.execute(
select(MediaAsset).where(MediaAsset.storage_key == storage_key).limit(1)
).scalar_one_or_none()
if existing_asset is None:
output_file = Path(output_path)
existing_asset = session.execute(
select(MediaAsset)
.where(
MediaAsset.order_line_id == line.id,
MediaAsset.asset_type == asset_type,
)
.order_by(MediaAsset.created_at.desc())
.limit(1)
).scalar_one_or_none()
if existing_asset is None:
render_config = None
if isinstance(render_log, dict):
render_config = {
@@ -360,9 +950,10 @@ def persist_order_line_output(
tenant_id=line.product.cad_file.tenant_id if (line.product and line.product.cad_file) else None,
order_line_id=line.id,
product_id=line.product_id,
workflow_run_id=resolved_workflow_run_id,
asset_type=asset_type,
storage_key=storage_key,
mime_type=_resolve_output_mime_type(output_path),
mime_type=_resolve_output_mime_type(persisted_output_path),
file_size_bytes=output_file.stat().st_size if output_file.exists() else None,
width=None,
height=None,
@@ -372,9 +963,41 @@ def persist_order_line_output(
session.flush()
asset_id = str(asset.id)
else:
existing_asset.order_line_id = line.id
existing_asset.product_id = line.product_id
existing_asset.asset_type = asset_type
existing_asset.storage_key = storage_key
existing_asset.mime_type = _resolve_output_mime_type(persisted_output_path)
existing_asset.file_size_bytes = output_file.stat().st_size if output_file.exists() else None
if line.product is not None:
existing_asset.cad_file_id = line.product.cad_file_id
if isinstance(render_log, dict):
existing_asset.render_config = {
key: render_log[key]
for key in (
"renderer",
"engine_used",
"engine",
"samples",
"device_used",
"compute_type",
"total_duration_s",
)
if key in render_log
}
if resolved_workflow_run_id is not None:
existing_asset.workflow_run_id = resolved_workflow_run_id
session.flush()
asset_id = str(existing_asset.id)
session.commit()
if line.order_id is not None:
try:
from app.domains.orders.service import check_order_completion
check_order_completion(str(line.order_id))
except Exception:
logger.exception("Failed to check order completion for order_line %s", line.id)
return OutputSaveResult(
status=status,
result_path=line.result_path,
@@ -480,13 +1103,29 @@ def prepare_order_line_render_context(
.limit(1)
).scalar_one_or_none()
if usd_asset:
usd_render_path = _resolve_asset_path(usd_asset.storage_key)
if usd_render_path:
logger.info(
"render_order_line: using usd_master %s for cad %s",
usd_render_path.name,
refresh_reason = _usd_master_refresh_reason(cad_file)
if refresh_reason is not None:
logger.warning(
"render_order_line: ignoring stale usd_master for cad %s (%s)",
cad_file.id,
refresh_reason,
)
_emit(
emit,
order_line_id,
f"Existing USD master is stale ({refresh_reason}) — falling back to GLB/STEP",
"warning",
)
if _queue_usd_master_refresh(str(cad_file.id)):
_emit(emit, order_line_id, "Queued USD master regeneration in background")
else:
usd_render_path = _resolve_asset_path(usd_asset.storage_key)
if usd_render_path:
logger.info(
"render_order_line: using usd_master %s for cad %s",
usd_render_path.name,
cad_file.id,
)
glb_reuse_path = None
if not usd_render_path: