chore: snapshot workflow migration progress

2026-04-12 11:49:04 +02:00
parent 0cd02513d5
commit 3e810c74a3
163 changed files with 31774 additions and 2753 deletions
@@ -0,0 +1,220 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import os
import re
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import Any
import requests
ROOT = Path(__file__).resolve().parents[1]
sys.path.insert(0, str(ROOT / "backend"))
from app.domains.rendering.template_input_audit import ( # noqa: E402
extract_template_input_marker,
suggest_workflow_input_schema,
)
DEFAULT_HOST = os.environ.get("TEST_HOST", "http://localhost:8888")
DEFAULT_EMAIL = os.environ.get("TEST_EMAIL", "admin@hartomat.com")
DEFAULT_PASSWORD = os.environ.get("TEST_PASSWORD", "Admin1234!")
DEFAULT_BACKEND_CONTAINER = os.environ.get("HARTOMAT_BACKEND_CONTAINER", "hartomat-backend-1")
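# Blender prints arbitrary logs to stdout during a headless run, so the audit
# payload is wrapped in unique sentinels and regex-extracted afterwards.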
AUDIT_SCRIPT = """
import bpy
import json
def props(target):
out = {}
for key in target.keys():
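# _RNA_UI holds Blender's custom-property UI metadata, not template data.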
if key == "_RNA_UI":
continue
value = target[key]
if isinstance(value, bool):
out[key] = value
elif isinstance(value, (int, float, str)):
out[key] = value
else:
out[key] = str(value)
return out
payload = {
"file": bpy.data.filepath,
"scene": bpy.context.scene.name if bpy.context.scene else None,
"worlds": [{"name": world.name, "props": props(world)} for world in bpy.data.worlds],
"collections": [{"name": coll.name, "props": props(coll)} for coll in bpy.data.collections],
"objects": [{"name": obj.name, "type": obj.type, "props": props(obj)} for obj in bpy.data.objects],
}
print("===HARTOMAT_TEMPLATE_AUDIT_START===")
print(json.dumps(payload))
print("===HARTOMAT_TEMPLATE_AUDIT_END===")
"""
class APIClient:
def __init__(self, host: str, email: str, password: str) -> None:
self.host = host.rstrip("/")
self.session = requests.Session()
response = self.session.post(
f"{self.host}/api/auth/login",
json={"email": email, "password": password},
timeout=30,
)
response.raise_for_status()
access_token = response.json()["access_token"]
self.session.headers["Authorization"] = f"Bearer {access_token}"
def list_render_templates(self) -> list[dict[str, Any]]:
response = self.session.get(f"{self.host}/api/render-templates", timeout=30)
response.raise_for_status()
return response.json()
def _docker_cp(container: str, source: str, destination: Path) -> None:
subprocess.run(
["docker", "cp", f"{container}:{source}", str(destination)],
check=True,
capture_output=True,
text=True,
)
def _run_blender_audit(blend_file: Path) -> dict[str, Any]:
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as handle:
handle.write(AUDIT_SCRIPT)
audit_script = Path(handle.name)
try:
completed = subprocess.run(
["blender", "-b", str(blend_file), "--python", str(audit_script)],
check=True,
capture_output=True,
text=True,
)
finally:
audit_script.unlink(missing_ok=True)
match = re.search(
r"===HARTOMAT_TEMPLATE_AUDIT_START===\n(.*?)\n===HARTOMAT_TEMPLATE_AUDIT_END===",
completed.stdout,
re.DOTALL,
)
if not match:
raise RuntimeError(f"Could not parse Blender audit output for {blend_file}")
return json.loads(match.group(1))
def _collect_markers(audit_payload: dict[str, Any]) -> list[dict[str, str]]:
markers: list[dict[str, str]] = []
for kind in ("worlds", "collections", "objects"):
for target in audit_payload.get(kind, []):
marker = extract_template_input_marker(name=target.get("name"), props=target.get("props") or {})
if marker is None:
continue
key, value = marker
markers.append(
{
"target_kind": kind[:-1],
"target_name": target.get("name") or "",
"key": key,
"value": value,
}
)
return markers
def _render_markdown_report(report: list[dict[str, Any]]) -> str:
lines = [
"# Render Template Input Audit",
"",
"Generated from live `.blend` templates via `scripts/audit_render_templates.py`.",
"",
]
for template in report:
lines.append(f"## {template['name']}")
lines.append("")
lines.append(f"- Template ID: `{template['id']}`")
lines.append(f"- Blend path: `{template['blend_file_path']}`")
lines.append(f"- Existing workflow_input_schema entries: `{template['existing_schema_count']}`")
lines.append(f"- Collections: `{', '.join(template['collection_names']) or '-'}`")
lines.append(f"- Worlds: `{', '.join(template['world_names']) or '-'}`")
lines.append(f"- Objects: `{', '.join(template['object_names']) or '-'}`")
if template["markers"]:
lines.append("- Detected markers:")
for marker in template["markers"]:
lines.append(
f" - `{marker['target_kind']}:{marker['target_name']}` => "
f"`{marker['key']}={marker['value']}`"
)
else:
lines.append("- Detected markers: none")
if template["suggested_schema"]:
lines.append("- Suggested schema:")
lines.append("")
lines.append("```json")
lines.append(json.dumps(template["suggested_schema"], indent=2))
lines.append("```")
else:
lines.append("- Suggested schema: none")
lines.append("")
return "\n".join(lines)
def main() -> int:
parser = argparse.ArgumentParser(description="Audit live render templates for template-input markers.")
parser.add_argument("--host", default=DEFAULT_HOST)
parser.add_argument("--email", default=DEFAULT_EMAIL)
parser.add_argument("--password", default=DEFAULT_PASSWORD)
parser.add_argument("--backend-container", default=DEFAULT_BACKEND_CONTAINER)
parser.add_argument("--write-markdown", help="Optional markdown report output path.")
parser.add_argument("--json", action="store_true", help="Emit full JSON report.")
args = parser.parse_args()
client = APIClient(args.host, args.email, args.password)
templates = client.list_render_templates()
report: list[dict[str, Any]] = []
with tempfile.TemporaryDirectory() as temp_dir:
temp_path = Path(temp_dir)
for template in templates:
blend_file_path = template["blend_file_path"]
local_blend = temp_path / f"{template['id']}.blend"
_docker_cp(args.backend_container, blend_file_path, local_blend)
audit_payload = _run_blender_audit(local_blend)
markers = _collect_markers(audit_payload)
suggested_schema = suggest_workflow_input_schema(
(marker["key"], marker["value"]) for marker in markers
)
report.append(
{
"id": template["id"],
"name": template["name"],
"blend_file_path": blend_file_path,
"existing_schema_count": len(template.get("workflow_input_schema") or []),
"world_names": [item["name"] for item in audit_payload.get("worlds", [])],
"collection_names": [item["name"] for item in audit_payload.get("collections", [])],
"object_names": [item["name"] for item in audit_payload.get("objects", [])],
"markers": markers,
"suggested_schema": suggested_schema,
}
)
if args.write_markdown:
output_path = Path(args.write_markdown)
output_path.write_text(_render_markdown_report(report) + "\n", encoding="utf-8")
if args.json:
print(json.dumps(report, indent=2))
else:
for template in report:
print(f"{template['name']}: markers={len(template['markers'])}, suggested_schema={len(template['suggested_schema'])}")
return 0
if __name__ == "__main__":
raise SystemExit(main())
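The per-template entries in the `--json` report are self-describing, which keeps downstream filtering simple. As a minimal sketch (the report path here is hypothetical; produce it by redirecting the script's `--json` output to a file), this lists templates that carry detected markers but no existing workflow_input_schema:

```python
import json
from pathlib import Path

# Hypothetical path; e.g. audit_render_templates.py --json > tmp/render_template_audit.json
report = json.loads(Path("tmp/render_template_audit.json").read_text(encoding="utf-8"))

for template in report:
    # Markers without an existing schema mark a template as a backfill candidate.
    if template["markers"] and template["existing_schema_count"] == 0:
        print(f"{template['name']}: {len(template['suggested_schema'])} suggested schema entries")
```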
@@ -0,0 +1,176 @@
#!/usr/bin/env python3
"""Low-RAM live CAD parity gate for manifest/model part-key consistency."""
from __future__ import annotations
import argparse
import json
import struct
import sys
from collections import Counter
import requests
DEFAULT_HOST = "http://localhost:8888"
DEFAULT_EMAIL = "admin@hartomat.com"
DEFAULT_PASSWORD = "Admin1234!"
DEFAULT_TIMEOUT = 60
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description=(
"Verify that the live CAD scene manifest and served GLB expose the same "
"renderable part-key set, without duplicates or missing assignments."
)
)
parser.add_argument("--host", default=DEFAULT_HOST, help="Backend base URL.")
parser.add_argument("--email", default=DEFAULT_EMAIL, help="Login email.")
parser.add_argument("--password", default=DEFAULT_PASSWORD, help="Login password.")
parser.add_argument("--cad-id", required=True, help="CAD file id to inspect.")
parser.add_argument(
"--timeout",
type=int,
default=DEFAULT_TIMEOUT,
help="HTTP timeout in seconds.",
)
parser.add_argument(
"--allow-extra-non-mesh-keys",
action="store_true",
help=(
"Only enforce mesh-node parity. Kept for forward compatibility if the "
"manifest ever intentionally contains helper-only entries."
),
)
return parser.parse_args()
def login(session: requests.Session, *, host: str, email: str, password: str, timeout: int) -> None:
response = session.post(
f"{host}/api/auth/login",
json={"email": email, "password": password},
timeout=timeout,
)
response.raise_for_status()
payload = response.json()
session.headers["Authorization"] = f"Bearer {payload['access_token']}"
def fetch_manifest_part_keys(session: requests.Session, *, host: str, cad_id: str, timeout: int) -> set[str]:
response = session.get(f"{host}/api/cad/{cad_id}/scene-manifest", timeout=timeout)
response.raise_for_status()
payload = response.json()
return {
str(part["part_key"]).strip()
for part in payload.get("parts", [])
if part.get("part_key")
}
def fetch_glb_payload(session: requests.Session, *, host: str, cad_id: str, timeout: int) -> dict:
response = session.get(f"{host}/api/cad/{cad_id}/model", timeout=timeout)
response.raise_for_status()
data = response.content
if len(data) < 20:
raise RuntimeError("GLB payload is too small to contain a JSON chunk header")
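# GLB layout: 12-byte header (magic, version, total length), then chunks, each
# prefixed by <uint32 length><uint32 type>; 0x4E4F534A is b"JSON" little-endian.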
json_len, json_type = struct.unpack_from("<II", data, 12)
if json_type != 0x4E4F534A:
raise RuntimeError(f"Unexpected GLB JSON chunk type: {json_type}")
return json.loads(data[20 : 20 + json_len])
def build_report(manifest_part_keys: set[str], glb_payload: dict) -> dict:
mesh_nodes = [node for node in glb_payload.get("nodes", []) if "mesh" in node]
live_part_keys = [
node.get("extras", {}).get("partKey")
for node in mesh_nodes
if node.get("extras", {}).get("partKey")
]
live_part_keys = [str(part_key).strip() for part_key in live_part_keys if str(part_key).strip()]
unique_live_part_keys = set(live_part_keys)
duplicate_live_part_keys = {
key: count for key, count in Counter(live_part_keys).items() if count > 1
}
missing_manifest_part_keys = sorted(manifest_part_keys - unique_live_part_keys)
extra_live_part_keys = sorted(unique_live_part_keys - manifest_part_keys)
return {
"manifest_parts": len(manifest_part_keys),
"mesh_nodes": len(mesh_nodes),
"live_part_keys": len(live_part_keys),
"unique_live_part_keys": len(unique_live_part_keys),
"missing_manifest_part_keys": len(missing_manifest_part_keys),
"extra_live_part_keys": len(extra_live_part_keys),
"duplicate_live_part_keys": len(duplicate_live_part_keys),
"sample_missing": missing_manifest_part_keys[:20],
"sample_extra": extra_live_part_keys[:20],
"sample_dupes": list(sorted(duplicate_live_part_keys.items())[:20]),
}
def evaluate_report(report: dict, *, allow_extra_non_mesh_keys: bool) -> list[str]:
failures: list[str] = []
if report["mesh_nodes"] != report["manifest_parts"]:
failures.append(
f"mesh_nodes={report['mesh_nodes']} does not match manifest_parts={report['manifest_parts']}"
)
if report["live_part_keys"] != report["mesh_nodes"]:
failures.append(
f"live_part_keys={report['live_part_keys']} does not match mesh_nodes={report['mesh_nodes']}"
)
if report["unique_live_part_keys"] != report["manifest_parts"]:
failures.append(
"unique_live_part_keys does not match manifest_parts"
)
if report["missing_manifest_part_keys"] != 0:
failures.append(
f"missing_manifest_part_keys={report['missing_manifest_part_keys']}"
)
if not allow_extra_non_mesh_keys and report["extra_live_part_keys"] != 0:
failures.append(
f"extra_live_part_keys={report['extra_live_part_keys']}"
)
if report["duplicate_live_part_keys"] != 0:
failures.append(
f"duplicate_live_part_keys={report['duplicate_live_part_keys']}"
)
return failures
def main() -> int:
args = parse_args()
session = requests.Session()
login(
session,
host=args.host,
email=args.email,
password=args.password,
timeout=args.timeout,
)
manifest_part_keys = fetch_manifest_part_keys(
session,
host=args.host,
cad_id=args.cad_id,
timeout=args.timeout,
)
glb_payload = fetch_glb_payload(
session,
host=args.host,
cad_id=args.cad_id,
timeout=args.timeout,
)
report = build_report(manifest_part_keys, glb_payload)
failures = evaluate_report(
report,
allow_extra_non_mesh_keys=args.allow_extra_non_mesh_keys,
)
print(json.dumps(report, indent=2, sort_keys=True))
if failures:
for failure in failures:
print(f"[cad-parity] {failure}", file=sys.stderr)
return 1
return 0
if __name__ == "__main__":
raise SystemExit(main())
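For reference, the gate relies on the standard binary-glTF container framing. A stricter standalone reader is sketched below (assumption: a well-formed GLB 2.0 payload already in memory); unlike the gate itself, it also validates the `glTF` magic and container version before decoding the JSON chunk:

```python
import json
import struct

def read_glb_json(data: bytes) -> dict:
    # GLB header: magic (uint32, ASCII "glTF"), container version, total length.
    magic, version, total_length = struct.unpack_from("<III", data, 0)
    if magic != 0x46546C67:  # b"glTF" read as a little-endian uint32
        raise ValueError("not a binary glTF payload")
    if version != 2:
        raise ValueError(f"unsupported GLB container version: {version}")
    if total_length != len(data):
        raise ValueError("GLB length field does not match payload size")
    # First chunk header follows the 12-byte file header: chunk length + chunk type.
    json_len, json_type = struct.unpack_from("<II", data, 12)
    if json_type != 0x4E4F534A:  # b"JSON" read as a little-endian uint32
        raise ValueError(f"unexpected first chunk type: {json_type:#x}")
    return json.loads(data[20 : 20 + json_len])
```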
File diff suppressed because it is too large.
@@ -0,0 +1,543 @@
#!/usr/bin/env python3
"""Serial live parity runner for real Blender turntable output types.
Creates reversible shadow-workflow probes for active template-backed turntable
output types, dispatches real renders against the live stack, waits for the
legacy + shadow outputs, and compares the resulting MP4s frame-by-frame.
"""
from __future__ import annotations
import argparse
import hashlib
import importlib.util
import json
import os
import subprocess
import tempfile
from pathlib import Path
from PIL import Image, ImageChops, ImageStat
ROOT = Path(__file__).resolve().parents[1]
HARNESS_PATH = ROOT / "scripts" / "test_render_pipeline.py"
TURNTABLE_RENDER_TIMEOUT_SECONDS = 3600
TURNTABLE_WORKFLOW_RUN_TIMEOUT_SECONDS = 3600
TURNTABLE_WORKFLOW_COMPARISON_TIMEOUT_SECONDS = 1200
TURNTABLE_ASSET_TIMEOUT_SECONDS = 600
def _load_harness():
spec = importlib.util.spec_from_file_location("live_render_harness", HARNESS_PATH)
if spec is None or spec.loader is None:
raise RuntimeError(f"Could not load harness from {HARNESS_PATH}")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def _is_real_turntable_output_type(output_type: dict, *, include_generated: bool) -> bool:
if not output_type.get("is_active", True):
return False
if output_type.get("renderer") != "blender":
return False
if output_type.get("artifact_kind") != "turntable_video":
return False
if not output_type.get("is_animation"):
return False
if include_generated:
return True
name = str(output_type.get("name") or "")
return not name.startswith("[")
def _build_turntable_shadow_probe_config(harness) -> dict:
config = harness.build_graph_turntable_config(execution_mode="shadow")
for node in config.get("nodes", []):
if node.get("id") == "turntable":
node["params"] = {}
return config
def _ensure_shadow_probe_workflow(harness, client, *, output_type: dict) -> dict:
workflow_name = f"[Turntable Parity] {output_type['name']}"
workflows = harness.get_workflows(client)
workflow = harness.find_named(workflows, workflow_name)
workflow_payload = {
"name": workflow_name,
"output_type_id": output_type["id"],
"config": _build_turntable_shadow_probe_config(harness),
"is_active": True,
}
if workflow is None:
resp = client.post("/workflows", json=workflow_payload)
if resp.status_code not in (200, 201):
raise RuntimeError(
f"Shadow probe workflow create failed for {output_type['name']}: "
f"{resp.status_code} {resp.text[:400]}"
)
workflow = resp.json()
else:
resp = client.put(
f"/workflows/{workflow['id']}",
json={
"name": workflow_payload["name"],
"config": workflow_payload["config"],
"is_active": workflow_payload["is_active"],
},
)
if resp.status_code != 200:
raise RuntimeError(
f"Shadow probe workflow update failed for {output_type['name']}: "
f"{resp.status_code} {resp.text[:400]}"
)
workflow = resp.json()
return workflow
def _download_bytes(client, path: str) -> bytes:
response = client.session.get(f"{client.host}{path}", timeout=120)
response.raise_for_status()
return response.content
def _write_temp_file(tmpdir: Path, *, name: str, payload: bytes) -> Path:
path = tmpdir / name
path.write_bytes(payload)
return path
def _probe_video(video_path: Path) -> dict:
cmd = [
"ffprobe",
"-v",
"error",
"-print_format",
"json",
"-show_streams",
"-show_format",
str(video_path),
]
result = subprocess.run(cmd, check=True, capture_output=True, text=True)
payload = json.loads(result.stdout or "{}")
streams = payload.get("streams") or []
video_stream = next((stream for stream in streams if stream.get("codec_type") == "video"), {})
format_info = payload.get("format") or {}
return {
"codec_name": video_stream.get("codec_name"),
"pix_fmt": video_stream.get("pix_fmt"),
"width": video_stream.get("width"),
"height": video_stream.get("height"),
"avg_frame_rate": video_stream.get("avg_frame_rate"),
"r_frame_rate": video_stream.get("r_frame_rate"),
"nb_frames": video_stream.get("nb_frames"),
"duration": format_info.get("duration") or video_stream.get("duration"),
"size": format_info.get("size"),
}
def _extract_frames(video_path: Path, output_dir: Path) -> list[Path]:
output_dir.mkdir(parents=True, exist_ok=True)
cmd = [
"ffmpeg",
"-y",
"-i",
str(video_path),
"-vsync",
"0",
str(output_dir / "frame_%06d.png"),
]
subprocess.run(cmd, check=True, capture_output=True, text=True)
return sorted(output_dir.glob("frame_*.png"))
def _compute_image_delta(authoritative_path: Path, observer_path: Path) -> tuple[bool | None, float | None]:
try:
with Image.open(authoritative_path) as authoritative_image, Image.open(observer_path) as observer_image:
authoritative_rgba = authoritative_image.convert("RGBA")
observer_rgba = observer_image.convert("RGBA")
if authoritative_rgba.size != observer_rgba.size:
return False, None
diff = ImageChops.difference(authoritative_rgba, observer_rgba)
mean_channels = ImageStat.Stat(diff).mean
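# Normalise the per-channel mean absolute difference to a 0..1 score
# (255 is the maximum possible per-channel delta).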
return True, sum(mean_channels) / (len(mean_channels) * 255.0)
except Exception:
return None, None
def _build_video_artifact(payload: bytes, *, download_url: str, metadata: dict) -> dict:
return {
"path": download_url,
"storage_key": None,
"exists": bool(payload),
"file_size_bytes": len(payload) if payload else None,
"sha256": hashlib.sha256(payload).hexdigest() if payload else None,
"mime_type": "video/mp4",
"image_width": metadata.get("width"),
"image_height": metadata.get("height"),
}
def _compare_videos(authoritative_bytes: bytes, observer_bytes: bytes) -> dict:
with tempfile.TemporaryDirectory(prefix="hartomat-turntable-parity-") as tmp:
tmpdir = Path(tmp)
authoritative_path = _write_temp_file(tmpdir, name="authoritative.mp4", payload=authoritative_bytes)
observer_path = _write_temp_file(tmpdir, name="observer.mp4", payload=observer_bytes)
authoritative_meta = _probe_video(authoritative_path)
observer_meta = _probe_video(observer_path)
authoritative_frames = _extract_frames(authoritative_path, tmpdir / "authoritative_frames")
observer_frames = _extract_frames(observer_path, tmpdir / "observer_frames")
exact_match = hashlib.sha256(authoritative_bytes).hexdigest() == hashlib.sha256(observer_bytes).hexdigest()
dimensions_match = (
authoritative_meta.get("width") == observer_meta.get("width")
and authoritative_meta.get("height") == observer_meta.get("height")
and authoritative_meta.get("width") is not None
and observer_meta.get("width") is not None
)
frame_count_match = len(authoritative_frames) == len(observer_frames)
mean_frame_deltas: list[float] = []
frames_dimensions_match = dimensions_match
if frame_count_match:
for authoritative_frame, observer_frame in zip(authoritative_frames, observer_frames, strict=True):
frame_dimensions_match, frame_delta = _compute_image_delta(authoritative_frame, observer_frame)
if frame_dimensions_match is False:
frames_dimensions_match = False
mean_frame_deltas = []
break
if frame_delta is None:
mean_frame_deltas = []
break
mean_frame_deltas.append(frame_delta)
if exact_match:
status = "matched"
summary = "Observer video matches the authoritative legacy output byte-for-byte."
mean_pixel_delta = 0.0
dimensions_match = True
elif not frame_count_match:
status = "different"
summary = "Observer video differs from the authoritative output and the frame count changed."
mean_pixel_delta = None
elif frames_dimensions_match is False:
status = "different"
summary = "Observer video differs from the authoritative output and frame dimensions changed."
mean_pixel_delta = None
elif mean_frame_deltas:
mean_pixel_delta = sum(mean_frame_deltas) / len(mean_frame_deltas)
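# A mean normalised delta at or below 1e-6 counts as a visual-pass match;
# anything above it is reported as a real difference.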
if mean_pixel_delta <= 1e-6:
status = "matched"
summary = "Observer video matches the authoritative output within the visual pass threshold."
else:
status = "different"
summary = "Observer video differs from the authoritative output."
else:
mean_pixel_delta = None
status = "different"
summary = "Observer video differs from the authoritative output and could not be frame-compared."
return {
"status": status,
"summary": summary,
"exact_match": exact_match,
"dimensions_match": dimensions_match if frame_count_match else False,
"mean_pixel_delta": mean_pixel_delta,
"authoritative_video": authoritative_meta,
"observer_video": observer_meta,
"authoritative_frame_count": len(authoritative_frames),
"observer_frame_count": len(observer_frames),
}
def _resolve_turntable_assets(
harness,
client,
*,
order_line_id: str,
workflow_run_id: str,
timeout_seconds: int,
) -> tuple[dict, dict]:
deadline = harness.time.time() + timeout_seconds
while harness.time.time() < deadline:
assets = harness.list_media_assets(
client,
order_line_id=order_line_id,
asset_type="turntable",
)
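# The observer asset is the one tied to this shadow workflow run; any other
# turntable asset on the line (typically without a workflow_run_id) is taken
# as the authoritative legacy output.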
observer_asset = next(
(asset for asset in assets if str(asset.get("workflow_run_id")) == workflow_run_id),
None,
)
authoritative_asset = next(
(
asset
for asset in assets
if asset.get("workflow_run_id") is None
or str(asset.get("workflow_run_id")) != workflow_run_id
),
None,
)
if authoritative_asset is not None and observer_asset is not None:
return authoritative_asset, observer_asset
harness.time.sleep(2)
raise RuntimeError(
f"Could not resolve authoritative+observer turntable assets for line {order_line_id} run {workflow_run_id}"
)
def _run_single_output_type_with_product(harness, client, *, product_id: str, output_type: dict) -> dict:
templates = harness.render_template_candidates_for_output_type(
harness.get_render_templates(client),
output_type["id"],
)
if not templates:
raise RuntimeError(f"Output type {output_type['name']} is not template-backed")
snapshot = harness.build_output_type_workflow_snapshot(output_type)
workflow = _ensure_shadow_probe_workflow(harness, client, output_type=output_type)
try:
resp = client.patch(
f"/output-types/{output_type['id']}",
json=harness.build_output_type_workflow_link_payload(
workflow_definition_id=workflow["id"],
execution_mode="shadow",
),
)
if resp.status_code != 200:
raise RuntimeError(
f"Output type shadow link failed for {output_type['name']}: "
f"{resp.status_code} {resp.text[:400]}"
)
bound_output_type = resp.json()
order = harness.create_test_order(
client,
product_id=product_id,
output_type_ids=[bound_output_type["id"]],
test_label=f"Turntable Parity [{bound_output_type['name']}]",
)
if order is None:
raise RuntimeError(f"Order creation failed for {bound_output_type['name']}")
lines = order.get("lines", [])
if len(lines) != 1:
raise RuntimeError(
f"Expected exactly one order line for {bound_output_type['name']}, got {len(lines)}"
)
line_id = lines[0]["id"]
resp_preflight = client.get(
f"/workflows/{workflow['id']}/preflight",
params={"context_id": line_id},
)
if resp_preflight.status_code != 200:
raise RuntimeError(
f"Workflow preflight failed for {bound_output_type['name']}: "
f"{resp_preflight.status_code} {resp_preflight.text[:400]}"
)
preflight = resp_preflight.json()
if not preflight.get("graph_dispatch_allowed"):
raise RuntimeError(
f"Workflow preflight blocked dispatch for {bound_output_type['name']}: "
f"{preflight.get('summary')}"
)
success = harness._submit_and_wait(
client,
order,
[bound_output_type["id"]],
use_graph_dispatch=False,
timeout_seconds=getattr(harness, "ANIMATION_RENDER_TIMEOUT_SECONDS", TURNTABLE_RENDER_TIMEOUT_SECONDS),
)
if not success:
raise RuntimeError(f"Render dispatch did not complete successfully for {bound_output_type['name']}")
workflow_run = harness.wait_for_workflow_run(
client,
workflow_id=workflow["id"],
line_id=line_id,
timeout_seconds=getattr(harness, "ANIMATION_WORKFLOW_RUN_TIMEOUT_SECONDS", TURNTABLE_WORKFLOW_RUN_TIMEOUT_SECONDS),
terminal_only=True,
)
if workflow_run is None:
raise RuntimeError(f"Workflow run not found for {bound_output_type['name']}")
raw_comparison = harness.wait_for_workflow_comparison(
client,
workflow_run_id=workflow_run["id"],
timeout_seconds=getattr(harness, "ANIMATION_WORKFLOW_COMPARISON_TIMEOUT_SECONDS", TURNTABLE_WORKFLOW_COMPARISON_TIMEOUT_SECONDS),
)
if raw_comparison is None:
raise RuntimeError(f"Workflow comparison did not stabilize for {bound_output_type['name']}")
authoritative_asset, observer_asset = _resolve_turntable_assets(
harness,
client,
order_line_id=line_id,
workflow_run_id=str(workflow_run["id"]),
timeout_seconds=TURNTABLE_ASSET_TIMEOUT_SECONDS,
)
authoritative_bytes = _download_bytes(client, authoritative_asset["download_url"])
observer_bytes = _download_bytes(client, observer_asset["download_url"])
manual = _compare_videos(authoritative_bytes, observer_bytes)
comparison = {
"workflow_run_id": workflow_run.get("id"),
"workflow_def_id": workflow.get("id"),
"order_line_id": line_id,
"execution_mode": workflow_run.get("execution_mode"),
"status": manual["status"],
"summary": manual["summary"],
"authoritative_output": _build_video_artifact(
authoritative_bytes,
download_url=authoritative_asset["download_url"],
metadata=manual["authoritative_video"],
),
"observer_output": _build_video_artifact(
observer_bytes,
download_url=observer_asset["download_url"],
metadata=manual["observer_video"],
),
"exact_match": manual["exact_match"],
"dimensions_match": manual["dimensions_match"],
"mean_pixel_delta": manual["mean_pixel_delta"],
"authoritative_frame_count": manual["authoritative_frame_count"],
"observer_frame_count": manual["observer_frame_count"],
}
rollout_gate = harness.evaluate_rollout_gate_from_comparison(comparison)
template_node = harness._node_result_by_name(workflow_run, "template")
render_node = harness._node_result_by_name(workflow_run, "turntable")
return {
"output_type": {
"id": bound_output_type["id"],
"name": bound_output_type["name"],
"format": bound_output_type.get("output_format"),
"artifact_kind": bound_output_type.get("artifact_kind"),
"render_settings": bound_output_type.get("render_settings"),
"invocation_overrides": bound_output_type.get("invocation_overrides"),
},
"templates": [
{
"name": template.get("name"),
"blend_file_path": template.get("blend_file_path"),
"lighting_only": template.get("lighting_only"),
"shadow_catcher_enabled": template.get("shadow_catcher_enabled"),
"target_collection": template.get("target_collection"),
"camera_orbit": template.get("camera_orbit"),
}
for template in templates
],
"workflow": {
"id": workflow.get("id"),
"name": workflow.get("name"),
},
"preflight": {
"execution_mode": preflight.get("execution_mode"),
"context_kind": preflight.get("context_kind"),
"graph_dispatch_allowed": preflight.get("graph_dispatch_allowed"),
},
"order_line_id": line_id,
"workflow_run": {
"id": workflow_run.get("id"),
"status": workflow_run.get("status"),
"execution_mode": workflow_run.get("execution_mode"),
},
"template_resolution": template_node.get("output") if template_node else None,
"render_output": render_node.get("output") if render_node else None,
"raw_backend_comparison": raw_comparison,
"comparison": comparison,
"rollout_gate": rollout_gate,
}
finally:
harness.restore_output_type_workflow_snapshot(
client,
output_type_id=output_type["id"],
snapshot=snapshot,
)
def main() -> int:
harness = _load_harness()
parser = argparse.ArgumentParser(description="Compare live legacy vs shadow renders for real turntable output types")
parser.add_argument("--host", default=os.environ.get("TEST_HOST", "http://localhost:8888"))
parser.add_argument("--email", default=os.environ.get("TEST_EMAIL", "admin@hartomat.com"))
parser.add_argument("--password", default=os.environ.get("TEST_PASSWORD", "Admin1234!"))
parser.add_argument("--product-id", required=True, help="Existing product id to use for parity runs")
parser.add_argument("--include-generated", action="store_true")
parser.add_argument("--output", default=None, help="Optional path for JSON report")
parser.add_argument("--only", action="append", default=[], help="Only run the named output type; repeatable")
args = parser.parse_args()
client = harness.APIClient(args.host, args.email, args.password)
if not harness.test_health(client):
raise RuntimeError("Render stack health check failed")
output_types = [
output_type
for output_type in harness.get_output_types(client, include_inactive=True)
if _is_real_turntable_output_type(output_type, include_generated=args.include_generated)
]
if args.only:
wanted = set(args.only)
output_types = [output_type for output_type in output_types if output_type.get("name") in wanted]
output_types.sort(key=lambda item: item.get("name") or "")
if not output_types:
raise RuntimeError("No eligible turntable output types found")
results: list[dict] = []
for output_type in output_types:
print(f"\n=== {output_type['name']} ===", flush=True)
result = _run_single_output_type_with_product(
harness,
client,
product_id=args.product_id,
output_type=output_type,
)
results.append(result)
summary = {
"name": result["output_type"]["name"],
"artifact_kind": result["output_type"].get("artifact_kind"),
"exact_match": result["comparison"].get("exact_match"),
"status": result["comparison"].get("status"),
"mean_pixel_delta": result["comparison"].get("mean_pixel_delta"),
"authoritative_frame_count": result["comparison"].get("authoritative_frame_count"),
"observer_frame_count": result["comparison"].get("observer_frame_count"),
"rollout_verdict": result["rollout_gate"].get("verdict"),
"workflow_run_id": result["workflow_run"]["id"],
"order_line_id": result["order_line_id"],
}
print(json.dumps(summary, ensure_ascii=False), flush=True)
report = {
"host": args.host,
"product_id": args.product_id,
"results": results,
}
if args.output:
output_path = Path(args.output)
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(json.dumps(report, indent=2, ensure_ascii=False) + "\n")
print(f"\nWrote report to {output_path}", flush=True)
overall = {
"total": len(results),
"exact_match": sum(1 for item in results if item["comparison"].get("exact_match") is True),
"pass": sum(1 for item in results if item["rollout_gate"].get("verdict") == "pass"),
"warn": sum(1 for item in results if item["rollout_gate"].get("verdict") == "warn"),
"fail": sum(1 for item in results if item["rollout_gate"].get("verdict") == "fail"),
}
print("\n=== Overall ===", flush=True)
print(json.dumps(overall, ensure_ascii=False), flush=True)
return 0
if __name__ == "__main__":
raise SystemExit(main())
@@ -0,0 +1,113 @@
#!/usr/bin/env bash
set -euo pipefail
GPU_PCI_ADDR="${GPU_PCI_ADDR:-0000:01:00.0}"
POWER_CONTROL_PATH="/sys/bus/pci/devices/${GPU_PCI_ADDR}/power/control"
RUNTIME_STATUS_PATH="/sys/bus/pci/devices/${GPU_PCI_ADDR}/power/runtime_status"
MODPROBE_CONF="/etc/modprobe.d/hartomat-nvidia-runtimepm.conf"
TARGET_OPTION='options nvidia NVreg_DynamicPowerManagement=0x00 NVreg_EnableGpuFirmware=0'
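# NVreg_DynamicPowerManagement=0x00 tells the driver to never runtime-suspend
# the GPU; NVreg_EnableGpuFirmware=0 keeps it on the classic (non-GSP) path.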
COMPOSE_SERVICES=(render-worker render-worker-light)
log() {
printf '\n[%s] %s\n' "$(date +%H:%M:%S)" "$*"
}
require_root() {
if [[ "${EUID}" -ne 0 ]]; then
echo "Run as root: sudo $0" >&2
exit 1
fi
}
show_state() {
log "GPU runtime state"
if [[ -r "${RUNTIME_STATUS_PATH}" ]]; then
cat "${RUNTIME_STATUS_PATH}"
else
echo "runtime_status unavailable at ${RUNTIME_STATUS_PATH}"
fi
log "nvidia-smi"
if ! nvidia-smi; then
echo "nvidia-smi still failing"
fi
}
show_gpu_clients() {
log "Processes using /dev/nvidia*"
if ! lsof /dev/nvidia* 2>/dev/null; then
echo "No open /dev/nvidia* handles detected"
fi
}
stop_compose_workers() {
if ! command -v docker >/dev/null 2>&1; then
return
fi
if [[ ! -f "docker-compose.yml" ]]; then
return
fi
log "Stopping HartOMat render workers"
docker compose stop "${COMPOSE_SERVICES[@]}" || true
}
start_compose_workers() {
if ! command -v docker >/dev/null 2>&1; then
return
fi
if [[ ! -f "docker-compose.yml" ]]; then
return
fi
log "Starting HartOMat render workers"
docker compose up -d "${COMPOSE_SERVICES[@]}" || true
}
reload_nvidia_modules() {
log "Reloading NVIDIA kernel modules"
systemctl stop nvidia-persistenced.service nvidia-powerd.service || true
local pids=""
pids="$(fuser -v /dev/nvidia* 2>/dev/null | awk '{for (i = 1; i <= NF; i++) print $i}' | grep -E '^[0-9]+$' | sort -u || true)"
if [[ -n "${pids}" ]]; then
echo "The following PIDs still hold /dev/nvidia*: ${pids}" >&2
echo "Close those applications and rerun the script." >&2
exit 1
fi
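# Dependent modules must come out before the core nvidia module; the stack is
# then reloaded from the core module outward.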
modprobe -r nvidia_uvm nvidia_drm nvidia_modeset nvidia
modprobe nvidia
modprobe nvidia_modeset
modprobe nvidia_uvm
modprobe nvidia_drm
systemctl start nvidia-persistenced.service || true
systemctl start nvidia-powerd.service || true
}
require_root
if [[ ! -e "${POWER_CONTROL_PATH}" ]]; then
echo "GPU power control path not found: ${POWER_CONTROL_PATH}" >&2
exit 1
fi
show_gpu_clients
show_state
stop_compose_workers
log "Disabling runtime autosuspend for this boot"
echo on > "${POWER_CONTROL_PATH}"
log "Persisting NVIDIA runtime power setting"
printf '%s\n' "${TARGET_OPTION}" > "${MODPROBE_CONF}"
reload_nvidia_modules
log "Final state"
show_state
start_compose_workers
log "If nvidia-smi still fails, reboot once so the new modprobe option is applied from a clean boot."
@@ -0,0 +1,254 @@
#!/usr/bin/env bash
set -euo pipefail
APPLY=0
DEEP=0
usage() {
cat <<'EOF'
Usage:
./scripts/repo_hygiene.sh [options]
Options:
--apply Delete the reported hygiene artifacts.
--deep Also remove heavy local environments (`frontend/node_modules`, `backend/.venv`).
-h, --help Show this help.
Behavior:
Without --apply, the script only reports what it would clean.
The default cleanup is conservative and removes debug/output/cache artifacts only.
Examples:
./scripts/repo_hygiene.sh
./scripts/repo_hygiene.sh --apply
./scripts/repo_hygiene.sh --apply --deep
EOF
}
log() {
printf '\n[%s] %s\n' "$(date '+%H:%M:%S')" "$*"
}
warn() {
printf '[warn] %s\n' "$*" >&2
}
ownership_candidate_find_expr() {
cat <<'EOF'
find . \
\( -path './.git' -o -path './backend/.venv' -o -path './frontend/node_modules' \) -prune -o \
\( \
-path './tmp' -o \
-path './frontend/dist' -o \
-path './backend/.pytest_cache' -o \
-path './backend/celerybeat-schedule' -o \
-type d -name '__pycache__' -o \
-type f \( -name '*.pyc' -o -name '*.pyo' -o -name 'core' \) \
\) \
-print
EOF
}
repair_backend_ownership_with_docker() {
if ! command -v docker >/dev/null 2>&1; then
return 1
fi
if ! docker info >/dev/null 2>&1; then
return 1
fi
log "Repairing backend artifact ownership through Docker"
docker run --rm \
-v "$REPO_ROOT/backend:/target" \
alpine:3.20 \
sh -lc "chown -R $(id -u):$(id -g) /target"
}
repair_repo_ownership_with_docker() {
if ! command -v docker >/dev/null 2>&1; then
return 1
fi
if ! docker info >/dev/null 2>&1; then
return 1
fi
log "Repairing generated artifact ownership across the repository through Docker"
docker run --rm \
-v "$REPO_ROOT:/target" \
alpine:3.20 \
sh -lc "
find /target \\
\\( -path /target/.git -o -path /target/frontend/node_modules -o -path /target/backend/.venv \\) -prune -o \\
\\( -type d -name __pycache__ -o -type f \\( -name '*.pyc' -o -name '*.pyo' -o -name 'celerybeat-schedule' -o -name 'core' \\) \\) \\
-exec chown $(id -u):$(id -g) {} +
"
}
ensure_repo_root() {
if ! REPO_ROOT="$(git rev-parse --show-toplevel 2>/dev/null)"; then
echo "This script must be run inside the git repository." >&2
exit 1
fi
cd "$REPO_ROOT"
}
parse_args() {
while [ "$#" -gt 0 ]; do
case "$1" in
--apply)
APPLY=1
;;
--deep)
DEEP=1
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown option: $1" >&2
usage >&2
exit 1
;;
esac
shift
done
}
print_candidates() {
local label="$1"
shift
local cmd=("$@")
log "$label"
"${cmd[@]}"
}
print_non_writable_artifacts() {
log "Generated artifacts not owned by the current user"
bash -lc "$(ownership_candidate_find_expr) | while IFS= read -r path; do [ -e \"\$path\" ] || continue; if [ ! -O \"\$path\" ]; then printf '%s\n' \"\$path\"; fi; done | sort"
}
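# Emits a find(1) prefix that prunes vendored trees and ends with a dangling
# -o, so callers append their own tests and actions to complete the command.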
repo_find_expr() {
cat <<'EOF'
find . \
\( -path './.git' -o -path './backend/.venv' -o -path './frontend/node_modules' \) -prune -o
EOF
}
cleanup_safe_targets() {
local permission_failures=0
local target
PERMISSION_FAILURE_PATHS=()
local targets=(
tmp
frontend/dist
backend/.pytest_cache
backend/celerybeat-schedule
)
for target in "${targets[@]}"; do
[ -e "$target" ] || continue
if ! rm -rf "$target" 2>/dev/null; then
warn "Could not remove $target. Ownership or permissions need to be fixed first."
PERMISSION_FAILURE_PATHS+=("$target")
permission_failures=1
fi
done
while IFS= read -r target; do
if ! rm -rf "$target" 2>/dev/null; then
warn "Could not remove $target. Ownership or permissions need to be fixed first."
PERMISSION_FAILURE_PATHS+=("$target")
permission_failures=1
fi
done < <(find . -type d -name __pycache__ -print)
while IFS= read -r target; do
if ! rm -f "$target" 2>/dev/null; then
warn "Could not remove $target. Ownership or permissions need to be fixed first."
PERMISSION_FAILURE_PATHS+=("$target")
permission_failures=1
fi
done < <(find . -type f \( -name '*.pyc' -o -name '*.pyo' \) -print)
while IFS= read -r target; do
if ! rm -f "$target" 2>/dev/null; then
warn "Could not remove $target. Ownership or permissions need to be fixed first."
PERMISSION_FAILURE_PATHS+=("$target")
permission_failures=1
fi
done < <(find . -type f -name 'core' -print)
return "$permission_failures"
}
cleanup_deep_targets() {
rm -rf frontend/node_modules
rm -rf backend/.venv
}
main() {
parse_args "$@"
ensure_repo_root
print_candidates \
"Core debug and cache artifacts" \
bash -lc "find tmp frontend/dist backend/.pytest_cache backend/celerybeat-schedule -mindepth 0 -maxdepth 0 2>/dev/null | sort"
print_candidates \
"Python cache directories" \
bash -lc "$(repo_find_expr) -type d -name '__pycache__' -print | sort"
print_candidates \
"Python bytecode files" \
bash -lc "$(repo_find_expr) -type f \\( -name '*.pyc' -o -name '*.pyo' \\) -print | sort | sed -n '1,200p'"
print_non_writable_artifacts
if [ "$DEEP" -eq 1 ]; then
print_candidates \
"Heavy local environments" \
bash -lc "find frontend/node_modules backend/.venv -mindepth 0 -maxdepth 0 2>/dev/null | sort"
fi
if [ "$APPLY" -eq 0 ]; then
log "Dry run only. Re-run with --apply to delete the reported artifacts."
if [ "$DEEP" -eq 1 ]; then
echo "Deep mode is enabled: node_modules and backend/.venv would also be removed."
fi
exit 0
fi
log "Deleting conservative hygiene artifacts"
CLEANUP_PERMISSION_FAILURES=0
if ! cleanup_safe_targets; then
CLEANUP_PERMISSION_FAILURES=1
fi
if [ "$DEEP" -eq 1 ]; then
log "Deleting deep local environments"
cleanup_deep_targets
fi
if [ "$CLEANUP_PERMISSION_FAILURES" -eq 1 ]; then
if repair_repo_ownership_with_docker || repair_backend_ownership_with_docker; then
log "Retrying conservative hygiene cleanup after ownership repair"
CLEANUP_PERMISSION_FAILURES=0
cleanup_safe_targets || CLEANUP_PERMISSION_FAILURES=1
fi
fi
if [ "$CLEANUP_PERMISSION_FAILURES" -eq 1 ]; then
log "Some artifacts could not be removed because they are not writable by the current user."
printf 'Blocked paths:\n'
printf ' %s\n' "${PERMISSION_FAILURE_PATHS[@]}" | sort -u
echo
echo "Run this once, then re-run the hygiene script:"
echo " $(ownership_candidate_find_expr | sed 's/-print$/-print0/' | tr '\n' ' ' | sed 's/ */ /g') | sudo xargs -0 chown -R \"\$USER:\$USER\""
fi
log "Remaining git status"
git status --short
}
main "$@"
@@ -0,0 +1,246 @@
#!/usr/bin/env bash
set -euo pipefail
usage() {
cat <<'EOF'
Usage:
./scripts/rerender_closed_legacy_still.sh <order-line-id>
Description:
Re-renders a completed legacy still order line with the full legacy template,
material, position, and USD context, then persists the canonical output and
updates the linked media asset.
EOF
}
if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ]; then
usage
exit 0
fi
ORDER_LINE_ID="${1:-}"
if [ -z "$ORDER_LINE_ID" ]; then
usage >&2
exit 1
fi
REPO_ROOT="$(git rev-parse --show-toplevel 2>/dev/null || true)"
if [ -z "$REPO_ROOT" ]; then
echo "This script must be run inside the repository." >&2
exit 1
fi
cd "$REPO_ROOT"
docker compose exec -T render-worker python3 - "$ORDER_LINE_ID" <<'PY'
import json
import re
import sys
from datetime import datetime
from pathlib import Path
import app.models.template # noqa: F401
from sqlalchemy import create_engine, select
from sqlalchemy.orm import Session, joinedload
from app.config import settings
from app.core.render_paths import resolve_result_path
from app.domains.media.models import MediaAsset, MediaAssetType
from app.domains.orders.models import OrderLine
from app.domains.products.models import Product
from app.domains.rendering.workflow_runtime_services import (
OrderLineRenderSetupResult,
persist_order_line_output,
resolve_order_line_template_context,
resolve_render_position_context,
)
from app.services.step_processor import build_part_colors, render_to_file
def _sanitize(value: str) -> str:
return re.sub(r"[^\w\-.]", "_", value.strip())[:100] or "output"
order_line_id = sys.argv[1]
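# settings.database_url is an async (asyncpg) URL; strip the driver suffix so
# this one-off script can use a synchronous SQLAlchemy engine.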
engine = create_engine(settings.database_url.replace("+asyncpg", ""))
with Session(engine) as session:
line = session.execute(
select(OrderLine)
.where(OrderLine.id == order_line_id)
.options(
joinedload(OrderLine.product).joinedload(Product.cad_file),
joinedload(OrderLine.output_type),
)
).scalar_one_or_none()
if line is None:
raise RuntimeError(f"Order line not found: {order_line_id}")
if line.product is None or line.product.cad_file is None:
raise RuntimeError(f"Order line {order_line_id} has no linked CAD file")
if line.output_type is None:
raise RuntimeError(f"Order line {order_line_id} has no output type")
cad_file = line.product.cad_file
render_settings = dict(line.output_type.render_settings or {})
if bool(render_settings.get("cinematic")):
raise RuntimeError("This support script only handles still outputs, not cinematic outputs")
if bool(getattr(line.output_type, "is_animation", False)):
raise RuntimeError("This support script only handles still outputs, not animation outputs")
materials_source = list(line.product.cad_part_materials or [])
part_colors = {}
parsed_names = []
if cad_file.parsed_objects:
parsed_names = list(cad_file.parsed_objects.get("objects", []) or [])
if materials_source:
part_colors = build_part_colors(parsed_names, materials_source)
usd_render_path = None
usd_asset = session.execute(
select(MediaAsset)
.where(
MediaAsset.cad_file_id == cad_file.id,
MediaAsset.asset_type == MediaAssetType.usd_master,
)
.order_by(MediaAsset.created_at.desc())
.limit(1)
).scalar_one_or_none()
if usd_asset is not None:
usd_render_path = resolve_result_path(usd_asset.storage_key)
setup = OrderLineRenderSetupResult(
status="ready",
order_line=line,
cad_file=cad_file,
materials_source=materials_source,
usd_render_path=usd_render_path,
glb_reuse_path=None,
part_colors=part_colors,
render_start=datetime.utcnow(),
)
template_context = resolve_order_line_template_context(session, setup)
position_context = resolve_render_position_context(session, line)
out_ext = "jpg"
fmt = (line.output_type.output_format or "").lower()
if fmt == "png":
out_ext = "png"
elif fmt in {"jpg", "jpeg"}:
out_ext = "jpg"
elif fmt == "webp":
out_ext = "webp"
render_width = int(render_settings["width"]) if render_settings.get("width") else None
render_height = int(render_settings["height"]) if render_settings.get("height") else None
render_engine = render_settings.get("engine")
render_samples = int(render_settings["samples"]) if render_settings.get("samples") else None
noise_threshold = str(render_settings.get("noise_threshold", ""))
denoiser = str(render_settings.get("denoiser", ""))
denoising_input_passes = str(render_settings.get("denoising_input_passes", ""))
denoising_prefilter = str(render_settings.get("denoising_prefilter", ""))
denoising_quality = str(render_settings.get("denoising_quality", ""))
denoising_use_gpu = str(render_settings.get("denoising_use_gpu", ""))
transparent_bg = bool(line.output_type.transparent_bg)
render_overrides = getattr(line, "render_overrides", None)
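# Per-line render_overrides take precedence over the output type's render_settings.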
if isinstance(render_overrides, dict):
if "width" in render_overrides:
render_width = int(render_overrides["width"])
if "height" in render_overrides:
render_height = int(render_overrides["height"])
if "samples" in render_overrides:
render_samples = int(render_overrides["samples"])
if "engine" in render_overrides:
render_engine = render_overrides["engine"]
if "noise_threshold" in render_overrides:
noise_threshold = str(render_overrides["noise_threshold"])
if "denoiser" in render_overrides:
denoiser = str(render_overrides["denoiser"])
if "denoising_input_passes" in render_overrides:
denoising_input_passes = str(render_overrides["denoising_input_passes"])
if "denoising_prefilter" in render_overrides:
denoising_prefilter = str(render_overrides["denoising_prefilter"])
if "denoising_quality" in render_overrides:
denoising_quality = str(render_overrides["denoising_quality"])
if "denoising_use_gpu" in render_overrides:
denoising_use_gpu = str(render_overrides["denoising_use_gpu"])
if "transparent_bg" in render_overrides:
transparent_bg = bool(render_overrides["transparent_bg"])
if "output_format" in render_overrides:
fmt_override = str(render_overrides["output_format"]).lower()
if fmt_override == "png":
out_ext = "png"
elif fmt_override in {"jpg", "jpeg"}:
out_ext = "jpg"
elif fmt_override == "webp":
out_ext = "webp"
product_name = line.product.name or getattr(line.product, "pim_id", None) or "product"
output_type_name = line.output_type.name or "render"
filename = f"{_sanitize(product_name)}_{_sanitize(output_type_name)}.{out_ext}"
output_dir = Path(settings.upload_dir) / "renders" / str(line.id)
output_dir.mkdir(parents=True, exist_ok=True)
output_path = str(output_dir / filename)
success, render_log = render_to_file(
step_path=cad_file.stored_path,
output_path=output_path,
part_colors=part_colors,
width=render_width,
height=render_height,
transparent_bg=transparent_bg,
engine=render_engine,
samples=render_samples,
template_path=template_context.template.blend_file_path if template_context.template else None,
target_collection=template_context.template.target_collection if template_context.template else "Product",
material_library_path=template_context.material_library if template_context.use_materials else None,
material_map=template_context.material_map,
part_names_ordered=parsed_names or None,
lighting_only=bool(template_context.template.lighting_only) if template_context.template else False,
shadow_catcher=bool(template_context.template.shadow_catcher_enabled) if template_context.template else False,
cycles_device=line.output_type.cycles_device,
rotation_x=position_context.rotation_x,
rotation_y=position_context.rotation_y,
rotation_z=position_context.rotation_z,
focal_length_mm=position_context.focal_length_mm,
sensor_width_mm=position_context.sensor_width_mm,
material_override=template_context.override_material,
job_id=str(line.id),
order_line_id=str(line.id),
noise_threshold=noise_threshold,
denoiser=denoiser,
denoising_input_passes=denoising_input_passes,
denoising_prefilter=denoising_prefilter,
denoising_quality=denoising_quality,
denoising_use_gpu=denoising_use_gpu,
usd_path=usd_render_path,
)
if not success:
raise RuntimeError(json.dumps(render_log, ensure_ascii=True))
persisted = persist_order_line_output(
session,
line,
success=True,
output_path=output_path,
render_log=render_log if isinstance(render_log, dict) else None,
render_completed_at=datetime.utcnow(),
)
print(
json.dumps(
{
"order_line_id": str(line.id),
"result_path": persisted.result_path,
"asset_id": persisted.asset_id,
"storage_key": persisted.storage_key,
"asset_type": persisted.asset_type.value if persisted.asset_type else None,
"template": template_context.template.name if template_context.template else None,
"material_map_count": len(template_context.material_map or {}),
"usd_path": str(usd_render_path) if usd_render_path else None,
"duration_s": render_log.get("total_duration_s") if isinstance(render_log, dict) else None,
},
ensure_ascii=True,
)
)
PY
File diff suppressed because it is too large.
@@ -0,0 +1,128 @@
#!/usr/bin/env bash
set -euo pipefail
ROOT="$(git rev-parse --show-toplevel 2>/dev/null || pwd)"
cd "$ROOT"
RUN_LIVE_SHADOW=0
RUN_GOLDEN=0
RUN_CAD_PARITY=0
DEFAULT_CAD_PARITY_HOST="${CAD_PARITY_HOST:-http://localhost:8888}"
DEFAULT_CAD_PARITY_EMAIL="${CAD_PARITY_EMAIL:-admin@hartomat.com}"
DEFAULT_CAD_PARITY_PASSWORD="${CAD_PARITY_PASSWORD:-Admin1234!}"
DEFAULT_CAD_PARITY_ID="${CAD_PARITY_ID:-7c214057-9982-4d6e-aa87-43bfabfdb709}"
usage() {
cat <<'EOF'
Usage:
./scripts/workflow_sequential_gates.sh [options]
Options:
--with-live-shadow Also run the live still-smoke harness in shadow mode.
--with-golden Also run the live golden suite after the other gates.
--with-cad-parity Also verify live CAD scene-manifest/model part-key parity.
-h, --help Show this help.
Behavior:
Runs the current minimum workflow verification gates sequentially to avoid
RAM spikes from parallel test execution.
EOF
}
parse_args() {
while [ "$#" -gt 0 ]; do
case "$1" in
--with-live-shadow)
RUN_LIVE_SHADOW=1
;;
--with-golden)
RUN_GOLDEN=1
;;
--with-cad-parity)
RUN_CAD_PARITY=1
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown option: $1" >&2
usage >&2
exit 1
;;
esac
shift
done
}
log() {
printf '\n[%s] %s\n' "$(date '+%H:%M:%S')" "$*"
}
die() {
echo "[error] $*" >&2
exit 1
}
require_command() {
local binary="$1"
local hint="$2"
command -v "$binary" >/dev/null 2>&1 || die "$binary is required. $hint"
}
require_path() {
local path="$1"
local hint="$2"
[ -e "$path" ] || die "$path is missing. $hint"
}
preflight() {
log "Preflight checks"
require_command curl "Install curl or run this script from the normal development shell."
require_command python3 "Install python3 or activate the expected local toolchain."
require_path backend/.venv/bin/pytest "Create the backend venv first, for example via the normal backend setup flow."
require_path frontend/node_modules "Run 'cd frontend && npm install' first."
require_path frontend/node_modules/.bin/vitest "Frontend test dependencies are incomplete. Re-run 'cd frontend && npm install'."
require_path scripts/compare_live_cad_parity.py "The live CAD parity gate script is missing."
}
run_step() {
local label="$1"
shift
log "$label"
"$@"
}
parse_args "$@"
preflight
run_step "Frontend health" curl -fsSI http://localhost:5173
run_step "Backend health" curl -fsS http://localhost:8888/health
run_step "Runtime config regression tests" backend/.venv/bin/pytest backend/tests/test_config_runtime_resolution.py -q
run_step "Workflow runtime regression tests" backend/.venv/bin/pytest backend/tests/domains/test_workflow_runtime_services.py -q
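# vitest is pinned to a single forked worker below to keep peak memory low,
# in line with this script's sequential, low-RAM goal.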
run_step \
"Workflow editor/frontend regression tests" \
bash -lc "cd frontend && npx vitest run src/__tests__/components/workflowEditorUi.test.tsx src/__tests__/api/outputTypes.test.ts --pool forks --poolOptions.forks.singleFork=true"
if [ "$RUN_CAD_PARITY" -eq 1 ]; then
run_step \
"Live CAD manifest/model parity" \
python3 scripts/compare_live_cad_parity.py \
--host "$DEFAULT_CAD_PARITY_HOST" \
--email "$DEFAULT_CAD_PARITY_EMAIL" \
--password "$DEFAULT_CAD_PARITY_PASSWORD" \
--cad-id "$DEFAULT_CAD_PARITY_ID"
fi
if [ "$RUN_LIVE_SHADOW" -eq 1 ]; then
run_step \
"Live still smoke harness (shadow)" \
python3 scripts/test_render_pipeline.py --workflow-still-smoke --execution-mode shadow
fi
if [ "$RUN_GOLDEN" -eq 1 ]; then
run_step "Live golden suite" python3 scripts/test_render_pipeline.py --workflow-golden-suite
fi
log "Sequential workflow gates completed"