#!/usr/bin/env python3
"""Render pipeline integration test.

Tests the full pipeline: STEP upload → CAD processing → thumbnail rendering →
order creation → submit → dispatch renders → wait for completion.

Usage:
    # Quick smoke test (1 STEP file, 1 output type)
    python scripts/test_render_pipeline.py --sample

    # Full test — all output types, waits for all renders
    python scripts/test_render_pipeline.py --full

    # Only check render health endpoint
    python scripts/test_render_pipeline.py --health

    # Custom credentials / host
    python scripts/test_render_pipeline.py --sample --host http://localhost:8888 \
        --email admin@hartomat.com --password Admin1234!

Environment variables (alternative to flags):
    TEST_HOST, TEST_EMAIL, TEST_PASSWORD
"""

import argparse
import json
import os
import sys
import time
from pathlib import Path

import requests

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------
DEFAULT_HOST = os.environ.get("TEST_HOST", "http://localhost:8888")
DEFAULT_EMAIL = os.environ.get("TEST_EMAIL", "admin@hartomat.com")
DEFAULT_PASSWORD = os.environ.get("TEST_PASSWORD", "Admin1234!")

SAMPLE_STEP = Path(__file__).parent.parent / "step-sample-file" / "81113-l_cut.stp"

RENDER_TIMEOUT_SECONDS = 300  # 5 minutes per still render
ANIMATION_RENDER_TIMEOUT_SECONDS = 3600  # 60 minutes per animation render
POLL_INTERVAL_SECONDS = 5
CAD_PROCESSING_TIMEOUT = 120  # 2 minutes for STEP processing
COMPARISON_TIMEOUT_SECONDS = 60
WORKFLOW_RUN_TIMEOUT_SECONDS = 300
ANIMATION_WORKFLOW_RUN_TIMEOUT_SECONDS = 3600
WORKFLOW_COMPARISON_TIMEOUT_SECONDS = 240
ANIMATION_WORKFLOW_COMPARISON_TIMEOUT_SECONDS = 1200
TRANSIENT_HTTP_RETRY_ATTEMPTS = 5
TRANSIENT_HTTP_RETRY_DELAY_SECONDS = 1.5

GREEN = "\033[92m"
RED = "\033[91m"
YELLOW = "\033[93m"
BLUE = "\033[94m"
RESET = "\033[0m"

passed = []
failed = []
warnings = []

# Keep the live rollout harness aligned with backend rollout evaluation. The
# epsilon is intentionally tiny and only absorbs proven 1-LSB render drift.
ROLLOUT_PASS_MAX_MEAN_PIXEL_DELTA = 1e-6
ROLLOUT_WARN_MAX_MEAN_PIXEL_DELTA = 0.02


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def ok(msg: str):
    print(f" {GREEN}✓{RESET} {msg}")
    passed.append(msg)


def fail(msg: str):
    print(f" {RED}✗{RESET} {msg}")
    failed.append(msg)


def warn(msg: str):
    print(f" {YELLOW}⚠{RESET} {msg}")
    warnings.append(msg)


def info(msg: str):
    print(f" {BLUE}→{RESET} {msg}")


def section(title: str):
    print(f"\n{BLUE}{'='*60}{RESET}")
    print(f"{BLUE} {title}{RESET}")
    print(f"{BLUE}{'='*60}{RESET}")


def evaluate_rollout_gate_from_comparison(comparison: dict) -> dict:
    reasons: list[str] = []
    mean_pixel_delta = comparison.get("mean_pixel_delta")
    exact_match = comparison.get("exact_match")
    dimensions_match = comparison.get("dimensions_match")
    status = comparison.get("status")
    authoritative_exists = bool(comparison.get("authoritative_output", {}).get("exists"))
    observer_exists = bool(comparison.get("observer_output", {}).get("exists"))

    if not authoritative_exists:
        verdict = "fail"
        reasons.append("Authoritative legacy output is missing.")
    elif not observer_exists:
        verdict = "fail"
        reasons.append("Observer workflow output is missing.")
    elif exact_match:
        verdict = "pass"
        reasons.append("Observer output matches the authoritative legacy output byte-for-byte.")
    elif dimensions_match is False:
        verdict = "fail"
        reasons.append("Observer output dimensions differ from the authoritative legacy output.")
    elif mean_pixel_delta is None:
        verdict = "fail"
        reasons.append(f"Workflow comparison did not produce a pixel delta (status={status}).")
    elif mean_pixel_delta <= ROLLOUT_PASS_MAX_MEAN_PIXEL_DELTA:
        verdict = "pass"
        reasons.append("Observer output is visually identical within the pass threshold.")
    elif mean_pixel_delta <= ROLLOUT_WARN_MAX_MEAN_PIXEL_DELTA:
        verdict = "warn"
        reasons.append("Observer output differs slightly but remains within the warn threshold.")
    else:
        verdict = "fail"
        reasons.append("Observer output exceeds the rollout parity threshold.")

    if mean_pixel_delta is not None and not exact_match:
        reasons.append(
            f"Mean pixel delta {mean_pixel_delta:.6f}; "
            f"pass<={ROLLOUT_PASS_MAX_MEAN_PIXEL_DELTA:.6f}, "
            f"warn<={ROLLOUT_WARN_MAX_MEAN_PIXEL_DELTA:.6f}."
        )
    return {
        "verdict": verdict,
        "ready": verdict == "pass",
        "reasons": reasons,
    }
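
# Illustrative only: a hypothetical comparison payload (field names taken from the
# checks above, values invented) showing how the banding resolves. A small non-zero
# drift below the warn threshold yields a "warn" verdict with ready=False.
#
#   _example_comparison = {
#       "status": "compared",                     # assumed status value for illustration
#       "exact_match": False,
#       "dimensions_match": True,
#       "mean_pixel_delta": 0.01,                 # > 1e-6 pass band, <= 0.02 warn band
#       "authoritative_output": {"exists": True},
#       "observer_output": {"exists": True},
#   }
#   evaluate_rollout_gate_from_comparison(_example_comparison)
#   # -> {"verdict": "warn", "ready": False, "reasons": [...]}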
def build_output_type_workflow_link_payload(
    *,
    workflow_definition_id: str,
    execution_mode: str,
) -> dict:
    payload = {
        "workflow_definition_id": workflow_definition_id,
        "is_active": True,
    }
    if execution_mode == "graph":
        payload["workflow_rollout_mode"] = "graph"
    elif execution_mode == "shadow":
        payload["workflow_rollout_mode"] = "shadow"
    return payload


class APIClient:
    def __init__(self, host: str, email: str, password: str):
        self.host = host.rstrip("/")
        self.session = requests.Session()
        self.token: str | None = None
        self._login(email, password)

    def _login(self, email: str, password: str):
        resp = self._request_url(
            "post",
            f"{self.host}/api/auth/login",
            json={"email": email, "password": password},
        )
        resp.raise_for_status()
        data = resp.json()
        self.token = data["access_token"]
        self.session.headers["Authorization"] = f"Bearer {self.token}"

    def _request_url(self, method: str, url: str, **kwargs) -> requests.Response:
        kwargs.setdefault("timeout", 30)
        attempt = 0
        transient_errors = (
            requests.exceptions.ConnectionError,
            requests.exceptions.ChunkedEncodingError,
            requests.exceptions.ReadTimeout,
        )
        while True:
            try:
                response = self.session.request(method, url, **kwargs)
            except transient_errors as exc:
                attempt += 1
                if attempt >= TRANSIENT_HTTP_RETRY_ATTEMPTS:
                    raise
                info(
                    f"Transient {method.upper()} {url} failed ({exc.__class__.__name__}); "
                    f"retry {attempt}/{TRANSIENT_HTTP_RETRY_ATTEMPTS - 1}"
                )
                time.sleep(TRANSIENT_HTTP_RETRY_DELAY_SECONDS * attempt)
                continue
            if response.status_code in {502, 503, 504} and attempt < TRANSIENT_HTTP_RETRY_ATTEMPTS - 1:
                attempt += 1
                info(
                    f"Transient {method.upper()} {url} returned {response.status_code}; "
                    f"retry {attempt}/{TRANSIENT_HTTP_RETRY_ATTEMPTS - 1}"
                )
                time.sleep(TRANSIENT_HTTP_RETRY_DELAY_SECONDS * attempt)
                continue
            return response

    def get(self, path: str, **kwargs) -> requests.Response:
        return self._request_url("get", f"{self.host}/api{path}", **kwargs)

    def post(self, path: str, **kwargs) -> requests.Response:
        return self._request_url("post", f"{self.host}/api{path}", **kwargs)

    def put(self, path: str, **kwargs) -> requests.Response:
        return self._request_url("put", f"{self.host}/api{path}", **kwargs)

    def patch(self, path: str, **kwargs) -> requests.Response:
        return self._request_url("patch", f"{self.host}/api{path}", **kwargs)

    def delete(self, path: str, **kwargs) -> requests.Response:
        return self._request_url("delete", f"{self.host}/api{path}", **kwargs)


def _normalize_render_params(params: dict | None = None) -> dict:
    normalized = dict(params or {})
    resolution = normalized.pop("resolution", None)
    if isinstance(resolution, (list, tuple)) and len(resolution) == 2:
        normalized.setdefault("width", int(resolution[0]))
        normalized.setdefault("height", int(resolution[1]))
    if "engine" in normalized and "render_engine" not in normalized:
        normalized["render_engine"] = normalized.pop("engine")
    return normalized


def build_graph_still_config(
    *,
    execution_mode: str = "graph",
    render_params: dict | None = None,
    use_custom_render_settings: bool = False,
) -> dict:
    render_params = _normalize_render_params(render_params)
    render_params["use_custom_render_settings"] = bool(use_custom_render_settings)
    return {
        "version": 1,
        "ui": {
            "preset": "still_graph",
            "execution_mode": execution_mode,
            "family": "order_line",
        },
        "nodes": [
            {"id": "setup", "step": "order_line_setup", "params": {},
             "ui": {"label": "Order Line Setup", "position": {"x": 0, "y": 160}}},
            {"id": "template", "step": "resolve_template", "params": {},
             "ui": {"label": "Resolve Template", "position": {"x": 220, "y": 160}}},
"resolve_template", "params": {}, "ui": {"label": "Resolve Template", "position": {"x": 220, "y": 160}}, }, { "id": "populate_materials", "step": "auto_populate_materials", "params": {}, "ui": {"label": "Auto Populate Materials", "position": {"x": 220, "y": 320}}, }, { "id": "bbox", "step": "glb_bbox", "params": {}, "ui": {"label": "Compute Bounding Box", "position": {"x": 220, "y": 40}}, }, { "id": "resolve_materials", "step": "material_map_resolve", "params": {}, "ui": {"label": "Resolve Material Map", "position": {"x": 440, "y": 200}}, }, { "id": "render", "step": "blender_still", "params": render_params, "ui": {"type": "renderNode", "label": "Still Render", "position": {"x": 680, "y": 160}}, }, { "id": "output", "step": "output_save", "params": {}, "ui": {"type": "outputNode", "label": "Save Output", "position": {"x": 920, "y": 120}}, }, { "id": "notify", "step": "notify", "params": {}, "ui": {"type": "outputNode", "label": "Notify Result", "position": {"x": 920, "y": 220}}, }, ], "edges": [ {"from": "setup", "to": "template"}, {"from": "setup", "to": "populate_materials"}, {"from": "setup", "to": "bbox"}, {"from": "template", "to": "resolve_materials"}, {"from": "populate_materials", "to": "resolve_materials"}, {"from": "resolve_materials", "to": "render"}, {"from": "bbox", "to": "render"}, {"from": "template", "to": "render"}, {"from": "render", "to": "output"}, {"from": "render", "to": "notify"}, ], } def build_graph_turntable_config(*, execution_mode: str = "graph") -> dict: return { "version": 1, "ui": {"preset": "turntable", "execution_mode": execution_mode}, "nodes": [ { "id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Order Line Setup", "position": {"x": 0, "y": 100}}, }, { "id": "template", "step": "resolve_template", "params": {}, "ui": {"label": "Resolve Template", "position": {"x": 220, "y": 100}}, }, { "id": "populate_materials", "step": "auto_populate_materials", "params": {}, "ui": {"label": "Auto Populate Materials", "position": {"x": 220, "y": 260}}, }, { "id": "bbox", "step": "glb_bbox", "params": {}, "ui": {"label": "Compute Bounding Box", "position": {"x": 220, "y": -20}}, }, { "id": "resolve_materials", "step": "material_map_resolve", "params": {}, "ui": {"label": "Resolve Material Map", "position": {"x": 440, "y": 160}}, }, { "id": "turntable", "step": "blender_turntable", "params": { "width": 768, "height": 768, "render_engine": "cycles", "samples": 16, "fps": 12, "duration_s": 2, }, "ui": {"type": "renderFramesNode", "label": "Turntable Render", "position": {"x": 440, "y": 100}}, }, { "id": "output", "step": "output_save", "params": {}, "ui": {"type": "outputNode", "label": "Save Output", "position": {"x": 660, "y": 100}}, }, ], "edges": [ {"from": "setup", "to": "template"}, {"from": "setup", "to": "populate_materials"}, {"from": "setup", "to": "bbox"}, {"from": "template", "to": "resolve_materials"}, {"from": "populate_materials", "to": "resolve_materials"}, {"from": "resolve_materials", "to": "turntable"}, {"from": "bbox", "to": "turntable"}, {"from": "template", "to": "turntable"}, {"from": "turntable", "to": "output"}, ], } def build_graph_blend_export_config(*, execution_mode: str = "graph") -> dict: return { "version": 1, "ui": {"preset": "custom", "execution_mode": execution_mode}, "nodes": [ { "id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Order Line Setup", "position": {"x": 0, "y": 100}}, }, { "id": "template", "step": "resolve_template", "params": {}, "ui": {"label": "Resolve Template", "position": 
{"x": 220, "y": 100}}, }, { "id": "blend", "step": "export_blend", "params": {}, "ui": {"label": "Export Blend", "position": {"x": 440, "y": 100}}, }, ], "edges": [ {"from": "setup", "to": "template"}, {"from": "template", "to": "blend"}, ], } def get_workflows(client: APIClient) -> list[dict]: resp = client.get("/workflows") if resp.status_code != 200: return [] data = resp.json() return data if isinstance(data, list) else [] def get_render_templates(client: APIClient) -> list[dict]: resp = client.get("/render-templates") if resp.status_code != 200: return [] data = resp.json() return data if isinstance(data, list) else [] def find_named(items: list[dict], name: str) -> dict | None: return next((item for item in items if item.get("name") == name), None) def render_template_candidates_for_output_type( render_templates: list[dict], output_type_id: str, *, active_only: bool = True, ) -> list[dict]: normalized_output_type_id = str(output_type_id) matches: list[dict] = [] for template in render_templates: if active_only and not template.get("is_active", True): continue linked_output_type_ids = [ str(candidate_id) for candidate_id in (template.get("output_type_ids") or []) if candidate_id is not None ] fallback_output_type_id = template.get("output_type_id") if fallback_output_type_id and str(fallback_output_type_id) not in linked_output_type_ids: linked_output_type_ids.append(str(fallback_output_type_id)) if normalized_output_type_id in linked_output_type_ids: matches.append(template) return matches def choose_template_backed_output_type( output_types: list[dict], render_templates: list[dict], *, preferred_name: str | None = None, ) -> tuple[dict, list[dict]]: if preferred_name: output_type = find_named(output_types, preferred_name) if output_type is None: raise RuntimeError(f"Template parity output type not found: {preferred_name}") templates = render_template_candidates_for_output_type(render_templates, output_type["id"]) if not templates: raise RuntimeError( f"Output type '{preferred_name}' has no active render template linked in /admin" ) return output_type, templates for output_type in output_types: if output_type.get("renderer") != "blender": continue if output_type.get("artifact_kind") != "still_image": continue if output_type.get("is_animation"): continue templates = render_template_candidates_for_output_type(render_templates, output_type["id"]) if templates: return output_type, templates raise RuntimeError("No active template-backed still-image output type was found") def build_output_type_workflow_snapshot(output_type: dict) -> dict: return { "workflow_definition_id": output_type.get("workflow_definition_id"), "workflow_rollout_mode": output_type.get("workflow_rollout_mode") or "legacy_only", "is_active": bool(output_type.get("is_active", True)), } def smoke_output_type_name(execution_mode: str) -> str: return f"[Workflow Smoke] Still {execution_mode.title()}" def smoke_workflow_name(execution_mode: str) -> str: return f"[Workflow Smoke] Canonical Still {execution_mode.title()}" def build_workflow_golden_cases() -> list[dict]: still_invocation = { "width": 1024, "height": 1024, "engine": "cycles", "samples": 64, } return [ { "key": "still_legacy", "label": "Canonical Still Legacy", "execution_mode": "legacy", "output_type_name": "[Workflow Golden] Canonical Still Legacy", "workflow_name": None, "description": "Legacy-authoritative still render golden case", "renderer": "blender", "output_format": "png", "is_animation": False, "transparent_bg": False, "workflow_family": "order_line", 
"artifact_kind": "still_image", "invocation_overrides": still_invocation, "workflow_config": None, "expected_asset_type": "still", "expected_media_count": 1, "expected_publication_mode": None, "expected_render_node_id": None, "expected_render_predicted_asset_type": None, }, { "key": "still_graph", "label": "Canonical Still Graph", "execution_mode": "graph", "output_type_name": "[Workflow Golden] Canonical Still Graph", "workflow_name": "[Workflow Golden] Canonical Still Graph", "description": "Graph-authoritative still render golden case", "renderer": "blender", "output_format": "png", "is_animation": False, "transparent_bg": False, "workflow_family": "order_line", "artifact_kind": "still_image", "invocation_overrides": still_invocation, "workflow_config": build_graph_still_config( execution_mode="graph", render_params=still_invocation, ), "expected_asset_type": "still", "expected_media_count": 1, "expected_publication_mode": "graph_authoritative", "expected_render_node_id": "render", "expected_render_predicted_asset_type": "still", }, { "key": "still_shadow", "label": "Canonical Still Shadow", "execution_mode": "shadow", "output_type_name": "[Workflow Golden] Canonical Still Shadow", "workflow_name": "[Workflow Golden] Canonical Still Shadow", "description": "Shadow parity still render golden case", "renderer": "blender", "output_format": "png", "is_animation": False, "transparent_bg": False, "workflow_family": "order_line", "artifact_kind": "still_image", "invocation_overrides": still_invocation, "workflow_config": build_graph_still_config( execution_mode="shadow", render_params=still_invocation, ), "expected_asset_type": "still", "expected_media_count": 1, "expected_publication_mode": "shadow_observer_only", "expected_render_node_id": "render", "expected_render_predicted_asset_type": "still", }, { "key": "turntable_graph", "label": "Turntable Graph", "execution_mode": "graph", "output_type_name": "[Workflow Golden] Turntable Graph", "workflow_name": "[Workflow Golden] Turntable Graph", "description": "Graph-authoritative turntable render golden case", "renderer": "blender", "output_format": "mp4", "is_animation": True, "transparent_bg": False, "workflow_family": "order_line", "artifact_kind": "turntable_video", "invocation_overrides": { "width": 768, "height": 768, "samples": 16, "fps": 12, }, "workflow_config": build_graph_turntable_config(execution_mode="graph"), "expected_asset_type": "turntable", "expected_media_count": 1, "expected_publication_mode": "graph_authoritative", "expected_render_node_id": "turntable", "expected_render_predicted_asset_type": "turntable", }, { "key": "blend_graph", "label": "Blend Export Graph", "execution_mode": "graph", "output_type_name": "[Workflow Golden] Blend Export Graph", "workflow_name": "[Workflow Golden] Blend Export Graph", "description": "Graph-authoritative blend export golden case", "renderer": "blender", "output_format": "blend", "is_animation": False, "transparent_bg": False, "workflow_family": "order_line", "artifact_kind": "blend_asset", "invocation_overrides": {}, "workflow_config": build_graph_blend_export_config(execution_mode="graph"), "expected_asset_type": "blend_production", "expected_media_count": 1, "expected_publication_mode": None, "expected_render_node_id": "blend", "expected_render_predicted_asset_type": "blend_production", }, ] def ensure_workflow_still_smoke_resources( client: APIClient, *, execution_mode: str, ) -> dict: output_type_name = smoke_output_type_name(execution_mode) workflow_name = 
def ensure_workflow_still_smoke_resources(
    client: APIClient,
    *,
    execution_mode: str,
) -> dict:
    output_type_name = smoke_output_type_name(execution_mode)
    workflow_name = smoke_workflow_name(execution_mode)
    output_types = get_output_types(client, include_inactive=True)
    output_type = find_named(output_types, output_type_name)
    invocation_overrides = {
        "width": 1024,
        "height": 1024,
        "engine": "cycles",
        "samples": 64,
    }
    output_type_payload = {
        "name": output_type_name,
        "description": f"Canonical still workflow smoke profile ({execution_mode})",
        "renderer": "blender",
        "render_settings": invocation_overrides,
        "output_format": "png",
        "sort_order": 0,
        "is_active": True,
        "compatible_categories": [],
        "render_backend": "celery",
        "is_animation": False,
        "transparent_bg": False,
        "workflow_family": "order_line",
        "artifact_kind": "still_image",
        "invocation_overrides": invocation_overrides,
        "workflow_definition_id": None,
    }
    if output_type is None:
        resp = client.post("/output-types", json=output_type_payload)
        if resp.status_code not in (200, 201):
            raise RuntimeError(
                f"Workflow smoke output type create failed: {resp.status_code} {resp.text[:400]}"
            )
        output_type = resp.json()
        ok(f"Provisioned smoke output type: {output_type_name}")
    else:
        resp = client.patch(f"/output-types/{output_type['id']}", json=output_type_payload)
        if resp.status_code != 200:
            raise RuntimeError(
                f"Workflow smoke output type update failed: {resp.status_code} {resp.text[:400]}"
            )
        output_type = resp.json()
        info(f"Reusing smoke output type: {output_type_name}")

    workflow = None
    if execution_mode != "legacy":
        workflows = get_workflows(client)
        workflow = find_named(workflows, workflow_name)
        workflow_payload = {
            "name": workflow_name,
            "output_type_id": output_type["id"],
            "config": build_graph_still_config(
                execution_mode=execution_mode,
                render_params=invocation_overrides,
            ),
            "is_active": True,
        }
        if workflow is None:
            resp = client.post("/workflows", json=workflow_payload)
            if resp.status_code not in (200, 201):
                raise RuntimeError(
                    f"Workflow smoke workflow create failed: {resp.status_code} {resp.text[:400]}"
                )
            workflow = resp.json()
            ok(f"Provisioned smoke workflow: {workflow_name}")
        else:
            resp = client.put(
                f"/workflows/{workflow['id']}",
                json={
                    "name": workflow_payload["name"],
                    "config": workflow_payload["config"],
                    "is_active": workflow_payload["is_active"],
                },
            )
            if resp.status_code != 200:
                raise RuntimeError(
                    f"Workflow smoke workflow update failed: {resp.status_code} {resp.text[:400]}"
                )
            workflow = resp.json()
            info(f"Reusing smoke workflow: {workflow_name}")
        resp = client.patch(
            f"/output-types/{output_type['id']}",
            json=build_output_type_workflow_link_payload(
                workflow_definition_id=workflow["id"],
                execution_mode=execution_mode,
            ),
        )
        if resp.status_code != 200:
            raise RuntimeError(
                f"Workflow smoke output type link failed: {resp.status_code} {resp.text[:400]}"
            )
        output_type = resp.json()
    else:
        workflow = None

    return {
        "output_type": output_type,
        "workflow": workflow,
        "execution_mode": execution_mode,
    }


def ensure_workflow_golden_resources(
    client: APIClient,
    *,
    case: dict,
) -> dict:
    output_types = get_output_types(client, include_inactive=True)
    output_type = find_named(output_types, case["output_type_name"])
    output_type_payload = {
        "name": case["output_type_name"],
        "description": case["description"],
        "renderer": case["renderer"],
        "render_settings": dict(case["invocation_overrides"]),
        "output_format": case["output_format"],
        "sort_order": 0,
        "is_active": True,
        "compatible_categories": [],
        "render_backend": "celery",
        "is_animation": case["is_animation"],
        "transparent_bg": case["transparent_bg"],
        "workflow_family": case["workflow_family"],
        "artifact_kind": case["artifact_kind"],
        "invocation_overrides": dict(case["invocation_overrides"]),
        "workflow_definition_id": None,
    }
    if output_type is None:
        resp = client.post("/output-types", json=output_type_payload)
        if resp.status_code not in (200, 201):
            raise RuntimeError(
                f"Golden output type create failed ({case['key']}): {resp.status_code} {resp.text[:400]}"
            )
        output_type = resp.json()
        ok(f"Provisioned golden output type: {case['output_type_name']}")
    else:
        resp = client.patch(f"/output-types/{output_type['id']}", json=output_type_payload)
        if resp.status_code != 200:
            raise RuntimeError(
                f"Golden output type update failed ({case['key']}): {resp.status_code} {resp.text[:400]}"
            )
        output_type = resp.json()
        info(f"Reusing golden output type: {case['output_type_name']}")

    workflow = None
    workflow_name = case.get("workflow_name")
    workflow_config = case.get("workflow_config")
    if workflow_name and workflow_config:
        workflows = get_workflows(client)
        workflow = find_named(workflows, workflow_name)
        workflow_payload = {
            "name": workflow_name,
            "output_type_id": output_type["id"],
            "config": workflow_config,
            "is_active": True,
        }
        if workflow is None:
            resp = client.post("/workflows", json=workflow_payload)
            if resp.status_code not in (200, 201):
                raise RuntimeError(
                    f"Golden workflow create failed ({case['key']}): {resp.status_code} {resp.text[:400]}"
                )
            workflow = resp.json()
            ok(f"Provisioned golden workflow: {workflow_name}")
        else:
            resp = client.put(
                f"/workflows/{workflow['id']}",
                json={
                    "name": workflow_payload["name"],
                    "config": workflow_payload["config"],
                    "is_active": workflow_payload["is_active"],
                },
            )
            if resp.status_code != 200:
                raise RuntimeError(
                    f"Golden workflow update failed ({case['key']}): {resp.status_code} {resp.text[:400]}"
                )
            workflow = resp.json()
            info(f"Reusing golden workflow: {workflow_name}")
        resp = client.patch(
            f"/output-types/{output_type['id']}",
            json=build_output_type_workflow_link_payload(
                workflow_definition_id=workflow["id"],
                execution_mode=case["execution_mode"],
            ),
        )
        if resp.status_code != 200:
            raise RuntimeError(
                f"Golden output type link failed ({case['key']}): {resp.status_code} {resp.text[:400]}"
            )
        output_type = resp.json()

    return {
        "output_type": output_type,
        "workflow": workflow,
        "execution_mode": case["execution_mode"],
    }


def ensure_template_parity_shadow_resources(
    client: APIClient,
    *,
    output_type: dict,
) -> dict:
    workflow_name = f"[Template Parity] {output_type['name']}"
    workflows = get_workflows(client)
    workflow = find_named(workflows, workflow_name)
    workflow_payload = {
        "name": workflow_name,
        "output_type_id": output_type["id"],
        "config": build_graph_still_config(
            execution_mode="shadow",
            use_custom_render_settings=False,
        ),
        "is_active": True,
    }
    if workflow is None:
        resp = client.post("/workflows", json=workflow_payload)
        if resp.status_code not in (200, 201):
            raise RuntimeError(
                f"Template parity workflow create failed: {resp.status_code} {resp.text[:400]}"
            )
        workflow = resp.json()
        ok(f"Provisioned template parity workflow: {workflow_name}")
    else:
        resp = client.put(
            f"/workflows/{workflow['id']}",
            json={
                "name": workflow_payload["name"],
                "config": workflow_payload["config"],
                "is_active": workflow_payload["is_active"],
            },
        )
        if resp.status_code != 200:
            raise RuntimeError(
                f"Template parity workflow update failed: {resp.status_code} {resp.text[:400]}"
            )
        workflow = resp.json()
        info(f"Reusing template parity workflow: {workflow_name}")

    snapshot = build_output_type_workflow_snapshot(output_type)
    resp = client.patch(
        f"/output-types/{output_type['id']}",
        json=build_output_type_workflow_link_payload(
            workflow_definition_id=workflow["id"],
            execution_mode="shadow",
        ),
    )
    if resp.status_code != 200:
        raise RuntimeError(
            f"Template parity output type link failed: {resp.status_code} {resp.text[:400]}"
        )
    output_type = resp.json()
    return {
        "output_type": output_type,
        "workflow": workflow,
        "snapshot": snapshot,
    }


def restore_output_type_workflow_snapshot(
    client: APIClient,
    *,
    output_type_id: str,
    snapshot: dict,
) -> dict:
    restore_payload = {
        "workflow_definition_id": snapshot.get("workflow_definition_id"),
        "workflow_rollout_mode": snapshot.get("workflow_rollout_mode") or "legacy_only",
        "is_active": bool(snapshot.get("is_active", True)),
    }
    resp = client.patch(f"/output-types/{output_type_id}", json=restore_payload)
    if resp.status_code != 200:
        raise RuntimeError(
            f"Output type workflow restore failed: {resp.status_code} {resp.text[:400]}"
        )
    return resp.json()


# ---------------------------------------------------------------------------
# Test: Render health endpoint
# ---------------------------------------------------------------------------
def test_health(client: APIClient) -> bool:
    section("1. Render Health Check")
    resp = client.get("/worker/health/render")
    if resp.status_code != 200:
        fail(f"GET /worker/health/render → {resp.status_code}: {resp.text[:200]}")
        return False
    data = resp.json()
    info(f"Overall status: {data['status']}")
    info(f"Render worker connected: {data['render_worker_connected']}")
    info(f"Blender available: {data['blender_available']}")
    info(f"asset_pipeline queue depth: {data['thumbnail_queue_depth']}")
    if data.get("last_render_at"):
        info(f"Last render: {data['last_render_at']} ({'success' if data['last_render_success'] else 'FAILED'}, {data['last_render_age_minutes']}m ago)")
    if data["render_worker_connected"]:
        ok("Render worker connected")
    else:
        fail("Render worker NOT connected — renders will fail")
    if data["blender_available"]:
        ok("Blender renderer reachable (port 8100)")
    else:
        fail("Blender renderer NOT reachable — thumbnail/order renders will fail")
    if data["thumbnail_queue_ok"]:
        ok(f"asset_pipeline queue healthy (depth={data['thumbnail_queue_depth']})")
    else:
        warn(f"asset_pipeline queue DEEP ({data['thumbnail_queue_depth']} tasks) — renders may be slow")
    return data["status"] != "down"


# ---------------------------------------------------------------------------
# Test: STEP upload + CAD processing
# ---------------------------------------------------------------------------
def test_step_upload(client: APIClient, step_file: Path) -> str | None:
    """Upload STEP file, wait for completed processing. Returns cad_file_id or None."""
    section("2. STEP Upload + CAD Processing")
    if not step_file.exists():
        fail(f"Sample STEP file not found: {step_file}")
        return None
    info(f"Uploading {step_file.name} ({step_file.stat().st_size // 1024} KB)")
    with open(step_file, "rb") as f:
        resp = client.post(
            "/uploads/step",
            files={"file": (step_file.name, f, "application/octet-stream")},
        )
    if resp.status_code not in (200, 201):
        fail(f"STEP upload failed: {resp.status_code} {resp.text[:300]}")
        return None
    data = resp.json()
    cad_file_id = data["cad_file_id"]
    ok(f"STEP uploaded → cad_file_id={cad_file_id[:8]}... status={data.get('status')}")

    # Poll the existing CAD endpoints. There is no GET /api/cad/{id}; the most
    # reliable readiness signal is /objects returning 200 with processing_status.
    info(f"Waiting for CAD processing (timeout={CAD_PROCESSING_TIMEOUT}s)...")
    deadline = time.time() + CAD_PROCESSING_TIMEOUT
    last_status = None
    while time.time() < deadline:
        resp_objects = client.get(f"/cad/{cad_file_id}/objects")
        if resp_objects.status_code == 200:
            cad = resp_objects.json()
            status = cad.get("processing_status")
            if status != last_status:
                info(f" CAD status: {status}")
                last_status = status
            if status == "completed":
                ok("CAD processing completed (parsed objects available)")
                return cad_file_id
            if status == "failed":
                fail(f"CAD processing FAILED: {cad.get('error_message', 'unknown error')}")
                return None
        resp_thumb = client.get(f"/cad/{cad_file_id}/thumbnail")
        if resp_thumb.status_code == 200:
            if last_status != "completed":
                info(" CAD status: completed")
                last_status = "completed"
            ok("CAD processing completed (thumbnail available)")
            return cad_file_id
        time.sleep(POLL_INTERVAL_SECONDS)
    fail(f"CAD processing timed out after {CAD_PROCESSING_TIMEOUT}s (last status: {last_status})")
    return None


# ---------------------------------------------------------------------------
# Helpers: Product / Order / Workflow tracking
# ---------------------------------------------------------------------------
def get_or_create_test_product(client: APIClient, cad_file_id: str) -> str | None:
    product_id = None
    resp_products = client.get("/products/?limit=100")
    if resp_products.status_code == 200:
        products = resp_products.json()
        if isinstance(products, dict):
            products = products.get("items", [])
        for p in products:
            if str(p.get("cad_file_id")) == cad_file_id:
                product_id = str(p["id"])
                info(f"Using existing product: {p.get('name', p['id'])[:40]}")
                break
    if product_id:
        return product_id
    resp_create = client.post("/products/", json={
        "name": f"Test Product {cad_file_id[:8]}",
        "pim_id": f"TEST-{cad_file_id[:8]}",
        "is_active": True,
        "cad_file_id": cad_file_id,
    })
    if resp_create.status_code not in (200, 201):
        fail(f"Product creation failed: {resp_create.status_code} {resp_create.text[:200]}")
        return None
    product_id = resp_create.json()["id"]
    ok(f"Created test product: {product_id[:8]}...")
    return product_id


def create_test_order(
    client: APIClient,
    *,
    product_id: str,
    output_type_ids: list[str],
    test_label: str,
) -> dict | None:
    resp_order = client.post(
        "/orders",
        json={
            "notes": f"Render pipeline integration test: {test_label}",
            "items": [],
            "lines": [
                {"product_id": product_id, "output_type_id": ot_id}
                for ot_id in output_type_ids
            ],
        },
    )
    if resp_order.status_code not in (200, 201):
        fail(f"Order creation failed: {resp_order.status_code} {resp_order.text[:300]}")
        return None
    order = resp_order.json()
    order_id = order["id"]
    ok(f"Order created: {order.get('order_number')} (id={order_id[:8]}...)")
    return order


def wait_for_workflow_run(
    client: APIClient,
    *,
    workflow_id: str,
    line_id: str,
    timeout_seconds: int = WORKFLOW_RUN_TIMEOUT_SECONDS,
    terminal_only: bool = False,
) -> dict | None:
    deadline = time.time() + timeout_seconds
    terminal_statuses = {"completed", "failed", "cancelled"}
    while time.time() < deadline:
        resp = client.get(f"/workflows/{workflow_id}/runs")
        if resp.status_code == 200:
            for run in resp.json():
                if run.get("order_line_id") == line_id:
                    if not terminal_only or run.get("status") in terminal_statuses:
                        return run
        time.sleep(2)
    return None


def wait_for_workflow_comparison(
    client: APIClient,
    *,
    workflow_run_id: str,
    timeout_seconds: int = WORKFLOW_COMPARISON_TIMEOUT_SECONDS,
) -> dict | None:
    deadline = time.time() + timeout_seconds
    last_status = None
    while time.time() < deadline:
        resp = client.get(f"/workflows/runs/{workflow_run_id}/comparison")
        if resp.status_code != 200:
            time.sleep(2)
            continue
        comparison = resp.json()
        status = comparison.get("status")
        authoritative_exists = bool(comparison.get("authoritative_output", {}).get("exists"))
        observer_exists = bool(comparison.get("observer_output", {}).get("exists"))
        if status != last_status:
            info(
                " Comparison poll: "
                f"status={status} authoritative_exists={authoritative_exists} "
                f"observer_exists={observer_exists}"
            )
            last_status = status
        # Shadow observer artifacts can arrive shortly after the workflow run is visible.
        # Treat missing/processing observer states as transient until the timeout expires.
        if authoritative_exists and observer_exists and status not in {"missing_observer", "pending", "running"}:
            return comparison
        time.sleep(2)
    return None


def list_media_assets(
    client: APIClient,
    *,
    order_line_id: str | None = None,
    asset_type: str | None = None,
) -> list[dict]:
    params: dict[str, str] = {"limit": "50"}
    if order_line_id:
        params["order_line_id"] = order_line_id
    if asset_type:
        params["asset_type"] = asset_type
    resp = client.get("/media", params=params)
    if resp.status_code != 200:
        return []
    data = resp.json()
    return data if isinstance(data, list) else []


def wait_for_media_assets(
    client: APIClient,
    *,
    order_line_id: str,
    asset_type: str,
    timeout_seconds: int = 60,
    minimum_count: int = 1,
) -> list[dict]:
    deadline = time.time() + timeout_seconds
    while time.time() < deadline:
        assets = list_media_assets(
            client,
            order_line_id=order_line_id,
            asset_type=asset_type,
        )
        if len(assets) >= minimum_count:
            return assets
        time.sleep(2)
    return []


def _node_result_by_name(workflow_run: dict, node_name: str) -> dict | None:
    return next(
        (item for item in workflow_run.get("node_results", []) if item.get("node_name") == node_name),
        None,
    )
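
# Illustrative only: the workflow run payload consulted above is assumed to carry a
# "node_results" list keyed by "node_name". A hypothetical run snippet (not captured output):
#
#   _example_run = {"node_results": [{"node_name": "render", "status": "completed",
#                                     "output": {"predicted_asset_type": "still"}}]}
#   _node_result_by_name(_example_run, "render")["status"]  # -> "completed"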
# ---------------------------------------------------------------------------
# Test: Order creation + submit + dispatch + wait
# ---------------------------------------------------------------------------
def test_order_render(
    client: APIClient,
    cad_file_id: str,
    output_type_ids: list[str],
    test_label: str,
    *,
    use_graph_dispatch: bool = False,
) -> bool:
    """Create a minimal order, submit, dispatch renders, wait for completion."""
    section(f"3. Order Render — {test_label}")
    info(f"Output types: {len(output_type_ids)}")
    product_id = get_or_create_test_product(client, cad_file_id)
    if not product_id:
        return False
    order = create_test_order(
        client,
        product_id=product_id,
        output_type_ids=output_type_ids,
        test_label=test_label,
    )
    if order is None:
        return False
    return _submit_and_wait(
        client,
        order,
        output_type_ids,
        use_graph_dispatch=use_graph_dispatch,
    )


def _submit_and_wait(
    client: APIClient,
    order: dict,
    output_type_ids: list[str],
    *,
    use_graph_dispatch: bool = False,
    timeout_seconds: int | None = None,
) -> bool:
    order_id = order["id"]

    # Submit
    resp_sub = client.post(f"/orders/{order_id}/submit")
    if resp_sub.status_code not in (200, 201, 204):
        if resp_sub.status_code == 409:
            info("Order already submitted")
        else:
            fail(f"Order submit failed: {resp_sub.status_code} {resp_sub.text[:200]}")
            return False
    else:
        ok("Order submitted")

    dispatch_run_id = None
    if use_graph_dispatch:
        lines = order.get("lines", [])
        if len(lines) != 1:
            fail("Graph mode currently expects exactly one order line per test order")
            return False
        line_id = lines[0]["id"]
        resp_disp = client.post(
            "/workflows/dispatch",
            json={
                "context_id": line_id,
                "config": build_graph_still_config(),
            },
        )
        if resp_disp.status_code not in (200, 201):
            fail(f"Workflow draft dispatch failed: {resp_disp.status_code} {resp_disp.text[:300]}")
            return False
        dispatch_data = resp_disp.json()
        dispatch_run_id = dispatch_data["workflow_run"]["id"]
        ok(f"Graph workflow dispatched (run={dispatch_run_id[:8]}..., tasks={dispatch_data.get('dispatched', '?')})")
    else:
        resp_disp = client.post(f"/orders/{order_id}/dispatch-renders")
        if resp_disp.status_code not in (200, 201, 204):
            fail(f"Dispatch renders failed: {resp_disp.status_code} {resp_disp.text[:200]}")
            return False
        dispatch_data = resp_disp.json() if resp_disp.content else {}
        dispatched = dispatch_data.get("dispatched", "?")
        ok(f"Renders dispatched ({dispatched} lines)")

    # Poll for order completion
    effective_timeout_seconds = timeout_seconds or (RENDER_TIMEOUT_SECONDS * max(len(output_type_ids), 1))
    info(f"Waiting for renders to complete (timeout={effective_timeout_seconds}s)...")
    deadline = time.time() + effective_timeout_seconds
    last_summary = ""
    while time.time() < deadline:
        resp_ord = client.get(f"/orders/{order_id}")
        if resp_ord.status_code != 200:
            fail(f"Order poll failed: {resp_ord.status_code}")
            return False
        order = resp_ord.json()
        order_status = order.get("status")
        lines = order.get("lines", order.get("order_lines", []))
        statuses = [l.get("render_status") for l in lines]
        summary = f"order={order_status} lines={statuses}"
        if summary != last_summary:
            info(f" {summary}")
            last_summary = summary
        terminal_states = {"completed", "failed", "cancelled"}
        line_states = [state for state in statuses if state]
        if line_states and all(state in terminal_states for state in line_states):
            all_success = all(state == "completed" for state in line_states)
            if order_status == "completed":
                ok(f"Order completed — all {len(lines)} render(s) done")
            elif all_success:
                ok(
                    f"All {len(lines)} render line(s) completed "
                    f"(order status remains {order_status})"
                )
            else:
                fail(f"Order reached terminal line states with order={order_status}")
            for line in lines:
                rs = line.get("render_status")
                ot_name = line.get("output_type_name") or line.get("output_type", {}).get("name", "?")
                if rs == "completed":
                    ok(f" Line [{ot_name}]: completed")
                elif rs == "failed":
                    fail(f" Line [{ot_name}]: FAILED")
                else:
                    warn(f" Line [{ot_name}]: {rs}")
            if all_success and dispatch_run_id:
                resp_cmp = client.get(f"/workflows/runs/{dispatch_run_id}/comparison")
                if resp_cmp.status_code == 200:
                    comparison = resp_cmp.json()
                    rollout_gate = evaluate_rollout_gate_from_comparison(comparison)
                    verdict = rollout_gate["verdict"]
                    if verdict == "pass":
                        ok(" Rollout gate PASS — graph output is ready for workflow-first rollout")
                    elif verdict == "warn":
                        warn(" Rollout gate WARN — keep legacy authoritative and review drift")
                    else:
                        warn(" Rollout gate FAIL — keep legacy authoritative")
                    info(f" Comparison status: {comparison.get('status')}, verdict={verdict}")
                    for reason in rollout_gate["reasons"]:
                        info(f" {reason}")
                else:
                    warn(f" Comparison lookup failed: {resp_cmp.status_code}")
            return all_success
        if order_status == "failed":
            fail("Order FAILED — check render logs")
            return False
        time.sleep(POLL_INTERVAL_SECONDS)
    fail(f"Render timed out after {(time.time() - deadline + effective_timeout_seconds):.0f}s")
    return False


def test_workflow_still_smoke(
    client: APIClient,
    cad_file_id: str,
    *,
    execution_mode: str,
) -> bool:
    section(f"3. Workflow Still Smoke — {execution_mode}")
    smoke_resources = ensure_workflow_still_smoke_resources(
        client,
        execution_mode=execution_mode,
    )
    output_type = smoke_resources["output_type"]
    workflow = smoke_resources["workflow"]
    info(
        f"Smoke contract: output_type={output_type['name']} "
        f"workflow={workflow['name'] if workflow else 'legacy-only'}"
    )
    product_id = get_or_create_test_product(client, cad_file_id)
    if not product_id:
        return False
    order = create_test_order(
        client,
        product_id=product_id,
        output_type_ids=[output_type["id"]],
        test_label=f"Workflow Still Smoke [{execution_mode}]",
    )
    if order is None:
        return False
    lines = order.get("lines", [])
    if len(lines) != 1:
        fail("Workflow still smoke expects exactly one order line")
        return False
    line_id = lines[0]["id"]

    if workflow is not None:
        resp_preflight = client.get(
            f"/workflows/{workflow['id']}/preflight",
            params={"context_id": line_id},
        )
        if resp_preflight.status_code != 200:
            fail(f"Workflow preflight failed: {resp_preflight.status_code} {resp_preflight.text[:300]}")
            return False
        preflight = resp_preflight.json()
        info(
            "Preflight: "
            f"execution_mode={preflight.get('execution_mode')} "
            f"context={preflight.get('context_kind')} "
            f"allowed={preflight.get('graph_dispatch_allowed')}"
        )
        if not preflight.get("graph_dispatch_allowed"):
            fail(f"Workflow preflight blocked dispatch: {preflight.get('summary')}")
            for issue in preflight.get("issues", []):
                info(f" {issue.get('code')}: {issue.get('message')}")
            return False
        ok(f"Workflow preflight passed for {execution_mode} mode")

    success = _submit_and_wait(
        client,
        order,
        [output_type["id"]],
        use_graph_dispatch=False,
    )

    workflow_run = None
    if workflow is not None:
        workflow_run = wait_for_workflow_run(
            client,
            workflow_id=workflow["id"],
            line_id=line_id,
        )
        if workflow_run is None:
            warn("Workflow run could not be resolved after dispatch")
        else:
            ok(
                f"Workflow run tracked: mode={workflow_run.get('execution_mode')} "
                f"run={workflow_run.get('id')[:8]}..."
            )
    if success and execution_mode == "shadow" and workflow_run is not None:
        comparison = wait_for_workflow_comparison(
            client,
            workflow_run_id=workflow_run["id"],
        )
        if comparison is None:
            warn("Shadow comparison did not stabilize before timeout")
            return success
        rollout_gate = evaluate_rollout_gate_from_comparison(comparison)
        verdict = rollout_gate["verdict"]
        info(
            "Shadow comparison: "
            f"status={comparison.get('status')} "
            f"exact_match={comparison.get('exact_match')} "
            f"mean_pixel_delta={comparison.get('mean_pixel_delta')}"
        )
        if verdict == "pass":
            ok("Shadow rollout gate PASS — canonical still workflow is ready for workflow-first rollout")
        elif verdict == "warn":
            warn("Shadow rollout gate WARN — keep legacy authoritative and review drift")
        else:
            warn("Shadow rollout gate FAIL — keep legacy authoritative")
        for reason in rollout_gate["reasons"]:
            info(f" {reason}")
    return success


def _assert_workflow_run_contract(case: dict, workflow_run: dict) -> bool:
    success = True
    expected_mode = case["execution_mode"]
    if workflow_run.get("execution_mode") == expected_mode:
        ok(f"Workflow run execution mode matches: {expected_mode}")
    else:
        fail(
            f"Workflow run execution mode mismatch: expected {expected_mode}, "
            f"got {workflow_run.get('execution_mode')}"
        )
        success = False
    if workflow_run.get("status") == "completed":
        ok("Workflow run completed")
    else:
        fail(f"Workflow run did not complete cleanly: {workflow_run.get('status')}")
        success = False

    render_node_id = case.get("expected_render_node_id")
    if render_node_id:
        render_node = _node_result_by_name(workflow_run, render_node_id)
        if render_node is None:
            fail(f"Workflow run missing render node result: {render_node_id}")
            success = False
        else:
            if render_node.get("status") == "completed":
                ok(f"Render node completed: {render_node_id}")
            else:
                fail(f"Render node not completed: {render_node_id} status={render_node.get('status')}")
                success = False
            expected_predicted_asset_type = case.get("expected_render_predicted_asset_type")
            predicted_asset_type = (render_node.get("output") or {}).get("predicted_asset_type")
            if expected_predicted_asset_type:
                if predicted_asset_type == expected_predicted_asset_type:
                    ok(
                        f"Render node predicted asset type matches: "
                        f"{render_node_id} -> {expected_predicted_asset_type}"
                    )
                else:
                    fail(
                        f"Render node predicted asset type mismatch for {render_node_id}: "
                        f"expected {expected_predicted_asset_type}, got {predicted_asset_type}"
                    )
                    success = False

    expected_publication_mode = case.get("expected_publication_mode")
    if expected_publication_mode:
        output_node = _node_result_by_name(workflow_run, "output")
        if output_node is None:
            fail("Workflow run missing output node result")
            success = False
        else:
            publication_mode = (output_node.get("output") or {}).get("publication_mode")
            if output_node.get("status") == "completed":
                ok("Output node completed")
            else:
                fail(f"Output node not completed: {output_node.get('status')}")
                success = False
            if publication_mode == expected_publication_mode:
                ok(f"Output publication mode matches: {expected_publication_mode}")
            else:
                fail(
                    f"Output publication mode mismatch: expected {expected_publication_mode}, "
                    f"got {publication_mode}"
                )
                success = False
    return success
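
# Illustrative only: a sketch of the minimal run shape the contract above accepts for the
# "still_graph" golden case (hypothetical values; real runs carry many more fields).
#
#   _example_graph_run = {
#       "execution_mode": "graph",
#       "status": "completed",
#       "node_results": [
#           {"node_name": "render", "status": "completed",
#            "output": {"predicted_asset_type": "still"}},
#           {"node_name": "output", "status": "completed",
#            "output": {"publication_mode": "graph_authoritative"}},
#       ],
#   }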
def _assert_media_asset_contract(
    client: APIClient,
    *,
    case: dict,
    line_id: str,
    workflow_run: dict | None,
) -> bool:
    success = True
    assets = wait_for_media_assets(
        client,
        order_line_id=line_id,
        asset_type=case["expected_asset_type"],
        timeout_seconds=60,
        minimum_count=case.get("expected_media_count", 1),
    )
    if not assets:
        fail(
            f"No media assets found for line {line_id[:8]}... "
            f"asset_type={case['expected_asset_type']}"
        )
        return False
    info(
        f"Resolved {len(assets)} media asset(s) for order line {line_id[:8]}... "
        f"asset_type={case['expected_asset_type']}"
    )
    expected_media_count = case.get("expected_media_count")
    if expected_media_count is not None:
        if len(assets) == expected_media_count:
            ok(
                f"Media asset count matches for {case['key']}: "
                f"{expected_media_count} {case['expected_asset_type']} asset(s)"
            )
        else:
            fail(
                f"Media asset count mismatch for {case['key']}: "
                f"expected {expected_media_count}, got {len(assets)}"
            )
            success = False

    newest_asset = assets[0]
    workflow_run_id = newest_asset.get("workflow_run_id")
    if workflow_run is not None and case["execution_mode"] == "graph":
        if workflow_run_id == workflow_run.get("id"):
            ok("Graph-authoritative media asset is linked to the workflow run")
        else:
            fail(
                "Graph-authoritative media asset workflow_run_id mismatch: "
                f"expected {workflow_run.get('id')}, got {workflow_run_id}"
            )
            success = False
    elif case["execution_mode"] == "legacy":
        if workflow_run_id is None:
            ok("Legacy media asset remains unlinked to any workflow run")
        else:
            fail(f"Legacy media asset unexpectedly linked to workflow run {workflow_run_id}")
            success = False
    elif case["execution_mode"] == "shadow":
        if workflow_run_id is None:
            ok("Shadow still keeps the authoritative media asset on the legacy path")
        else:
            warn(f"Shadow still media asset is linked to workflow run {workflow_run_id}")
    return success


def test_workflow_golden_case(
    client: APIClient,
    cad_file_id: str,
    *,
    case: dict,
) -> bool:
    section(f"3. Workflow Golden Case — {case['label']}")
    resources = ensure_workflow_golden_resources(client, case=case)
    output_type = resources["output_type"]
    workflow = resources["workflow"]
    info(
        f"Golden contract: output_type={output_type['name']} "
        f"workflow={workflow['name'] if workflow else 'legacy-only'} "
        f"artifact_kind={output_type.get('artifact_kind')}"
    )
    product_id = get_or_create_test_product(client, cad_file_id)
    if not product_id:
        return False
    order = create_test_order(
        client,
        product_id=product_id,
        output_type_ids=[output_type["id"]],
        test_label=f"Workflow Golden [{case['key']}]",
    )
    if order is None:
        return False
    lines = order.get("lines", [])
    if len(lines) != 1:
        fail("Workflow golden case expects exactly one order line")
        return False
    line_id = lines[0]["id"]

    if workflow is not None:
        resp_preflight = client.get(
            f"/workflows/{workflow['id']}/preflight",
            params={"context_id": line_id},
        )
        if resp_preflight.status_code != 200:
            fail(f"Workflow preflight failed: {resp_preflight.status_code} {resp_preflight.text[:300]}")
            return False
        preflight = resp_preflight.json()
        info(
            "Preflight: "
            f"execution_mode={preflight.get('execution_mode')} "
            f"context={preflight.get('context_kind')} "
            f"allowed={preflight.get('graph_dispatch_allowed')}"
        )
        if not preflight.get("graph_dispatch_allowed"):
            fail(f"Workflow preflight blocked dispatch: {preflight.get('summary')}")
            for issue in preflight.get("issues", []):
                info(f" {issue.get('code')}: {issue.get('message')}")
            return False
        ok(f"Workflow preflight passed for golden case {case['key']}")

    success = _submit_and_wait(
        client,
        order,
        [output_type["id"]],
        use_graph_dispatch=False,
    )
    if not success:
        return False

    workflow_run = None
    if workflow is not None:
        workflow_run = wait_for_workflow_run(
            client,
            workflow_id=workflow["id"],
            line_id=line_id,
            timeout_seconds=120,
            terminal_only=True,
        )
        if workflow_run is None:
            fail("Workflow run could not be resolved to a terminal state after dispatch")
            return False
        ok(
            f"Workflow run tracked: mode={workflow_run.get('execution_mode')} "
            f"run={workflow_run.get('id')[:8]}..."
        )
f"Workflow run tracked: mode={workflow_run.get('execution_mode')} " f"run={workflow_run.get('id')[:8]}..." ) workflow_contract_ok = True if workflow_run is not None: workflow_contract_ok = _assert_workflow_run_contract(case, workflow_run) media_contract_ok = _assert_media_asset_contract( client, case=case, line_id=line_id, workflow_run=workflow_run, ) comparison_ok = True if workflow_run is not None and case["execution_mode"] == "shadow": comparison = wait_for_workflow_comparison( client, workflow_run_id=workflow_run["id"], ) if comparison is None: fail("Shadow comparison did not stabilize before timeout") comparison_ok = False else: rollout_gate = evaluate_rollout_gate_from_comparison(comparison) verdict = rollout_gate["verdict"] info( "Shadow comparison: " f"status={comparison.get('status')} " f"exact_match={comparison.get('exact_match')} " f"mean_pixel_delta={comparison.get('mean_pixel_delta')}" ) if verdict == "pass": ok("Shadow golden case parity PASS") elif verdict == "warn": warn("Shadow golden case parity WARN — keep legacy authoritative") else: fail("Shadow golden case parity FAIL") comparison_ok = False for reason in rollout_gate["reasons"]: info(f" {reason}") return workflow_contract_ok and media_contract_ok and comparison_ok and success def test_workflow_golden_suite(client: APIClient, cad_file_id: str) -> bool: section("3. Workflow Golden Suite") cases = build_workflow_golden_cases() info(f"Running {len(cases)} golden workflow cases against the live stack") overall_success = True for case in cases: case_success = test_workflow_golden_case( client, cad_file_id, case=case, ) overall_success = overall_success and case_success return overall_success def test_template_backed_shadow_parity( client: APIClient, cad_file_id: str, *, output_type_name: str | None = None, ) -> bool: section("3. 
def test_template_backed_shadow_parity(
    client: APIClient,
    cad_file_id: str,
    *,
    output_type_name: str | None = None,
) -> bool:
    section("3. Template-Backed Shadow Parity")
    output_types = get_output_types(client, include_inactive=True)
    render_templates = get_render_templates(client)
    output_type, templates = choose_template_backed_output_type(
        output_types,
        render_templates,
        preferred_name=output_type_name,
    )
    info(
        f"Selected output type: {output_type['name']} "
        f"(artifact_kind={output_type.get('artifact_kind')} transparent_bg={output_type.get('transparent_bg')})"
    )
    for template in templates:
        info(
            "Template candidate: "
            f"{template.get('name')} path={template.get('blend_file_path')} "
            f"lighting_only={template.get('lighting_only')} "
            f"shadow_catcher={template.get('shadow_catcher_enabled')} "
            f"target_collection={template.get('target_collection')}"
        )
    product_id = get_or_create_test_product(client, cad_file_id)
    if not product_id:
        return False
    resources = ensure_template_parity_shadow_resources(
        client,
        output_type=output_type,
    )
    parity_output_type = resources["output_type"]
    workflow = resources["workflow"]
    snapshot = resources["snapshot"]
    try:
        order = create_test_order(
            client,
            product_id=product_id,
            output_type_ids=[parity_output_type["id"]],
            test_label=f"Template Shadow Parity [{parity_output_type['name']}]",
        )
        if order is None:
            return False
        lines = order.get("lines", [])
        if len(lines) != 1:
            fail("Template parity expects exactly one order line")
            return False
        line_id = lines[0]["id"]

        resp_preflight = client.get(
            f"/workflows/{workflow['id']}/preflight",
            params={"context_id": line_id},
        )
        if resp_preflight.status_code != 200:
            fail(f"Workflow preflight failed: {resp_preflight.status_code} {resp_preflight.text[:300]}")
            return False
        preflight = resp_preflight.json()
        info(
            "Preflight: "
            f"execution_mode={preflight.get('execution_mode')} "
            f"context={preflight.get('context_kind')} "
            f"allowed={preflight.get('graph_dispatch_allowed')}"
        )
        if not preflight.get("graph_dispatch_allowed"):
            fail(f"Workflow preflight blocked dispatch: {preflight.get('summary')}")
            for issue in preflight.get("issues", []):
                info(f" {issue.get('code')}: {issue.get('message')}")
            return False
        ok(f"Workflow preflight passed for template-backed output type {parity_output_type['name']}")

        success = _submit_and_wait(
            client,
            order,
            [parity_output_type["id"]],
            use_graph_dispatch=False,
        )
        if not success:
            return False

        workflow_run = wait_for_workflow_run(
            client,
            workflow_id=workflow["id"],
            line_id=line_id,
            timeout_seconds=120,
            terminal_only=True,
        )
        if workflow_run is None:
            fail("Template parity workflow run could not be resolved to a terminal state")
            return False
        ok(
            f"Workflow run tracked: mode={workflow_run.get('execution_mode')} "
            f"run={workflow_run.get('id')[:8]}..."
        )
        template_node = _node_result_by_name(workflow_run, "template")
        if template_node is None:
            fail("Template parity workflow run is missing the template node result")
            return False
        template_output = template_node.get("output") or {}
        info(
            "Resolved template in graph observer: "
            f"name={template_output.get('template_name')} "
            f"path={template_output.get('template_path')} "
            f"material_map_count={template_output.get('material_map_count')}"
        )

        comparison = wait_for_workflow_comparison(
            client,
            workflow_run_id=workflow_run["id"],
        )
        if comparison is None:
            fail("Template parity comparison did not stabilize before timeout")
            return False
        rollout_gate = evaluate_rollout_gate_from_comparison(comparison)
        verdict = rollout_gate["verdict"]
        info(
            "Template parity comparison: "
            f"status={comparison.get('status')} "
            f"exact_match={comparison.get('exact_match')} "
            f"mean_pixel_delta={comparison.get('mean_pixel_delta')}"
        )
        if verdict == "pass":
            ok("Template-backed shadow parity PASS")
        elif verdict == "warn":
            warn("Template-backed shadow parity WARN — outputs are not identical, keep legacy authoritative")
        else:
            fail("Template-backed shadow parity FAIL")
            return False
        for reason in rollout_gate["reasons"]:
            info(f" {reason}")
        return verdict in {"pass", "warn"}
    finally:
        restored_output_type = restore_output_type_workflow_snapshot(
            client,
            output_type_id=parity_output_type["id"],
            snapshot=snapshot,
        )
        info(
            "Restored output type workflow contract: "
            f"{restored_output_type['name']} rollout={restored_output_type.get('workflow_rollout_mode')} "
            f"workflow_definition_id={restored_output_type.get('workflow_definition_id')}"
        )


# ---------------------------------------------------------------------------
# Get output types
# ---------------------------------------------------------------------------
def get_output_types(client: APIClient, *, include_inactive: bool = False) -> list[dict]:
    params = {"include_inactive": "true"} if include_inactive else None
    resp = client.get("/output-types/", params=params)
    if resp.status_code != 200:
        resp = client.get("/output-types", params=params)
    if resp.status_code != 200:
        return []
    data = resp.json()
    if isinstance(data, dict):
        data = data.get("items", [])
    return [ot for ot in data if ot.get("is_active", True)]


# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    parser = argparse.ArgumentParser(description="Render pipeline integration tests")
    parser.add_argument("--host", default=DEFAULT_HOST)
    parser.add_argument("--email", default=DEFAULT_EMAIL)
    parser.add_argument("--password", default=DEFAULT_PASSWORD)
    parser.add_argument("--health", action="store_true", help="Only run health check")
    parser.add_argument("--sample", action="store_true", help="Quick sample test (1 STEP, 1 OT)")
    parser.add_argument("--full", action="store_true", help="Full test (all output types)")
    parser.add_argument("--graph", action="store_true", help="Dispatch sample/full renders via /api/workflows/dispatch")
    parser.add_argument(
        "--workflow-still-smoke",
        action="store_true",
        help="Run the canonical still workflow smoke path via real order dispatch",
    )
    parser.add_argument(
        "--workflow-golden-suite",
        action="store_true",
        help="Run the representative live golden workflow suite (still, shadow, turntable, blend export)",
    )
    parser.add_argument(
        "--template-backed-shadow-parity",
        action="store_true",
        help="Run a reversible shadow parity check against an existing Admin render-template-backed still output type",
    )
    parser.add_argument(
        "--execution-mode",
        choices=["legacy", "graph", "shadow"],
        default="shadow",
        help="Execution mode for --workflow-still-smoke (default: shadow)",
    )
    parser.add_argument(
        "--output-type-name",
        default=None,
        help="Existing output type name for --template-backed-shadow-parity",
    )
    parser.add_argument("--step", default=str(SAMPLE_STEP), help="Path to STEP file")
    args = parser.parse_args()

    if not any([
        args.health,
        args.sample,
        args.full,
        args.workflow_still_smoke,
        args.workflow_golden_suite,
        args.template_backed_shadow_parity,
    ]):
        parser.print_help()
        sys.exit(0)

    print(f"\n{BLUE}Render Pipeline Test{RESET}")
    print(f"Host: {args.host}")
    mode_label = "health"
    if args.workflow_still_smoke:
        mode_label = f"workflow-still-smoke[{args.execution_mode}]"
    elif args.workflow_golden_suite:
        mode_label = "workflow-golden-suite"
    elif args.template_backed_shadow_parity:
        mode_label = "template-backed-shadow-parity"
    elif args.sample:
        mode_label = "sample"
    elif args.full:
        mode_label = "full"
    print(f"Mode: {mode_label}")

    # Login
    try:
        client = APIClient(args.host, args.email, args.password)
        ok(f"Authenticated as {args.email}")
    except Exception as exc:
        fail(f"Authentication failed: {exc}")
        sys.exit(1)

    # Health check
    health_ok = test_health(client)
    if args.health:
        _print_summary()
        sys.exit(0 if not failed else 1)
    if not health_ok:
        warn("Health check failed — render tests may not work. Continuing anyway...")

    # STEP upload
    step_path = Path(args.step)
    cad_file_id = test_step_upload(client, step_path)
    if not cad_file_id:
        fail("STEP processing failed — cannot proceed to render tests")
        _print_summary()
        sys.exit(1)

    if args.workflow_still_smoke:
        test_workflow_still_smoke(
            client,
            cad_file_id,
            execution_mode=args.execution_mode,
        )
    elif args.workflow_golden_suite:
        test_workflow_golden_suite(
            client,
            cad_file_id,
        )
    elif args.template_backed_shadow_parity:
        test_template_backed_shadow_parity(
            client,
            cad_file_id,
            output_type_name=args.output_type_name,
        )
    elif args.sample:
        output_types = get_output_types(client)
        if not output_types:
            fail("No active output types found")
            _print_summary()
            sys.exit(1)
        info(f"Found {len(output_types)} active output types: {[ot['name'] for ot in output_types]}")
        # Pick the first non-animation output type (fastest)
        ot = next(
            (ot for ot in output_types if not ot.get("is_animation") and "LQ" in ot["name"].upper()),
            output_types[0],
        )
        info(f"Sample test using output type: {ot['name']}")
        test_order_render(
            client,
            cad_file_id,
            [ot["id"]],
            f"Sample [{ot['name']}]",
            use_graph_dispatch=args.graph,
        )
    elif args.full:
        output_types = get_output_types(client)
        if not output_types:
            fail("No active output types found")
            _print_summary()
            sys.exit(1)
        info(f"Found {len(output_types)} active output types: {[ot['name'] for ot in output_types]}")
        # Test each output type individually
        for ot in output_types:
            if ot.get("is_animation"):
                warn(f"Skipping animation output type: {ot['name']} (too slow for full test)")
                continue
            test_order_render(
                client,
                cad_file_id,
                [ot["id"]],
                ot["name"],
                use_graph_dispatch=args.graph,
            )

    _print_summary()
    sys.exit(0 if not failed else 1)


def _print_summary():
    section("Test Summary")
    print(f" {GREEN}Passed:{RESET} {len(passed)}")
    print(f" {RED}Failed:{RESET} {len(failed)}")
    print(f" {YELLOW}Warnings:{RESET} {len(warnings)}")
    if failed:
        print(f"\n{RED}FAILURES:{RESET}")
        for f_ in failed:
            print(f" - {f_}")
    if not failed:
        print(f"\n{GREEN}All tests passed!{RESET}")
    else:
        print(f"\n{RED}Tests FAILED{RESET}")


if __name__ == "__main__":
    main()