# HartOMat/backend/tests/domains/test_workflow_dispatch_service.py
# (removed pasted file-browser header: "Files / T / 1732 lines / 61 KiB / Python")
from __future__ import annotations
import uuid
from pathlib import Path
from types import SimpleNamespace
import pytest
from PIL import Image, PngImagePlugin
from sqlalchemy import select
from sqlalchemy.orm import selectinload
from app.config import settings
from app.domains.orders.models import Order, OrderLine, OrderStatus
from app.domains.products.models import CadFile, Product
from app.domains.rendering.dispatch_service import dispatch_render_with_workflow
from app.domains.rendering.models import OutputType, WorkflowDefinition, WorkflowRun
from app.domains.rendering.workflow_comparison_service import (
_build_artifact,
evaluate_rollout_gate,
)
from app.domains.rendering.workflow_config_utils import build_preset_workflow_config
def _use_test_database(monkeypatch) -> None:
    """Redirect the global settings to the dedicated test Postgres database."""
    overrides = {
        "postgres_host": "postgres",
        "postgres_port": 5432,
        "postgres_user": "hartomat",
        "postgres_password": "hartomat",
        "postgres_db": "hartomat_test",
    }
    # Insertion order of the dict mirrors the original setattr sequence.
    for attribute, value in overrides.items():
        monkeypatch.setattr(settings, attribute, value)
async def _seed_order_line(
    db,
    admin_user,
    *,
    workflow_config: dict | None = None,
) -> dict[str, object]:
    """Persist a minimal product / output-type / order / order-line graph.

    If ``workflow_config`` is provided, an active :class:`WorkflowDefinition`
    is also created and linked to the output type so dispatch can find it.

    Returns a dict with keys ``order_line``, ``workflow_definition`` (``None``
    when no config was given) and ``output_type``.
    """
    product = Product(
        pim_id=f"PIM-{uuid.uuid4().hex[:8]}",
        name="Workflow Test Product",
    )
    output_type = OutputType(
        name=f"Workflow Output {uuid.uuid4().hex[:8]}",
        render_backend="auto",
    )
    order = Order(
        order_number=f"WF-{uuid.uuid4().hex[:10]}",
        created_by=admin_user.id,
    )
    db.add_all([product, output_type, order])
    # Flush so generated primary keys are available for the FK links below.
    await db.flush()
    workflow_definition = None
    if workflow_config is not None:
        workflow_definition = WorkflowDefinition(
            name=f"Workflow {uuid.uuid4().hex[:8]}",
            output_type_id=output_type.id,
            config=workflow_config,
            is_active=True,
        )
        db.add(workflow_definition)
        await db.flush()
        output_type.workflow_definition_id = workflow_definition.id
    order_line = OrderLine(
        order_id=order.id,
        product_id=product.id,
        output_type_id=output_type.id,
    )
    db.add(order_line)
    await db.commit()
    return {
        "order_line": order_line,
        "workflow_definition": workflow_definition,
        "output_type": output_type,
    }
async def _seed_renderable_order_line(
    db,
    admin_user,
    tmp_path: Path,
) -> OrderLine:
    """Seed an order line whose product has a real on-disk STEP file.

    The CAD file, product (with part/material mapping), output type, order
    and order line are all committed in one go; the refreshed order line is
    returned for use in dispatch tests that need a renderable product.
    """
    step_path = tmp_path / "dispatch" / "product.step"
    step_path.parent.mkdir(parents=True, exist_ok=True)
    # Content is irrelevant; dispatch only needs the file to exist.
    step_path.write_text("STEP", encoding="utf-8")
    cad_file = CadFile(
        original_name="product.step",
        stored_path=str(step_path),
        file_hash=f"hash-{uuid.uuid4().hex}",
        parsed_objects={"objects": ["Body"]},
    )
    product = Product(
        pim_id=f"PIM-{uuid.uuid4().hex[:8]}",
        name="Dispatch Product",
        category_key="dispatch",
        cad_file=cad_file,
        cad_part_materials=[{"part_name": "Body", "material": "Steel"}],
    )
    output_type = OutputType(
        name=f"Workflow Output {uuid.uuid4().hex[:8]}",
        render_backend="auto",
    )
    order = Order(
        order_number=f"WF-{uuid.uuid4().hex[:10]}",
        created_by=admin_user.id,
    )
    order_line = OrderLine(
        order=order,
        product=product,
        output_type=output_type,
    )
    db.add_all([cad_file, product, output_type, order, order_line])
    await db.commit()
    await db.refresh(order_line)
    return order_line
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_to_legacy_without_workflow_definition(
    db,
    admin_user,
    monkeypatch,
):
    """No workflow definition on the output type: dispatch must use the
    (patched) legacy path, report a ``legacy_only`` rollout gate and create
    no WorkflowRun rows."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(db, admin_user)
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    result = dispatch_render_with_workflow(str(seeded["order_line"].id))
    # Dispatch uses its own sync session; roll back ours before re-querying.
    await db.rollback()
    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["rollout_gate_status"] == "legacy_only"
    assert result["rollout_gate_verdict"] is None
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert result["rollout_workflow_definition_id"] is None
    assert result["rollout_output_type_id"] == str(seeded["output_type"].id)
    runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert runs == []
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_creates_run_and_node_results_for_preset_dispatch(
    db,
    admin_user,
    monkeypatch,
):
    """A preset ('still') workflow config dispatches through the workflow
    backend, persisting a legacy-mode WorkflowRun with the canonical
    setup/template/render/output node results, all pending."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config=build_preset_workflow_config("still", {"width": 1024, "height": 1024}),
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_builder.dispatch_workflow",
        lambda workflow_type, order_line_id, params=None: "canvas-123",
    )
    result = dispatch_render_with_workflow(str(seeded["order_line"].id))
    await db.rollback()
    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    assert result["backend"] == "workflow"
    assert result["workflow_type"] == "still"
    assert result["celery_task_id"] == "canvas-123"
    assert run.execution_mode == "legacy"
    assert run.workflow_def_id == seeded["workflow_definition"].id
    assert run.order_line_id == seeded["order_line"].id
    assert run.celery_task_id == "canvas-123"
    assert {node_result.node_name for node_result in run.node_results} == {
        "setup",
        "template",
        "render",
        "output",
    }
    assert all(node_result.status == "pending" for node_result in run.node_results)
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_when_workflow_runtime_preparation_is_invalid(
    db,
    admin_user,
    monkeypatch,
):
    """An edge referencing a nonexistent node ('missing') makes runtime
    preparation fail; dispatch must fall back to legacy, surface the
    preparation-failure gate status and create no WorkflowRun."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config={
            "version": 1,
            "nodes": [
                {"id": "render", "step": "blender_still", "params": {}},
            ],
            # Deliberately broken: "missing" is not a declared node id.
            "edges": [
                {"from": "missing", "to": "render"},
            ],
        },
    )
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    result = dispatch_render_with_workflow(str(seeded["order_line"].id))
    await db.rollback()
    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["rollout_gate_status"] == "workflow_preparation_failed"
    assert result["rollout_gate_verdict"] is None
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert result["rollout_workflow_definition_id"] == str(seeded["workflow_definition"].id)
    assert result["rollout_output_type_id"] == str(seeded["output_type"].id)
    assert any("Workflow runtime preparation failed:" in reason for reason in result["rollout_gate_reasons"])
    runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert runs == []
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_dispatches_supported_custom_workflow(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A custom graph-mode workflow dispatches via Celery (send_task stubbed):
    setup/template complete locally, render is queued, and — absent an
    output_save node — the render task publishes assets itself."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    # Stub Celery so no broker is needed; returns a fixed task id.
    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "graph-task-1"})(),
    )
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}
    assert result["backend"] == "workflow_graph"
    assert result["execution_mode"] == "graph"
    assert result["task_ids"] == ["graph-task-1"]
    assert result["rollout_gate_status"] == "graph_authoritative"
    assert result["rollout_gate_verdict"] == "pass"
    assert result["workflow_rollout_ready"] is True
    assert result["output_type_rollout_ready"] is True
    assert run.execution_mode == "graph"
    assert run.status == "pending"
    assert node_results["setup"].status == "completed"
    assert node_results["template"].status == "completed"
    assert node_results["render"].status == "queued"
    assert node_results["render"].output["publish_asset_enabled"] is True
    assert node_results["render"].output["graph_authoritative_output_enabled"] is False
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_uses_output_save_as_authoritative_boundary(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """With an output_save node present, the render task must be dispatched
    with publishing disabled and graph-authoritative saving enabled; the
    output node records it is awaiting the authoritative save."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Output Save {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
                {"id": "output", "step": "output_save", "params": {}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "render"},
                {"from": "render", "to": "output"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    # Record every Celery dispatch so kwargs can be inspected below.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "graph-output-save-task-1"})()

    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}
    assert result["backend"] == "workflow_graph"
    assert result["task_ids"] == ["graph-output-save-task-1"]
    assert len(calls) == 1
    assert calls[0][0] == "app.domains.rendering.tasks.render_order_line_still_task"
    assert calls[0][1] == [str(order_line.id)]
    assert calls[0][2]["publish_asset_enabled"] is False
    assert calls[0][2]["graph_authoritative_output_enabled"] is True
    assert calls[0][2]["graph_output_node_ids"] == ["output"]
    assert node_results["output"].status == "completed"
    assert node_results["output"].output["publication_mode"] == "awaiting_graph_authoritative_save"
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_canonicalizes_legacy_preset_config(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A legacy flat ``{"type": ..., "params": ...}`` config with graph
    execution mode must be canonicalized into the standard
    setup/template/render/output node graph before dispatch."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Legacy Preset Graph {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "type": "still",
            "params": {"width": 1024, "height": 768},
            "ui": {"execution_mode": "graph"},
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "legacy-graph-task-1"})(),
    )
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}
    assert result["backend"] == "workflow_graph"
    assert result["execution_mode"] == "graph"
    assert result["task_ids"] == ["legacy-graph-task-1"]
    assert run.execution_mode == "graph"
    assert node_results["setup"].status == "completed"
    assert node_results["template"].status == "completed"
    assert node_results["render"].status == "queued"
    assert node_results["output"].status == "completed"
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_falls_back_to_legacy_on_graph_failure(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """When graph execution raises and the failure policy allows fallback,
    dispatch must return the legacy result, record the failed run and flag
    the ``graph_execution_failed`` gate status."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {
                    "id": "setup",
                    "step": "order_line_setup",
                    # Opt this node into legacy fallback on failure.
                    "params": {"failure_policy": {"fallback_to_legacy": True}},
                },
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    # Lambda trick: generator .throw raises inside an expression context.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("graph dispatch exploded")),
    )
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    runs = (
        await db.execute(
            select(WorkflowRun).options(selectinload(WorkflowRun.node_results)).order_by(WorkflowRun.created_at.desc())
        )
    ).scalars().all()
    run = runs[0]
    assert result["backend"] == "legacy"
    assert result["fallback_from"] == "workflow_graph"
    assert result["workflow_run_id"] == str(run.id)
    assert result["rollout_gate_status"] == "graph_execution_failed"
    assert result["workflow_rollout_ready"] is False
    assert run.execution_mode == "graph"
    assert run.status == "failed"
    assert run.error_message == "graph dispatch exploded"
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_keeps_legacy_authoritative_and_dispatches_graph_observer(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """Shadow mode: legacy stays authoritative while an observer graph run is
    dispatched with publishing/events/job-document disabled and a
    ``shadow-`` output suffix; gate stays pending until a verdict."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "shadow"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    # Capture observer Celery dispatches for kwarg inspection below.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "shadow-task-1"})()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["shadow_workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    render_call = calls[0]
    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "dispatched"
    assert result["shadow_task_ids"] == ["shadow-task-1"]
    assert result["rollout_gate_status"] == "pending_shadow_verdict"
    assert result["rollout_gate_verdict"] is None
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert run.execution_mode == "shadow"
    assert run.status == "pending"
    assert render_call[0] == "app.domains.rendering.tasks.render_order_line_still_task"
    assert render_call[1] == [str(order_line.id)]
    assert render_call[2]["publish_asset_enabled"] is False
    assert render_call[2]["emit_events"] is False
    assert render_call[2]["job_document_enabled"] is False
    assert render_call[2]["output_name_suffix"].startswith("shadow-")
    assert render_call[2]["workflow_run_id"] == str(run.id)
    assert render_call[2]["workflow_node_id"] == "render"
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_canonicalizes_legacy_preset_config(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A legacy flat preset config with shadow execution mode must be
    canonicalized and dispatched as a non-publishing shadow observer run."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Legacy Preset Shadow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "type": "still",
            "params": {"width": 1024, "height": 768},
            "ui": {"execution_mode": "shadow"},
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "legacy-shadow-task-1"})()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["shadow_workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}
    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "dispatched"
    assert result["shadow_task_ids"] == ["legacy-shadow-task-1"]
    assert run.execution_mode == "shadow"
    assert node_results["output"].status == "completed"
    assert calls[0][2]["publish_asset_enabled"] is False
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_ignores_graph_failures_after_legacy_dispatch(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A failing shadow graph must not break the legacy dispatch result:
    legacy remains authoritative while the run is recorded as failed and the
    gate reports ``shadow_execution_failed``."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "shadow"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    # Lambda trick: generator .throw raises inside an expression context.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("shadow graph exploded")),
    )
    result = dispatch_render_with_workflow(str(order_line.id))
    await db.rollback()
    run = (
        await db.execute(select(WorkflowRun).order_by(WorkflowRun.created_at.desc()))
    ).scalars().first()
    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "failed"
    assert result["shadow_error"] == "shadow graph exploded"
    assert result["shadow_workflow_run_id"] == str(run.id)
    assert result["rollout_gate_status"] == "shadow_execution_failed"
    assert result["workflow_rollout_ready"] is False
    assert run.execution_mode == "shadow"
    assert run.status == "failed"
    assert run.error_message == "shadow graph exploded"
def test_evaluate_rollout_gate_passes_exact_match(tmp_path: Path):
    """Identical authoritative and observer renders must fully pass the gate."""
    fill = (0, 128, 255, 255)
    authoritative_path = tmp_path / "authoritative.png"
    observer_path = tmp_path / "observer.png"
    # Write two pixel-identical 16x16 RGBA images.
    for target in (authoritative_path, observer_path):
        Image.new("RGBA", (16, 16), color=fill).save(target)
    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(authoritative_path)),
        observer_output=_build_artifact(str(observer_path)),
        exact_match=True,
        dimensions_match=True,
        mean_pixel_delta=0.0,
    )
    assert gate["verdict"] == "pass"
    for ready_key in ("ready", "workflow_rollout_ready", "output_type_rollout_ready"):
        assert gate[ready_key] is True
def test_evaluate_rollout_gate_warns_on_small_visual_delta(tmp_path: Path):
    """A one-level green-channel difference must warn and hold legacy authoritative."""
    authoritative_path = tmp_path / "authoritative.png"
    observer_path = tmp_path / "observer.png"
    # Observer differs from authoritative by a single channel step (128 -> 129).
    Image.new("RGBA", (16, 16), color=(0, 128, 255, 255)).save(authoritative_path)
    Image.new("RGBA", (16, 16), color=(0, 129, 255, 255)).save(observer_path)
    tiny_delta = 1 / (4 * 255)
    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(authoritative_path)),
        observer_output=_build_artifact(str(observer_path)),
        exact_match=False,
        dimensions_match=True,
        mean_pixel_delta=tiny_delta,
    )
    assert gate["verdict"] == "warn"
    assert gate["ready"] is False
    assert gate["status"] == "hold_legacy_authoritative"
    warn_reasons = [reason for reason in gate["reasons"] if "warn threshold" in reason]
    assert warn_reasons
def test_evaluate_rollout_gate_fails_on_missing_observer(tmp_path: Path):
    """A nonexistent observer artifact must produce a failing gate verdict."""
    authoritative_path = tmp_path / "authoritative.png"
    Image.new("RGBA", (16, 16), color=(0, 128, 255, 255)).save(authoritative_path)
    # Path is never written, so the observer artifact is genuinely absent.
    missing_observer = tmp_path / "missing.png"
    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(authoritative_path)),
        observer_output=_build_artifact(str(missing_observer)),
        exact_match=None,
        dimensions_match=None,
        mean_pixel_delta=None,
    )
    assert gate["verdict"] == "fail"
    assert gate["ready"] is False
    missing_reasons = [
        reason for reason in gate["reasons"] if "Observer workflow output is missing" in reason
    ]
    assert missing_reasons
def test_dispatch_render_with_workflow_unit_adds_legacy_only_rollout_signal(monkeypatch):
    """Pure unit variant (no database): with SQLAlchemy faked, a line whose
    output type has no workflow definition dispatches via legacy and carries
    the legacy-only rollout signal."""
    order_line_id = str(uuid.uuid4())
    output_type_id = uuid.uuid4()
    # Stand-in for the ORM row the dispatch service loads.
    fake_line = SimpleNamespace(
        id=uuid.UUID(order_line_id),
        output_type=SimpleNamespace(id=output_type_id, workflow_definition_id=None),
    )

    class _FakeExecuteResult:
        # Mimics the single-row result API the service calls.
        def __init__(self, value):
            self._value = value

        def scalar_one_or_none(self):
            return self._value

    class _FakeSession:
        # Context-manager stand-in for sqlalchemy.orm.Session.
        def __init__(self, _engine):
            self._engine = _engine

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def execute(self, _query):
            return _FakeExecuteResult(fake_line)

    monkeypatch.setattr("sqlalchemy.create_engine", lambda *args, **kwargs: object())
    monkeypatch.setattr("sqlalchemy.orm.Session", _FakeSession)
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda value: {"backend": "legacy", "order_line_id": value},
    )
    result = dispatch_render_with_workflow(order_line_id)
    assert result["backend"] == "legacy"
    assert result["rollout_gate_status"] == "legacy_only"
    assert result["workflow_rollout_ready"] is False
    assert result["rollout_output_type_id"] == str(output_type_id)
def test_dispatch_render_with_workflow_unit_marks_shadow_dispatch_as_pending_rollout(monkeypatch):
    """Pure unit variant: with the session, config utils, executor, graph
    runtime and run service all stubbed, a shadow-mode dispatch must keep
    legacy authoritative and report ``pending_shadow_verdict``."""
    order_line_id = str(uuid.uuid4())
    output_type_id = uuid.uuid4()
    workflow_def_id = uuid.uuid4()
    fake_line = SimpleNamespace(
        id=uuid.UUID(order_line_id),
        output_type=SimpleNamespace(id=output_type_id, workflow_definition_id=workflow_def_id),
    )
    fake_workflow_def = SimpleNamespace(id=workflow_def_id, config={"version": 1}, is_active=True)
    fake_run = SimpleNamespace(id=uuid.uuid4())
    # Returned in order: first execute() yields the line, second the definition.
    execute_values = [fake_line, fake_workflow_def]

    class _FakeExecuteResult:
        # Mimics the single-row result API the service calls.
        def __init__(self, value):
            self._value = value

        def scalar_one_or_none(self):
            return self._value

    class _FakeSession:
        # Context-manager stand-in for sqlalchemy.orm.Session; commit/rollback
        # and add are no-ops since no database is involved.
        def __init__(self, _engine):
            self._engine = _engine

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def execute(self, _query):
            return _FakeExecuteResult(execute_values.pop(0))

        def commit(self):
            return None

        def rollback(self):
            return None

        def add(self, _value):
            return None

    monkeypatch.setattr("sqlalchemy.create_engine", lambda *args, **kwargs: object())
    monkeypatch.setattr("sqlalchemy.orm.Session", _FakeSession)
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda value: {"backend": "legacy", "order_line_id": value},
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_config_utils.canonicalize_workflow_config",
        lambda config: config,
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_config_utils.get_workflow_execution_mode",
        lambda config, default="legacy": "shadow",
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_executor.prepare_workflow_context",
        lambda *args, **kwargs: {"nodes": [{"id": "render"}]},
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.find_unsupported_graph_nodes",
        lambda *_args, **_kwargs: [],
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_run_service.create_workflow_run",
        lambda *args, **kwargs: fake_run,
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *args, **kwargs: SimpleNamespace(task_ids=["shadow-task-1"]),
    )
    result = dispatch_render_with_workflow(order_line_id)
    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "dispatched"
    assert result["rollout_gate_status"] == "pending_shadow_verdict"
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert result["shadow_workflow_run_id"] == str(fake_run.id)
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_returns_workflow_run_with_node_results(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """HTTP dispatch endpoint: a 'still_with_exports' preset dispatches two
    Celery tasks (render + blend export) under one run, with the output
    type's render_settings overriding the workflow's preset dimensions."""
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    # These should win over the workflow preset's 640x640 below.
    output_type.render_settings = {
        "width": 2048,
        "height": 2048,
        "engine": "cycles",
        "samples": 128,
    }
    workflow_definition = WorkflowDefinition(
        name=f"Dispatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still_with_exports", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"task-{len(calls)}"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )
    assert response.status_code == 200
    body = response.json()
    assert body["context_id"] == context_id
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 2
    assert body["task_ids"] == ["task-1", "task-2"]
    assert [call[0] for call in calls] == [
        "app.domains.rendering.tasks.render_order_line_still_task",
        "app.domains.rendering.tasks.export_blend_for_order_line_task",
    ]
    assert [call[1] for call in calls] == [[context_id], [context_id]]
    assert calls[0][2]["publish_asset_enabled"] is False
    assert calls[0][2]["graph_authoritative_output_enabled"] is True
    assert calls[0][2]["graph_output_node_ids"] == ["output"]
    assert calls[0][2]["width"] == 2048
    assert calls[0][2]["height"] == 2048
    assert calls[0][2]["samples"] == 128
    assert calls[0][2]["workflow_node_id"] == "render"
    assert calls[1][2]["workflow_node_id"] == "blend"
    assert "workflow_run_id" in calls[0][2]
    # Both tasks must belong to the same WorkflowRun.
    assert calls[0][2]["workflow_run_id"] == calls[1][2]["workflow_run_id"]
    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
    assert body["workflow_run"]["status"] == "pending"
    assert body["workflow_run"]["execution_mode"] == "graph"
    assert body["workflow_run"]["celery_task_id"] == "task-1"
    assert body["workflow_run"]["order_line_id"] == str(order_line.id)
    assert node_results["render"]["status"] == "queued"
    assert node_results["render"]["output"]["task_id"] == "task-1"
    assert node_results["blend"]["status"] == "queued"
    assert node_results["blend"]["output"]["task_id"] == "task-2"
    assert node_results["setup"]["status"] == "completed"
    assert node_results["setup"]["output"]["order_line_id"] == str(order_line.id)
    assert node_results["template"]["status"] == "completed"
    assert node_results["template"]["output"]["use_materials"] is False
    assert node_results["output"]["status"] == "completed"
    assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_arms_output_save_for_export_blend(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """A setup -> export_blend -> output_save graph dispatches exactly one
    blend-export task armed for graph-authoritative saving (publishing off,
    output node awaiting the authoritative save)."""
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Blend Output Workflow {uuid.uuid4().hex[:8]}",
        config={
            "version": 1,
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "blend", "step": "export_blend", "params": {}},
                {"id": "output", "step": "output_save", "params": {}},
            ],
            "edges": [
                {"from": "setup", "to": "blend"},
                {"from": "blend", "to": "output"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"task-{len(calls)}"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )
    assert response.status_code == 200
    body = response.json()
    assert body["context_id"] == context_id
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 1
    assert body["task_ids"] == ["task-1"]
    # Exactly one Celery call, with the full expected kwargs payload.
    assert calls == [
        (
            "app.domains.rendering.tasks.export_blend_for_order_line_task",
            [context_id],
            {
                "workflow_run_id": body["workflow_run"]["id"],
                "workflow_node_id": "blend",
                "publish_asset_enabled": False,
                "graph_authoritative_output_enabled": True,
                "graph_output_node_ids": ["output"],
            },
        )
    ]
    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
    assert node_results["blend"]["status"] == "queued"
    assert node_results["blend"]["output"]["predicted_asset_type"] == "blend_production"
    assert node_results["blend"]["output"]["publish_asset_enabled"] is False
    assert node_results["blend"]["output"]["graph_authoritative_output_enabled"] is True
    assert node_results["blend"]["output"]["graph_output_node_ids"] == ["output"]
    assert node_results["output"]["status"] == "completed"
    assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_arms_output_save_for_turntable(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """Dispatching a turntable graph queues only the render task while arming
    the output_save node for graph-authoritative publication."""
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    turntable_config = {
        "version": 1,
        "nodes": [
            {"id": "setup", "step": "order_line_setup", "params": {}},
            {"id": "turntable", "step": "blender_turntable", "params": {"fps": 24, "frame_count": 96}},
            {"id": "output", "step": "output_save", "params": {}},
        ],
        "edges": [
            {"from": "setup", "to": "turntable"},
            {"from": "turntable", "to": "output"},
        ],
    }
    workflow_definition = WorkflowDefinition(
        name=f"Turntable Output Workflow {uuid.uuid4().hex[:8]}",
        config=turntable_config,
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    sent_tasks: list[tuple[str, list[str], dict]] = []
    def _capture_send_task(task_name: str, args: list[str], kwargs: dict):
        # Record the Celery dispatch instead of hitting a real broker.
        sent_tasks.append((task_name, args, kwargs))
        return SimpleNamespace(id=f"task-{len(sent_tasks)}")
    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _capture_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["context_id"] == context_id
    assert payload["execution_mode"] == "graph"
    assert payload["dispatched"] == 1
    assert payload["task_ids"] == ["task-1"]
    task_name, task_args, task_kwargs = sent_tasks[0]
    assert task_name == "app.domains.rendering.tasks.render_turntable_task"
    assert task_args == [context_id]
    assert task_kwargs["workflow_run_id"] == payload["workflow_run"]["id"]
    assert task_kwargs["workflow_node_id"] == "turntable"
    assert task_kwargs["publish_asset_enabled"] is False
    assert task_kwargs["graph_authoritative_output_enabled"] is True
    assert task_kwargs["graph_output_node_ids"] == ["output"]
    assert task_kwargs["fps"] == 24
    assert task_kwargs["frame_count"] == 96
    results_by_name = {node["node_name"]: node for node in payload["workflow_run"]["node_results"]}
    turntable_output = results_by_name["turntable"]["output"]
    assert results_by_name["turntable"]["status"] == "queued"
    assert turntable_output["predicted_asset_type"] == "turntable"
    assert turntable_output["publish_asset_enabled"] is False
    assert turntable_output["graph_authoritative_output_enabled"] is True
    assert turntable_output["graph_output_node_ids"] == ["output"]
    assert results_by_name["output"]["status"] == "completed"
    assert results_by_name["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_arms_notify_handoff_for_render_node(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """The notify node completes eagerly and its delivery is handed off to the
    render task via ``graph_notify_node_ids``."""
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    notify_config = {
        "version": 1,
        "nodes": [
            {"id": "setup", "step": "order_line_setup", "params": {}},
            {"id": "render", "step": "blender_still", "params": {}},
            {"id": "notify", "step": "notify", "params": {}},
        ],
        "edges": [
            {"from": "setup", "to": "render"},
            {"from": "render", "to": "notify"},
        ],
    }
    workflow_definition = WorkflowDefinition(
        name=f"Notify Workflow {uuid.uuid4().hex[:8]}",
        config=notify_config,
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    sent_tasks: list[tuple[str, list[str], dict]] = []
    def _capture_send_task(task_name: str, args: list[str], kwargs: dict):
        # Record the Celery dispatch instead of hitting a real broker.
        sent_tasks.append((task_name, args, kwargs))
        return SimpleNamespace(id="task-1")
    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _capture_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["context_id"] == context_id
    assert payload["execution_mode"] == "graph"
    assert payload["dispatched"] == 1
    assert payload["task_ids"] == ["task-1"]
    assert len(sent_tasks) == 1
    task_name, task_args, task_kwargs = sent_tasks[0]
    assert task_name == "app.domains.rendering.tasks.render_order_line_still_task"
    assert task_args == [context_id]
    assert task_kwargs["workflow_run_id"] == payload["workflow_run"]["id"]
    assert task_kwargs["workflow_node_id"] == "render"
    assert task_kwargs["emit_legacy_notifications"] is True
    assert task_kwargs["graph_notify_node_ids"] == ["notify"]
    results_by_name = {node["node_name"]: node for node in payload["workflow_run"]["node_results"]}
    assert results_by_name["render"]["status"] == "queued"
    assert results_by_name["render"]["output"]["graph_notify_node_ids"] == ["notify"]
    assert results_by_name["notify"]["status"] == "completed"
    assert results_by_name["notify"]["output"]["notification_mode"] == "deferred_to_render_task"
    assert results_by_name["notify"]["output"]["armed_node_ids"] == ["render"]
@pytest.mark.asyncio
async def test_workflow_preflight_endpoint_reports_render_graph_readiness(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Preflight for a saved preset workflow reports per-node readiness and
    still allows graph dispatch despite a template warning."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Preflight Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still_with_exports", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    response = await client.get(
        f"/api/workflows/{workflow_definition.id}/preflight",
        params={"context_id": str(order_line.id)},
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    checks_by_node = {node["node_id"]: node for node in payload["nodes"]}
    assert payload["workflow_id"] == str(workflow_definition.id)
    assert payload["context_kind"] == "order_line"
    assert payload["expected_context_kind"] == "order_line"
    assert payload["execution_mode"] == "legacy"
    assert payload["graph_dispatch_allowed"] is True
    assert payload["resolved_order_line_id"] == str(order_line.id)
    assert payload["resolved_cad_file_id"] == str(order_line.product.cad_file_id)
    assert payload["unsupported_node_ids"] == []
    assert checks_by_node["setup"]["status"] == "ready"
    # A missing template only warns; it must not block dispatch.
    assert checks_by_node["template"]["status"] == "warning"
    assert checks_by_node["template"]["issues"][0]["code"] == "template_missing"
    assert checks_by_node["render"]["status"] == "ready"
    assert checks_by_node["blend"]["status"] == "ready"
@pytest.mark.asyncio
async def test_workflow_draft_dispatch_endpoint_dispatches_unsaved_render_graph(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """An ad-hoc (unsaved) graph config posted to /api/workflows/dispatch is
    executed and linked to the referenced workflow definition."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Draft Dispatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    sent_tasks: list[tuple[str, list[str], dict]] = []
    def _capture_send_task(task_name: str, args: list[str], kwargs: dict):
        # Record the Celery dispatch instead of hitting a real broker.
        sent_tasks.append((task_name, args, kwargs))
        return SimpleNamespace(id=f"draft-task-{len(sent_tasks)}")
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _capture_send_task)
    draft_config = {
        "version": 1,
        "ui": {"preset": "custom", "execution_mode": "graph"},
        "nodes": [
            {"id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Setup"}},
            {"id": "template", "step": "resolve_template", "params": {}, "ui": {"label": "Template"}},
            {"id": "render", "step": "blender_still", "params": {"width": 800, "height": 600}, "ui": {"label": "Render"}},
        ],
        "edges": [
            {"from": "setup", "to": "template"},
            {"from": "template", "to": "render"},
        ],
    }
    response = await client.post(
        "/api/workflows/dispatch",
        headers=auth_headers,
        json={
            "workflow_id": str(workflow_definition.id),
            "context_id": str(order_line.id),
            "config": draft_config,
        },
    )
    assert response.status_code == 200
    payload = response.json()
    results_by_name = {node["node_name"]: node for node in payload["workflow_run"]["node_results"]}
    assert payload["context_id"] == str(order_line.id)
    assert payload["execution_mode"] == "graph"
    assert payload["dispatched"] == 1
    assert payload["task_ids"] == ["draft-task-1"]
    assert payload["workflow_run"]["workflow_def_id"] == str(workflow_definition.id)
    assert payload["workflow_run"]["execution_mode"] == "graph"
    assert payload["workflow_run"]["order_line_id"] == str(order_line.id)
    dispatched_names = [task_name for task_name, _args, _kwargs in sent_tasks]
    assert dispatched_names == ["app.domains.rendering.tasks.render_order_line_still_task"]
    assert sent_tasks[0][1] == [str(order_line.id)]
    assert sent_tasks[0][2]["workflow_node_id"] == "render"
    assert "workflow_run_id" in sent_tasks[0][2]
    assert results_by_name["setup"]["status"] == "completed"
    assert results_by_name["template"]["status"] == "completed"
    assert results_by_name["render"]["status"] == "queued"
@pytest.mark.asyncio
async def test_workflow_draft_dispatch_endpoint_marks_submitted_order_processing(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """Dispatching a draft graph for a line of a *submitted* order flips the
    order to ``processing`` and stamps ``processing_started_at``.

    The Celery ``send_task`` call is stubbed so no broker/worker is needed.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    order = await db.get(Order, order_line.order_id)
    assert order is not None
    order.status = OrderStatus.submitted
    await db.commit()
    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "draft-task-1"})(),
    )
    response = await client.post(
        "/api/workflows/dispatch",
        headers=auth_headers,
        json={
            "context_id": str(order_line.id),
            "config": {
                "version": 1,
                "ui": {"preset": "custom", "execution_mode": "graph"},
                "nodes": [
                    {"id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Setup"}},
                    {"id": "render", "step": "blender_still", "params": {}, "ui": {"label": "Render"}},
                ],
                "edges": [
                    {"from": "setup", "to": "render"},
                ],
            },
        },
    )
    assert response.status_code == 200
    await db.refresh(order)
    assert order.status == OrderStatus.processing
    assert order.processing_started_at is not None
    # Assert on the refreshed ``order`` directly rather than traversing
    # ``order_line.order``: after the commit above the line's attributes are
    # expired, and an implicit relationship lazy-load on an AsyncSession can
    # raise MissingGreenlet. ``db.get`` returned the identity-mapped row, so
    # this checks the same Order instance either way.
    assert order.completed_at is None
@pytest.mark.asyncio
async def test_workflow_draft_dispatch_endpoint_rejects_invalid_graph_config(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """A draft config whose edge references a missing node is rejected (422)."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    # Edge source "missing" does not exist in the node list.
    broken_config = {
        "version": 1,
        "ui": {"preset": "custom", "execution_mode": "graph"},
        "nodes": [
            {"id": "render", "step": "blender_still", "params": {}},
        ],
        "edges": [
            {"from": "missing", "to": "render"},
        ],
    }
    response = await client.post(
        "/api/workflows/dispatch",
        headers=auth_headers,
        json={"context_id": str(order_line.id), "config": broken_config},
    )
    assert response.status_code == 422
    assert "Invalid workflow config" in response.json()["detail"]
@pytest.mark.asyncio
async def test_workflow_preflight_endpoint_rejects_context_kind_mismatch(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Passing a CAD-file id to an order-line workflow's preflight flags a
    context_kind_mismatch and blocks graph dispatch."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Mismatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)
    response = await client.get(
        f"/api/workflows/{workflow_definition.id}/preflight",
        params={"context_id": str(order_line.product.cad_file_id)},
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["context_kind"] == "cad_file"
    assert payload["expected_context_kind"] == "order_line"
    assert payload["graph_dispatch_allowed"] is False
    issue_codes = [issue["code"] for issue in payload["issues"]]
    assert "context_kind_mismatch" in issue_codes
    node_statuses = {node["status"] for node in payload["nodes"]}
    assert "error" in node_statuses
@pytest.mark.asyncio
async def test_workflow_preflight_endpoint_supports_direct_cad_file_graphs(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """A graph whose entry node resolves a STEP path accepts a CAD-file
    context directly, with every node reported ready."""
    step_path = tmp_path / "cad-preflight" / "thumb.step"
    step_path.parent.mkdir(parents=True, exist_ok=True)
    step_path.write_text("STEP", encoding="utf-8")
    cad_file = CadFile(
        original_name="thumb.step",
        stored_path=str(step_path),
        file_hash=f"hash-{uuid.uuid4().hex}",
        parsed_objects={"objects": ["Body"]},
    )
    cad_graph_config = {
        "version": 1,
        "ui": {"preset": "custom", "execution_mode": "graph"},
        "nodes": [
            {"id": "input", "step": "resolve_step_path", "params": {}, "ui": {"label": "Resolve STEP"}},
            {"id": "render", "step": "blender_render", "params": {"width": 512, "height": 512}, "ui": {"label": "Thumbnail"}},
            {"id": "save", "step": "thumbnail_save", "params": {}, "ui": {"label": "Save Thumbnail"}},
        ],
        "edges": [
            {"from": "input", "to": "render"},
            {"from": "render", "to": "save"},
        ],
    }
    workflow_definition = WorkflowDefinition(
        name=f"CAD Workflow {uuid.uuid4().hex[:8]}",
        config=cad_graph_config,
        is_active=True,
    )
    db.add_all([cad_file, workflow_definition])
    await db.commit()
    await db.refresh(workflow_definition)
    response = await client.get(
        f"/api/workflows/{workflow_definition.id}/preflight",
        params={"context_id": str(cad_file.id)},
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["context_kind"] == "cad_file"
    assert payload["expected_context_kind"] == "cad_file"
    assert payload["execution_mode"] == "graph"
    assert payload["graph_dispatch_allowed"] is True
    assert payload["resolved_cad_file_id"] == str(cad_file.id)
    assert all(node["status"] == "ready" for node in payload["nodes"])
@pytest.mark.asyncio
async def test_workflow_draft_preflight_endpoint_validates_unsaved_render_graph(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Draft preflight (no saved workflow) validates a posted graph config
    against an order-line context."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    draft_config = {
        "version": 1,
        "ui": {"preset": "custom", "execution_mode": "graph"},
        "nodes": [
            {"id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Setup"}},
            {"id": "template", "step": "resolve_template", "params": {}, "ui": {"label": "Template"}},
            {"id": "render", "step": "blender_still", "params": {"width": 640, "height": 640}, "ui": {"label": "Render"}},
        ],
        "edges": [
            {"from": "setup", "to": "template"},
            {"from": "template", "to": "render"},
        ],
    }
    response = await client.post(
        "/api/workflows/preflight",
        headers=auth_headers,
        json={"context_id": str(order_line.id), "config": draft_config},
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["workflow_id"] is None
    assert payload["context_kind"] == "order_line"
    assert payload["expected_context_kind"] == "order_line"
    assert payload["execution_mode"] == "graph"
    assert payload["graph_dispatch_allowed"] is True
    assert payload["resolved_order_line_id"] == str(order_line.id)
    assert [node["node_id"] for node in payload["nodes"]] == ["setup", "template", "render"]
@pytest.mark.asyncio
async def test_workflow_draft_preflight_endpoint_reports_context_kind_mismatch(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Draft preflight with a CAD-file context against an order-line preset
    reports the mismatch and disallows dispatch."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    response = await client.post(
        "/api/workflows/preflight",
        headers=auth_headers,
        json={
            "context_id": str(order_line.product.cad_file_id),
            "config": build_preset_workflow_config("still", {"width": 640, "height": 640}),
        },
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["workflow_id"] is None
    assert payload["context_kind"] == "cad_file"
    assert payload["expected_context_kind"] == "order_line"
    assert payload["graph_dispatch_allowed"] is False
    issue_codes = {issue["code"] for issue in payload["issues"]}
    assert "context_kind_mismatch" in issue_codes
@pytest.mark.asyncio
async def test_workflow_draft_preflight_endpoint_rejects_invalid_graph_config(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Draft preflight rejects a config with an edge from a missing node."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    # Edge source "missing" does not exist in the node list.
    broken_config = {
        "version": 1,
        "ui": {"preset": "custom", "execution_mode": "graph"},
        "nodes": [
            {"id": "render", "step": "blender_still", "params": {}},
        ],
        "edges": [
            {"from": "missing", "to": "render"},
        ],
    }
    response = await client.post(
        "/api/workflows/preflight",
        headers=auth_headers,
        json={"context_id": str(order_line.id), "config": broken_config},
    )
    assert response.status_code == 422
    assert "Invalid workflow config" in response.json()["detail"]
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_identical_shadow_output(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Byte-identical authoritative and shadow renders compare as an exact
    match with zero mean pixel delta."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()
    render_dir = tmp_path / "comparison" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    shadow_path = render_dir / f"authoritative_shadow-{str(workflow_run.id)[:8]}.png"
    # Write the same solid-color 8x8 image to both sides.
    fill = (0, 128, 255, 255)
    Image.new("RGBA", (8, 8), fill).save(authoritative_path)
    Image.new("RGBA", (8, 8), fill).save(shadow_path)
    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()
    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["workflow_run_id"] == str(workflow_run.id)
    assert payload["execution_mode"] == "shadow"
    assert payload["status"] == "matched"
    assert payload["exact_match"] is True
    assert payload["dimensions_match"] is True
    assert payload["mean_pixel_delta"] == 0.0
    assert payload["authoritative_output"]["path"] == str(authoritative_path)
    assert payload["observer_output"]["path"] == str(shadow_path)
    assert payload["authoritative_output"]["image_width"] == 8
    assert payload["observer_output"]["image_height"] == 8
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_metadata_only_difference_as_matched(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """PNGs with identical pixels but different text chunks still count as
    matched (zero pixel delta), while the summary notes the metadata diff."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()
    render_dir = tmp_path / "comparison-metadata" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    shadow_path = render_dir / f"authoritative_shadow-{str(workflow_run.id)[:8]}.png"
    image = Image.new("RGBA", (8, 8), (0, 128, 255, 255))
    # Same pixels on both sides; only the PNG "Date" text chunk differs.
    authoritative_meta = PngImagePlugin.PngInfo()
    authoritative_meta.add_text("Date", "2026-04-07 10:38:23")
    image.save(authoritative_path, pnginfo=authoritative_meta)
    observer_meta = PngImagePlugin.PngInfo()
    observer_meta.add_text("Date", "2026-04-07 10:40:45")
    image.save(shadow_path, pnginfo=observer_meta)
    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()
    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["status"] == "matched"
    assert payload["exact_match"] is False
    assert payload["dimensions_match"] is True
    assert payload["mean_pixel_delta"] == 0.0
    assert "metadata differs" in payload["summary"]
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_missing_shadow_output(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """When no shadow render exists on disk, the comparison reports
    ``missing_observer`` and skips the pixel diff."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()
    render_dir = tmp_path / "comparison-missing" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    # Only the authoritative side is written; no shadow file is created.
    Image.new("RGBA", (4, 4), (255, 64, 64, 255)).save(authoritative_path)
    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()
    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )
    assert response.status_code == 200
    payload = response.json()
    assert payload["status"] == "missing_observer"
    assert payload["exact_match"] is None
    assert payload["observer_output"]["exists"] is False
    assert payload["authoritative_output"]["exists"] is True
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_finds_shadow_output_in_step_files_render_dir(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Comparison locates a shadow render under the shared step_files/renders
    directory when it is not stored next to the authoritative file."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()
    render_dir = tmp_path / "comparison-step-files" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    Image.new("RGBA", (12, 12), (32, 160, 255, 255)).save(authoritative_path)
    # NOTE(review): this writes outside tmp_path into a container-global
    # uploads tree and the shadow file is not cleaned up afterwards —
    # consider a finalizer/teardown to remove it.
    step_shadow_dir = Path("/app/uploads/step_files/renders")
    step_shadow_dir.mkdir(parents=True, exist_ok=True)
    shadow_path = step_shadow_dir / f"line_{order_line.id}_shadow-{str(workflow_run.id)[:8]}.png"
    # Identical pixels to the authoritative image, so the comparison matches.
    Image.new("RGBA", (12, 12), (32, 160, 255, 255)).save(shadow_path)
    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()
    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )
    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "matched"
    assert body["observer_output"]["exists"] is True
    assert body["observer_output"]["path"] == str(shadow_path)