chore: snapshot workflow migration progress

This commit is contained in:
2026-04-12 11:49:04 +02:00
parent 0cd02513d5
commit 3e810c74a3
163 changed files with 31774 additions and 2753 deletions
@@ -7,6 +7,7 @@ from types import SimpleNamespace
import pytest
from PIL import Image, PngImagePlugin
from sqlalchemy import select
from sqlalchemy.engine import make_url
from sqlalchemy.orm import selectinload
from app.config import settings
@@ -18,15 +19,128 @@ from app.domains.rendering.workflow_comparison_service import (
_build_artifact,
evaluate_rollout_gate,
)
from app.domains.rendering.workflow_config_utils import build_preset_workflow_config
from app.domains.rendering.workflow_config_utils import (
build_preset_workflow_config,
build_workflow_blueprint_config,
)
from tests.db_test_utils import resolve_test_db_url
def _use_test_database(monkeypatch) -> None:
    """Repoint ``settings`` at the test Postgres database for one test.

    Uses pytest's ``monkeypatch`` so every override is undone automatically
    when the test finishes.
    """
    # NOTE(review): this block pins hard-coded docker-compose defaults, but the
    # resolved-URL block below immediately overwrites every one of them — these
    # five lines look like leftovers from an earlier revision; confirm whether
    # they can be removed.
    monkeypatch.setattr(settings, "postgres_host", "postgres")
    monkeypatch.setattr(settings, "postgres_port", 5432)
    monkeypatch.setattr(settings, "postgres_user", "hartomat")
    monkeypatch.setattr(settings, "postgres_password", "hartomat")
    monkeypatch.setattr(settings, "postgres_db", "hartomat_test")
    # Derive the effective connection parts from the canonical sync test-DB
    # URL; each component falls back to the current settings value when the
    # URL omits it.
    resolved = make_url(resolve_test_db_url(async_driver=False))
    monkeypatch.setattr(settings, "postgres_host", resolved.host or settings.postgres_host)
    monkeypatch.setattr(settings, "postgres_port", int(resolved.port or settings.postgres_port))
    monkeypatch.setattr(settings, "postgres_user", resolved.username or settings.postgres_user)
    monkeypatch.setattr(settings, "postgres_password", resolved.password or settings.postgres_password)
    monkeypatch.setattr(settings, "postgres_db", resolved.database or settings.postgres_db)
def _build_valid_custom_still_graph(
*,
execution_mode: str = "graph",
width: int = 1024,
height: int = 768,
include_output: bool = False,
include_notify: bool = False,
) -> dict[str, object]:
nodes: list[dict[str, object]] = [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "template", "step": "resolve_template", "params": {}},
{"id": "populate_materials", "step": "auto_populate_materials", "params": {}},
{"id": "resolve_materials", "step": "material_map_resolve", "params": {}},
{"id": "render", "step": "blender_still", "params": {"width": width, "height": height}},
]
edges: list[dict[str, str]] = [
{"from": "setup", "to": "template"},
{"from": "setup", "to": "populate_materials"},
{"from": "template", "to": "resolve_materials"},
{"from": "populate_materials", "to": "resolve_materials"},
{"from": "template", "to": "render"},
{"from": "resolve_materials", "to": "render"},
]
if include_output:
nodes.append({"id": "output", "step": "output_save", "params": {}})
edges.append({"from": "render", "to": "output"})
if include_notify:
nodes.append({"id": "notify", "step": "notify", "params": {}})
edges.append({"from": "render", "to": "notify"})
return {
"version": 1,
"ui": {"preset": "custom", "execution_mode": execution_mode},
"nodes": nodes,
"edges": edges,
}
def _build_valid_custom_turntable_graph(
*,
execution_mode: str = "graph",
fps: int = 24,
frame_count: int = 96,
include_output: bool = False,
include_notify: bool = False,
) -> dict[str, object]:
duration_s = frame_count / fps
nodes: list[dict[str, object]] = [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "template", "step": "resolve_template", "params": {}},
{"id": "populate_materials", "step": "auto_populate_materials", "params": {}},
{"id": "bbox", "step": "glb_bbox", "params": {}},
{"id": "resolve_materials", "step": "material_map_resolve", "params": {}},
{"id": "turntable", "step": "blender_turntable", "params": {"fps": fps, "duration_s": duration_s}},
]
edges: list[dict[str, str]] = [
{"from": "setup", "to": "template"},
{"from": "setup", "to": "populate_materials"},
{"from": "setup", "to": "bbox"},
{"from": "template", "to": "resolve_materials"},
{"from": "populate_materials", "to": "resolve_materials"},
{"from": "bbox", "to": "turntable"},
{"from": "template", "to": "turntable"},
{"from": "resolve_materials", "to": "turntable"},
]
if include_output:
nodes.append({"id": "output", "step": "output_save", "params": {}})
edges.append({"from": "turntable", "to": "output"})
if include_notify:
nodes.append({"id": "notify", "step": "notify", "params": {}})
edges.append({"from": "turntable", "to": "notify"})
return {
"version": 1,
"ui": {"preset": "custom", "execution_mode": execution_mode},
"nodes": nodes,
"edges": edges,
}
def _build_valid_custom_blend_graph(*, include_output: bool = False) -> dict[str, object]:
nodes: list[dict[str, object]] = [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "template", "step": "resolve_template", "params": {}},
{"id": "blend", "step": "export_blend", "params": {}},
]
edges: list[dict[str, str]] = [
{"from": "setup", "to": "template"},
{"from": "template", "to": "blend"},
]
if include_output:
nodes.append({"id": "output", "step": "output_save", "params": {}})
edges.append({"from": "blend", "to": "output"})
return {
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": nodes,
"edges": edges,
}
def _derive_rollout_mode_from_config(workflow_config: dict | None) -> str:
execution_mode = ((workflow_config or {}).get("ui") or {}).get("execution_mode")
if execution_mode == "graph":
return "graph"
if execution_mode == "shadow":
return "shadow"
return "legacy_only"
async def _seed_order_line(
@@ -61,6 +175,7 @@ async def _seed_order_line(
db.add(workflow_definition)
await db.flush()
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = _derive_rollout_mode_from_config(workflow_config)
order_line = OrderLine(
order_id=order.id,
@@ -148,6 +263,54 @@ async def test_dispatch_render_with_workflow_falls_back_to_legacy_without_workfl
assert runs == []
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_on_artifact_contract_mismatch(
    db,
    admin_user,
    monkeypatch,
):
    """Dispatch falls back to legacy when the graph's artifact kind mismatches.

    The seeded graph terminates in ``export_blend`` (a blend asset) while the
    output type is forced to expect ``still_image``; dispatch must route to the
    legacy backend, report the contract mismatch, and persist no WorkflowRun.
    """
    _use_test_database(monkeypatch)
    # Seed an order line whose workflow graph can only produce a blend asset.
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "blend", "step": "export_blend", "params": {}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "blend"},
            ],
        },
    )
    # Force the contract mismatch: the output type expects a still image.
    output_type = seeded["output_type"]
    output_type.artifact_kind = "still_image"
    await db.commit()
    # Stub the legacy path so the fallback is observable without real dispatch.
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    result = dispatch_render_with_workflow(str(seeded["order_line"].id))
    # Discard any open transaction state before reading back from the session.
    await db.rollback()
    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["rollout_gate_status"] == "workflow_contract_mismatch"
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert any("Expected artifact kind: still_image." in reason for reason in result["rollout_gate_reasons"])
    assert any("blend_asset" in reason for reason in result["rollout_gate_reasons"])
    # No workflow run may be created when the gate rejects the graph.
    runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert runs == []
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_creates_run_and_node_results_for_preset_dispatch(
db,
@@ -203,15 +366,11 @@ async def test_dispatch_render_with_workflow_falls_back_when_workflow_runtime_pr
seeded = await _seed_order_line(
db,
admin_user,
workflow_config={
"version": 1,
"nodes": [
{"id": "render", "step": "blender_still", "params": {}},
],
"edges": [
{"from": "missing", "to": "render"},
],
},
workflow_config=build_preset_workflow_config("still", {"width": 640, "height": 640}),
)
monkeypatch.setattr(
"app.domains.rendering.workflow_executor.prepare_workflow_context",
lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("prep exploded")),
)
monkeypatch.setattr(
@@ -248,19 +407,7 @@ async def test_dispatch_render_with_workflow_graph_mode_dispatches_supported_cus
workflow_definition = WorkflowDefinition(
name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
output_type_id=order_line.output_type_id,
config={
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "template", "step": "resolve_template", "params": {}},
{"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
],
"edges": [
{"from": "setup", "to": "template"},
{"from": "template", "to": "render"},
],
},
config=_build_valid_custom_still_graph(execution_mode="graph"),
is_active=True,
)
db.add(workflow_definition)
@@ -268,6 +415,7 @@ async def test_dispatch_render_with_workflow_graph_mode_dispatches_supported_cus
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "graph"
await db.commit()
monkeypatch.setattr(
@@ -315,21 +463,7 @@ async def test_dispatch_render_with_workflow_graph_mode_uses_output_save_as_auth
workflow_definition = WorkflowDefinition(
name=f"Graph Output Save {uuid.uuid4().hex[:8]}",
output_type_id=order_line.output_type_id,
config={
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "template", "step": "resolve_template", "params": {}},
{"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
{"id": "output", "step": "output_save", "params": {}},
],
"edges": [
{"from": "setup", "to": "template"},
{"from": "template", "to": "render"},
{"from": "render", "to": "output"},
],
},
config=_build_valid_custom_still_graph(execution_mode="graph", include_output=True),
is_active=True,
)
db.add(workflow_definition)
@@ -337,6 +471,7 @@ async def test_dispatch_render_with_workflow_graph_mode_uses_output_save_as_auth
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "graph"
await db.commit()
calls: list[tuple[str, list[str], dict]] = []
@@ -367,8 +502,10 @@ async def test_dispatch_render_with_workflow_graph_mode_uses_output_save_as_auth
assert calls[0][2]["publish_asset_enabled"] is False
assert calls[0][2]["graph_authoritative_output_enabled"] is True
assert calls[0][2]["graph_output_node_ids"] == ["output"]
assert node_results["output"].status == "completed"
assert node_results["output"].status == "pending"
assert node_results["output"].output["publication_mode"] == "awaiting_graph_authoritative_save"
assert node_results["output"].output["handoff_state"] == "armed"
assert node_results["output"].output["handoff_node_ids"] == ["render"]
@pytest.mark.asyncio
@@ -395,6 +532,7 @@ async def test_dispatch_render_with_workflow_graph_mode_canonicalizes_legacy_pre
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "graph"
await db.commit()
monkeypatch.setattr(
@@ -421,7 +559,7 @@ async def test_dispatch_render_with_workflow_graph_mode_canonicalizes_legacy_pre
assert node_results["setup"].status == "completed"
assert node_results["template"].status == "completed"
assert node_results["render"].status == "queued"
assert node_results["output"].status == "completed"
assert node_results["output"].status == "pending"
@pytest.mark.asyncio
@@ -436,21 +574,7 @@ async def test_dispatch_render_with_workflow_graph_mode_falls_back_to_legacy_on_
workflow_definition = WorkflowDefinition(
name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
output_type_id=order_line.output_type_id,
config={
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{
"id": "setup",
"step": "order_line_setup",
"params": {"failure_policy": {"fallback_to_legacy": True}},
},
{"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
],
"edges": [
{"from": "setup", "to": "render"},
],
},
config=_build_valid_custom_still_graph(execution_mode="graph"),
is_active=True,
)
db.add(workflow_definition)
@@ -458,6 +582,7 @@ async def test_dispatch_render_with_workflow_graph_mode_falls_back_to_legacy_on_
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "graph"
await db.commit()
monkeypatch.setattr(
@@ -490,6 +615,40 @@ async def test_dispatch_render_with_workflow_graph_mode_falls_back_to_legacy_on_
assert run.error_message == "graph dispatch exploded"
@pytest.mark.asyncio
async def test_dispatch_render_with_graph_capable_workflow_respects_legacy_only_rollout_mode(
    db,
    admin_user,
    monkeypatch,
):
    """A graph-capable workflow stays on legacy when rollout is pinned.

    Even with a valid graph config in ``graph`` execution mode, an output type
    whose ``workflow_rollout_mode`` is ``legacy_only`` must dispatch via the
    legacy backend and report the rollout gate as ``rollout_legacy_only``.
    """
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config=_build_valid_custom_still_graph(execution_mode="graph"),
    )
    # Pin the output type to legacy regardless of the graph-capable config.
    output_type = seeded["output_type"]
    output_type.workflow_rollout_mode = "legacy_only"
    await db.commit()
    # Stub the legacy path so the routing decision is observable.
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    result = dispatch_render_with_workflow(str(seeded["order_line"].id))
    await db.rollback()
    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["workflow_rollout_mode"] == "legacy_only"
    assert result["configured_execution_mode"] == "graph"
    assert result["rollout_gate_status"] == "rollout_legacy_only"
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_keeps_legacy_authoritative_and_dispatches_graph_observer(
db,
@@ -502,19 +661,7 @@ async def test_dispatch_render_with_workflow_shadow_mode_keeps_legacy_authoritat
workflow_definition = WorkflowDefinition(
name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
output_type_id=order_line.output_type_id,
config={
"version": 1,
"ui": {"preset": "custom", "execution_mode": "shadow"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "template", "step": "resolve_template", "params": {}},
{"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
],
"edges": [
{"from": "setup", "to": "template"},
{"from": "template", "to": "render"},
],
},
config=_build_valid_custom_still_graph(execution_mode="shadow"),
is_active=True,
)
db.add(workflow_definition)
@@ -522,6 +669,7 @@ async def test_dispatch_render_with_workflow_shadow_mode_keeps_legacy_authoritat
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "shadow"
await db.commit()
calls: list[tuple[str, list[str], dict]] = []
@@ -592,6 +740,7 @@ async def test_dispatch_render_with_workflow_shadow_mode_canonicalizes_legacy_pr
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "shadow"
await db.commit()
calls: list[tuple[str, list[str], dict]] = []
@@ -610,21 +759,13 @@ async def test_dispatch_render_with_workflow_shadow_mode_canonicalizes_legacy_pr
await db.rollback()
run_result = await db.execute(
select(WorkflowRun)
.where(WorkflowRun.id == uuid.UUID(result["shadow_workflow_run_id"]))
.options(selectinload(WorkflowRun.node_results))
)
run = run_result.scalar_one()
node_results = {node_result.node_name: node_result for node_result in run.node_results}
assert result["backend"] == "legacy"
assert result["execution_mode"] == "shadow"
assert result["shadow_status"] == "dispatched"
assert result["shadow_task_ids"] == ["legacy-shadow-task-1"]
assert run.execution_mode == "shadow"
assert node_results["output"].status == "completed"
assert calls[0][2]["publish_asset_enabled"] is False
assert result["shadow_status"] == "skipped"
assert result["rollout_gate_status"] == "shadow_skipped"
assert "shadow_workflow_run_id" not in result
assert "material_assignments" in result["shadow_error"]
assert calls == []
@pytest.mark.asyncio
@@ -639,17 +780,7 @@ async def test_dispatch_render_with_workflow_shadow_mode_ignores_graph_failures_
workflow_definition = WorkflowDefinition(
name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
output_type_id=order_line.output_type_id,
config={
"version": 1,
"ui": {"preset": "custom", "execution_mode": "shadow"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
],
"edges": [
{"from": "setup", "to": "render"},
],
},
config=_build_valid_custom_still_graph(execution_mode="shadow"),
is_active=True,
)
db.add(workflow_definition)
@@ -657,6 +788,7 @@ async def test_dispatch_render_with_workflow_shadow_mode_ignores_graph_failures_
output_type = await db.get(OutputType, order_line.output_type_id)
assert output_type is not None
output_type.workflow_definition_id = workflow_definition.id
output_type.workflow_rollout_mode = "shadow"
await db.commit()
monkeypatch.setattr(
@@ -730,6 +862,32 @@ def test_evaluate_rollout_gate_warns_on_small_visual_delta(tmp_path: Path):
assert any("warn threshold" in reason for reason in gate["reasons"])
def test_evaluate_rollout_gate_passes_near_zero_visual_delta(tmp_path: Path):
    """A two-pixel, single-channel-unit difference on a 1024x1024 RGBA canvas
    stays under the visual pass threshold and leaves the gate rollout-ready."""
    base_color = (106, 106, 106, 255)
    authoritative_path = tmp_path / "authoritative.png"
    observer_path = tmp_path / "observer.png"
    # Start from two identical flat-gray images.
    Image.new("RGBA", (1024, 1024), color=base_color).save(authoritative_path)
    Image.new("RGBA", (1024, 1024), color=base_color).save(observer_path)
    # Nudge exactly two pixels of the observer image by one channel unit each.
    with Image.open(observer_path) as observer_image:
        observer_image.putpixel((444, 137), (106, 106, 107, 255))
        observer_image.putpixel((651, 142), (105, 106, 106, 255))
        observer_image.save(observer_path)
    # Two unit deltas spread over every channel of every pixel.
    near_zero_delta = 2 / (1024 * 1024 * 4 * 255)
    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(authoritative_path)),
        observer_output=_build_artifact(str(observer_path)),
        exact_match=False,
        dimensions_match=True,
        mean_pixel_delta=near_zero_delta,
    )
    assert gate["verdict"] == "pass"
    assert gate["ready"] is True
    assert gate["status"] == "ready_for_rollout"
    assert any("pass threshold" in reason for reason in gate["reasons"])
def test_evaluate_rollout_gate_fails_on_missing_observer(tmp_path: Path):
authoritative = tmp_path / "authoritative.png"
Image.new("RGBA", (16, 16), color=(0, 128, 255, 255)).save(authoritative)
@@ -796,7 +954,11 @@ def test_dispatch_render_with_workflow_unit_marks_shadow_dispatch_as_pending_rol
workflow_def_id = uuid.uuid4()
fake_line = SimpleNamespace(
id=uuid.UUID(order_line_id),
output_type=SimpleNamespace(id=output_type_id, workflow_definition_id=workflow_def_id),
output_type=SimpleNamespace(
id=output_type_id,
workflow_definition_id=workflow_def_id,
workflow_rollout_mode="shadow",
),
)
fake_workflow_def = SimpleNamespace(id=workflow_def_id, config={"version": 1}, is_active=True)
fake_run = SimpleNamespace(id=uuid.uuid4())
@@ -951,12 +1113,14 @@ async def test_workflow_dispatch_endpoint_returns_workflow_run_with_node_results
assert node_results["setup"]["output"]["order_line_id"] == str(order_line.id)
assert node_results["template"]["status"] == "completed"
assert node_results["template"]["output"]["use_materials"] is False
assert node_results["output"]["status"] == "completed"
assert node_results["output"]["status"] == "pending"
assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
assert node_results["output"]["output"]["handoff_state"] == "armed"
assert node_results["output"]["output"]["handoff_node_ids"] == ["render"]
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_arms_output_save_for_export_blend(
async def test_workflow_dispatch_endpoint_rejects_output_save_for_export_blend_only_graph(
client,
db,
admin_user,
@@ -968,18 +1132,7 @@ async def test_workflow_dispatch_endpoint_arms_output_save_for_export_blend(
order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
workflow_definition = WorkflowDefinition(
name=f"Blend Output Workflow {uuid.uuid4().hex[:8]}",
config={
"version": 1,
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "blend", "step": "export_blend", "params": {}},
{"id": "output", "step": "output_save", "params": {}},
],
"edges": [
{"from": "setup", "to": "blend"},
{"from": "blend", "to": "output"},
],
},
config=_build_valid_custom_blend_graph(include_output=True),
is_active=True,
)
db.add(workflow_definition)
@@ -1000,35 +1153,9 @@ async def test_workflow_dispatch_endpoint_arms_output_save_for_export_blend(
headers=auth_headers,
)
assert response.status_code == 200
body = response.json()
assert body["context_id"] == context_id
assert body["execution_mode"] == "graph"
assert body["dispatched"] == 1
assert body["task_ids"] == ["task-1"]
assert calls == [
(
"app.domains.rendering.tasks.export_blend_for_order_line_task",
[context_id],
{
"workflow_run_id": body["workflow_run"]["id"],
"workflow_node_id": "blend",
"publish_asset_enabled": False,
"graph_authoritative_output_enabled": True,
"graph_output_node_ids": ["output"],
},
)
]
node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
assert node_results["blend"]["status"] == "queued"
assert node_results["blend"]["output"]["predicted_asset_type"] == "blend_production"
assert node_results["blend"]["output"]["publish_asset_enabled"] is False
assert node_results["blend"]["output"]["graph_authoritative_output_enabled"] is True
assert node_results["blend"]["output"]["graph_output_node_ids"] == ["output"]
assert node_results["output"]["status"] == "completed"
assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
assert response.status_code == 422
assert "output_save" in response.json()["detail"]
assert calls == []
@pytest.mark.asyncio
@@ -1044,18 +1171,7 @@ async def test_workflow_dispatch_endpoint_arms_output_save_for_turntable(
order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
workflow_definition = WorkflowDefinition(
name=f"Turntable Output Workflow {uuid.uuid4().hex[:8]}",
config={
"version": 1,
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "turntable", "step": "blender_turntable", "params": {"fps": 24, "frame_count": 96}},
{"id": "output", "step": "output_save", "params": {}},
],
"edges": [
{"from": "setup", "to": "turntable"},
{"from": "turntable", "to": "output"},
],
},
config=_build_valid_custom_turntable_graph(include_output=True),
is_active=True,
)
db.add(workflow_definition)
@@ -1091,7 +1207,6 @@ async def test_workflow_dispatch_endpoint_arms_output_save_for_turntable(
assert calls[0][2]["graph_authoritative_output_enabled"] is True
assert calls[0][2]["graph_output_node_ids"] == ["output"]
assert calls[0][2]["fps"] == 24
assert calls[0][2]["frame_count"] == 96
node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
assert node_results["turntable"]["status"] == "queued"
@@ -1099,8 +1214,10 @@ async def test_workflow_dispatch_endpoint_arms_output_save_for_turntable(
assert node_results["turntable"]["output"]["publish_asset_enabled"] is False
assert node_results["turntable"]["output"]["graph_authoritative_output_enabled"] is True
assert node_results["turntable"]["output"]["graph_output_node_ids"] == ["output"]
assert node_results["output"]["status"] == "completed"
assert node_results["output"]["status"] == "pending"
assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
assert node_results["output"]["output"]["handoff_state"] == "armed"
assert node_results["output"]["output"]["handoff_node_ids"] == ["turntable"]
@pytest.mark.asyncio
@@ -1116,18 +1233,7 @@ async def test_workflow_dispatch_endpoint_arms_notify_handoff_for_render_node(
order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
workflow_definition = WorkflowDefinition(
name=f"Notify Workflow {uuid.uuid4().hex[:8]}",
config={
"version": 1,
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}},
{"id": "render", "step": "blender_still", "params": {}},
{"id": "notify", "step": "notify", "params": {}},
],
"edges": [
{"from": "setup", "to": "render"},
{"from": "render", "to": "notify"},
],
},
config=_build_valid_custom_still_graph(include_notify=True),
is_active=True,
)
db.add(workflow_definition)
@@ -1166,9 +1272,10 @@ async def test_workflow_dispatch_endpoint_arms_notify_handoff_for_render_node(
node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
assert node_results["render"]["status"] == "queued"
assert node_results["render"]["output"]["graph_notify_node_ids"] == ["notify"]
assert node_results["notify"]["status"] == "completed"
assert node_results["notify"]["status"] == "pending"
assert node_results["notify"]["output"]["notification_mode"] == "deferred_to_render_task"
assert node_results["notify"]["output"]["armed_node_ids"] == ["render"]
assert node_results["notify"]["output"]["handoff_state"] == "armed"
@pytest.mark.asyncio
@@ -1246,19 +1353,7 @@ async def test_workflow_draft_dispatch_endpoint_dispatches_unsaved_render_graph(
json={
"workflow_id": str(workflow_definition.id),
"context_id": str(order_line.id),
"config": {
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Setup"}},
{"id": "template", "step": "resolve_template", "params": {}, "ui": {"label": "Template"}},
{"id": "render", "step": "blender_still", "params": {"width": 800, "height": 600}, "ui": {"label": "Render"}},
],
"edges": [
{"from": "setup", "to": "template"},
{"from": "template", "to": "render"},
],
},
"config": _build_valid_custom_still_graph(width=800, height=600),
},
)
@@ -1306,17 +1401,7 @@ async def test_workflow_draft_dispatch_endpoint_marks_submitted_order_processing
headers=auth_headers,
json={
"context_id": str(order_line.id),
"config": {
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Setup"}},
{"id": "render", "step": "blender_still", "params": {}, "ui": {"label": "Render"}},
],
"edges": [
{"from": "setup", "to": "render"},
],
},
"config": _build_valid_custom_still_graph(),
},
)
@@ -1413,19 +1498,7 @@ async def test_workflow_preflight_endpoint_supports_direct_cad_file_graphs(
)
workflow_definition = WorkflowDefinition(
name=f"CAD Workflow {uuid.uuid4().hex[:8]}",
config={
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{"id": "input", "step": "resolve_step_path", "params": {}, "ui": {"label": "Resolve STEP"}},
{"id": "render", "step": "blender_render", "params": {"width": 512, "height": 512}, "ui": {"label": "Thumbnail"}},
{"id": "save", "step": "thumbnail_save", "params": {}, "ui": {"label": "Save Thumbnail"}},
],
"edges": [
{"from": "input", "to": "render"},
{"from": "render", "to": "save"},
],
},
config=build_workflow_blueprint_config("cad_intake"),
is_active=True,
)
db.add_all([cad_file, workflow_definition])
@@ -1443,7 +1516,7 @@ async def test_workflow_preflight_endpoint_supports_direct_cad_file_graphs(
assert body["context_kind"] == "cad_file"
assert body["expected_context_kind"] == "cad_file"
assert body["execution_mode"] == "graph"
assert body["execution_mode"] == "legacy"
assert body["graph_dispatch_allowed"] is True
assert body["resolved_cad_file_id"] == str(cad_file.id)
assert all(node["status"] == "ready" for node in body["nodes"])
@@ -1464,19 +1537,7 @@ async def test_workflow_draft_preflight_endpoint_validates_unsaved_render_graph(
headers=auth_headers,
json={
"context_id": str(order_line.id),
"config": {
"version": 1,
"ui": {"preset": "custom", "execution_mode": "graph"},
"nodes": [
{"id": "setup", "step": "order_line_setup", "params": {}, "ui": {"label": "Setup"}},
{"id": "template", "step": "resolve_template", "params": {}, "ui": {"label": "Template"}},
{"id": "render", "step": "blender_still", "params": {"width": 640, "height": 640}, "ui": {"label": "Render"}},
],
"edges": [
{"from": "setup", "to": "template"},
{"from": "template", "to": "render"},
],
},
"config": _build_valid_custom_still_graph(width=640, height=640),
},
)
@@ -1489,7 +1550,13 @@ async def test_workflow_draft_preflight_endpoint_validates_unsaved_render_graph(
assert body["execution_mode"] == "graph"
assert body["graph_dispatch_allowed"] is True
assert body["resolved_order_line_id"] == str(order_line.id)
assert [node["node_id"] for node in body["nodes"]] == ["setup", "template", "render"]
assert [node["node_id"] for node in body["nodes"]] == [
"setup",
"template",
"populate_materials",
"resolve_materials",
"render",
]
@pytest.mark.asyncio
@@ -1646,7 +1713,9 @@ async def test_workflow_run_comparison_endpoint_reports_metadata_only_difference
assert body["exact_match"] is False
assert body["dimensions_match"] is True
assert body["mean_pixel_delta"] == 0.0
assert "metadata differs" in body["summary"]
assert body["summary"] == (
"Observer output matches the authoritative legacy output within the visual pass threshold."
)
@pytest.mark.asyncio
@@ -1695,7 +1764,9 @@ async def test_workflow_run_comparison_endpoint_finds_shadow_output_in_step_file
admin_user,
auth_headers,
tmp_path,
monkeypatch,
):
monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
workflow_run = WorkflowRun(
order_line_id=order_line.id,
@@ -1710,7 +1781,7 @@ async def test_workflow_run_comparison_endpoint_finds_shadow_output_in_step_file
authoritative_path = render_dir / "authoritative.png"
Image.new("RGBA", (12, 12), (32, 160, 255, 255)).save(authoritative_path)
step_shadow_dir = Path("/app/uploads/step_files/renders")
step_shadow_dir = Path(settings.upload_dir) / "step_files" / "renders" / str(order_line.id)
step_shadow_dir.mkdir(parents=True, exist_ok=True)
shadow_path = step_shadow_dir / f"line_{order_line.id}_shadow-{str(workflow_run.id)[:8]}.png"
Image.new("RGBA", (12, 12), (32, 160, 255, 255)).save(shadow_path)
@@ -1729,3 +1800,52 @@ async def test_workflow_run_comparison_endpoint_finds_shadow_output_in_step_file
assert body["status"] == "matched"
assert body["observer_output"]["exists"] is True
assert body["observer_output"]["path"] == str(shadow_path)
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_treats_near_zero_visual_delta_as_match(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """The comparison endpoint reports ``matched`` for a near-zero pixel delta.

    The shadow output differs from the authoritative render by two pixels, each
    off by one channel unit on a 1024x1024 RGBA canvas — not an exact match,
    but within the visual pass threshold asserted below.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    # Flush so workflow_run.id is assigned before it is used in the filename.
    await db.flush()
    render_dir = tmp_path / "comparison-near-zero" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    # The shadow filename encodes the order line id and a run-id prefix so the
    # endpoint can locate it next to the authoritative render.
    shadow_path = render_dir / f"line_{order_line.id}_shadow-{str(workflow_run.id)[:8]}.png"
    Image.new("RGBA", (1024, 1024), (106, 106, 106, 255)).save(authoritative_path)
    Image.new("RGBA", (1024, 1024), (106, 106, 106, 255)).save(shadow_path)
    # Nudge exactly two pixels of the shadow image by one channel unit each.
    with Image.open(shadow_path) as image:
        image.putpixel((444, 137), (106, 106, 107, 255))
        image.putpixel((651, 142), (105, 106, 106, 255))
        image.save(shadow_path)
    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()
    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )
    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "matched"
    assert body["exact_match"] is False
    assert body["dimensions_match"] is True
    assert body["mean_pixel_delta"] is not None
    assert body["mean_pixel_delta"] <= 1e-6
    assert "pass threshold" in body["summary"]