"""Tests for workflow-aware render dispatch and the rollout gate.

Covers legacy fallback paths, preset and custom-graph dispatch, shadow-mode
observer execution, and the pixel-delta rollout gate evaluation.
"""
from __future__ import annotations
|
|
|
|
import uuid
|
|
from pathlib import Path
|
|
from types import SimpleNamespace
|
|
|
|
import pytest
|
|
from PIL import Image, PngImagePlugin
|
|
from sqlalchemy import select
|
|
from sqlalchemy.engine import make_url
|
|
from sqlalchemy.orm import selectinload
|
|
|
|
from app.config import settings
|
|
from app.domains.orders.models import Order, OrderLine, OrderStatus
|
|
from app.domains.products.models import CadFile, Product
|
|
from app.domains.rendering.dispatch_service import dispatch_render_with_workflow
|
|
from app.domains.rendering.models import OutputType, WorkflowDefinition, WorkflowRun
|
|
from app.domains.rendering.workflow_comparison_service import (
|
|
_build_artifact,
|
|
evaluate_rollout_gate,
|
|
)
|
|
from app.domains.rendering.workflow_config_utils import (
|
|
build_preset_workflow_config,
|
|
build_workflow_blueprint_config,
|
|
)
|
|
from tests.db_test_utils import resolve_test_db_url
|
|
|
|
|
|
def _use_test_database(monkeypatch) -> None:
    """Point the app settings at the synchronous test database URL.

    Each component of the resolved URL overrides the corresponding
    ``settings`` attribute, falling back to the existing setting when the
    URL omits that component.
    """
    url = make_url(resolve_test_db_url(async_driver=False))
    overrides = {
        "postgres_host": url.host or settings.postgres_host,
        "postgres_port": int(url.port or settings.postgres_port),
        "postgres_user": url.username or settings.postgres_user,
        "postgres_password": url.password or settings.postgres_password,
        "postgres_db": url.database or settings.postgres_db,
    }
    for attr, value in overrides.items():
        monkeypatch.setattr(settings, attr, value)
|
|
|
|
|
|
def _build_valid_custom_still_graph(
|
|
*,
|
|
execution_mode: str = "graph",
|
|
width: int = 1024,
|
|
height: int = 768,
|
|
include_output: bool = False,
|
|
include_notify: bool = False,
|
|
) -> dict[str, object]:
|
|
nodes: list[dict[str, object]] = [
|
|
{"id": "setup", "step": "order_line_setup", "params": {}},
|
|
{"id": "template", "step": "resolve_template", "params": {}},
|
|
{"id": "populate_materials", "step": "auto_populate_materials", "params": {}},
|
|
{"id": "resolve_materials", "step": "material_map_resolve", "params": {}},
|
|
{"id": "render", "step": "blender_still", "params": {"width": width, "height": height}},
|
|
]
|
|
edges: list[dict[str, str]] = [
|
|
{"from": "setup", "to": "template"},
|
|
{"from": "setup", "to": "populate_materials"},
|
|
{"from": "template", "to": "resolve_materials"},
|
|
{"from": "populate_materials", "to": "resolve_materials"},
|
|
{"from": "template", "to": "render"},
|
|
{"from": "resolve_materials", "to": "render"},
|
|
]
|
|
if include_output:
|
|
nodes.append({"id": "output", "step": "output_save", "params": {}})
|
|
edges.append({"from": "render", "to": "output"})
|
|
if include_notify:
|
|
nodes.append({"id": "notify", "step": "notify", "params": {}})
|
|
edges.append({"from": "render", "to": "notify"})
|
|
return {
|
|
"version": 1,
|
|
"ui": {"preset": "custom", "execution_mode": execution_mode},
|
|
"nodes": nodes,
|
|
"edges": edges,
|
|
}
|
|
|
|
|
|
def _build_valid_custom_turntable_graph(
|
|
*,
|
|
execution_mode: str = "graph",
|
|
fps: int = 24,
|
|
frame_count: int = 96,
|
|
include_output: bool = False,
|
|
include_notify: bool = False,
|
|
) -> dict[str, object]:
|
|
duration_s = frame_count / fps
|
|
nodes: list[dict[str, object]] = [
|
|
{"id": "setup", "step": "order_line_setup", "params": {}},
|
|
{"id": "template", "step": "resolve_template", "params": {}},
|
|
{"id": "populate_materials", "step": "auto_populate_materials", "params": {}},
|
|
{"id": "bbox", "step": "glb_bbox", "params": {}},
|
|
{"id": "resolve_materials", "step": "material_map_resolve", "params": {}},
|
|
{"id": "turntable", "step": "blender_turntable", "params": {"fps": fps, "duration_s": duration_s}},
|
|
]
|
|
edges: list[dict[str, str]] = [
|
|
{"from": "setup", "to": "template"},
|
|
{"from": "setup", "to": "populate_materials"},
|
|
{"from": "setup", "to": "bbox"},
|
|
{"from": "template", "to": "resolve_materials"},
|
|
{"from": "populate_materials", "to": "resolve_materials"},
|
|
{"from": "bbox", "to": "turntable"},
|
|
{"from": "template", "to": "turntable"},
|
|
{"from": "resolve_materials", "to": "turntable"},
|
|
]
|
|
if include_output:
|
|
nodes.append({"id": "output", "step": "output_save", "params": {}})
|
|
edges.append({"from": "turntable", "to": "output"})
|
|
if include_notify:
|
|
nodes.append({"id": "notify", "step": "notify", "params": {}})
|
|
edges.append({"from": "turntable", "to": "notify"})
|
|
return {
|
|
"version": 1,
|
|
"ui": {"preset": "custom", "execution_mode": execution_mode},
|
|
"nodes": nodes,
|
|
"edges": edges,
|
|
}
|
|
|
|
|
|
def _build_valid_custom_blend_graph(*, include_output: bool = False) -> dict[str, object]:
|
|
nodes: list[dict[str, object]] = [
|
|
{"id": "setup", "step": "order_line_setup", "params": {}},
|
|
{"id": "template", "step": "resolve_template", "params": {}},
|
|
{"id": "blend", "step": "export_blend", "params": {}},
|
|
]
|
|
edges: list[dict[str, str]] = [
|
|
{"from": "setup", "to": "template"},
|
|
{"from": "template", "to": "blend"},
|
|
]
|
|
if include_output:
|
|
nodes.append({"id": "output", "step": "output_save", "params": {}})
|
|
edges.append({"from": "blend", "to": "output"})
|
|
return {
|
|
"version": 1,
|
|
"ui": {"preset": "custom", "execution_mode": "graph"},
|
|
"nodes": nodes,
|
|
"edges": edges,
|
|
}
|
|
|
|
|
|
def _derive_rollout_mode_from_config(workflow_config: dict | None) -> str:
|
|
execution_mode = ((workflow_config or {}).get("ui") or {}).get("execution_mode")
|
|
if execution_mode == "graph":
|
|
return "graph"
|
|
if execution_mode == "shadow":
|
|
return "shadow"
|
|
return "legacy_only"
|
|
|
|
|
|
async def _seed_order_line(
    db,
    admin_user,
    *,
    workflow_config: dict | None = None,
) -> dict[str, object]:
    """Seed a minimal Product/OutputType/Order/OrderLine for dispatch tests.

    When ``workflow_config`` is provided, an active ``WorkflowDefinition``
    with that config is attached to the output type and the output type's
    rollout mode is derived from the config's ``ui.execution_mode``.

    Returns a dict with keys ``order_line``, ``workflow_definition``
    (None when no config was given) and ``output_type``.
    """
    product = Product(
        pim_id=f"PIM-{uuid.uuid4().hex[:8]}",
        name="Workflow Test Product",
    )
    output_type = OutputType(
        name=f"Workflow Output {uuid.uuid4().hex[:8]}",
        render_backend="auto",
    )
    order = Order(
        order_number=f"WF-{uuid.uuid4().hex[:10]}",
        created_by=admin_user.id,
    )
    db.add_all([product, output_type, order])
    # Flush so the generated primary keys are available before linking rows.
    await db.flush()

    workflow_definition = None
    if workflow_config is not None:
        workflow_definition = WorkflowDefinition(
            name=f"Workflow {uuid.uuid4().hex[:8]}",
            output_type_id=output_type.id,
            config=workflow_config,
            is_active=True,
        )
        db.add(workflow_definition)
        await db.flush()
        output_type.workflow_definition_id = workflow_definition.id
        output_type.workflow_rollout_mode = _derive_rollout_mode_from_config(workflow_config)

    order_line = OrderLine(
        order_id=order.id,
        product_id=product.id,
        output_type_id=output_type.id,
    )
    db.add(order_line)
    # Commit so the dispatch service (which uses its own session) can see the rows.
    await db.commit()

    return {
        "order_line": order_line,
        "workflow_definition": workflow_definition,
        "output_type": output_type,
    }
|
|
|
|
|
|
async def _seed_renderable_order_line(
    db,
    admin_user,
    tmp_path: Path,
) -> OrderLine:
    """Seed an order line whose product has a CAD file and part materials.

    Writes a placeholder STEP file under ``tmp_path`` so graph/shadow
    dispatch paths that require a renderable product can run.  Returns the
    committed, refreshed ``OrderLine``.
    """
    step_path = tmp_path / "dispatch" / "product.step"
    step_path.parent.mkdir(parents=True, exist_ok=True)
    # Content is irrelevant; only the file's existence on disk matters here.
    step_path.write_text("STEP", encoding="utf-8")

    cad_file = CadFile(
        original_name="product.step",
        stored_path=str(step_path),
        file_hash=f"hash-{uuid.uuid4().hex}",
        parsed_objects={"objects": ["Body"]},
    )
    product = Product(
        pim_id=f"PIM-{uuid.uuid4().hex[:8]}",
        name="Dispatch Product",
        category_key="dispatch",
        cad_file=cad_file,
        cad_part_materials=[{"part_name": "Body", "material": "Steel"}],
    )
    output_type = OutputType(
        name=f"Workflow Output {uuid.uuid4().hex[:8]}",
        render_backend="auto",
    )
    order = Order(
        order_number=f"WF-{uuid.uuid4().hex[:10]}",
        created_by=admin_user.id,
    )
    order_line = OrderLine(
        order=order,
        product=product,
        output_type=output_type,
    )
    db.add_all([cad_file, product, output_type, order, order_line])
    await db.commit()
    await db.refresh(order_line)
    return order_line
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_to_legacy_without_workflow_definition(
    db,
    admin_user,
    monkeypatch,
):
    """Without a workflow definition, dispatch uses legacy and creates no runs."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(db, admin_user)

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )

    # dispatch_render_with_workflow is synchronous and uses its own session.
    result = dispatch_render_with_workflow(str(seeded["order_line"].id))

    # Reset this test session so subsequent queries see committed state.
    await db.rollback()

    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["rollout_gate_status"] == "legacy_only"
    assert result["rollout_gate_verdict"] is None
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert result["rollout_workflow_definition_id"] is None
    assert result["rollout_output_type_id"] == str(seeded["output_type"].id)
    runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert runs == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_on_artifact_contract_mismatch(
    db,
    admin_user,
    monkeypatch,
):
    """A blend-producing graph on a still_image output type falls back to legacy."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "blend", "step": "export_blend", "params": {}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "blend"},
            ],
        },
    )
    output_type = seeded["output_type"]
    # Force a contract mismatch: the graph emits a blend asset, not a still image.
    output_type.artifact_kind = "still_image"
    await db.commit()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )

    result = dispatch_render_with_workflow(str(seeded["order_line"].id))

    await db.rollback()

    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["rollout_gate_status"] == "workflow_contract_mismatch"
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert any("Expected artifact kind: still_image." in reason for reason in result["rollout_gate_reasons"])
    assert any("blend_asset" in reason for reason in result["rollout_gate_reasons"])
    # No workflow run is persisted when the contract check rejects the graph.
    runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert runs == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_creates_run_and_node_results_for_preset_dispatch(
    db,
    admin_user,
    monkeypatch,
):
    """Preset workflows dispatch via legacy execution and persist a run with pending node results."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config=build_preset_workflow_config("still", {"width": 1024, "height": 1024}),
    )

    # Stub the celery-backed workflow dispatch to return a known task id.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_builder.dispatch_workflow",
        lambda workflow_type, order_line_id, params=None: "canvas-123",
    )

    result = dispatch_render_with_workflow(str(seeded["order_line"].id))

    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()

    assert result["backend"] == "workflow"
    assert result["workflow_type"] == "still"
    assert result["celery_task_id"] == "canvas-123"
    assert run.execution_mode == "legacy"
    assert run.workflow_def_id == seeded["workflow_definition"].id
    assert run.order_line_id == seeded["order_line"].id
    assert run.celery_task_id == "canvas-123"
    assert {node_result.node_name for node_result in run.node_results} == {
        "setup",
        "template",
        "render",
        "output",
    }
    assert all(node_result.status == "pending" for node_result in run.node_results)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_when_workflow_runtime_preparation_is_invalid(
    db,
    admin_user,
    monkeypatch,
):
    """A crash in workflow context preparation falls back to legacy without persisting a run."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config=build_preset_workflow_config("still", {"width": 640, "height": 640}),
    )
    # Lambda that raises: generator-expression .throw() is the usual trick to
    # raise from inside a lambda.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_executor.prepare_workflow_context",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("prep exploded")),
    )

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )

    result = dispatch_render_with_workflow(str(seeded["order_line"].id))

    await db.rollback()

    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["rollout_gate_status"] == "workflow_preparation_failed"
    assert result["rollout_gate_verdict"] is None
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert result["rollout_workflow_definition_id"] == str(seeded["workflow_definition"].id)
    assert result["rollout_output_type_id"] == str(seeded["output_type"].id)
    assert any("Workflow runtime preparation failed:" in reason for reason in result["rollout_gate_reasons"])
    runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert runs == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_dispatches_supported_custom_workflow(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """Graph rollout mode executes the custom graph authoritatively and queues the render node."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config=_build_valid_custom_still_graph(execution_mode="graph"),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "graph"
    await db.commit()

    # Stub celery send_task with an anonymous object exposing only `.id`.
    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "graph-task-1"})(),
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}

    assert result["backend"] == "workflow_graph"
    assert result["execution_mode"] == "graph"
    assert result["task_ids"] == ["graph-task-1"]
    assert result["rollout_gate_status"] == "graph_authoritative"
    assert result["rollout_gate_verdict"] == "pass"
    assert result["workflow_rollout_ready"] is True
    assert result["output_type_rollout_ready"] is True
    assert run.execution_mode == "graph"
    assert run.status == "pending"
    # Setup/template run inline; the render node is queued on celery.
    assert node_results["setup"].status == "completed"
    assert node_results["template"].status == "completed"
    assert node_results["render"].status == "queued"
    assert node_results["render"].output["publish_asset_enabled"] is True
    assert node_results["render"].output["graph_authoritative_output_enabled"] is False
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_uses_output_save_as_authoritative_boundary(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """With an output_save node, publication is deferred to the graph's output node."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Output Save {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config=_build_valid_custom_still_graph(execution_mode="graph", include_output=True),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "graph"
    await db.commit()

    # Record every celery dispatch so the task payload can be inspected.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "graph-output-save-task-1"})()

    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}

    assert result["backend"] == "workflow_graph"
    assert result["task_ids"] == ["graph-output-save-task-1"]
    assert len(calls) == 1
    assert calls[0][0] == "app.domains.rendering.tasks.render_order_line_still_task"
    assert calls[0][1] == [str(order_line.id)]
    # The render task must NOT publish directly; the output node owns the save.
    assert calls[0][2]["publish_asset_enabled"] is False
    assert calls[0][2]["graph_authoritative_output_enabled"] is True
    assert calls[0][2]["graph_output_node_ids"] == ["output"]
    assert node_results["output"].status == "pending"
    assert node_results["output"].output["publication_mode"] == "awaiting_graph_authoritative_save"
    assert node_results["output"].output["handoff_state"] == "armed"
    assert node_results["output"].output["handoff_node_ids"] == ["render"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_canonicalizes_legacy_preset_config(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A legacy type/params preset config is expanded into a graph when rollout mode is graph."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Legacy Preset Graph {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        # Old-style preset config: no explicit nodes/edges.
        config={
            "type": "still",
            "params": {"width": 1024, "height": 768},
            "ui": {"execution_mode": "graph"},
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "graph"
    await db.commit()

    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "legacy-graph-task-1"})(),
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}

    assert result["backend"] == "workflow_graph"
    assert result["execution_mode"] == "graph"
    assert result["task_ids"] == ["legacy-graph-task-1"]
    assert run.execution_mode == "graph"
    # Canonicalization yields the standard still pipeline node names.
    assert node_results["setup"].status == "completed"
    assert node_results["template"].status == "completed"
    assert node_results["render"].status == "queued"
    assert node_results["output"].status == "pending"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_falls_back_to_legacy_on_graph_failure(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """If graph execution raises, dispatch marks the run failed and falls back to legacy."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config=_build_valid_custom_still_graph(execution_mode="graph"),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "graph"
    await db.commit()

    # Make the graph runtime blow up, then verify the legacy path is taken.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("graph dispatch exploded")),
    )
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    runs = (
        await db.execute(
            select(WorkflowRun).options(selectinload(WorkflowRun.node_results)).order_by(WorkflowRun.created_at.desc())
        )
    ).scalars().all()
    run = runs[0]

    assert result["backend"] == "legacy"
    assert result["fallback_from"] == "workflow_graph"
    assert result["workflow_run_id"] == str(run.id)
    assert result["rollout_gate_status"] == "graph_execution_failed"
    assert result["workflow_rollout_ready"] is False
    assert run.execution_mode == "graph"
    assert run.status == "failed"
    assert run.error_message == "graph dispatch exploded"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_graph_capable_workflow_respects_legacy_only_rollout_mode(
    db,
    admin_user,
    monkeypatch,
):
    """The output type's rollout mode overrides a graph-capable workflow config."""
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config=_build_valid_custom_still_graph(execution_mode="graph"),
    )
    output_type = seeded["output_type"]
    # Force the rollout gate shut even though the config says "graph".
    output_type.workflow_rollout_mode = "legacy_only"
    await db.commit()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )

    result = dispatch_render_with_workflow(str(seeded["order_line"].id))

    await db.rollback()

    assert result["backend"] == "legacy"
    assert result["order_line_id"] == str(seeded["order_line"].id)
    assert result["workflow_rollout_mode"] == "legacy_only"
    assert result["configured_execution_mode"] == "graph"
    assert result["rollout_gate_status"] == "rollout_legacy_only"
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_keeps_legacy_authoritative_and_dispatches_graph_observer(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """Shadow mode dispatches legacy as authoritative plus a non-publishing observer render."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config=_build_valid_custom_still_graph(execution_mode="shadow"),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "shadow"
    await db.commit()

    # Capture the shadow render task payload for inspection.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "shadow-task-1"})()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["shadow_workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    render_call = calls[0]

    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "dispatched"
    assert result["shadow_task_ids"] == ["shadow-task-1"]
    assert result["rollout_gate_status"] == "pending_shadow_verdict"
    assert result["rollout_gate_verdict"] is None
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert run.execution_mode == "shadow"
    assert run.status == "pending"
    assert render_call[0] == "app.domains.rendering.tasks.render_order_line_still_task"
    assert render_call[1] == [str(order_line.id)]
    # The observer render must be fully side-effect free: no publish, no
    # events, no job document; outputs are suffixed to avoid collisions.
    assert render_call[2]["publish_asset_enabled"] is False
    assert render_call[2]["emit_events"] is False
    assert render_call[2]["job_document_enabled"] is False
    assert render_call[2]["output_name_suffix"].startswith("shadow-")
    assert render_call[2]["workflow_run_id"] == str(run.id)
    assert render_call[2]["workflow_node_id"] == "render"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_canonicalizes_legacy_preset_config(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A legacy preset config in shadow mode skips the shadow run when prerequisites are missing."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Legacy Preset Shadow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        # Old-style preset config: no explicit nodes/edges.
        config={
            "type": "still",
            "params": {"width": 1024, "height": 768},
            "ui": {"execution_mode": "shadow"},
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "shadow"
    await db.commit()

    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "legacy-shadow-task-1"})()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "skipped"
    assert result["rollout_gate_status"] == "shadow_skipped"
    assert "shadow_workflow_run_id" not in result
    # The skip reason mentions material_assignments — presumably the
    # canonicalized still preset requires them; confirm in dispatch_service.
    assert "material_assignments" in result["shadow_error"]
    assert calls == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_ignores_graph_failures_after_legacy_dispatch(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """Shadow graph failures are recorded on the run but never affect the legacy result."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config=_build_valid_custom_still_graph(execution_mode="shadow"),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    output_type.workflow_rollout_mode = "shadow"
    await db.commit()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    # Make the observer graph raise after the authoritative legacy dispatch.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("shadow graph exploded")),
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    await db.rollback()

    run = (
        await db.execute(select(WorkflowRun).order_by(WorkflowRun.created_at.desc()))
    ).scalars().first()

    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "failed"
    assert result["shadow_error"] == "shadow graph exploded"
    assert result["shadow_workflow_run_id"] == str(run.id)
    assert result["rollout_gate_status"] == "shadow_execution_failed"
    assert result["workflow_rollout_ready"] is False
    assert run.execution_mode == "shadow"
    assert run.status == "failed"
    assert run.error_message == "shadow graph exploded"
|
|
|
|
|
|
def test_evaluate_rollout_gate_passes_exact_match(tmp_path: Path):
    """Byte-identical authoritative and observer renders pass the gate."""
    fill = (0, 128, 255, 255)
    authoritative_path = tmp_path / "authoritative.png"
    observer_path = tmp_path / "observer.png"
    for target in (authoritative_path, observer_path):
        Image.new("RGBA", (16, 16), color=fill).save(target)

    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(authoritative_path)),
        observer_output=_build_artifact(str(observer_path)),
        exact_match=True,
        dimensions_match=True,
        mean_pixel_delta=0.0,
    )

    assert gate["verdict"] == "pass"
    assert gate["ready"] is True
    assert gate["workflow_rollout_ready"] is True
    assert gate["output_type_rollout_ready"] is True
|
|
|
|
|
|
def test_evaluate_rollout_gate_warns_on_small_visual_delta(tmp_path: Path):
    """A tiny but above-pass-threshold pixel delta yields a warn verdict."""
    authoritative_path = tmp_path / "authoritative.png"
    observer_path = tmp_path / "observer.png"

    # Observer differs from the authoritative image by one unit in one channel.
    Image.new("RGBA", (16, 16), color=(0, 128, 255, 255)).save(authoritative_path)
    Image.new("RGBA", (16, 16), color=(0, 129, 255, 255)).save(observer_path)

    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(authoritative_path)),
        observer_output=_build_artifact(str(observer_path)),
        exact_match=False,
        dimensions_match=True,
        mean_pixel_delta=1 / (4 * 255),
    )

    assert gate["verdict"] == "warn"
    assert gate["ready"] is False
    assert gate["status"] == "hold_legacy_authoritative"
    assert any("warn threshold" in reason for reason in gate["reasons"])
|
|
|
|
|
|
def test_evaluate_rollout_gate_passes_near_zero_visual_delta(tmp_path: Path):
    """Two off-by-one pixels in a megapixel image stay under the pass threshold."""
    baseline_path = tmp_path / "authoritative.png"
    candidate_path = tmp_path / "observer.png"
    fill = (106, 106, 106, 255)

    for target in (baseline_path, candidate_path):
        Image.new("RGBA", (1024, 1024), color=fill).save(target)

    # Perturb two pixels of the observer image by a single channel unit each.
    with Image.open(candidate_path) as image:
        image.putpixel((444, 137), (106, 106, 107, 255))
        image.putpixel((651, 142), (105, 106, 106, 255))
        image.save(candidate_path)

    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(baseline_path)),
        observer_output=_build_artifact(str(candidate_path)),
        exact_match=False,
        dimensions_match=True,
        mean_pixel_delta=(1 + 1) / (1024 * 1024 * 4 * 255),
    )

    assert gate["verdict"] == "pass"
    assert gate["ready"] is True
    assert gate["status"] == "ready_for_rollout"
    pass_reasons = [reason for reason in gate["reasons"] if "pass threshold" in reason]
    assert pass_reasons
|
|
|
|
|
|
def test_evaluate_rollout_gate_fails_on_missing_observer(tmp_path: Path):
    """The gate fails outright when the observer artifact does not exist on disk."""
    baseline_path = tmp_path / "authoritative.png"
    Image.new("RGBA", (16, 16), color=(0, 128, 255, 255)).save(baseline_path)
    # Deliberately never create the observer file.
    absent_path = tmp_path / "missing.png"

    gate = evaluate_rollout_gate(
        authoritative_output=_build_artifact(str(baseline_path)),
        observer_output=_build_artifact(str(absent_path)),
        exact_match=None,
        dimensions_match=None,
        mean_pixel_delta=None,
    )

    assert gate["verdict"] == "fail"
    assert gate["ready"] is False
    missing_reasons = [
        reason for reason in gate["reasons"] if "Observer workflow output is missing" in reason
    ]
    assert missing_reasons
|
|
|
|
|
|
def test_dispatch_render_with_workflow_unit_adds_legacy_only_rollout_signal(monkeypatch):
    """Dispatch falls back to the legacy backend when the output type has no
    workflow definition, and annotates the result with a ``legacy_only``
    rollout-gate status so callers can see no workflow rollout is in progress.
    """
    order_line_id = str(uuid.uuid4())
    output_type_id = uuid.uuid4()
    # Order line whose output type has workflow_definition_id=None -> legacy path.
    fake_line = SimpleNamespace(
        id=uuid.UUID(order_line_id),
        output_type=SimpleNamespace(id=output_type_id, workflow_definition_id=None),
    )

    class _FakeExecuteResult:
        # Mimics SQLAlchemy's Result.scalar_one_or_none() for a single row.
        def __init__(self, value):
            self._value = value

        def scalar_one_or_none(self):
            return self._value

    class _FakeSession:
        # Minimal context-manager stand-in for sqlalchemy.orm.Session; always
        # returns the fake order line for any executed query.
        def __init__(self, _engine):
            self._engine = _engine

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def execute(self, _query):
            return _FakeExecuteResult(fake_line)

    # Avoid any real database connectivity and capture the legacy dispatch call.
    monkeypatch.setattr("sqlalchemy.create_engine", lambda *args, **kwargs: object())
    monkeypatch.setattr("sqlalchemy.orm.Session", _FakeSession)
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda value: {"backend": "legacy", "order_line_id": value},
    )

    result = dispatch_render_with_workflow(order_line_id)

    assert result["backend"] == "legacy"
    assert result["rollout_gate_status"] == "legacy_only"
    assert result["workflow_rollout_ready"] is False
    assert result["rollout_output_type_id"] == str(output_type_id)
|
|
|
|
|
|
def test_dispatch_render_with_workflow_unit_marks_shadow_dispatch_as_pending_rollout(monkeypatch):
    """When the output type routes to a workflow in ``shadow`` rollout mode,
    dispatch still renders via the legacy backend but also launches a shadow
    graph run and reports a ``pending_shadow_verdict`` rollout gate.
    """
    order_line_id = str(uuid.uuid4())
    output_type_id = uuid.uuid4()
    workflow_def_id = uuid.uuid4()
    fake_line = SimpleNamespace(
        id=uuid.UUID(order_line_id),
        output_type=SimpleNamespace(
            id=output_type_id,
            workflow_definition_id=workflow_def_id,
            workflow_rollout_mode="shadow",
        ),
    )
    fake_workflow_def = SimpleNamespace(id=workflow_def_id, config={"version": 1}, is_active=True)
    fake_run = SimpleNamespace(id=uuid.uuid4())
    # The dispatcher issues two queries in order: first the order line, then
    # the workflow definition; pop() serves them in that sequence.
    execute_values = [fake_line, fake_workflow_def]

    class _FakeExecuteResult:
        # Mimics SQLAlchemy's Result.scalar_one_or_none() for a single row.
        def __init__(self, value):
            self._value = value

        def scalar_one_or_none(self):
            return self._value

    class _FakeSession:
        # Minimal Session stand-in supporting every call dispatch makes.
        def __init__(self, _engine):
            self._engine = _engine

        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            return False

        def execute(self, _query):
            return _FakeExecuteResult(execute_values.pop(0))

        def commit(self):
            return None

        def rollback(self):
            return None

        def add(self, _value):
            return None

    # Stub out DB access, legacy dispatch, and every workflow helper so the
    # unit under test only exercises the dispatch orchestration logic.
    monkeypatch.setattr("sqlalchemy.create_engine", lambda *args, **kwargs: object())
    monkeypatch.setattr("sqlalchemy.orm.Session", _FakeSession)
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda value: {"backend": "legacy", "order_line_id": value},
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_config_utils.canonicalize_workflow_config",
        lambda config: config,
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_config_utils.get_workflow_execution_mode",
        lambda config, default="legacy": "shadow",
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_executor.prepare_workflow_context",
        lambda *args, **kwargs: {"nodes": [{"id": "render"}]},
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.find_unsupported_graph_nodes",
        lambda *_args, **_kwargs: [],
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_run_service.create_workflow_run",
        lambda *args, **kwargs: fake_run,
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *args, **kwargs: SimpleNamespace(task_ids=["shadow-task-1"]),
    )

    result = dispatch_render_with_workflow(order_line_id)

    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "dispatched"
    assert result["rollout_gate_status"] == "pending_shadow_verdict"
    assert result["workflow_rollout_ready"] is False
    assert result["output_type_rollout_ready"] is False
    assert result["shadow_workflow_run_id"] == str(fake_run.id)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_returns_workflow_run_with_node_results(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """Dispatching a saved still+exports workflow queues both Celery tasks,
    merges the output type's render settings into the render kwargs, and
    records per-node results (queued/completed/pending) on the workflow run.
    """
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    # Output-type render settings should take precedence over the workflow's
    # own 640x640 preset dimensions in the dispatched task kwargs.
    output_type.render_settings = {
        "width": 2048,
        "height": 2048,
        "engine": "cycles",
        "samples": 128,
    }
    workflow_definition = WorkflowDefinition(
        name=f"Dispatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still_with_exports", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Capture every Celery send_task call instead of hitting a broker.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"task-{len(calls)}"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()

    assert body["context_id"] == context_id
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 2
    assert body["task_ids"] == ["task-1", "task-2"]
    # Render and blend-export tasks are queued, in that order, for the line.
    assert [call[0] for call in calls] == [
        "app.domains.rendering.tasks.render_order_line_still_task",
        "app.domains.rendering.tasks.export_blend_for_order_line_task",
    ]
    assert [call[1] for call in calls] == [[context_id], [context_id]]
    assert calls[0][2]["publish_asset_enabled"] is False
    assert calls[0][2]["graph_authoritative_output_enabled"] is True
    assert calls[0][2]["graph_output_node_ids"] == ["output"]
    assert calls[0][2]["width"] == 2048
    assert calls[0][2]["height"] == 2048
    assert calls[0][2]["samples"] == 128
    assert calls[0][2]["workflow_node_id"] == "render"
    assert calls[1][2]["workflow_node_id"] == "blend"
    # Both tasks must belong to the same workflow run.
    assert "workflow_run_id" in calls[0][2]
    assert calls[0][2]["workflow_run_id"] == calls[1][2]["workflow_run_id"]

    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
    assert body["workflow_run"]["status"] == "pending"
    assert body["workflow_run"]["execution_mode"] == "graph"
    assert body["workflow_run"]["celery_task_id"] == "task-1"
    assert body["workflow_run"]["order_line_id"] == str(order_line.id)
    assert node_results["render"]["status"] == "queued"
    assert node_results["render"]["output"]["task_id"] == "task-1"
    assert node_results["blend"]["status"] == "queued"
    assert node_results["blend"]["output"]["task_id"] == "task-2"
    assert node_results["setup"]["status"] == "completed"
    assert node_results["setup"]["output"]["order_line_id"] == str(order_line.id)
    assert node_results["template"]["status"] == "completed"
    assert node_results["template"]["output"]["use_materials"] is False
    # The output node stays pending until the render task hands off its result.
    assert node_results["output"]["status"] == "pending"
    assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
    assert node_results["output"]["output"]["handoff_state"] == "armed"
    assert node_results["output"]["output"]["handoff_node_ids"] == ["render"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_rejects_output_save_for_export_blend_only_graph(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """A blend-export-only graph cannot feed an ``output_save`` node, so
    dispatch must respond 422 before any Celery task is queued.
    """
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Blend Output Workflow {uuid.uuid4().hex[:8]}",
        config=_build_valid_custom_blend_graph(include_output=True),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Capture Celery dispatches; the endpoint should never invoke this.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"task-{len(calls)}"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )

    assert response.status_code == 422
    assert "output_save" in response.json()["detail"]
    # Validation failure must short-circuit before any task dispatch.
    assert calls == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_arms_output_save_for_turntable(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """A turntable graph with an output node dispatches a single turntable
    render task carrying graph-authoritative-output flags, and arms the
    output node for the later handoff from the turntable node.
    """
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Turntable Output Workflow {uuid.uuid4().hex[:8]}",
        config=_build_valid_custom_turntable_graph(include_output=True),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Capture every Celery send_task call instead of hitting a broker.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"task-{len(calls)}"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()

    assert body["context_id"] == context_id
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 1
    assert body["task_ids"] == ["task-1"]
    assert calls[0][0] == "app.domains.rendering.tasks.render_turntable_task"
    assert calls[0][1] == [context_id]
    assert calls[0][2]["workflow_run_id"] == body["workflow_run"]["id"]
    assert calls[0][2]["workflow_node_id"] == "turntable"
    assert calls[0][2]["publish_asset_enabled"] is False
    assert calls[0][2]["graph_authoritative_output_enabled"] is True
    assert calls[0][2]["graph_output_node_ids"] == ["output"]
    assert calls[0][2]["fps"] == 24

    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
    assert node_results["turntable"]["status"] == "queued"
    assert node_results["turntable"]["output"]["predicted_asset_type"] == "turntable"
    assert node_results["turntable"]["output"]["publish_asset_enabled"] is False
    assert node_results["turntable"]["output"]["graph_authoritative_output_enabled"] is True
    assert node_results["turntable"]["output"]["graph_output_node_ids"] == ["output"]
    # Output node waits, armed, for the turntable task's authoritative save.
    assert node_results["output"]["status"] == "pending"
    assert node_results["output"]["output"]["publication_mode"] == "awaiting_graph_authoritative_save"
    assert node_results["output"]["output"]["handoff_state"] == "armed"
    assert node_results["output"]["output"]["handoff_node_ids"] == ["turntable"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_arms_notify_handoff_for_render_node(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """A still graph with a notify node defers notification to the render
    task: the render kwargs carry the notify node ids, and the notify node
    is recorded as pending/armed rather than dispatched separately.
    """
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Notify Workflow {uuid.uuid4().hex[:8]}",
        config=_build_valid_custom_still_graph(include_notify=True),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Capture every Celery send_task call instead of hitting a broker.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "task-1"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()

    assert body["context_id"] == context_id
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 1
    assert body["task_ids"] == ["task-1"]
    # Only the render task is dispatched; the notify node rides along in kwargs.
    assert len(calls) == 1
    assert calls[0][0] == "app.domains.rendering.tasks.render_order_line_still_task"
    assert calls[0][1] == [context_id]
    assert calls[0][2]["workflow_run_id"] == body["workflow_run"]["id"]
    assert calls[0][2]["workflow_node_id"] == "render"
    assert calls[0][2]["emit_legacy_notifications"] is True
    assert calls[0][2]["graph_notify_node_ids"] == ["notify"]

    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
    assert node_results["render"]["status"] == "queued"
    assert node_results["render"]["output"]["graph_notify_node_ids"] == ["notify"]
    assert node_results["notify"]["status"] == "pending"
    assert node_results["notify"]["output"]["notification_mode"] == "deferred_to_render_task"
    assert node_results["notify"]["output"]["armed_node_ids"] == ["render"]
    assert node_results["notify"]["output"]["handoff_state"] == "armed"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_preflight_endpoint_reports_render_graph_readiness(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Preflight for a saved still+exports workflow resolves the order-line
    context, allows graph dispatch, and reports per-node readiness —
    including a warning (not an error) for a missing template.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Preflight Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still_with_exports", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    response = await client.get(
        f"/api/workflows/{workflow_definition.id}/preflight",
        params={"context_id": str(order_line.id)},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    node_checks = {node["node_id"]: node for node in body["nodes"]}

    assert body["workflow_id"] == str(workflow_definition.id)
    assert body["context_kind"] == "order_line"
    assert body["expected_context_kind"] == "order_line"
    assert body["execution_mode"] == "legacy"
    assert body["graph_dispatch_allowed"] is True
    assert body["resolved_order_line_id"] == str(order_line.id)
    assert body["resolved_cad_file_id"] == str(order_line.product.cad_file_id)
    assert body["unsupported_node_ids"] == []
    assert node_checks["setup"]["status"] == "ready"
    # Seeded line has no template; preflight should warn but not block.
    assert node_checks["template"]["status"] == "warning"
    assert node_checks["template"]["issues"][0]["code"] == "template_missing"
    assert node_checks["render"]["status"] == "ready"
    assert node_checks["blend"]["status"] == "ready"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_draft_dispatch_endpoint_dispatches_unsaved_render_graph(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """The draft dispatch endpoint runs an inline (unsaved) graph config,
    while still attributing the resulting workflow run to the referenced
    saved workflow definition.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Draft Dispatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Capture every Celery send_task call instead of hitting a broker.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"draft-task-{len(calls)}"})()

    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        "/api/workflows/dispatch",
        headers=auth_headers,
        json={
            "workflow_id": str(workflow_definition.id),
            "context_id": str(order_line.id),
            # Inline draft config overrides the saved 640x640 preset.
            "config": _build_valid_custom_still_graph(width=800, height=600),
        },
    )

    assert response.status_code == 200
    body = response.json()
    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}

    assert body["context_id"] == str(order_line.id)
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 1
    assert body["task_ids"] == ["draft-task-1"]
    assert body["workflow_run"]["workflow_def_id"] == str(workflow_definition.id)
    assert body["workflow_run"]["execution_mode"] == "graph"
    assert body["workflow_run"]["order_line_id"] == str(order_line.id)
    assert [call[0] for call in calls] == ["app.domains.rendering.tasks.render_order_line_still_task"]
    assert calls[0][1] == [str(order_line.id)]
    assert calls[0][2]["workflow_node_id"] == "render"
    assert "workflow_run_id" in calls[0][2]
    assert node_results["setup"]["status"] == "completed"
    assert node_results["template"]["status"] == "completed"
    assert node_results["render"]["status"] == "queued"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_draft_dispatch_endpoint_marks_submitted_order_processing(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """Draft-dispatching a line of a ``submitted`` order transitions the
    order to ``processing`` and stamps ``processing_started_at`` without
    prematurely setting a completion timestamp.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    order = await db.get(Order, order_line.order_id)
    assert order is not None
    # Put the order in the state that should trigger the transition.
    order.status = OrderStatus.submitted
    await db.commit()

    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "draft-task-1"})(),
    )
    response = await client.post(
        "/api/workflows/dispatch",
        headers=auth_headers,
        json={
            "context_id": str(order_line.id),
            "config": _build_valid_custom_still_graph(),
        },
    )

    assert response.status_code == 200
    await db.refresh(order)
    assert order.status == OrderStatus.processing
    assert order.processing_started_at is not None
    assert order_line.order.completed_at is None
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_draft_dispatch_endpoint_rejects_invalid_graph_config(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """A draft config whose edge references a nonexistent node is rejected
    with a 422 and an explanatory detail message.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)

    response = await client.post(
        "/api/workflows/dispatch",
        headers=auth_headers,
        json={
            "context_id": str(order_line.id),
            "config": {
                "version": 1,
                "ui": {"preset": "custom", "execution_mode": "graph"},
                "nodes": [
                    {"id": "render", "step": "blender_still", "params": {}},
                ],
                # Edge source "missing" does not exist in the node list.
                "edges": [
                    {"from": "missing", "to": "render"},
                ],
            },
        },
    )

    assert response.status_code == 422
    assert "Invalid workflow config" in response.json()["detail"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_preflight_endpoint_rejects_context_kind_mismatch(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Preflighting an order-line workflow with a CAD-file context id still
    returns 200, but flags the context-kind mismatch and disallows graph
    dispatch.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Mismatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Pass the CAD file id where the workflow expects an order-line id.
    response = await client.get(
        f"/api/workflows/{workflow_definition.id}/preflight",
        params={"context_id": str(order_line.product.cad_file_id)},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()

    assert body["context_kind"] == "cad_file"
    assert body["expected_context_kind"] == "order_line"
    assert body["graph_dispatch_allowed"] is False
    assert any(issue["code"] == "context_kind_mismatch" for issue in body["issues"])
    assert any(node["status"] == "error" for node in body["nodes"])
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_preflight_endpoint_supports_direct_cad_file_graphs(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """A cad_intake blueprint workflow preflighted against a raw CAD file
    resolves the cad_file context and reports every node ready.
    """
    # Seed a minimal STEP file on disk backing the CadFile row.
    step_path = tmp_path / "cad-preflight" / "thumb.step"
    step_path.parent.mkdir(parents=True, exist_ok=True)
    step_path.write_text("STEP", encoding="utf-8")

    cad_file = CadFile(
        original_name="thumb.step",
        stored_path=str(step_path),
        file_hash=f"hash-{uuid.uuid4().hex}",
        parsed_objects={"objects": ["Body"]},
    )
    workflow_definition = WorkflowDefinition(
        name=f"CAD Workflow {uuid.uuid4().hex[:8]}",
        config=build_workflow_blueprint_config("cad_intake"),
        is_active=True,
    )
    db.add_all([cad_file, workflow_definition])
    await db.commit()
    await db.refresh(workflow_definition)

    response = await client.get(
        f"/api/workflows/{workflow_definition.id}/preflight",
        params={"context_id": str(cad_file.id)},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()

    assert body["context_kind"] == "cad_file"
    assert body["expected_context_kind"] == "cad_file"
    assert body["execution_mode"] == "legacy"
    assert body["graph_dispatch_allowed"] is True
    assert body["resolved_cad_file_id"] == str(cad_file.id)
    assert all(node["status"] == "ready" for node in body["nodes"])
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_draft_preflight_endpoint_validates_unsaved_render_graph(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Draft preflight accepts an unsaved graph config (no workflow_id),
    resolves the order-line context, and reports the nodes in graph order.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)

    response = await client.post(
        "/api/workflows/preflight",
        headers=auth_headers,
        json={
            "context_id": str(order_line.id),
            "config": _build_valid_custom_still_graph(width=640, height=640),
        },
    )

    assert response.status_code == 200
    body = response.json()

    # No saved workflow backs a draft preflight.
    assert body["workflow_id"] is None
    assert body["context_kind"] == "order_line"
    assert body["expected_context_kind"] == "order_line"
    assert body["execution_mode"] == "graph"
    assert body["graph_dispatch_allowed"] is True
    assert body["resolved_order_line_id"] == str(order_line.id)
    assert [node["node_id"] for node in body["nodes"]] == [
        "setup",
        "template",
        "populate_materials",
        "resolve_materials",
        "render",
    ]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_draft_preflight_endpoint_reports_context_kind_mismatch(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Draft preflight with a CAD-file context against an order-line preset
    reports the mismatch and disallows graph dispatch.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)

    # Supply the CAD file id where the still preset expects an order line.
    response = await client.post(
        "/api/workflows/preflight",
        headers=auth_headers,
        json={
            "context_id": str(order_line.product.cad_file_id),
            "config": build_preset_workflow_config("still", {"width": 640, "height": 640}),
        },
    )

    assert response.status_code == 200
    body = response.json()

    assert body["workflow_id"] is None
    assert body["context_kind"] == "cad_file"
    assert body["expected_context_kind"] == "order_line"
    assert body["graph_dispatch_allowed"] is False
    assert any(issue["code"] == "context_kind_mismatch" for issue in body["issues"])
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_draft_preflight_endpoint_rejects_invalid_graph_config(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """Draft preflight rejects a graph config whose edge references a
    nonexistent node with a 422 and an explanatory detail message.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)

    response = await client.post(
        "/api/workflows/preflight",
        headers=auth_headers,
        json={
            "context_id": str(order_line.id),
            "config": {
                "version": 1,
                "ui": {"preset": "custom", "execution_mode": "graph"},
                "nodes": [
                    {"id": "render", "step": "blender_still", "params": {}},
                ],
                # Edge source "missing" does not exist in the node list.
                "edges": [
                    {"from": "missing", "to": "render"},
                ],
            },
        },
    )

    assert response.status_code == 422
    assert "Invalid workflow config" in response.json()["detail"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_identical_shadow_output(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """The comparison endpoint reports an exact, zero-delta match when the
    shadow output is byte-identical to the authoritative render.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()

    render_dir = tmp_path / "comparison" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    # Shadow file naming convention: <stem>_shadow-<run id prefix>.png.
    shadow_path = render_dir / f"authoritative_shadow-{str(workflow_run.id)[:8]}.png"

    Image.new("RGBA", (8, 8), (0, 128, 255, 255)).save(authoritative_path)
    Image.new("RGBA", (8, 8), (0, 128, 255, 255)).save(shadow_path)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    assert body["workflow_run_id"] == str(workflow_run.id)
    assert body["execution_mode"] == "shadow"
    assert body["status"] == "matched"
    assert body["exact_match"] is True
    assert body["dimensions_match"] is True
    assert body["mean_pixel_delta"] == 0.0
    assert body["authoritative_output"]["path"] == str(authoritative_path)
    assert body["observer_output"]["path"] == str(shadow_path)
    assert body["authoritative_output"]["image_width"] == 8
    assert body["observer_output"]["image_height"] == 8
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_metadata_only_difference_as_matched(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """PNGs that differ only in embedded text metadata (different ``Date``
    chunks) are not byte-identical but must still compare as ``matched``
    with a zero visual delta.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()

    render_dir = tmp_path / "comparison-metadata" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    shadow_path = render_dir / f"authoritative_shadow-{str(workflow_run.id)[:8]}.png"

    # Same pixels, different PNG tEXt "Date" chunks -> files differ on disk.
    image = Image.new("RGBA", (8, 8), (0, 128, 255, 255))
    authoritative_meta = PngImagePlugin.PngInfo()
    authoritative_meta.add_text("Date", "2026-04-07 10:38:23")
    observer_meta = PngImagePlugin.PngInfo()
    observer_meta.add_text("Date", "2026-04-07 10:40:45")
    image.save(authoritative_path, pnginfo=authoritative_meta)
    image.save(shadow_path, pnginfo=observer_meta)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "matched"
    assert body["exact_match"] is False
    assert body["dimensions_match"] is True
    assert body["mean_pixel_delta"] == 0.0
    assert body["summary"] == (
        "Observer output matches the authoritative legacy output within the visual pass threshold."
    )
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_missing_shadow_output(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """When no shadow artifact exists, the comparison reports
    ``missing_observer`` with null match metrics instead of failing.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()

    # Only the authoritative artifact is written; no shadow counterpart.
    render_dir = tmp_path / "comparison-missing" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    Image.new("RGBA", (4, 4), (255, 64, 64, 255)).save(authoritative_path)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "missing_observer"
    assert body["exact_match"] is None
    assert body["observer_output"]["exists"] is False
    assert body["authoritative_output"]["exists"] is True
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_finds_shadow_output_in_step_files_render_dir(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """The comparison lookup also searches the upload dir's
    ``step_files/renders/<line id>`` fallback location for shadow output
    when none sits next to the authoritative file.
    """
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()

    render_dir = tmp_path / "comparison-step-files" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    Image.new("RGBA", (12, 12), (32, 160, 255, 255)).save(authoritative_path)

    # Shadow output lives under the step_files renders directory instead of
    # beside the authoritative artifact.
    step_shadow_dir = Path(settings.upload_dir) / "step_files" / "renders" / str(order_line.id)
    step_shadow_dir.mkdir(parents=True, exist_ok=True)
    shadow_path = step_shadow_dir / f"line_{order_line.id}_shadow-{str(workflow_run.id)[:8]}.png"
    Image.new("RGBA", (12, 12), (32, 160, 255, 255)).save(shadow_path)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "matched"
    assert body["observer_output"]["exists"] is True
    assert body["observer_output"]["path"] == str(shadow_path)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_treats_near_zero_visual_delta_as_match(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """A shadow output differing by a few pixels still counts as ``matched``.

    Writes two 1024x1024 solid-color renders, then perturbs exactly two
    pixels of the shadow output by one channel step each. The byte-level
    comparison must report ``exact_match`` False, yet the mean pixel delta
    (2 pixels out of ~1M) stays within the visual pass threshold, so the
    overall status is ``matched``.
    """
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()  # assign workflow_run.id before deriving the shadow filename

    render_dir = tmp_path / "comparison-near-zero" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    shadow_path = render_dir / f"line_{order_line.id}_shadow-{str(workflow_run.id)[:8]}.png"

    Image.new("RGBA", (1024, 1024), (106, 106, 106, 255)).save(authoritative_path)
    Image.new("RGBA", (1024, 1024), (106, 106, 106, 255)).save(shadow_path)

    # Introduce a minimal visual difference: two pixels nudged by one level.
    with Image.open(shadow_path) as image:
        image.putpixel((444, 137), (106, 106, 107, 255))
        image.putpixel((651, 142), (105, 106, 106, 255))
        image.save(shadow_path)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    assert body["status"] == "matched"
    assert body["exact_match"] is False
    assert body["dimensions_match"] is True
    assert body["mean_pixel_delta"] is not None
    # 2 single-step channel changes over 1024*1024*4 channels is ~<1e-6.
    assert body["mean_pixel_delta"] <= 1e-6
    assert "pass threshold" in body["summary"]