"""Integration tests for workflow-aware render dispatch and its API endpoints."""
from __future__ import annotations
|
|
|
|
import uuid
|
|
from pathlib import Path
|
|
|
|
import pytest
|
|
from PIL import Image
|
|
from sqlalchemy import select
|
|
from sqlalchemy.orm import selectinload
|
|
|
|
from app.config import settings
|
|
from app.domains.orders.models import Order, OrderLine
|
|
from app.domains.products.models import CadFile, Product
|
|
from app.domains.rendering.dispatch_service import dispatch_render_with_workflow
|
|
from app.domains.rendering.models import OutputType, WorkflowDefinition, WorkflowRun
|
|
from app.domains.rendering.workflow_config_utils import build_preset_workflow_config
|
|
|
|
|
|
def _use_test_database(monkeypatch) -> None:
    """Point the application settings at the dockerised test database."""
    overrides = {
        "postgres_host": "postgres",
        "postgres_port": 5432,
        "postgres_user": "hartomat",
        "postgres_password": "hartomat",
        "postgres_db": "hartomat_test",
    }
    for attr, value in overrides.items():
        monkeypatch.setattr(settings, attr, value)
|
|
|
|
|
|
async def _seed_order_line(
    db,
    admin_user,
    *,
    workflow_config: dict | None = None,
) -> dict[str, object]:
    """Commit a minimal order line plus its product and output type.

    When *workflow_config* is given, an active ``WorkflowDefinition`` is
    created as well and attached to the output type. Returns the seeded ORM
    objects under the keys ``order_line``, ``workflow_definition`` (``None``
    when no config was supplied) and ``output_type``.
    """
    product = Product(pim_id=f"PIM-{uuid.uuid4().hex[:8]}", name="Workflow Test Product")
    output_type = OutputType(name=f"Workflow Output {uuid.uuid4().hex[:8]}", render_backend="auto")
    order = Order(order_number=f"WF-{uuid.uuid4().hex[:10]}", created_by=admin_user.id)
    db.add_all([product, output_type, order])
    # Flush so the generated primary keys are available below.
    await db.flush()

    definition = None
    if workflow_config is not None:
        definition = WorkflowDefinition(
            name=f"Workflow {uuid.uuid4().hex[:8]}",
            output_type_id=output_type.id,
            config=workflow_config,
            is_active=True,
        )
        db.add(definition)
        await db.flush()
        output_type.workflow_definition_id = definition.id

    line = OrderLine(
        order_id=order.id,
        product_id=product.id,
        output_type_id=output_type.id,
    )
    db.add(line)
    await db.commit()

    return {
        "order_line": line,
        "workflow_definition": definition,
        "output_type": output_type,
    }
|
|
|
|
|
|
async def _seed_renderable_order_line(
    db,
    admin_user,
    tmp_path: Path,
) -> OrderLine:
    """Commit an order line backed by an on-disk STEP file so dispatch can run."""
    # A tiny placeholder STEP file — dispatch only needs the path to exist.
    step_path = tmp_path / "dispatch" / "product.step"
    step_path.parent.mkdir(parents=True, exist_ok=True)
    step_path.write_text("STEP", encoding="utf-8")

    cad = CadFile(
        original_name="product.step",
        stored_path=str(step_path),
        file_hash=f"hash-{uuid.uuid4().hex}",
        parsed_objects={"objects": ["Body"]},
    )
    product = Product(
        pim_id=f"PIM-{uuid.uuid4().hex[:8]}",
        name="Dispatch Product",
        category_key="dispatch",
        cad_file=cad,
        cad_part_materials=[{"part_name": "Body", "material": "Steel"}],
    )
    output_type = OutputType(
        name=f"Workflow Output {uuid.uuid4().hex[:8]}",
        render_backend="auto",
    )
    order = Order(
        order_number=f"WF-{uuid.uuid4().hex[:10]}",
        created_by=admin_user.id,
    )
    line = OrderLine(order=order, product=product, output_type=output_type)

    db.add_all([cad, product, output_type, order, line])
    await db.commit()
    await db.refresh(line)
    return line
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_to_legacy_without_workflow_definition(
    db,
    admin_user,
    monkeypatch,
):
    """Without a workflow definition, dispatch uses the legacy path and writes no run."""
    _use_test_database(monkeypatch)
    fixtures = await _seed_order_line(db, admin_user)
    line_id = str(fixtures["order_line"].id)

    def _fake_legacy(order_line_id):
        return {"backend": "legacy", "order_line_id": order_line_id}

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        _fake_legacy,
    )

    result = dispatch_render_with_workflow(line_id)

    # Reset this session so state written by the dispatcher is read fresh.
    await db.rollback()

    assert result == {"backend": "legacy", "order_line_id": line_id}
    persisted_runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert persisted_runs == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_creates_run_and_node_results_for_preset_dispatch(
    db,
    admin_user,
    monkeypatch,
):
    """A preset workflow dispatch records a WorkflowRun with pending node results.

    The "still" preset runs in legacy execution mode: the actual render is
    delegated to ``dispatch_workflow`` (stubbed here), while a run plus one
    node result per preset node is persisted for observability.
    """
    _use_test_database(monkeypatch)
    seeded = await _seed_order_line(
        db,
        admin_user,
        workflow_config=build_preset_workflow_config("still", {"width": 1024, "height": 1024}),
    )

    # Stub the underlying Celery dispatch so no real task is enqueued.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_builder.dispatch_workflow",
        lambda workflow_type, order_line_id, params=None: "canvas-123",
    )

    result = dispatch_render_with_workflow(str(seeded["order_line"].id))

    # Reset this session so the run written by the dispatcher is re-read fresh.
    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()

    assert result["backend"] == "workflow"
    assert result["workflow_type"] == "still"
    assert result["celery_task_id"] == "canvas-123"
    assert run.execution_mode == "legacy"
    assert run.workflow_def_id == seeded["workflow_definition"].id
    assert run.order_line_id == seeded["order_line"].id
    assert run.celery_task_id == "canvas-123"
    # One node result per node of the "still" preset, all still pending.
    assert {node_result.node_name for node_result in run.node_results} == {
        "setup",
        "template",
        "render",
        "output",
    }
    assert all(node_result.status == "pending" for node_result in run.node_results)
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_falls_back_when_workflow_runtime_preparation_is_invalid(
    db,
    admin_user,
    monkeypatch,
):
    """An invalid workflow graph (edge from a missing node) falls back to legacy with no run."""
    _use_test_database(monkeypatch)
    # Config references a node id ("missing") that is not declared in `nodes`.
    fixtures = await _seed_order_line(
        db,
        admin_user,
        workflow_config={
            "version": 1,
            "nodes": [
                {"id": "render", "step": "blender_still", "params": {}},
            ],
            "edges": [
                {"from": "missing", "to": "render"},
            ],
        },
    )
    line_id = str(fixtures["order_line"].id)

    def _fake_legacy(order_line_id):
        return {"backend": "legacy", "order_line_id": order_line_id}

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        _fake_legacy,
    )

    result = dispatch_render_with_workflow(line_id)

    # Reset this session so state written by the dispatcher is read fresh.
    await db.rollback()

    assert result == {"backend": "legacy", "order_line_id": line_id}
    persisted_runs = (await db.execute(select(WorkflowRun))).scalars().all()
    assert persisted_runs == []
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_dispatches_supported_custom_workflow(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """Graph mode runs setup/template nodes inline and queues the render node via Celery."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()

    # Stub Celery so the queued render node gets a predictable task id.
    monkeypatch.setattr(
        "app.tasks.celery_app.celery_app.send_task",
        lambda task_name, args, kwargs: type("Result", (), {"id": "graph-task-1"})(),
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    # Reset this session so the run persisted by the dispatcher is re-read fresh.
    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    node_results = {node_result.node_name: node_result for node_result in run.node_results}

    assert result["backend"] == "workflow_graph"
    assert result["execution_mode"] == "graph"
    assert result["task_ids"] == ["graph-task-1"]
    assert run.execution_mode == "graph"
    assert run.status == "pending"
    # Inline nodes complete synchronously; the Celery-backed render node is queued.
    assert node_results["setup"].status == "completed"
    assert node_results["template"].status == "completed"
    assert node_results["render"].status == "queued"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_graph_mode_falls_back_to_legacy_on_graph_failure(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A graph-runtime crash falls back to legacy dispatch and marks the run failed.

    The setup node opts into ``fallback_to_legacy`` via its failure policy,
    so the caller still gets a legacy result while the failed run (with the
    original error message) remains queryable.
    """
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Graph Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "graph"},
            "nodes": [
                {
                    "id": "setup",
                    "step": "order_line_setup",
                    # Opt this node into the legacy fallback on failure.
                    "params": {"failure_policy": {"fallback_to_legacy": True}},
                },
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()

    # Force the graph runtime to raise, and stub the legacy path it should use.
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("graph dispatch exploded")),
    )
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    # Reset this session so state written by the dispatcher is read fresh.
    await db.rollback()

    runs = (
        await db.execute(
            select(WorkflowRun).options(selectinload(WorkflowRun.node_results)).order_by(WorkflowRun.created_at.desc())
        )
    ).scalars().all()
    run = runs[0]

    assert result["backend"] == "legacy"
    assert result["fallback_from"] == "workflow_graph"
    assert result["workflow_run_id"] == str(run.id)
    assert run.execution_mode == "graph"
    assert run.status == "failed"
    assert run.error_message == "graph dispatch exploded"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_keeps_legacy_authoritative_and_dispatches_graph_observer(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """Shadow mode keeps legacy authoritative while queueing a side-effect-free observer run."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "shadow"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "template", "step": "resolve_template", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "template"},
                {"from": "template", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()

    # Record every Celery dispatch made by the shadow observer.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": "shadow-task-1"})()

    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)

    result = dispatch_render_with_workflow(str(order_line.id))

    # Reset this session so the run written by the dispatcher is re-read fresh.
    await db.rollback()

    run_result = await db.execute(
        select(WorkflowRun)
        .where(WorkflowRun.id == uuid.UUID(result["shadow_workflow_run_id"]))
        .options(selectinload(WorkflowRun.node_results))
    )
    run = run_result.scalar_one()
    render_call = calls[0]

    # The legacy result stays authoritative; shadow metadata rides alongside.
    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "dispatched"
    assert result["shadow_task_ids"] == ["shadow-task-1"]
    assert run.execution_mode == "shadow"
    assert run.status == "pending"
    # The observer render must be side-effect free: no asset publishing,
    # no events, no job document, and an output name suffixed "shadow-...".
    assert render_call[0] == "app.domains.rendering.tasks.render_order_line_still_task"
    assert render_call[1] == [str(order_line.id)]
    assert render_call[2]["publish_asset_enabled"] is False
    assert render_call[2]["emit_events"] is False
    assert render_call[2]["job_document_enabled"] is False
    assert render_call[2]["output_name_suffix"].startswith("shadow-")
    assert render_call[2]["workflow_run_id"] == str(run.id)
    assert render_call[2]["workflow_node_id"] == "render"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_dispatch_render_with_workflow_shadow_mode_ignores_graph_failures_after_legacy_dispatch(
    db,
    admin_user,
    monkeypatch,
    tmp_path,
):
    """A crashing shadow graph must not disturb the legacy result; the observer run is just marked failed."""
    _use_test_database(monkeypatch)
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Shadow Workflow {uuid.uuid4().hex[:8]}",
        output_type_id=order_line.output_type_id,
        config={
            "version": 1,
            "ui": {"preset": "custom", "execution_mode": "shadow"},
            "nodes": [
                {"id": "setup", "step": "order_line_setup", "params": {}},
                {"id": "render", "step": "blender_still", "params": {"width": 1024, "height": 768}},
            ],
            "edges": [
                {"from": "setup", "to": "render"},
            ],
        },
        is_active=True,
    )
    db.add(workflow_definition)
    await db.flush()
    output_type = await db.get(OutputType, order_line.output_type_id)
    assert output_type is not None
    output_type.workflow_definition_id = workflow_definition.id
    await db.commit()

    # Legacy dispatch succeeds while the shadow graph runtime raises.
    monkeypatch.setattr(
        "app.domains.rendering.dispatch_service._legacy_dispatch",
        lambda order_line_id: {"backend": "legacy", "order_line_id": order_line_id},
    )
    monkeypatch.setattr(
        "app.domains.rendering.workflow_graph_runtime.execute_graph_workflow",
        lambda *_args, **_kwargs: (_ for _ in ()).throw(RuntimeError("shadow graph exploded")),
    )

    result = dispatch_render_with_workflow(str(order_line.id))

    # Reset this session so state written by the dispatcher is read fresh.
    await db.rollback()

    run = (
        await db.execute(select(WorkflowRun).order_by(WorkflowRun.created_at.desc()))
    ).scalars().first()

    # The caller still gets the legacy result; the failure is only recorded
    # on the shadow run's metadata.
    assert result["backend"] == "legacy"
    assert result["execution_mode"] == "shadow"
    assert result["shadow_status"] == "failed"
    assert result["shadow_error"] == "shadow graph exploded"
    assert result["shadow_workflow_run_id"] == str(run.id)
    assert run.execution_mode == "shadow"
    assert run.status == "failed"
    assert run.error_message == "shadow graph exploded"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_dispatch_endpoint_returns_workflow_run_with_node_results(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
    monkeypatch,
):
    """POST /api/workflows/{id}/dispatch queues graph nodes via Celery and returns the run."""
    monkeypatch.setattr(settings, "upload_dir", str(tmp_path / "uploads"))
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_definition = WorkflowDefinition(
        name=f"Dispatch Workflow {uuid.uuid4().hex[:8]}",
        config=build_preset_workflow_config("still_with_exports", {"width": 640, "height": 640}),
        is_active=True,
    )
    db.add(workflow_definition)
    await db.commit()
    await db.refresh(workflow_definition)

    # Capture every Celery dispatch and return deterministic sequential task ids.
    calls: list[tuple[str, list[str], dict]] = []

    def _fake_send_task(task_name: str, args: list[str], kwargs: dict):
        calls.append((task_name, args, kwargs))
        return type("Result", (), {"id": f"task-{len(calls)}"})()

    context_id = str(order_line.id)
    monkeypatch.setattr("app.tasks.celery_app.celery_app.send_task", _fake_send_task)
    response = await client.post(
        f"/api/workflows/{workflow_definition.id}/dispatch",
        params={"context_id": context_id},
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()

    assert body["context_id"] == context_id
    assert body["execution_mode"] == "graph"
    assert body["dispatched"] == 2
    assert body["task_ids"] == ["task-1", "task-2"]
    # The "still_with_exports" preset queues a render task and a blend export,
    # both sharing the same workflow run id.
    assert [call[0] for call in calls] == [
        "app.domains.rendering.tasks.render_order_line_still_task",
        "app.domains.rendering.tasks.export_blend_for_order_line_task",
    ]
    assert [call[1] for call in calls] == [[context_id], [context_id]]
    assert calls[0][2]["width"] == 640
    assert calls[0][2]["height"] == 640
    assert calls[0][2]["workflow_node_id"] == "render"
    assert calls[1][2]["workflow_node_id"] == "blend"
    assert "workflow_run_id" in calls[0][2]
    assert calls[0][2]["workflow_run_id"] == calls[1][2]["workflow_run_id"]

    # Celery-backed nodes are queued, inline nodes completed, and the legacy
    # "output" node skipped in graph mode.
    node_results = {node["node_name"]: node for node in body["workflow_run"]["node_results"]}
    assert body["workflow_run"]["status"] == "pending"
    assert body["workflow_run"]["execution_mode"] == "graph"
    assert body["workflow_run"]["celery_task_id"] == "task-1"
    assert node_results["render"]["status"] == "queued"
    assert node_results["render"]["output"]["task_id"] == "task-1"
    assert node_results["blend"]["status"] == "queued"
    assert node_results["blend"]["output"]["task_id"] == "task-2"
    assert node_results["setup"]["status"] == "completed"
    assert node_results["setup"]["output"]["order_line_id"] == str(order_line.id)
    assert node_results["template"]["status"] == "completed"
    assert node_results["template"]["output"]["use_materials"] is False
    assert node_results["output"]["status"] == "skipped"
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_identical_shadow_output(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """The comparison endpoint reports an exact match for identical legacy/shadow images."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()

    render_dir = tmp_path / "comparison" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    # The shadow output sits next to the authoritative file, suffixed with
    # the first 8 characters of the workflow run id.
    shadow_path = render_dir / f"authoritative_shadow-{str(workflow_run.id)[:8]}.png"

    # Two byte-identical 8x8 images -> the comparison should be exact.
    Image.new("RGBA", (8, 8), (0, 128, 255, 255)).save(authoritative_path)
    Image.new("RGBA", (8, 8), (0, 128, 255, 255)).save(shadow_path)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    assert body["workflow_run_id"] == str(workflow_run.id)
    assert body["execution_mode"] == "shadow"
    assert body["status"] == "matched"
    assert body["exact_match"] is True
    assert body["dimensions_match"] is True
    assert body["mean_pixel_delta"] == 0.0
    assert body["authoritative_output"]["path"] == str(authoritative_path)
    assert body["observer_output"]["path"] == str(shadow_path)
    assert body["authoritative_output"]["image_width"] == 8
    assert body["observer_output"]["image_height"] == 8
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_workflow_run_comparison_endpoint_reports_missing_shadow_output(
    client,
    db,
    admin_user,
    auth_headers,
    tmp_path,
):
    """The comparison endpoint degrades gracefully when the shadow render file is absent."""
    order_line = await _seed_renderable_order_line(db, admin_user, tmp_path)
    workflow_run = WorkflowRun(
        order_line_id=order_line.id,
        execution_mode="shadow",
        status="completed",
    )
    db.add(workflow_run)
    await db.flush()

    # Only the authoritative render exists; no shadow counterpart is written.
    render_dir = tmp_path / "comparison-missing" / str(order_line.id)
    render_dir.mkdir(parents=True, exist_ok=True)
    authoritative_path = render_dir / "authoritative.png"
    Image.new("RGBA", (4, 4), (255, 64, 64, 255)).save(authoritative_path)

    order_line.result_path = str(authoritative_path)
    order_line.render_status = "completed"
    await db.commit()

    response = await client.get(
        f"/api/workflows/runs/{workflow_run.id}/comparison",
        headers=auth_headers,
    )

    assert response.status_code == 200
    body = response.json()
    # No pixel comparison is possible, so exact_match stays None.
    assert body["status"] == "missing_observer"
    assert body["exact_match"] is None
    assert body["observer_output"]["exists"] is False
    assert body["authoritative_output"]["exists"] is True
|