chore: snapshot workflow migration progress
This commit is contained in:
@@ -12,12 +12,19 @@ from sqlalchemy import select
|
||||
from sqlalchemy.orm import Session, selectinload
|
||||
|
||||
from app.config import settings
|
||||
from app.core.render_paths import build_order_line_export_path, build_order_line_step_render_path
|
||||
from app.core.process_steps import StepName
|
||||
from app.domains.products.models import CadFile
|
||||
from app.domains.rendering.models import WorkflowNodeResult, WorkflowRun
|
||||
from app.domains.rendering.workflow_executor import STEP_TASK_MAP, WorkflowContext, WorkflowDispatchResult
|
||||
from app.domains.rendering.workflow_executor import (
|
||||
STEP_TASK_MAP,
|
||||
WorkflowContext,
|
||||
WorkflowDispatchResult,
|
||||
WorkflowTaskDispatchSpec,
|
||||
)
|
||||
from app.domains.rendering.workflow_node_registry import get_node_definition
|
||||
from app.domains.rendering.workflow_runtime_services import (
|
||||
_resolve_render_output_extension,
|
||||
AutoPopulateMaterialsResult,
|
||||
BBoxResolutionResult,
|
||||
MaterialResolutionResult,
|
||||
@@ -25,6 +32,7 @@ from app.domains.rendering.workflow_runtime_services import (
|
||||
TemplateResolutionResult,
|
||||
auto_populate_materials_for_cad,
|
||||
build_order_line_render_invocation,
|
||||
extract_template_input_overrides,
|
||||
prepare_order_line_render_context,
|
||||
resolve_cad_bbox,
|
||||
resolve_order_line_material_map,
|
||||
@@ -89,11 +97,13 @@ _STILL_TASK_KEYS = {
|
||||
"material_override",
|
||||
"render_engine",
|
||||
"resolution",
|
||||
"template_inputs",
|
||||
}
|
||||
|
||||
_TURNTABLE_TASK_KEYS = {
|
||||
"output_name",
|
||||
"engine",
|
||||
"render_engine",
|
||||
"samples",
|
||||
"smooth_angle",
|
||||
"cycles_device",
|
||||
@@ -119,6 +129,8 @@ _TURNTABLE_TASK_KEYS = {
|
||||
"focal_length_mm",
|
||||
"sensor_width_mm",
|
||||
"material_override",
|
||||
"template_inputs",
|
||||
"duration_s",
|
||||
}
|
||||
|
||||
_THUMBNAIL_TASK_KEYS = {
|
||||
@@ -144,13 +156,62 @@ _AUTHORITATIVE_RENDER_SETTING_KEYS = {
|
||||
"denoising_prefilter",
|
||||
"denoising_quality",
|
||||
"denoising_use_gpu",
|
||||
"camera_orbit",
|
||||
"focal_length_mm",
|
||||
"sensor_width_mm",
|
||||
"bg_color",
|
||||
}
|
||||
|
||||
|
||||
def _inspect_active_worker_queues(timeout: float = 1.0) -> set[str]:
    """Collect the names of queues currently consumed by live Celery workers.

    Inspection failures of any kind are logged at info level and yield an
    empty set rather than propagating, so callers can treat the result as
    best-effort routing information.
    """
    from app.tasks.celery_app import celery_app

    try:
        inspection = celery_app.control.inspect(timeout=timeout)
        per_worker_queues = inspection.active_queues() or {}
    except Exception as exc:
        logger.info("[WORKFLOW] Could Not inspect active Celery queues: %s", exc)
        return set()

    collected: set[str] = set()
    for worker_queues in per_worker_queues.values():
        for entry in worker_queues or []:
            if not isinstance(entry, dict):
                continue
            candidate = entry.get("name")
            if isinstance(candidate, str) and candidate.strip():
                collected.add(candidate.strip())
    return collected
|
||||
|
||||
|
||||
def _resolve_shadow_render_queue(
    *,
    workflow_context: WorkflowContext,
    node,
    active_queue_names: set[str],
) -> str | None:
    """Pick an override queue for shadow-mode render/export nodes.

    Returns the configured shadow render queue only when: execution mode is
    shadow, the node is a render/export step, the setting names a non-default
    queue, and a live worker is actually consuming that queue. Returns None
    in every other case so the task follows default routing.
    """
    if workflow_context.execution_mode != "shadow":
        return None
    render_steps = (
        StepName.BLENDER_STILL,
        StepName.BLENDER_TURNTABLE,
        StepName.EXPORT_BLEND,
    )
    if node.step not in render_steps:
        return None

    configured = (settings.workflow_shadow_render_queue or "").strip()
    # "asset_pipeline" is the default route already, so an override is a no-op.
    if not configured or configured == "asset_pipeline":
        return None
    if configured in active_queue_names:
        return configured

    logger.info(
        "[WORKFLOW] Preferred shadow render queue %s unavailable for node %s; using default routing",
        configured,
        node.id,
    )
    return None
|
||||
|
||||
|
||||
def _filter_graph_render_overrides(step: StepName, params: dict[str, Any]) -> dict[str, Any]:
|
||||
normalized = dict(params)
|
||||
use_custom_render_settings = bool(normalized.pop("use_custom_render_settings", False))
|
||||
@@ -186,6 +247,8 @@ def find_unsupported_graph_nodes(workflow_context: WorkflowContext) -> list[str]
|
||||
def execute_graph_workflow(
|
||||
session: Session,
|
||||
workflow_context: WorkflowContext,
|
||||
*,
|
||||
dispatch_tasks: bool = True,
|
||||
) -> WorkflowDispatchResult:
|
||||
if workflow_context.workflow_run_id is None:
|
||||
raise ValueError("workflow_context.workflow_run_id is required for graph execution")
|
||||
@@ -201,6 +264,12 @@ def execute_graph_workflow(
|
||||
task_ids: list[str] = []
|
||||
node_task_ids: dict[str, str] = {}
|
||||
skipped_node_ids: list[str] = []
|
||||
task_specs: list[WorkflowTaskDispatchSpec] = []
|
||||
active_queue_names = (
|
||||
_inspect_active_worker_queues()
|
||||
if workflow_context.execution_mode == "shadow"
|
||||
else set()
|
||||
)
|
||||
|
||||
for node in workflow_context.ordered_nodes:
|
||||
node_result = node_results.get(node.id)
|
||||
@@ -326,8 +395,6 @@ def execute_graph_workflow(
|
||||
skipped_node_ids.append(node.id)
|
||||
continue
|
||||
|
||||
from app.tasks.celery_app import celery_app
|
||||
|
||||
task_kwargs = _build_task_kwargs(
|
||||
session=session,
|
||||
workflow_context=workflow_context,
|
||||
@@ -335,12 +402,42 @@ def execute_graph_workflow(
|
||||
node=node,
|
||||
)
|
||||
|
||||
result = celery_app.send_task(
|
||||
task_name,
|
||||
args=[workflow_context.context_id],
|
||||
kwargs=task_kwargs,
|
||||
target_queue = _resolve_shadow_render_queue(
|
||||
workflow_context=workflow_context,
|
||||
node=node,
|
||||
active_queue_names=active_queue_names,
|
||||
)
|
||||
metadata["task_id"] = result.id
|
||||
if dispatch_tasks:
|
||||
from app.tasks.celery_app import celery_app
|
||||
|
||||
if target_queue:
|
||||
result = celery_app.send_task(
|
||||
task_name,
|
||||
args=[workflow_context.context_id],
|
||||
kwargs=task_kwargs,
|
||||
queue=target_queue,
|
||||
)
|
||||
else:
|
||||
result = celery_app.send_task(
|
||||
task_name,
|
||||
args=[workflow_context.context_id],
|
||||
kwargs=task_kwargs,
|
||||
)
|
||||
task_id = result.id
|
||||
else:
|
||||
task_id = str(uuid.uuid4())
|
||||
task_specs.append(
|
||||
WorkflowTaskDispatchSpec(
|
||||
node_id=node.id,
|
||||
task_name=task_name,
|
||||
args=[workflow_context.context_id],
|
||||
kwargs=dict(task_kwargs),
|
||||
task_id=task_id,
|
||||
queue=target_queue,
|
||||
)
|
||||
)
|
||||
metadata["task_id"] = task_id
|
||||
metadata["task_queue"] = target_queue or "asset_pipeline"
|
||||
if definition is not None:
|
||||
metadata["execution_kind"] = definition.execution_kind
|
||||
metadata["attempt_count"] = 1
|
||||
@@ -360,15 +457,15 @@ def execute_graph_workflow(
|
||||
node_result.duration_s = None
|
||||
state.node_outputs[node.id] = dict(metadata)
|
||||
session.flush()
|
||||
task_ids.append(result.id)
|
||||
node_task_ids[node.id] = result.id
|
||||
task_ids.append(task_id)
|
||||
node_task_ids[node.id] = task_id
|
||||
logger.info(
|
||||
"[WORKFLOW] Dispatched node %r (step=%s, mode=%s, run=%s) -> Celery task %s",
|
||||
node.id,
|
||||
node.step,
|
||||
workflow_context.execution_mode,
|
||||
workflow_context.workflow_run_id,
|
||||
result.id,
|
||||
task_id,
|
||||
)
|
||||
continue
|
||||
|
||||
@@ -397,6 +494,7 @@ def execute_graph_workflow(
|
||||
task_ids=task_ids,
|
||||
node_task_ids=node_task_ids,
|
||||
skipped_node_ids=skipped_node_ids,
|
||||
task_specs=task_specs,
|
||||
)
|
||||
|
||||
|
||||
@@ -466,8 +564,15 @@ def _serialize_template_result(result: TemplateResolutionResult) -> dict[str, An
|
||||
"material_map_count": len(result.material_map or {}),
|
||||
"use_materials": result.use_materials,
|
||||
"override_material": result.override_material,
|
||||
"target_collection": result.target_collection,
|
||||
"lighting_only": result.lighting_only,
|
||||
"shadow_catcher": result.shadow_catcher,
|
||||
"camera_orbit": result.camera_orbit,
|
||||
"category_key": result.category_key,
|
||||
"output_type_id": result.output_type_id,
|
||||
"workflow_input_schema": result.workflow_input_schema,
|
||||
"template_inputs": result.template_inputs,
|
||||
"template_input_count": len(result.template_inputs or {}),
|
||||
}
|
||||
|
||||
|
||||
@@ -597,13 +702,17 @@ def _predict_task_output_metadata(
|
||||
order_line_id = str(state.setup.order_line.id)
|
||||
|
||||
if node.step == StepName.BLENDER_STILL:
|
||||
output_dir = step_path.parent / "renders"
|
||||
output_filename = f"line_{order_line_id}.png"
|
||||
output_extension = _resolve_render_output_extension(state.setup.order_line)
|
||||
if output_extension not in {"png", "jpg", "webp"}:
|
||||
output_extension = "png"
|
||||
output_filename = f"line_{order_line_id}.{output_extension}"
|
||||
if output_name_suffix:
|
||||
output_filename = f"line_{order_line_id}_{output_name_suffix}.png"
|
||||
output_filename = f"line_{order_line_id}_{output_name_suffix}.{output_extension}"
|
||||
return {
|
||||
"artifact_role": "render_output",
|
||||
"predicted_output_path": str(output_dir / output_filename),
|
||||
"predicted_output_path": str(
|
||||
build_order_line_step_render_path(step_path, order_line_id, output_filename)
|
||||
),
|
||||
"predicted_asset_type": "still",
|
||||
"publish_asset_enabled": bool(task_kwargs.get("publish_asset_enabled", True)),
|
||||
"graph_authoritative_output_enabled": bool(
|
||||
@@ -618,9 +727,10 @@ def _predict_task_output_metadata(
|
||||
output_filename = f"{step_path.stem}_production.blend"
|
||||
if output_name_suffix:
|
||||
output_filename = f"{step_path.stem}_production_{output_name_suffix}.blend"
|
||||
predicted_output_path = str(build_order_line_export_path(order_line_id, output_filename))
|
||||
return {
|
||||
"artifact_role": "blend_export",
|
||||
"predicted_output_path": str(step_path.parent / output_filename),
|
||||
"predicted_output_path": predicted_output_path,
|
||||
"predicted_asset_type": "blend_production",
|
||||
"publish_asset_enabled": bool(task_kwargs.get("publish_asset_enabled", True)),
|
||||
"graph_authoritative_output_enabled": bool(
|
||||
@@ -641,7 +751,9 @@ def _predict_task_output_metadata(
|
||||
if isinstance(output_dir, str) and output_dir.strip():
|
||||
predicted_output_path = str(Path(output_dir) / f"{output_name}.mp4")
|
||||
else:
|
||||
predicted_output_path = str(step_path.parent / "renders" / f"{output_name}.mp4")
|
||||
predicted_output_path = str(
|
||||
build_order_line_step_render_path(step_path, order_line_id, f"{output_name}.mp4")
|
||||
)
|
||||
return {
|
||||
"artifact_role": "turntable_output",
|
||||
"predicted_output_path": predicted_output_path,
|
||||
@@ -733,6 +845,30 @@ def _resolve_thumbnail_request(
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_turntable_task_kwargs(task_kwargs: dict[str, Any]) -> dict[str, Any]:
|
||||
normalized = dict(task_kwargs)
|
||||
raw_duration = normalized.get("duration_s")
|
||||
if raw_duration in (None, ""):
|
||||
return normalized
|
||||
|
||||
try:
|
||||
duration_s = float(raw_duration)
|
||||
except (TypeError, ValueError):
|
||||
return normalized
|
||||
|
||||
try:
|
||||
fps = int(float(normalized.get("fps", 0)))
|
||||
except (TypeError, ValueError):
|
||||
return normalized
|
||||
|
||||
if duration_s <= 0 or fps <= 0:
|
||||
return normalized
|
||||
|
||||
normalized["duration_s"] = duration_s
|
||||
normalized["frame_count"] = max(1, int(round(duration_s * fps)))
|
||||
return normalized
|
||||
|
||||
|
||||
def _build_task_kwargs(
|
||||
*,
|
||||
session: Session,
|
||||
@@ -751,6 +887,7 @@ def _build_task_kwargs(
|
||||
template_context=state.template,
|
||||
position_context=resolve_render_position_context(session, state.setup.order_line),
|
||||
material_context=state.materials,
|
||||
artifact_kind_override=_artifact_kind_override_for_step(node.step),
|
||||
)
|
||||
render_defaults = render_invocation.task_defaults()
|
||||
|
||||
@@ -774,6 +911,15 @@ def _build_task_kwargs(
|
||||
}.items()
|
||||
if key in _TURNTABLE_TASK_KEYS
|
||||
}
|
||||
task_kwargs = _normalize_turntable_task_kwargs(task_kwargs)
|
||||
if state.setup is not None and state.setup.is_ready and state.setup.cad_file is not None:
|
||||
task_kwargs["output_dir"] = str(
|
||||
build_order_line_step_render_path(
|
||||
state.setup.cad_file.stored_path,
|
||||
str(state.setup.order_line.id),
|
||||
"turntable.mp4",
|
||||
).parent
|
||||
)
|
||||
elif node.step == StepName.THUMBNAIL_SAVE:
|
||||
thumbnail_request = _resolve_thumbnail_request(workflow_context, state, node.id) or {}
|
||||
task_kwargs = {
|
||||
@@ -787,7 +933,7 @@ def _build_task_kwargs(
|
||||
|
||||
task_kwargs["workflow_run_id"] = str(workflow_context.workflow_run_id)
|
||||
task_kwargs["workflow_node_id"] = node.id
|
||||
if workflow_context.execution_mode == "graph" and node.step in {
|
||||
if workflow_context.execution_mode in {"graph", "shadow"} and node.step in {
|
||||
StepName.BLENDER_STILL,
|
||||
StepName.EXPORT_BLEND,
|
||||
StepName.BLENDER_TURNTABLE,
|
||||
@@ -798,19 +944,23 @@ def _build_task_kwargs(
|
||||
step=StepName.OUTPUT_SAVE,
|
||||
direction="downstream",
|
||||
)
|
||||
connected_notify_node_ids = _connected_node_ids_by_step(
|
||||
workflow_context,
|
||||
node_id=node.id,
|
||||
step=StepName.NOTIFY,
|
||||
direction="downstream",
|
||||
)
|
||||
if connected_output_node_ids:
|
||||
task_kwargs["publish_asset_enabled"] = False
|
||||
task_kwargs["graph_authoritative_output_enabled"] = True
|
||||
task_kwargs["graph_output_node_ids"] = connected_output_node_ids
|
||||
if connected_notify_node_ids:
|
||||
task_kwargs["emit_legacy_notifications"] = True
|
||||
task_kwargs["graph_notify_node_ids"] = connected_notify_node_ids
|
||||
if workflow_context.execution_mode == "graph":
|
||||
task_kwargs["graph_authoritative_output_enabled"] = True
|
||||
else:
|
||||
task_kwargs["observer_output_enabled"] = True
|
||||
if workflow_context.execution_mode == "graph":
|
||||
connected_notify_node_ids = _connected_node_ids_by_step(
|
||||
workflow_context,
|
||||
node_id=node.id,
|
||||
step=StepName.NOTIFY,
|
||||
direction="downstream",
|
||||
)
|
||||
if connected_notify_node_ids:
|
||||
task_kwargs["emit_legacy_notifications"] = True
|
||||
task_kwargs["graph_notify_node_ids"] = connected_notify_node_ids
|
||||
if workflow_context.execution_mode == "shadow":
|
||||
task_kwargs["publish_asset_enabled"] = False
|
||||
task_kwargs["emit_events"] = False
|
||||
@@ -819,6 +969,16 @@ def _build_task_kwargs(
|
||||
return task_kwargs
|
||||
|
||||
|
||||
def _artifact_kind_override_for_step(step: StepName) -> str | None:
    """Map render/export steps to their artifact-kind label; None otherwise."""
    overrides = {
        StepName.BLENDER_TURNTABLE: "turntable_video",
        StepName.BLENDER_STILL: "still_image",
        StepName.EXPORT_BLEND: "blend_asset",
    }
    return overrides.get(step)
|
||||
|
||||
|
||||
def _execute_order_line_setup(
|
||||
*,
|
||||
session: Session,
|
||||
@@ -857,12 +1017,25 @@ def _execute_resolve_template(
|
||||
node_params: dict[str, Any],
|
||||
) -> tuple[dict[str, Any], str, str | None]:
|
||||
del node
|
||||
del workflow_context, node_params
|
||||
del workflow_context
|
||||
if state.setup is None or not state.setup.is_ready:
|
||||
if state.setup is not None and state.setup.status == "skip":
|
||||
return _serialize_setup_result(state.setup), "skipped", state.setup.reason
|
||||
raise WorkflowGraphRuntimeError("resolve_template requires a ready order_line_setup result")
|
||||
result = resolve_order_line_template_context(session, state.setup)
|
||||
result = resolve_order_line_template_context(
|
||||
session,
|
||||
state.setup,
|
||||
template_id_override=node_params.get("template_id_override"),
|
||||
material_library_path_override=node_params.get("material_library_path"),
|
||||
require_template=bool(node_params.get("require_template", False)),
|
||||
disable_materials=bool(node_params.get("disable_materials", False)),
|
||||
target_collection_override=node_params.get("target_collection"),
|
||||
material_replace_mode=node_params.get("material_replace_mode"),
|
||||
lighting_only_mode=node_params.get("lighting_only_mode"),
|
||||
shadow_catcher_mode=node_params.get("shadow_catcher_mode"),
|
||||
camera_orbit_mode=node_params.get("camera_orbit_mode"),
|
||||
template_input_overrides=extract_template_input_overrides(node_params),
|
||||
)
|
||||
state.template = result
|
||||
return _serialize_template_result(result), "completed", None
|
||||
|
||||
@@ -876,7 +1049,7 @@ def _execute_material_map_resolve(
|
||||
node_params: dict[str, Any],
|
||||
) -> tuple[dict[str, Any], str, str | None]:
|
||||
del node
|
||||
del session, workflow_context, node_params
|
||||
del session, workflow_context
|
||||
if state.setup is None or not state.setup.is_ready:
|
||||
if state.setup is not None and state.setup.status == "skip":
|
||||
return _serialize_setup_result(state.setup), "skipped", state.setup.reason
|
||||
@@ -895,6 +1068,8 @@ def _execute_material_map_resolve(
|
||||
state.setup.materials_source,
|
||||
material_library=material_library,
|
||||
template=template,
|
||||
material_override=node_params.get("material_override"),
|
||||
disable_materials=bool(node_params.get("disable_materials", False)),
|
||||
)
|
||||
state.materials = result
|
||||
return _serialize_material_result(result), "completed", None
|
||||
@@ -909,26 +1084,45 @@ def _execute_auto_populate_materials(
|
||||
node_params: dict[str, Any],
|
||||
) -> tuple[dict[str, Any], str, str | None]:
|
||||
del node
|
||||
del node_params
|
||||
if state.setup is None or state.setup.cad_file is None:
|
||||
if state.setup is not None and state.setup.status == "skip":
|
||||
return _serialize_setup_result(state.setup), "skipped", state.setup.reason
|
||||
raise WorkflowGraphRuntimeError("auto_populate_materials requires a resolved cad_file")
|
||||
shadow_mode = workflow_context.execution_mode == "shadow"
|
||||
persist_updates = bool(node_params.get("persist_updates", not shadow_mode))
|
||||
if shadow_mode:
|
||||
persist_updates = False
|
||||
refresh_material_source = bool(node_params.get("refresh_material_source", True))
|
||||
include_populated_products = bool(node_params.get("include_populated_products", False))
|
||||
if shadow_mode:
|
||||
result = auto_populate_materials_for_cad(
|
||||
session,
|
||||
str(state.setup.cad_file.id),
|
||||
persist_updates=False,
|
||||
include_populated_products=include_populated_products,
|
||||
)
|
||||
else:
|
||||
result = auto_populate_materials_for_cad(session, str(state.setup.cad_file.id))
|
||||
result = auto_populate_materials_for_cad(
|
||||
session,
|
||||
str(state.setup.cad_file.id),
|
||||
persist_updates=persist_updates,
|
||||
include_populated_products=include_populated_products,
|
||||
)
|
||||
state.auto_populate = result
|
||||
if not shadow_mode and state.setup.order_line is not None and state.setup.order_line.product is not None:
|
||||
if (
|
||||
persist_updates
|
||||
and refresh_material_source
|
||||
and not shadow_mode
|
||||
and state.setup.order_line is not None
|
||||
and state.setup.order_line.product is not None
|
||||
):
|
||||
session.refresh(state.setup.order_line.product)
|
||||
state.setup.materials_source = state.setup.order_line.product.cad_part_materials or []
|
||||
payload = _serialize_auto_populate_result(result)
|
||||
payload["shadow_mode"] = shadow_mode
|
||||
payload["persist_updates"] = persist_updates
|
||||
payload["refresh_material_source"] = refresh_material_source
|
||||
payload["include_populated_products"] = include_populated_products
|
||||
return payload, "completed", None
|
||||
|
||||
|
||||
@@ -949,17 +1143,31 @@ def _execute_glb_bbox(
|
||||
|
||||
step_path = state.setup.cad_file.stored_path
|
||||
glb_path = node_params.get("glb_path")
|
||||
if glb_path is None and state.setup.glb_reuse_path is not None:
|
||||
source_preference = str(node_params.get("source_preference") or "auto")
|
||||
if glb_path is None and source_preference != "step_only" and state.setup.glb_reuse_path is not None:
|
||||
glb_path = str(state.setup.glb_reuse_path)
|
||||
elif glb_path is None:
|
||||
elif glb_path is None and source_preference != "step_only":
|
||||
step_file = Path(step_path)
|
||||
fallback_glb = step_file.parent / f"{step_file.stem}_thumbnail.glb"
|
||||
if fallback_glb.exists():
|
||||
glb_path = str(fallback_glb)
|
||||
|
||||
if source_preference == "glb_only" and not glb_path:
|
||||
payload = {
|
||||
"bbox_data": None,
|
||||
"has_bbox": False,
|
||||
"source_kind": "none",
|
||||
"step_path": step_path,
|
||||
"glb_path": None,
|
||||
"source_preference": source_preference,
|
||||
}
|
||||
return payload, "failed", "glb_only requested but no GLB artifact is available"
|
||||
|
||||
result = resolve_cad_bbox(step_path, glb_path=glb_path)
|
||||
state.bbox = result
|
||||
return _serialize_bbox_result(result), "completed", None
|
||||
payload = _serialize_bbox_result(result)
|
||||
payload["source_preference"] = source_preference
|
||||
return payload, "completed", None
|
||||
|
||||
|
||||
def _execute_resolve_step_path(
|
||||
@@ -1069,7 +1277,7 @@ def _execute_output_save(
|
||||
node,
|
||||
node_params: dict[str, Any],
|
||||
) -> tuple[dict[str, Any], str, str | None]:
|
||||
del session, node_params
|
||||
del session
|
||||
if state.setup is None or state.setup.order_line is None:
|
||||
raise WorkflowGraphRuntimeError("output_save requires an order_line_setup result")
|
||||
|
||||
@@ -1085,19 +1293,42 @@ def _execute_output_save(
|
||||
"shadow_mode": workflow_context.execution_mode == "shadow",
|
||||
}
|
||||
upstream_artifacts = _connected_upstream_artifacts(workflow_context, state, node.id)
|
||||
expected_artifact_role = str(node_params.get("expected_artifact_role") or "").strip() or None
|
||||
require_upstream_artifact = bool(node_params.get("require_upstream_artifact", False))
|
||||
if expected_artifact_role is not None:
|
||||
upstream_artifacts = [
|
||||
artifact for artifact in upstream_artifacts if artifact.get("artifact_role") == expected_artifact_role
|
||||
]
|
||||
if workflow_context.execution_mode == "shadow":
|
||||
payload["publication_mode"] = "shadow_observer_only"
|
||||
elif any(artifact["publish_asset_enabled"] for artifact in upstream_artifacts):
|
||||
payload["publication_mode"] = "deferred_to_render_task"
|
||||
else:
|
||||
payload["publication_mode"] = "awaiting_graph_authoritative_save"
|
||||
payload["expected_artifact_role"] = expected_artifact_role
|
||||
payload["require_upstream_artifact"] = require_upstream_artifact
|
||||
if upstream_artifacts:
|
||||
payload["artifact_count"] = len(upstream_artifacts)
|
||||
payload["upstream_artifacts"] = upstream_artifacts
|
||||
elif require_upstream_artifact:
|
||||
payload["artifact_count"] = 0
|
||||
return payload, "failed", "No upstream render artifact is connected to this output node"
|
||||
if state.template is not None and state.template.template is not None:
|
||||
payload["template_name"] = state.template.template.name
|
||||
if state.materials is not None:
|
||||
payload["material_map_count"] = len(state.materials.material_map or {})
|
||||
|
||||
deferred_handoff_node_ids = [
|
||||
str(artifact.get("node_id"))
|
||||
for artifact in upstream_artifacts
|
||||
if artifact.get("task_id")
|
||||
]
|
||||
if deferred_handoff_node_ids:
|
||||
payload["handoff_state"] = "armed"
|
||||
payload["handoff_node_ids"] = deferred_handoff_node_ids
|
||||
payload["handoff_node_count"] = len(deferred_handoff_node_ids)
|
||||
return payload, "pending", None
|
||||
|
||||
return payload, "completed", None
|
||||
|
||||
|
||||
@@ -1109,7 +1340,7 @@ def _execute_notify(
|
||||
node,
|
||||
node_params: dict[str, Any],
|
||||
) -> tuple[dict[str, Any], str, str | None]:
|
||||
del session, node_params
|
||||
del session
|
||||
if state.setup is None or state.setup.order_line is None:
|
||||
raise WorkflowGraphRuntimeError("notify requires an order_line_setup result")
|
||||
|
||||
@@ -1121,8 +1352,10 @@ def _execute_notify(
|
||||
payload: dict[str, Any] = {
|
||||
"order_line_id": str(state.setup.order_line.id),
|
||||
"shadow_mode": workflow_context.execution_mode == "shadow",
|
||||
"channel": "audit_log",
|
||||
"channel": str(node_params.get("channel") or "audit_log"),
|
||||
}
|
||||
require_armed_render = bool(node_params.get("require_armed_render", False))
|
||||
payload["require_armed_render"] = require_armed_render
|
||||
|
||||
if workflow_context.execution_mode == "shadow":
|
||||
payload["notification_mode"] = "shadow_suppressed"
|
||||
@@ -1136,12 +1369,15 @@ def _execute_notify(
|
||||
]
|
||||
if not armed_node_ids:
|
||||
payload["notification_mode"] = "not_armed"
|
||||
if require_armed_render:
|
||||
return payload, "failed", "No graph render task is configured for notification handoff"
|
||||
return payload, "skipped", "No graph render task is configured for notification handoff"
|
||||
|
||||
payload["notification_mode"] = "deferred_to_render_task"
|
||||
payload["armed_node_ids"] = armed_node_ids
|
||||
payload["armed_node_count"] = len(armed_node_ids)
|
||||
return payload, "completed", None
|
||||
payload["handoff_state"] = "armed"
|
||||
return payload, "pending", None
|
||||
|
||||
|
||||
_BRIDGE_EXECUTORS = {
|
||||
|
||||
Reference in New Issue
Block a user