feat: initial commit

This commit is contained in:
2026-03-05 22:12:38 +01:00
commit bce762a783
380 changed files with 51955 additions and 0 deletions
View File
+17
View File
@@ -0,0 +1,17 @@
"""Celery tasks for Azure AI validation."""
import logging
from app.tasks.celery_app import celery_app
logger = logging.getLogger(__name__)
@celery_app.task(bind=True, name="app.tasks.ai_tasks.validate_item", queue="ai_validation")
def validate_item(self, order_item_id: str):
    """Validate orientation of a rendered thumbnail via Azure GPT-4o Vision.

    Delegates to :func:`app.services.azure_ai.validate_thumbnail`; any failure
    (including an import failure) schedules a retry — up to 3 attempts,
    30 seconds apart.
    """
    logger.info(f"AI validation for item: {order_item_id}")
    try:
        # Import inside the try so an unavailable service module also retries.
        from app.services.azure_ai import validate_thumbnail
        validate_thumbnail(order_item_id)
    except Exception as err:
        logger.error(f"AI validation failed for {order_item_id}: {err}")
        raise self.retry(exc=err, countdown=30, max_retries=3)
+36
View File
@@ -0,0 +1,36 @@
from celery import Celery
from app.config import settings
# Central Celery application shared by all task modules in app.tasks.*.
celery_app = Celery(
    "schaefflerautomat",
    broker=settings.redis_url,
    backend=settings.redis_url,  # task results stored in the same Redis instance
    include=["app.tasks.step_tasks", "app.tasks.ai_tasks", "app.tasks.flamenco_tasks"],
)
celery_app.conf.update(
    # JSON-only serialization keeps payloads inspectable and avoids pickle.
    task_serializer="json",
    result_serializer="json",
    accept_content=["json"],
    timezone="UTC",
    enable_utc=True,
    # Route tasks to dedicated queues so long-running STEP/render work and
    # AI validation cannot starve each other.
    task_routes={
        "app.tasks.step_tasks.*": {"queue": "step_processing"},
        "app.tasks.ai_tasks.*": {"queue": "ai_validation"},
        "app.tasks.flamenco_tasks.*": {"queue": "step_processing"},
    },
    beat_schedule={
        "poll-flamenco-jobs": {
            "task": "app.tasks.flamenco_tasks.poll_flamenco_jobs",
            "schedule": 10.0,  # every 10 seconds
            # Discard if not consumed before the next run; prevents queue build-up
            # when workers are busy with long-running STEP/render tasks.
            "options": {"expires": 9},
        },
        "check-stalled-renders": {
            "task": "app.tasks.flamenco_tasks.check_stalled_renders",
            "schedule": 300.0,  # every 5 minutes
            "options": {"expires": 290},
        },
    },
)
+335
View File
@@ -0,0 +1,335 @@
"""Celery tasks for polling Flamenco job status and watchdog recovery."""
import logging
from datetime import datetime, timedelta
from app.tasks.celery_app import celery_app
logger = logging.getLogger(__name__)
# Flamenco status → our render_status mapping
# Maps Flamenco Manager job statuses onto the three internal render states
# ("processing" / "completed" / "failed"). Unknown statuses default to
# "processing" at the lookup site.
FLAMENCO_STATUS_MAP = {
    "queued": "processing",
    "active": "processing",
    "completed": "completed",
    "failed": "failed",
    "canceled": "failed",  # cancellation is treated as a failure of the line
    "cancel-requested": "processing",  # transitional — keep polling until terminal
    "paused": "processing",
}
@celery_app.task(name="app.tasks.flamenco_tasks.poll_flamenco_jobs", queue="step_processing")
def poll_flamenco_jobs():
    """Poll Flamenco Manager for active render jobs and update OrderLine status.

    Runs on a Celery Beat schedule (every 10 seconds).

    Uses a Redis lock (TTL=9s) to ensure at most one poll executes per
    10-second window. When the queue backs up with many duplicates (e.g. all
    workers are busy with long STEP/render tasks), duplicates try to acquire
    the lock, find it taken, and return immediately — draining the queue
    without doing redundant work.

    Returns:
        dict: ``{"skipped": "deduplicated"}`` when another poll holds the
        lock, ``{"polled": 0}`` when no Flamenco line is in flight, otherwise
        ``{"polled": <lines checked>, "updated": <lines changed>}``.
    """
    import redis as redis_lib
    from app.config import settings as app_settings
    # Deduplicate: skip if a poll ran within the last 9 seconds
    try:
        r = redis_lib.from_url(app_settings.redis_url)
        acquired = r.set("flamenco_poll_lock", "1", nx=True, ex=9)
        if not acquired:
            return {"skipped": "deduplicated"}
    except Exception:
        pass  # Redis unavailable — proceed anyway
    from sqlalchemy import create_engine, select, update as sql_update
    from sqlalchemy.orm import Session
    from app.models.order_line import OrderLine
    from app.models.system_setting import SystemSetting
    from app.services.flamenco_client import get_flamenco_client
    # Celery workers have no async event loop — use a synchronous engine.
    sync_url = app_settings.database_url.replace("+asyncpg", "")
    engine = create_engine(sync_url)
    # Track orders whose lines transitioned to a terminal state
    completed_order_ids = set()
    try:
        with Session(engine) as session:
            # Load Flamenco Manager URL
            row = session.execute(
                select(SystemSetting).where(SystemSetting.key == "flamenco_manager_url")
            ).scalar_one_or_none()
            manager_url = row.value if row else "http://flamenco-manager:8080"
            # Find all OrderLines dispatched to Flamenco that are still processing
            lines = session.execute(
                select(OrderLine).where(
                    OrderLine.render_backend_used == "flamenco",
                    OrderLine.render_status == "processing",
                    OrderLine.flamenco_job_id.isnot(None),
                )
            ).scalars().all()
            if not lines:
                return {"polled": 0}
            client = get_flamenco_client(manager_url)
            updated = 0
            for line in lines:
                try:
                    job = client.get_job(line.flamenco_job_id)
                    flamenco_status = job.get("status", "")
                    our_status = FLAMENCO_STATUS_MAP.get(flamenco_status, "processing")
                    if our_status == line.render_status:
                        continue  # No change
                    updates = {"render_status": our_status}
                    if our_status == "completed":
                        updates["render_completed_at"] = datetime.utcnow()
                        # Try to extract result path from job activity
                        activity = job.get("activity", "")
                        if activity:
                            updates["render_log"] = {
                                "flamenco_job_id": line.flamenco_job_id,
                                "flamenco_status": flamenco_status,
                                "activity": activity,
                            }
                        # Set result path based on job type
                        job_type = job.get("type", "")
                        if job_type == "schaeffler-turntable":
                            output_dir = job.get("settings", {}).get("output_dir", "")
                            output_name = job.get("settings", {}).get("output_name", "turntable")
                            updates["result_path"] = f"{output_dir}/{output_name}.mp4"
                        elif job_type == "schaeffler-still":
                            updates["result_path"] = job.get("settings", {}).get("output_path", "")
                    elif our_status == "failed":
                        updates["render_completed_at"] = datetime.utcnow()
                        updates["render_log"] = {
                            "flamenco_job_id": line.flamenco_job_id,
                            "flamenco_status": flamenco_status,
                            "error": job.get("activity", "Job failed"),
                        }
                    session.execute(
                        sql_update(OrderLine)
                        .where(OrderLine.id == line.id)
                        .values(**updates)
                    )
                    updated += 1
                    logger.info(
                        f"Flamenco job {line.flamenco_job_id}: "
                        f"{flamenco_status} → render_status={our_status}"
                    )
                    # Track orders with lines that reached a terminal state
                    if our_status in ("completed", "failed"):
                        completed_order_ids.add(str(line.order_id))
                except Exception as exc:
                    # One unreachable job must not abort polling of the others.
                    logger.warning(
                        f"Failed to poll Flamenco job {line.flamenco_job_id}: {exc}"
                    )
            if updated:
                session.commit()
    finally:
        # Always release the engine — previously an uncaught error inside the
        # session block leaked it.
        engine.dispose()
    # Auto-advance orders if all renderable lines are done
    if completed_order_ids:
        from app.services.order_status_service import check_order_completion
        for oid in completed_order_ids:
            check_order_completion(oid)
    return {"polled": len(lines), "updated": updated}
# ---------------------------------------------------------------------------
# Stalled-render watchdog
# ---------------------------------------------------------------------------
@celery_app.task(name="app.tasks.flamenco_tasks.check_stalled_renders", queue="step_processing")
def check_stalled_renders():
    """Watchdog: detect and re-dispatch render jobs stuck in 'processing'.

    Runs on a Celery Beat schedule (every 5 minutes).

    After a docker restart, Celery workers lose in-flight tasks — the DB still
    shows render_status='processing' indefinitely. This task:

    * For **Celery** lines: uses Celery inspect to check whether any worker is
      still actively executing the task. If not (e.g. after a restart), and
      the job has been stuck longer than ``render_stall_timeout_minutes``
      (default: 120 min), it is reset to 'pending' and re-dispatched.
    * For **Flamenco** lines: queries the Flamenco Manager. If the manager
      reports the job as still active the line is left alone; if the job is
      gone or in a terminal/error state it is re-dispatched.

    Returns:
        dict with ``checked`` (stalled lines found), ``restarted`` (lines
        re-dispatched) and ``timeout_minutes`` (effective stall timeout).
    """
    from sqlalchemy import create_engine, select, update as sql_update
    from sqlalchemy.orm import Session
    from app.config import settings as app_settings
    from app.models.order_line import OrderLine
    from app.models.system_setting import SystemSetting
    # Celery workers have no async event loop — use a synchronous engine.
    sync_url = app_settings.database_url.replace("+asyncpg", "")
    engine = create_engine(sync_url)
    with Session(engine) as session:
        # ── Read timeout from system settings ────────────────────────────────
        row = session.execute(
            select(SystemSetting).where(SystemSetting.key == "render_stall_timeout_minutes")
        ).scalar_one_or_none()
        try:
            timeout_minutes = int(row.value) if row else 120
        except (ValueError, TypeError):
            # Malformed setting value — fall back to the 120-minute default.
            timeout_minutes = 120
        cutoff = datetime.utcnow() - timedelta(minutes=timeout_minutes)
        stalled_lines = session.execute(
            select(OrderLine).where(
                OrderLine.render_status == "processing",
                OrderLine.render_started_at.isnot(None),
                OrderLine.render_started_at < cutoff,
            )
        ).scalars().all()
        if not stalled_lines:
            engine.dispose()
            return {"checked": 0, "restarted": 0, "timeout_minutes": timeout_minutes}
        logger.info(
            "[watchdog] Found %d stalled render(s) older than %d minutes",
            len(stalled_lines), timeout_minutes,
        )
        # ── Build set of order_line_ids actively running on Celery workers ───
        active_celery_line_ids: set[str] = set()
        inspect_ok = False
        try:
            inspect = celery_app.control.inspect(timeout=2)
            active_tasks = inspect.active() or {}
            for worker_tasks in active_tasks.values():
                for task_info in (worker_tasks or []):
                    # Render tasks take the order_line_id as their first arg.
                    args = task_info.get("args", [])
                    if args:
                        active_celery_line_ids.add(str(args[0]))
            inspect_ok = True
        except Exception as exc:
            logger.warning(
                "[watchdog] Celery inspect failed (%s) — will re-dispatch all timed-out Celery jobs",
                exc,
            )
        # ── Load Flamenco Manager URL ─────────────────────────────────────────
        manager_url = "http://flamenco-manager:8080"
        try:
            url_row = session.execute(
                select(SystemSetting).where(SystemSetting.key == "flamenco_manager_url")
            ).scalar_one_or_none()
            if url_row:
                manager_url = url_row.value
        except Exception:
            pass
        # ── Decide which lines to restart ────────────────────────────────────
        to_restart: list[OrderLine] = []
        for line in stalled_lines:
            line_id = str(line.id)
            if line.flamenco_job_id:
                # Flamenco job: verify with manager before re-dispatching
                try:
                    from app.services.flamenco_client import get_flamenco_client
                    client = get_flamenco_client(manager_url)
                    job = client.get_job(line.flamenco_job_id)
                    flamenco_status = job.get("status", "")
                    if flamenco_status in (
                        "active", "queued", "paused",
                        "pause-requested", "cancel-requested",
                    ):
                        logger.info(
                            "[watchdog] Flamenco job %s is still %s — skipping line %s",
                            line.flamenco_job_id, flamenco_status, line_id,
                        )
                        continue
                    logger.info(
                        "[watchdog] Flamenco job %s status=%r → re-dispatching line %s",
                        line.flamenco_job_id, flamenco_status, line_id,
                    )
                except Exception as exc:
                    # Manager unreachable — skip to avoid false restarts
                    logger.warning(
                        "[watchdog] Cannot reach Flamenco for job %s (%s) — skipping line %s",
                        line.flamenco_job_id, exc, line_id,
                    )
                    continue
            else:
                # Celery job: skip if still actively running on a worker
                if inspect_ok and line_id in active_celery_line_ids:
                    logger.info(
                        "[watchdog] Celery render for line %s still active — skipping", line_id
                    )
                    continue
                logger.info(
                    "[watchdog] Celery render for line %s not found in active tasks — re-dispatching",
                    line_id,
                )
            to_restart.append(line)
        if not to_restart:
            engine.dispose()
            return {
                "checked": len(stalled_lines),
                "restarted": 0,
                "timeout_minutes": timeout_minutes,
            }
        # ── Reset stalled lines to pending ───────────────────────────────────
        for line in to_restart:
            session.execute(
                sql_update(OrderLine)
                .where(OrderLine.id == line.id)
                .values(
                    render_status="pending",
                    render_started_at=None,
                    render_backend_used=None,
                    flamenco_job_id=None,
                    render_log={
                        "watchdog": (
                            f"Auto-restarted after {timeout_minutes} min stall "
                            f"(previous backend: {line.render_backend_used or 'unknown'})"
                        )
                    },
                )
            )
        session.commit()
    engine.dispose()
    # ── Re-dispatch outside DB session ───────────────────────────────────────
    from app.services.render_dispatcher import dispatch_render
    restarted = 0
    for line in to_restart:
        try:
            dispatch_render(str(line.id))
            restarted += 1
            logger.info("[watchdog] Re-dispatched render for order line %s", line.id)
        except Exception as exc:
            # Line stays 'pending'; a later dispatch attempt can pick it up.
            logger.error(
                "[watchdog] Failed to re-dispatch line %s: %s — left as pending", line.id, exc
            )
    return {
        "checked": len(stalled_lines),
        "restarted": restarted,
        "timeout_minutes": timeout_minutes,
    }
+581
View File
@@ -0,0 +1,581 @@
"""Celery tasks for STEP file processing and thumbnail generation."""
import logging
from app.tasks.celery_app import celery_app
logger = logging.getLogger(__name__)
@celery_app.task(bind=True, name="app.tasks.step_tasks.process_step_file", queue="step_processing")
def process_step_file(self, cad_file_id: str):
    """Process a STEP file: extract objects, generate thumbnail, convert to glTF.

    After processing completes, auto-populate cad_part_materials from Excel
    component data for any linked products that don't yet have materials assigned.

    A per-file Redis lock (TTL = 10 min) prevents duplicate tasks from processing
    the same file concurrently — e.g. when 'Process Unprocessed' is clicked while
    a file is already being processed.

    Args:
        cad_file_id: Primary key of the CadFile to process.
    """
    import redis as redis_lib
    from app.config import settings as app_settings
    lock_key = f"step_processing_lock:{cad_file_id}"
    r = redis_lib.from_url(app_settings.redis_url)
    acquired = r.set(lock_key, "1", nx=True, ex=600)  # 10-minute TTL
    if not acquired:
        logger.warning(f"STEP file {cad_file_id} is already being processed — skipping duplicate task")
        return
    try:
        logger.info(f"Processing STEP file (metadata only): {cad_file_id}")
        try:
            from app.services.step_processor import extract_cad_metadata
            extract_cad_metadata(cad_file_id)
        except Exception as exc:
            logger.error(f"STEP metadata extraction failed for {cad_file_id}: {exc}")
            # No explicit lock release needed here: the Retry exception raised
            # by self.retry() propagates through the finally-clause below,
            # which releases the lock so the scheduled retry can re-acquire it.
            # (The previous version deleted the key twice.)
            raise self.retry(exc=exc, countdown=60, max_retries=3)
    finally:
        r.delete(lock_key)  # always release on completion or unhandled error
    # Queue thumbnail rendering on the dedicated single-concurrency worker
    render_step_thumbnail.delay(cad_file_id)
def _auto_populate_materials_for_cad(cad_file_id: str) -> None:
    """Sync helper: auto-populate cad_part_materials from Excel for newly-processed CAD files.

    Only fills products where cad_part_materials is empty or all-blank,
    preventing overwrites of manually assigned materials.

    Args:
        cad_file_id: Primary key of the CadFile whose linked products should
            be back-filled.
    """
    from sqlalchemy import create_engine, select as sql_select, update as sql_update
    from sqlalchemy.orm import Session
    from app.config import settings as app_settings
    from app.models.cad_file import CadFile
    from app.models.product import Product
    from app.api.routers.products import build_materials_from_excel
    from app.services.step_processor import build_part_colors
    sync_url = app_settings.database_url.replace("+asyncpg", "")
    eng = create_engine(sync_url)
    try:
        with Session(eng) as session:
            # Load the CAD file to get parsed objects
            cad_file = session.execute(
                sql_select(CadFile).where(CadFile.id == cad_file_id)
            ).scalar_one_or_none()
            if cad_file is None:
                return
            parsed_objects = cad_file.parsed_objects or {}
            cad_parts: list[str] = parsed_objects.get("objects", [])
            if not cad_parts:
                return
            # Find products linked to this CAD file that have Excel components
            products = session.execute(
                sql_select(Product).where(
                    Product.cad_file_id == cad_file.id,
                    Product.is_active.is_(True),
                )
            ).scalars().all()
            final_part_colors = None
            for product in products:
                excel_components: list[dict] = product.components or []
                if not excel_components:
                    continue
                # Only auto-fill when cad_part_materials is empty or all-blank
                existing = product.cad_part_materials or []
                if existing and any(m.get("material", "").strip() for m in existing):
                    continue  # has at least one real material — don't overwrite
                new_materials = build_materials_from_excel(cad_parts, excel_components)
                session.execute(
                    sql_update(Product)
                    .where(Product.id == product.id)
                    .values(cad_part_materials=new_materials)
                )
                session.flush()
                # Compute part colors; thumbnail queued once after the loop
                try:
                    final_part_colors = build_part_colors(cad_parts, new_materials)
                except Exception:
                    logger.exception(f"Part colors build failed for product {product.id}")
                logger.info(
                    f"Auto-populated {len(new_materials)} materials for product {product.id} "
                    f"from {len(excel_components)} Excel components"
                )
            session.commit()
            # Queue exactly ONE thumbnail regeneration per CAD file regardless of how many
            # products were auto-populated. Queuing once-per-product multiplies the task
            # count needlessly and causes the Redis queue depth to grow instead of shrink.
            if final_part_colors is not None:
                try:
                    regenerate_thumbnail.delay(str(cad_file_id), final_part_colors)
                except Exception:
                    logger.exception(f"Thumbnail regen queue failed for cad_file {cad_file_id}")
    finally:
        # Always release the engine — the previous version leaked it on the
        # early returns (missing CAD file, or no parsed parts).
        eng.dispose()
@celery_app.task(bind=True, name="app.tasks.step_tasks.render_step_thumbnail", queue="thumbnail_rendering")
def render_step_thumbnail(self, cad_file_id: str):
    """Render the thumbnail for a freshly-processed STEP file.

    Runs on the dedicated thumbnail_rendering queue (concurrency=1) so the
    blender-renderer service is never overwhelmed by concurrent requests.
    On success, also auto-populates materials and marks the CadFile as completed.
    """
    logger.info(f"Rendering thumbnail for CAD file: {cad_file_id}")
    try:
        from app.services.step_processor import regenerate_cad_thumbnail
        if not regenerate_cad_thumbnail(cad_file_id, part_colors={}):
            raise RuntimeError("regenerate_cad_thumbnail returned False")
    except Exception as err:
        logger.error(f"Thumbnail render failed for {cad_file_id}: {err}")
        raise self.retry(exc=err, countdown=30, max_retries=2)
    # Best-effort follow-up: material population must never fail the task.
    try:
        _auto_populate_materials_for_cad(cad_file_id)
    except Exception:
        logger.exception(
            f"Auto material population failed for cad_file {cad_file_id} (non-fatal)"
        )
@celery_app.task(bind=True, name="app.tasks.step_tasks.generate_stl_cache", queue="thumbnail_rendering")
def generate_stl_cache(self, cad_file_id: str, quality: str):
    """Generate and cache STL for a CAD file without triggering a full render.

    Args:
        cad_file_id: Primary key of the CadFile to convert.
        quality: Mesh-quality preset forwarded to the blender-renderer service.

    Retries (countdown=30, max 2) when the conversion request fails.
    """
    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session
    from app.config import settings as app_settings
    from app.models.cad_file import CadFile
    import httpx
    logger.info(f"Generating {quality}-quality STL for CAD file: {cad_file_id}")
    sync_url = app_settings.database_url.replace("+asyncpg", "")
    eng = create_engine(sync_url)
    # Fetch only the stored path, then release the engine. The previous
    # version returned from inside the session when the file was missing and
    # leaked the engine.
    try:
        with Session(eng) as session:
            cad_file = session.get(CadFile, cad_file_id)
            step_path = cad_file.stored_path if cad_file else None
    finally:
        eng.dispose()
    if not step_path:
        logger.error(f"CAD file not found or no stored_path: {cad_file_id}")
        return
    try:
        resp = httpx.post(
            "http://blender-renderer:8100/convert-stl",
            json={"step_path": step_path, "quality": quality},
            timeout=600.0,  # STEP→STL conversion of large assemblies is slow
        )
        if resp.status_code == 200:
            data = resp.json()
            logger.info(f"STL cached: {data['stl_path']} ({data['size_bytes']} bytes) in {data['duration_s']}s")
        else:
            raise RuntimeError(f"blender-renderer returned {resp.status_code}: {resp.text[:300]}")
    except Exception as exc:
        logger.error(f"STL generation failed for {cad_file_id} quality={quality}: {exc}")
        raise self.retry(exc=exc, countdown=30, max_retries=2)
@celery_app.task(bind=True, name="app.tasks.step_tasks.regenerate_thumbnail", queue="thumbnail_rendering")
def regenerate_thumbnail(self, cad_file_id: str, part_colors: dict):
    """Regenerate thumbnail with per-part colours.

    Retries up to 2 times (30 s apart) when rendering raises or reports failure.
    """
    logger.info(f"Regenerating thumbnail for CAD file: {cad_file_id}")
    try:
        from app.services.step_processor import regenerate_cad_thumbnail
        if not regenerate_cad_thumbnail(cad_file_id, part_colors):
            raise RuntimeError("regenerate_cad_thumbnail returned False")
    except Exception as err:
        logger.error(f"Thumbnail regeneration failed for {cad_file_id}: {err}")
        raise self.retry(exc=err, countdown=30, max_retries=2)
@celery_app.task(name="app.tasks.step_tasks.dispatch_order_line_render", queue="step_processing")
def dispatch_order_line_render(order_line_id: str):
    """Thin wrapper that calls render_dispatcher.dispatch_render().

    On dispatch failure the OrderLine is marked 'failed' (best effort — DB
    errors while marking are logged and swallowed) and the original exception
    is re-raised.

    Args:
        order_line_id: Primary key of the OrderLine to dispatch.

    Returns:
        Whatever dispatch_render() returns on success.
    """
    logger.info(f"Dispatching render for order line: {order_line_id}")
    try:
        from app.services.render_dispatcher import dispatch_render
        result = dispatch_render(order_line_id)
        logger.info(f"Dispatch result for {order_line_id}: {result}")
        return result
    except Exception as exc:
        logger.error(f"dispatch_order_line_render failed for {order_line_id}: {exc}")
        # Mark line as failed so it doesn't stay stuck in "processing"
        try:
            from sqlalchemy import create_engine, update as sql_update
            from sqlalchemy.orm import Session
            from app.config import settings as app_settings
            from app.models.order_line import OrderLine
            from datetime import datetime
            sync_url = app_settings.database_url.replace("+asyncpg", "")
            eng = create_engine(sync_url)
            with Session(eng) as s:
                s.execute(
                    sql_update(OrderLine)
                    .where(OrderLine.id == order_line_id)
                    .values(
                        render_status="failed",
                        render_completed_at=datetime.utcnow(),
                        render_log={"error": f"Dispatch failed: {str(exc)[:500]}"},
                    )
                )
                s.commit()
            eng.dispose()
        except Exception:
            logger.exception(f"Failed to mark {order_line_id} as failed after dispatch error")
        raise
@celery_app.task(bind=True, name="app.tasks.step_tasks.render_order_line_task", queue="step_processing", max_retries=3)
def render_order_line_task(self, order_line_id: str):
    """Render a specific output type for an order line.

    Loads OrderLine → Product → CadFile → OutputType.render_settings.
    Merges with system render settings. Stores result at order_line.result_path.

    On success or failure the OrderLine row is updated with status, timing and
    render log, the order creator is notified, and order completion is
    re-checked. On unexpected errors the task retries (countdown=60); once
    ``max_retries`` (3) is exhausted the line is marked 'failed' so it does
    not stay stuck in 'processing'.
    """
    logger.info(f"Rendering order line: {order_line_id}")
    from app.services.render_log import emit
    emit(order_line_id, "Celery render task started")
    try:
        from sqlalchemy import create_engine, select, update as sql_update
        from sqlalchemy.orm import Session, joinedload
        from app.config import settings as app_settings
        # Use sync session for Celery (no async event loop)
        sync_url = app_settings.database_url.replace("+asyncpg", "")
        engine = create_engine(sync_url)
        with Session(engine) as session:
            from app.models.order_line import OrderLine
            from app.models.product import Product
            emit(order_line_id, "Loading order line from database")
            # Eager-load product/cad_file/output_type in one query.
            line = session.execute(
                select(OrderLine)
                .where(OrderLine.id == order_line_id)
                .options(
                    joinedload(OrderLine.product).joinedload(Product.cad_file),
                    joinedload(OrderLine.output_type),
                )
            ).scalar_one_or_none()
            if line is None:
                emit(order_line_id, "Order line not found in database", "error")
                logger.error(f"OrderLine {order_line_id} not found")
                return
            if line.product.cad_file_id is None:
                emit(order_line_id, "Product has no CAD file — marking as failed", "error")
                logger.warning(f"OrderLine {order_line_id}: product has no CAD file")
                session.execute(
                    sql_update(OrderLine)
                    .where(OrderLine.id == line.id)
                    .values(render_status="failed")
                )
                session.commit()
                return
            # Mark as processing with timing
            from datetime import datetime
            render_start = datetime.utcnow()
            session.execute(
                sql_update(OrderLine)
                .where(OrderLine.id == line.id)
                .values(
                    render_status="processing",
                    render_backend_used="celery",
                    render_started_at=render_start,
                )
            )
            session.commit()
            cad_file = line.product.cad_file
            materials_source = line.product.cad_part_materials
            part_colors = {}
            if cad_file and cad_file.parsed_objects:
                parsed_names = cad_file.parsed_objects.get("objects", [])
                if materials_source:
                    from app.services.step_processor import build_part_colors
                    part_colors = build_part_colors(parsed_names, materials_source)
            # Resolve render template + material library
            from app.services.template_service import resolve_template, get_material_library_path
            category_key = line.product.category_key if line.product else None
            ot_id = str(line.output_type_id) if line.output_type_id else None
            template = resolve_template(category_key=category_key, output_type_id=ot_id)
            material_library = get_material_library_path()
            # Build material_map (part_name → material_name) for material replacement.
            # Works with or without a render template — only suppressed if a
            # template explicitly has material_replace_enabled=False.
            material_map = None
            use_materials = bool(material_library and materials_source)
            if template and not template.material_replace_enabled:
                use_materials = False
            if use_materials:
                material_map = {
                    m["part_name"]: m["material"]
                    for m in materials_source
                    if m.get("part_name") and m.get("material")
                }
                # Resolve raw material names to SCHAEFFLER library names via aliases
                from app.services.material_service import resolve_material_map
                material_map = resolve_material_map(material_map)
            if template:
                emit(order_line_id, f"Using render template: {template.name} (collection={template.target_collection}, material_replace={template.material_replace_enabled}, lighting_only={template.lighting_only})")
                logger.info(f"Render template resolved: '{template.name}' path={template.blend_file_path}, lighting_only={template.lighting_only}")
            else:
                emit(order_line_id, "No render template found — using factory settings (Mode A)")
                logger.info(f"No render template for category_key={category_key!r}, output_type_id={ot_id!r}")
            cad_name = cad_file.original_name if cad_file else "?"
            # Load render_position for rotation values
            rotation_x = rotation_y = rotation_z = 0.0
            if line.render_position_id:
                from app.models.render_position import ProductRenderPosition
                rp = session.get(ProductRenderPosition, line.render_position_id)
                if rp:
                    rotation_x, rotation_y, rotation_z = rp.rotation_x, rp.rotation_y, rp.rotation_z
                    emit(order_line_id, f"Render position: '{rp.name}' ({rotation_x}°, {rotation_y}°, {rotation_z}°)")
            emit(order_line_id, f"Starting render for {cad_name} ({len(part_colors)} coloured parts)")
            # Determine output format from output_type (default jpg)
            out_ext = "jpg"
            if line.output_type and line.output_type.output_format:
                fmt = line.output_type.output_format.lower()
                if fmt in ("png", "jpg", "jpeg"):
                    out_ext = "png" if fmt == "png" else "jpg"
            # Build meaningful output filename
            import re
            def _sanitize(s: str) -> str:
                # Replace filesystem-unsafe characters; cap length at 100 chars.
                return re.sub(r'[^\w\-.]', '_', s.strip())[:100]
            product_name = line.product.name or line.product.pim_id or "product"
            ot_name = line.output_type.name if line.output_type else "render"
            filename = f"{_sanitize(product_name)}_{_sanitize(ot_name)}.{out_ext}"
            # Render to per-line output directory (not the shared CadFile thumbnail)
            from pathlib import Path as _Path
            render_dir = _Path(app_settings.upload_dir) / "renders" / order_line_id
            render_dir.mkdir(parents=True, exist_ok=True)
            output_path = str(render_dir / filename)
            # Extract per-output-type resolution from render_settings
            render_width = None
            render_height = None
            if line.output_type and line.output_type.render_settings:
                rs = line.output_type.render_settings
                if rs.get("width"):
                    render_width = int(rs["width"])
                if rs.get("height"):
                    render_height = int(rs["height"])
            # Check if transparent background is requested
            transparent_bg = False
            if line.output_type and line.output_type.transparent_bg:
                transparent_bg = True
            # Extract per-OT engine and samples overrides
            render_engine = None
            render_samples = None
            noise_threshold = ""
            denoiser = ""
            denoising_input_passes = ""
            denoising_prefilter = ""
            denoising_quality = ""
            denoising_use_gpu = ""
            if line.output_type and line.output_type.render_settings:
                rs = line.output_type.render_settings
                if rs.get("engine"):
                    render_engine = rs["engine"]
                if rs.get("samples"):
                    render_samples = int(rs["samples"])
                # Denoising knobs pass through as strings; "" means "use default".
                noise_threshold = str(rs.get("noise_threshold", ""))
                denoiser = str(rs.get("denoiser", ""))
                denoising_input_passes = str(rs.get("denoising_input_passes", ""))
                denoising_prefilter = str(rs.get("denoising_prefilter", ""))
                denoising_quality = str(rs.get("denoising_quality", ""))
                denoising_use_gpu = str(rs.get("denoising_use_gpu", ""))
            tmpl_info = f" template={template.name}" if template else ""
            emit(order_line_id, f"Calling renderer (STEP → STL → render) {render_width or 'default'}x{render_height or 'default'}{' [transparent]' if transparent_bg else ''}{f' engine={render_engine}' if render_engine else ''}{f' samples={render_samples}' if render_samples else ''}{tmpl_info}")
            from app.services.step_processor import render_to_file
            # Build ordered part names list for index-based Blender matching
            part_names_ordered = None
            if cad_file and cad_file.parsed_objects:
                part_names_ordered = cad_file.parsed_objects.get("objects", []) or None
            # Blocking call — the actual STEP → STL → Blender render pipeline.
            success, render_log = render_to_file(
                step_path=cad_file.stored_path,
                output_path=output_path,
                part_colors=part_colors,
                width=render_width,
                height=render_height,
                transparent_bg=transparent_bg,
                engine=render_engine,
                samples=render_samples,
                template_path=template.blend_file_path if template else None,
                target_collection=template.target_collection if template else "Product",
                material_library_path=material_library if use_materials else None,
                material_map=material_map,
                part_names_ordered=part_names_ordered,
                lighting_only=bool(template.lighting_only) if template else False,
                shadow_catcher=bool(template.shadow_catcher_enabled) if template else False,
                cycles_device=line.output_type.cycles_device if line.output_type else None,
                rotation_x=rotation_x,
                rotation_y=rotation_y,
                rotation_z=rotation_z,
                job_id=order_line_id,
                noise_threshold=noise_threshold,
                denoiser=denoiser,
                denoising_input_passes=denoising_input_passes,
                denoising_prefilter=denoising_prefilter,
                denoising_quality=denoising_quality,
                denoising_use_gpu=denoising_use_gpu,
            )
            new_status = "completed" if success else "failed"
            render_end = datetime.utcnow()
            elapsed = (render_end - render_start).total_seconds()
            update_values = dict(
                render_status=new_status,
                render_completed_at=render_end,
                render_log=render_log,
            )
            if success:
                update_values["result_path"] = output_path
            session.execute(
                sql_update(OrderLine)
                .where(OrderLine.id == line.id)
                .values(**update_values)
            )
            session.commit()
            if success:
                emit(order_line_id, f"Render completed in {elapsed:.1f}s", "success")
            else:
                emit(order_line_id, f"Render failed after {elapsed:.1f}s", "error")
            # Notify order creator about render result
            try:
                from app.models.order import Order as OrderModel
                order_row = session.execute(
                    select(OrderModel.created_by, OrderModel.order_number)
                    .where(OrderModel.id == line.order_id)
                ).one_or_none()
                if order_row:
                    from app.services.notification_service import emit_notification_sync
                    details: dict = {
                        "order_number": order_row[1],
                        "product_name": product_name,
                        "output_type": ot_name,
                    }
                    if not success and isinstance(render_log, dict):
                        err = render_log.get("error") or render_log.get("stderr", "")
                        if err:
                            details["error"] = str(err)[:300]
                    emit_notification_sync(
                        actor_user_id=None,
                        target_user_id=str(order_row[0]),
                        action="render.completed" if success else "render.failed",
                        entity_type="order",
                        entity_id=str(line.order_id),
                        details=details,
                    )
            except Exception:
                logger.exception("Failed to emit render notification")
            # Check if all lines for this order are done → auto-advance
            order_id_str = str(line.order_id)
        engine.dispose()
        from app.services.order_status_service import check_order_completion
        check_order_completion(order_id_str)
    except Exception as exc:
        logger.error(f"render_order_line_task failed for {order_line_id}: {exc}")
        # If retries exhausted, mark as failed so the line doesn't stay stuck
        if self.request.retries >= self.max_retries:
            logger.error(f"Max retries reached for {order_line_id}, marking as failed")
            try:
                from sqlalchemy import create_engine, update as sql_update2
                from sqlalchemy.orm import Session as SyncSession
                from app.config import settings as app_settings
                from app.models.order_line import OrderLine as OL2
                sync_url2 = app_settings.database_url.replace("+asyncpg", "")
                eng2 = create_engine(sync_url2)
                with SyncSession(eng2) as s2:
                    from datetime import datetime as dt2
                    s2.execute(
                        sql_update2(OL2).where(OL2.id == order_line_id)
                        .values(
                            render_status="failed",
                            render_completed_at=dt2.utcnow(),
                            render_log={"error": str(exc)[:500]},
                        )
                    )
                    s2.commit()
                eng2.dispose()
                from app.services.order_status_service import check_order_completion
                # Try to get order_id from DB
                eng3 = create_engine(sync_url2)
                with SyncSession(eng3) as s3:
                    from sqlalchemy import select as sel
                    row = s3.execute(sel(OL2.order_id).where(OL2.id == order_line_id)).scalar_one_or_none()
                    if row:
                        check_order_completion(str(row))
                eng3.dispose()
                # Notify the order creator about the failure
                try:
                    from sqlalchemy import select as sel2
                    from app.models.order import Order as OrderModel2
                    eng4 = create_engine(sync_url2)
                    with SyncSession(eng4) as s4:
                        order_row2 = s4.execute(
                            sel2(OrderModel2.created_by, OrderModel2.order_number)
                            .join(OL2, OL2.order_id == OrderModel2.id)
                            .where(OL2.id == order_line_id)
                        ).one_or_none()
                    eng4.dispose()
                    if order_row2:
                        from app.services.notification_service import emit_notification_sync
                        emit_notification_sync(
                            actor_user_id=None,
                            target_user_id=str(order_row2[0]),
                            action="render.failed",
                            entity_type="order",
                            entity_id=None,
                            details={
                                "order_number": order_row2[1],
                                "product_name": "unknown",
                                "output_type": "unknown",
                                "error": str(exc)[:300],
                            },
                        )
                except Exception:
                    logger.exception("Failed to emit render failure notification")
            except Exception:
                logger.exception(f"Failed to mark {order_line_id} as failed in DB")
            # Retries exhausted — re-raise the original error, no further retry.
            raise
        raise self.retry(exc=exc, countdown=60)