Files
HartOMat/backend/app/domains/pipeline/tasks/render_thumbnail.py
T
Hartmut 6c5873d51f feat: performance optimizations + part-materials validation
- @timed_step decorator with wall-clock + RSS tracking (pipeline_logger)
- Blender timing laps for sharp edges and material assignment
- MeshRegistry pattern: eliminate 13 scene.traverse() calls across viewers
- Lazy material cloning (clone-on-first-write in both viewers)
- _pipeline_session context manager: 7 create_engine() → 2 in render_thumbnail
- KD-tree spatial pre-filter for sharp edge marking (bbox-based pruning)
- Batch material library append: N bpy.ops.wm.append → single bpy.data.libraries.load
- GMSH single-session batching: compound all solids into one tessellation call
- Validate part-materials save endpoints against parsed_objects (prevents bogus keys)
- ROADMAP updated with completion status

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-13 11:53:14 +01:00

177 lines
8.0 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters
This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.
"""Thumbnail rendering tasks.
Covers:
- render_step_thumbnail — render thumbnail for a freshly-processed STEP file
- regenerate_thumbnail — re-render thumbnail with updated per-part colours
"""
import logging
from contextlib import contextmanager
from pathlib import Path
from app.tasks.celery_app import celery_app
from app.core.task_logs import log_task_event
from app.core.pipeline_logger import PipelineLogger
logger = logging.getLogger(__name__)
@contextmanager
def _pipeline_session(tenant_id: str | None = None):
    """Yield one tenant-scoped sync Session backed by a single Engine.

    One engine (and therefore one connection pool) is created per task
    invocation instead of one per DB touch-point; the previous pattern of
    3-7 create_engine() calls cost ~50ms and one PG connection each. The
    engine is always disposed on exit, even if the body raises.
    """
    # Deferred imports keep worker startup light and avoid import cycles.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import Session

    from app.config import settings as app_settings
    from app.core.tenant_context import set_tenant_context_sync

    # Celery tasks are synchronous: strip the asyncpg driver marker.
    db_url = app_settings.database_url.replace("+asyncpg", "")
    db_engine = create_engine(db_url)
    try:
        with Session(db_engine) as db_session:
            # Establish RLS tenant context before handing the session out.
            set_tenant_context_sync(db_session, tenant_id)
            yield db_session
    finally:
        db_engine.dispose()
@celery_app.task(bind=True, name="app.tasks.step_tasks.render_step_thumbnail", queue="asset_pipeline")
def render_step_thumbnail(self, cad_file_id: str):
    """Render the thumbnail for a freshly-processed STEP file.

    Runs on the dedicated asset_pipeline queue (concurrency=1) so the
    blender-renderer service is never overwhelmed by concurrent requests.
    On success, also auto-populates materials and marks the CadFile as completed.

    Phases (each best-effort except the render itself, which retries):
      1. pre-render:  persist step_file_hash if missing (cache key)
      2. render:      Blender thumbnail; retries up to 2x on failure
      3. post-render: bbox + sharp-edge extraction into mesh_attributes
      4. materials, WebSocket broadcast, queue geometry GLB export
    """
    pl = PipelineLogger(task_id=self.request.id)
    pl.step_start("render_step_thumbnail", {"cad_file_id": cad_file_id})
    logger.info(f"Rendering thumbnail for CAD file: {cad_file_id}")

    # Resolve tenant up front: every DB session below needs it for RLS.
    from app.core.tenant_context import resolve_tenant_id_for_cad
    _tenant_id = resolve_tenant_id_for_cad(cad_file_id)

    # ── Pre-render: compute hash ──────────────────────────────────────────
    try:
        from app.models.cad_file import CadFile
        from app.domains.products.cache_service import compute_step_hash

        with _pipeline_session(_tenant_id) as session:
            cad = session.get(CadFile, cad_file_id)
            # Only compute once; the hash is a cache key, not needed to render.
            if cad and cad.stored_path and not cad.step_file_hash:
                cad.step_file_hash = compute_step_hash(cad.stored_path)
                session.commit()
                logger.info(f"Saved step_file_hash for {cad_file_id}: {cad.step_file_hash[:12]}")
    except Exception:
        # Non-fatal, but keep the traceback so failures are diagnosable.
        logger.warning(f"step_file_hash computation failed for {cad_file_id} (non-fatal)", exc_info=True)

    # ── Render thumbnail ──────────────────────────────────────────────────
    try:
        from app.services.step_processor import regenerate_cad_thumbnail

        pl.info("render_step_thumbnail", "Calling regenerate_cad_thumbnail")
        success = regenerate_cad_thumbnail(cad_file_id, part_colors={})
        if not success:
            raise RuntimeError("regenerate_cad_thumbnail returned False")
    except Exception as exc:
        pl.step_error("render_step_thumbnail", f"Thumbnail render failed: {exc}", exc)
        logger.error(f"Thumbnail render failed for {cad_file_id}: {exc}")
        # Renderer hiccups are usually transient; back off and retry.
        raise self.retry(exc=exc, countdown=30, max_retries=2)

    # ── Post-render: bbox + sharp edges + materials (single session) ──────
    _tid = None  # tenant id for the WebSocket broadcast below
    try:
        from app.models.cad_file import CadFile
        from app.domains.pipeline.tasks.extract_metadata import _bbox_from_glb, _bbox_from_step_cadquery

        with _pipeline_session(_tenant_id) as session:
            cad = session.get(CadFile, cad_file_id)
            if not cad:
                logger.warning(f"CadFile {cad_file_id} not found in post-render phase")
            else:
                step_path = cad.stored_path
                attrs = cad.mesh_attributes or {}

                # Bounding box extraction: prefer the cheap GLB read, fall
                # back to re-parsing the STEP with cadquery.
                if step_path and not attrs.get("dimensions_mm"):
                    _step = Path(step_path)
                    _glb = _step.parent / f"{_step.stem}_thumbnail.glb"
                    bbox_data = _bbox_from_glb(str(_glb)) or _bbox_from_step_cadquery(step_path)
                    if bbox_data:
                        cad.mesh_attributes = {**attrs, **bbox_data}
                        attrs = cad.mesh_attributes  # refresh for the next check
                        dims = bbox_data["dimensions_mm"]
                        logger.info(f"bbox for {cad_file_id}: {dims['x']}×{dims['y']}×{dims['z']} mm")

                # Sharp edge extraction (PCurve-based, runs on render-worker with OCP)
                if step_path and "sharp_edge_pairs" not in attrs:
                    try:
                        from app.services.step_processor import extract_mesh_edge_data

                        edge_data = extract_mesh_edge_data(step_path)
                        if edge_data:
                            cad.mesh_attributes = {**attrs, **edge_data}
                            n_pairs = len(edge_data.get("sharp_edge_pairs", []))
                            logger.info(f"Sharp edge data extracted for {cad_file_id}: {n_pairs} sharp edges")
                    except Exception:
                        logger.exception(f"Sharp edge extraction failed for {cad_file_id} (non-fatal)")

                session.commit()

            # WebSocket broadcast target. Guard on `cad` as well: previously
            # a missing CadFile raised AttributeError here, which was caught
            # below and mis-logged as a post-render failure.
            _tid = str(cad.tenant_id) if cad and cad.tenant_id else None
    except Exception:
        logger.exception(f"Post-render processing failed for {cad_file_id} (non-fatal)")
        _tid = None

    # Auto-populate materials
    try:
        from app.domains.pipeline.tasks.extract_metadata import _auto_populate_materials_for_cad

        _auto_populate_materials_for_cad(cad_file_id, tenant_id=_tenant_id)
    except Exception:
        logger.exception(f"Auto material population failed for cad_file {cad_file_id} (non-fatal)")

    # Broadcast WebSocket event
    try:
        if _tid:
            from app.core.websocket import publish_event_sync

            publish_event_sync(_tid, {
                "type": "cad_processing_complete",
                "cad_file_id": cad_file_id,
                "status": "completed",
            })
    except Exception:
        logger.debug("WebSocket publish for CAD complete skipped (non-fatal)")

    # Auto-generate geometry GLB
    try:
        from app.domains.pipeline.tasks.export_glb import generate_gltf_geometry_task

        generate_gltf_geometry_task.delay(cad_file_id)
        pl.info("render_step_thumbnail", f"Queued generate_gltf_geometry_task for {cad_file_id}")
    except Exception:
        logger.debug("Could not queue generate_gltf_geometry_task (non-fatal)")

    pl.step_done("render_step_thumbnail")
@celery_app.task(bind=True, name="app.tasks.step_tasks.regenerate_thumbnail", queue="asset_pipeline")
def regenerate_thumbnail(self, cad_file_id: str, part_colors: dict):
    """Re-render the thumbnail for a CAD file with an updated per-part colour map."""
    pl = PipelineLogger(task_id=self.request.id)
    pl.step_start("regenerate_thumbnail", {"cad_file_id": cad_file_id})
    logger.info(f"Regenerating thumbnail for CAD file: {cad_file_id}")

    # Resolve and log tenant context at task start (required for RLS)
    from app.core.tenant_context import resolve_tenant_id_for_cad
    _tenant_id = resolve_tenant_id_for_cad(cad_file_id)

    try:
        from app.services.step_processor import regenerate_cad_thumbnail

        # A falsy return from the renderer is treated the same as a raised
        # error: both paths funnel into the retry below.
        if not regenerate_cad_thumbnail(cad_file_id, part_colors):
            raise RuntimeError("regenerate_cad_thumbnail returned False")
    except Exception as exc:
        pl.step_error("regenerate_thumbnail", f"Thumbnail regeneration failed: {exc}", exc)
        logger.error(f"Thumbnail regeneration failed for {cad_file_id}: {exc}")
        raise self.retry(exc=exc, countdown=30, max_retries=2)

    pl.step_done("regenerate_thumbnail")