diff --git a/CLAUDE.md b/CLAUDE.md index 25a9ab7..5484840 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -22,7 +22,7 @@ Automatisiertes Render-System für Schaeffler-Produktbilder. Kunden (intern) lad | `redis` | 6379 | Celery Broker | | `backend` | 8888 | FastAPI App (uvicorn) | | `worker` | – | Celery Worker, Queue: `step_processing`, concurrency=8 | -| `worker-thumbnail` | – | Celery Worker, Queue: `thumbnail_rendering`, **concurrency=1** | +| `worker-thumbnail` | – | Celery Worker, Queue: `asset_pipeline`, **concurrency=1** | | `beat` | – | Celery Beat (Scheduler) | | `blender-renderer` | 8100 | Blender HTTP-Service (STEP→PNG, STEP→STL) | | `threejs-renderer` | 8101 | Three.js/Playwright HTTP-Service | @@ -105,10 +105,10 @@ docker compose exec backend alembic current | Queue | Worker | Concurrency | Tasks | |---|---|---|---| | `step_processing` | `worker` | 8 | `process_step_file`, `render_order_line_task`, `dispatch_order_line_render` | -| `thumbnail_rendering` | `worker-thumbnail` | 1 | `render_step_thumbnail`, `regenerate_thumbnail`, `generate_stl_cache` | +| `asset_pipeline` | `worker-thumbnail` | 1 | `render_step_thumbnail`, `regenerate_thumbnail`, `generate_stl_cache` | | `ai_validation` | `worker` | 8 | Azure AI Validierung | -**Wichtig**: `thumbnail_rendering` läuft mit concurrency=1, weil der blender-renderer nur 1 Request gleichzeitig verarbeiten kann. Mehr parallele Requests führen zu Timeouts. +**Wichtig**: `asset_pipeline` läuft mit concurrency=1, weil der blender-renderer nur 1 Request gleichzeitig verarbeiten kann. Mehr parallele Requests führen zu Timeouts. ## STEP-Processing-Pipeline @@ -118,7 +118,7 @@ docker compose exec backend alembic current - `parsed_objects` in DB speichern - glTF konvertieren (falls konfiguriert) - Status: `processing` → queut `render_step_thumbnail` -3. **Thumbnail** (`render_step_thumbnail` auf `thumbnail_rendering`): +3. 
**Thumbnail** (`render_step_thumbnail` auf `asset_pipeline`): - Blender oder Three.js renderer aufrufen - STL-Cache erstellen: `{step_stem}_low.stl`, `{step_stem}_high.stl` - Status: `completed` oder `failed` diff --git a/LEARNINGS.md b/LEARNINGS.md index eee8ec9..909d8e1 100644 --- a/LEARNINGS.md +++ b/LEARNINGS.md @@ -52,7 +52,7 @@ Im Template-Modus (Mode B) lief Auto-Licht/World-Setup bedingungslos → übersc ### 2026-02-15 | Celery | Blender-Queue-Flooding durch falsche Concurrency Alle Tasks auf `step_processing` (concurrency=8) → 8 Workers gleichzeitig an blender-renderer (max 1) → 7× Timeout. -**Lösung:** `process_step_file` (step_processing, concurrency=8) nur schnelle Metadata; `render_step_thumbnail` (thumbnail_rendering, concurrency=1) für Blender. HTTP-Services mit max 1 Request immer auf eigener Queue mit concurrency=1. +**Lösung:** `process_step_file` (step_processing, concurrency=8) nur schnelle Metadata; `render_step_thumbnail` (asset_pipeline, concurrency=1) für Blender. HTTP-Services mit max 1 Request immer auf eigener Queue mit concurrency=1. ### 2026-02-18 | Frontend | Tailwind CSS-Variablen inkompatibel mit opacity-Syntax `bg-surface/50` erzeugt `rgb(var(--color-bg-surface) / 0.5)` — invalides CSS wenn Variable ein Hex-Wert ist. @@ -64,7 +64,7 @@ Three.js schrieb STL in tempfile und löschte es → Download-Endpoint fand nich ### 2026-02-20 | STL-Cache | blender-renderer fehlte /convert-stl Endpoint Blender renderte + konvertierte in einem Schritt, persistierte STL nicht. -**Lösung:** Neuer `/convert-stl` Endpoint (STEP→STL ohne Render), Celery-Task `generate_stl_cache` auf `thumbnail_rendering`, Admin-Batch-Funktion "Generate Missing STLs". +**Lösung:** Neuer `/convert-stl` Endpoint (STEP→STL ohne Render), Celery-Task `generate_stl_cache` auf `asset_pipeline`, Admin-Batch-Funktion "Generate Missing STLs". 
### 2026-02-22 | Material-System | Fehlender Alias blockiert Material-Replacement `"Stahl v2"` in DB weder in materials noch in material_aliases → keine Ersetzung, Silent-Fail. @@ -179,9 +179,9 @@ Celery-Task importierte nur `AssetLibrary` → `Material.creator` → String-Ref ### 2026-03-06 | Render-Pipeline | render_order_line_task auf falschem Worker (kein Blender) Task auf `step_processing` → `worker`-Container (kein Blender) → `is_blender_available()` = False → Pillow-Placeholder, kein Fehler. -**Lösung:** Queue auf `thumbnail_rendering` → `render-worker`-Container (Blender 5.0.1 + cadquery). Blender-Tasks IMMER auf `thumbnail_rendering`. +**Lösung:** Queue auf `asset_pipeline` → `render-worker`-Container (Blender 5.0.1 + cadquery). Blender-Tasks IMMER auf `asset_pipeline`. -### 2026-03-06 | Docker | worker-thumbnail vs render-worker — beide auf thumbnail_rendering +### 2026-03-06 | Docker | worker-thumbnail vs render-worker — beide auf asset_pipeline Zwei Services unterschiedlicher Capabilities auf gleicher Queue → round-robin → 50% Silent-Fail. **Lösung:** `worker-thumbnail` aus docker-compose entfernt. `render-worker` ist alleiniger Consumer. Nie zwei Services mit unterschiedlichen Fähigkeiten auf dieselbe Queue.
diff --git a/PLAN.md b/PLAN.md index 27f1e4d..69d276e 100644 --- a/PLAN.md +++ b/PLAN.md @@ -1418,8 +1418,8 @@ Nach Aktivierung von Multi-Tenancy (Migration 035/036) hatten mehrere Bugs die g | Fix | Problem | Lösung | Datei | |---|---|---|---| -| B-Fix-1 | `worker-thumbnail` ohne Blender konkurrierte auf `thumbnail_rendering` → 50% Silent-Fails | `worker-thumbnail` aus docker-compose.yml entfernt | `docker-compose.yml` | -| B-Fix-2 | `render_order_line_task` auf `step_processing` Queue → `worker` ohne Blender → Pillow-Fallback | Queue zu `thumbnail_rendering` geändert | `step_tasks.py:247` | +| B-Fix-1 | `worker-thumbnail` ohne Blender konkurrierte auf `asset_pipeline` → 50% Silent-Fails | `worker-thumbnail` aus docker-compose.yml entfernt | `docker-compose.yml` | +| B-Fix-2 | `render_order_line_task` auf `step_processing` Queue → `worker` ohne Blender → Pillow-Fallback | Queue zu `asset_pipeline` geändert | `step_tasks.py:247` | | B-Fix-3 | Circular Import `template_service.py` ↔ `domains/rendering/service.py` → `resolve_template()` nie aufrufbar | Volle sync SQLAlchemy Implementierung in `template_service.py` wiederhergestellt | `services/template_service.py` | | B-Fix-4 | `audit_log.tenant_id NOT NULL` → Broadcast-Notifications scheiterten → Order Submit 500 | `ALTER TABLE audit_log ALTER COLUMN tenant_id DROP NOT NULL` | DB direkt | | B-Fix-5 | Shared System-Tabellen (`output_types`, `materials`, etc.) 
`tenant_id NOT NULL` → Create-Endpoints schlugen fehl | `tenant_id DROP NOT NULL` für alle System-Tabellen | DB direkt | @@ -1433,7 +1433,7 @@ Nach Aktivierung von Multi-Tenancy (Migration 035/036) hatten mehrere Bugs die g **`GET /api/worker/health/render`** — Render Health Endpoint: - Render-Worker connected (Celery inspect) - Blender erreichbar (HTTP GET blender-renderer:8100/health) -- `thumbnail_rendering` Queue Tiefe < 10 +- `asset_pipeline` Queue Tiefe < 10 - Letzter Render < 30 min alt und erfolgreich - Response: `{ status: "ok"|"degraded"|"down", render_worker_connected, blender_available, thumbnail_queue_depth, last_render_at, ... }` @@ -1449,7 +1449,7 @@ python scripts/test_render_pipeline.py --full # Alle Output-Types (langsam) | Queue | Worker | Concurrency | Tasks | |---|---|---|---| | `step_processing` | `worker` | 8 | `process_step_file`, `dispatch_order_line_render` | -| `thumbnail_rendering` | `render-worker` (Blender 5.0.1) | 1 | `render_step_thumbnail`, `regenerate_thumbnail`, `render_order_line_task`, `generate_stl_cache` | +| `asset_pipeline` | `render-worker` (Blender 5.0.1) | 1 | `render_step_thumbnail`, `regenerate_thumbnail`, `render_order_line_task`, `generate_stl_cache` | | `ai_validation` | `worker` | 8 | Azure AI Validierung | -**Schlüsselprinzip**: Alles was Blender aufruft → `thumbnail_rendering` Queue → nur `render-worker` → kein Timeout durch parallele Requests. +**Schlüsselprinzip**: Alles was Blender aufruft → `asset_pipeline` Queue → nur `render-worker` → kein Timeout durch parallele Requests. 
diff --git a/PLAN_REFACTOR.md b/PLAN_REFACTOR.md index 6360f3e..74d626e 100644 --- a/PLAN_REFACTOR.md +++ b/PLAN_REFACTOR.md @@ -15,7 +15,7 @@ Schaeffler Automat is a working Blender-based media production pipeline with: - 7 Docker services with GPU render-worker - PostgreSQL with tenant_id columns + Row Level Security (RLS) enabled but inconsistently applied at the application layer -- Celery task queues with two workers (step_processing + thumbnail_rendering) +- Celery task queues with two workers (step_processing + asset_pipeline) - WebSocket real-time events via Redis Pub/Sub - React/Vite frontend with workflow editor (ReactFlow), media browser, notifications @@ -584,7 +584,7 @@ internal teams), RLS + tenant_id partitioning is sufficient. - Each tenant gets own PostgreSQL schema (not separate DB) with schema-based routing - Shared MinIO with per-tenant bucket policies - Separate Redis database (0-15) per tenant (max 16 tenants) -- Celery routing: per-tenant queue prefix `{tenant_slug}.thumbnail_rendering` +- Celery routing: per-tenant queue prefix `{tenant_slug}.asset_pipeline` ### 4.4 Per-Tenant Feature Flags @@ -751,7 +751,7 @@ export const HELP_TEXTS: Record = { }, "action.regenerate_thumbnails": { title: "Regenerate All Thumbnails", - body: "Re-renders thumbnails for all STEP files using current settings. This queues all files on the thumbnail_rendering worker. Expected time: N × 30s. Only needed after changing renderer settings.", + body: "Re-renders thumbnails for all STEP files using current settings. This queues all files on the asset_pipeline worker. Expected time: N × 30s. Only needed after changing renderer settings.", warning: "This will queue a large number of tasks. Only run during off-peak hours.", }, // ... all settings @@ -889,7 +889,7 @@ rejection UI. 
`rejected_at` column exists but there is no rejection reason field ### 8.1 Current Concurrency Controls - `worker` (step_processing): `CELERY_WORKER_CONCURRENCY` env var, default 8 -- `render-worker` (thumbnail_rendering): hardcoded 1 (Blender serial access) +- `render-worker` (asset_pipeline): hardcoded 1 (Blender serial access) - Both require Docker service restart to change concurrency ### 8.2 Dynamic Worker Scaling @@ -901,7 +901,7 @@ Use Celery's built-in `autoscale` option: render-worker: command: celery -A app.tasks.celery_app worker --loglevel=info - -Q thumbnail_rendering + -Q asset_pipeline --autoscale=1,1 # min=1, max=1 (single Blender concurrency) --concurrency=1 ``` @@ -984,7 +984,7 @@ After Phase 2 decomposition, update `celery_app.conf.update(task_routes={...})`: ```python task_routes = { "app.domains.pipeline.tasks.*": {"queue": "step_processing"}, - "app.domains.rendering.tasks.*": {"queue": "thumbnail_rendering"}, + "app.domains.rendering.tasks.*": {"queue": "asset_pipeline"}, "app.domains.media.tasks.*": {"queue": "step_processing"}, "app.tasks.ai_tasks.*": {"queue": "ai_validation"}, "app.tasks.beat_tasks.*": {"queue": "step_processing"}, diff --git a/backend/alembic/versions/054_worker_configs.py b/backend/alembic/versions/054_worker_configs.py index c8c8ca8..5261015 100644 --- a/backend/alembic/versions/054_worker_configs.py +++ b/backend/alembic/versions/054_worker_configs.py @@ -26,7 +26,7 @@ def upgrade() -> None: INSERT INTO worker_configs (queue_name, max_concurrency, min_concurrency, enabled) VALUES ('step_processing', 8, 2, true), - ('thumbnail_rendering', 1, 1, true), + ('asset_pipeline', 1, 1, true), ('ai_validation', 4, 1, true) ON CONFLICT DO NOTHING """) diff --git a/backend/alembic/versions/6ebfe2737531_rename_tessellation_settings_gltf_.py b/backend/alembic/versions/6ebfe2737531_rename_tessellation_settings_gltf_.py new file mode 100644 index 0000000..7ff92e6 --- /dev/null +++ 
b/backend/alembic/versions/6ebfe2737531_rename_tessellation_settings_gltf_.py @@ -0,0 +1,44 @@ +"""rename_tessellation_settings_gltf_production_to_scene + +Revision ID: 6ebfe2737531 +Revises: 062 +Create Date: 2026-03-12 20:39:36.880236 + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision: str = '6ebfe2737531' +down_revision: Union[str, None] = '062' +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + op.execute(""" + UPDATE system_settings + SET key = 'scene_linear_deflection' + WHERE key = 'gltf_production_linear_deflection' + """) + op.execute(""" + UPDATE system_settings + SET key = 'scene_angular_deflection' + WHERE key = 'gltf_production_angular_deflection' + """) + + +def downgrade() -> None: + op.execute(""" + UPDATE system_settings + SET key = 'gltf_production_linear_deflection' + WHERE key = 'scene_linear_deflection' + """) + op.execute(""" + UPDATE system_settings + SET key = 'gltf_production_angular_deflection' + WHERE key = 'scene_angular_deflection' + """) diff --git a/backend/app/api/routers/admin.py b/backend/app/api/routers/admin.py index ac0c3b3..e55b887 100644 --- a/backend/app/api/routers/admin.py +++ b/backend/app/api/routers/admin.py @@ -366,7 +366,7 @@ async def update_settings( await db.commit() # Note: blender-renderer HTTP service removed; concurrency is now controlled - # via render-worker Docker concurrency setting (thumbnail_rendering queue). + # via render-worker Docker concurrency setting (asset_pipeline queue). 
return _settings_to_out(await _load_settings(db)) diff --git a/backend/app/api/routers/cad.py b/backend/app/api/routers/cad.py index df8caa0..328c468 100644 --- a/backend/app/api/routers/cad.py +++ b/backend/app/api/routers/cad.py @@ -1,4 +1,5 @@ """CAD file router - serve thumbnails, glTF models, parsed objects, and trigger reprocessing.""" +import logging import uuid from datetime import datetime from pathlib import Path @@ -20,6 +21,7 @@ from app.utils.auth import get_current_user, is_privileged from app.services.product_service import link_cad_to_product, lookup_product router = APIRouter(prefix="/cad", tags=["cad"]) +logger = logging.getLogger(__name__) # --------------------------------------------------------------------------- @@ -273,6 +275,7 @@ async def get_objects( "cad_file_id": str(cad.id), "original_name": cad.original_name, "processing_status": cad.processing_status.value, + "step_hash": cad.step_file_hash, "parsed_objects": cad.parsed_objects, } @@ -318,6 +321,11 @@ async def generate_gltf_production( if not cad.stored_path: raise HTTPException(status_code=404, detail="STEP file not uploaded for this CAD file") + logger.warning( + "generate_gltf_production called for cad %s — " + "deprecated: renders now consume usd_master directly", + id, + ) from app.tasks.step_tasks import generate_gltf_production_task task = generate_gltf_production_task.delay(str(id)) return {"status": "queued", "task_id": task.id, "cad_file_id": str(id)} diff --git a/backend/app/api/routers/orders.py b/backend/app/api/routers/orders.py index 5adc1cd..fe11f1a 100644 --- a/backend/app/api/routers/orders.py +++ b/backend/app/api/routers/orders.py @@ -1000,6 +1000,53 @@ async def cancel_line_render( } +class RejectLineBody(BaseModel): + reason: str = "" + + +@router.post("/{order_id}/lines/{line_id}/reject", status_code=200) +async def reject_order_line( + order_id: uuid.UUID, + line_id: uuid.UUID, + body: RejectLineBody, + user: User = Depends(get_current_user), + db: 
AsyncSession = Depends(get_db), +): + """Reject a single order line (admin/PM only). + + Sets item_status to 'rejected' and stores the reason in the notes field. + """ + if not _is_privileged(user): + raise HTTPException(status_code=403, detail="Insufficient permissions") + + result = await db.execute(select(Order).where(Order.id == order_id)) + order = result.scalar_one_or_none() + if not order: + raise HTTPException(404, detail="Order not found") + + line_result = await db.execute( + select(OrderLine).where(OrderLine.id == line_id, OrderLine.order_id == order_id) + ) + line = line_result.scalar_one_or_none() + if not line: + raise HTTPException(404, detail="Order line not found") + + from sqlalchemy import update as sql_update + + notes_value = body.reason.strip() if body.reason and body.reason.strip() else line.notes + await db.execute( + sql_update(OrderLine) + .where(OrderLine.id == line.id) + .values( + item_status="rejected", + notes=notes_value, + ) + ) + await db.commit() + + return {"rejected": True, "line_id": str(line.id), "reason": body.reason} + + @router.post("/{order_id}/cancel-renders") async def cancel_order_renders( order_id: uuid.UUID, diff --git a/backend/app/api/routers/worker.py b/backend/app/api/routers/worker.py index ef13788..1f6cdbe 100644 --- a/backend/app/api/routers/worker.py +++ b/backend/app/api/routers/worker.py @@ -237,7 +237,7 @@ async def reprocess_cad_file( # Queue inspection + control # --------------------------------------------------------------------------- -MONITORED_QUEUES = ["step_processing", "thumbnail_rendering", "ai_validation"] +MONITORED_QUEUES = ["step_processing", "asset_pipeline", "ai_validation"] def _parse_redis_task(raw: str) -> dict | None: @@ -515,7 +515,7 @@ async def render_health( details: dict = {} - # 1. Check if render-worker (thumbnail_rendering queue) is connected + has Blender + # 1. 
Check if render-worker (asset_pipeline queue) is connected + has Blender render_worker_connected = False blender_available = False @@ -534,10 +534,10 @@ async def render_health( else: all_workers = list(inspect_result.get("ping", {}).keys()) details["workers"] = all_workers - # Find any worker consuming thumbnail_rendering queue + # Find any worker consuming asset_pipeline queue for worker_name, queues in inspect_result.get("active_queues", {}).items(): queue_names = [q.get("name") for q in (queues or [])] - if "thumbnail_rendering" in queue_names: + if "asset_pipeline" in queue_names: render_worker_connected = True # render-worker always has Blender — it starts Blender successfully blender_available = True @@ -547,11 +547,11 @@ async def render_health( render_worker_connected = True details["worker_detection"] = "fallback" - # 3. Queue depth for thumbnail_rendering + # 3. Queue depth for asset_pipeline thumbnail_queue_depth = 0 try: r = redis_lib.from_url(app_settings.redis_url, decode_responses=True) - thumbnail_queue_depth = r.llen("thumbnail_rendering") or 0 + thumbnail_queue_depth = r.llen("asset_pipeline") or 0 except Exception as exc: details["redis_error"] = str(exc) diff --git a/backend/app/domains/materials/tasks.py b/backend/app/domains/materials/tasks.py index 80c6d35..97ecd77 100644 --- a/backend/app/domains/materials/tasks.py +++ b/backend/app/domains/materials/tasks.py @@ -18,7 +18,7 @@ CATALOG_SCRIPT = Path(os.environ.get("RENDER_SCRIPTS_DIR", "/render-scripts")) / @celery_app.task( name="app.domains.materials.tasks.refresh_asset_library_catalog", - queue="thumbnail_rendering", + queue="asset_pipeline", bind=True, max_retries=2, default_retry_delay=30, diff --git a/backend/app/domains/media/models.py b/backend/app/domains/media/models.py index d5b5e17..c58c012 100644 --- a/backend/app/domains/media/models.py +++ b/backend/app/domains/media/models.py @@ -14,8 +14,8 @@ class MediaAssetType(str, enum.Enum): turntable = "turntable" stl_low = "stl_low" 
stl_high = "stl_high" - gltf_geometry = "gltf_geometry" - gltf_production = "gltf_production" + gltf_geometry = "gltf_geometry" # DEPRECATED: use usd_master — viewer GLB auto-generated as part of USD pipeline + gltf_production = "gltf_production" # DEPRECATED: use usd_master — high-quality production GLB superseded by USD master blend_production = "blend_production" usd_master = "usd_master" diff --git a/backend/app/domains/pipeline/tasks/export_glb.py b/backend/app/domains/pipeline/tasks/export_glb.py index 28de3be..c43709c 100644 --- a/backend/app/domains/pipeline/tasks/export_glb.py +++ b/backend/app/domains/pipeline/tasks/export_glb.py @@ -13,7 +13,7 @@ from app.core.pipeline_logger import PipelineLogger logger = logging.getLogger(__name__) -@celery_app.task(bind=True, name="app.tasks.step_tasks.generate_gltf_geometry_task", queue="thumbnail_rendering", max_retries=1) +@celery_app.task(bind=True, name="app.tasks.step_tasks.generate_gltf_geometry_task", queue="asset_pipeline", max_retries=1) def generate_gltf_geometry_task(self, cad_file_id: str): """Export a geometry GLB directly from STEP via OCC (no STL intermediary). 
@@ -83,25 +83,47 @@ def generate_gltf_geometry_task(self, cad_file_id: str): settings_rows = session.execute(_select(_SysSetting)).scalars().all() sys_settings = {s.key: s.value for s in settings_rows} - # Hash-based cache check: skip tessellation if file hasn't changed - step_file_hash = cad_file.step_file_hash - if step_file_hash: - from app.domains.media.models import MediaAsset, MediaAssetType - import uuid as _uuid_check + linear_deflection = float(sys_settings.get("scene_linear_deflection", "0.1")) + angular_deflection = float(sys_settings.get("scene_angular_deflection", "0.1")) + tessellation_engine = sys_settings.get("tessellation_engine", "occ") + + # Hash-based cache check: skip tessellation if file and settings haven't changed + from app.domains.products.cache_service import compute_step_hash as _compute_step_hash + from app.domains.media.models import MediaAsset, MediaAssetType + import uuid as _uuid_check + _current_hash = _compute_step_hash(str(step_path_str)) + _cache_hit_asset_id = None + + # Composite cache key includes deflection settings so changing them invalidates cache + effective_cache_key = ( + f"{_current_hash}:{linear_deflection}:{angular_deflection}:{tessellation_engine}" + if _current_hash else None + ) + + if effective_cache_key: existing_geo = session.execute( _select(MediaAsset).where( MediaAsset.cad_file_id == _uuid_check.UUID(cad_file_id), MediaAsset.asset_type == MediaAssetType.gltf_geometry, ) ).scalars().first() - if existing_geo: - logger.info("[CACHE] hash match — skipping geometry GLB tessellation for %s", cad_file_id) - pl.step_done("export_glb_geometry", result={"cached": True, "asset_id": str(existing_geo.id)}) - _cache_hit_asset_id = str(existing_geo.id) + stored_key = (existing_geo.render_config or {}).get("cache_key", "") if existing_geo else "" + if stored_key == effective_cache_key: + _asset_disk_path = _Path(app_settings.upload_dir) / existing_geo.storage_key + if _asset_disk_path.exists(): + logger.info("[CACHE] 
cache key match — skipping geometry GLB tessellation for %s", cad_file_id) + pl.step_done("export_glb_geometry", result={"cached": True, "asset_id": str(existing_geo.id)}) + _cache_hit_asset_id = str(existing_geo.id) + else: + logger.info("[CACHE] cache key match but asset missing on disk — re-running tessellation for %s", cad_file_id) else: - _cache_hit_asset_id = None + # Cache miss: update stored hash so next run can use it + cad_file.step_file_hash = _current_hash + session.commit() else: - _cache_hit_asset_id = None + # No hash available: update stored hash and proceed + cad_file.step_file_hash = _current_hash + session.commit() eng.dispose() if _cache_hit_asset_id is not None: @@ -112,10 +134,6 @@ def generate_gltf_geometry_task(self, cad_file_id: str): logger.debug("Could not queue generate_usd_master_task from cache-hit path (non-fatal)") return {"cached": True, "asset_id": _cache_hit_asset_id} - linear_deflection = float(sys_settings.get("scene_linear_deflection", "0.1")) - angular_deflection = float(sys_settings.get("scene_angular_deflection", "0.1")) - tessellation_engine = sys_settings.get("tessellation_engine", "occ") - step = _Path(step_path_str) if not step.exists(): @@ -197,6 +215,7 @@ def generate_gltf_geometry_task(self, cad_file_id: str): existing.storage_key = _key existing.mime_type = "model/gltf-binary" existing.file_size_bytes = _file_size + existing.render_config = {"cache_key": effective_cache_key} if product_id: existing.product_id = _uuid.UUID(product_id) _sess.commit() @@ -209,6 +228,7 @@ def generate_gltf_geometry_task(self, cad_file_id: str): storage_key=_key, mime_type="model/gltf-binary", file_size_bytes=_file_size, + render_config={"cache_key": effective_cache_key}, ) _sess.add(asset) _sess.commit() @@ -234,7 +254,7 @@ def generate_gltf_geometry_task(self, cad_file_id: str): @celery_app.task( bind=True, name="app.tasks.step_tasks.generate_gltf_production_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=2, 
) def generate_gltf_production_task(self, cad_file_id: str, product_id: str | None = None) -> dict: @@ -511,7 +531,7 @@ def generate_gltf_production_task(self, cad_file_id: str, product_id: str | None @celery_app.task( bind=True, name="app.tasks.step_tasks.generate_usd_master_task", - queue="thumbnail_rendering", + queue="asset_pipeline", # needs pxr (usd-core) + OCC — both only in render-worker max_retries=1, ) def generate_usd_master_task(self, cad_file_id: str) -> dict: @@ -583,19 +603,44 @@ def generate_usd_master_task(self, cad_file_id: str) -> dict: settings_rows = sess.execute(_sel(SystemSetting)).scalars().all() sys_settings = {s.key: s.value for s in settings_rows} - # Hash-based cache check: skip tessellation if file hasn't changed - step_file_hash = cad_file.step_file_hash - if step_file_hash: + linear_deflection = float(sys_settings.get("render_linear_deflection", "0.03")) + angular_deflection = float(sys_settings.get("render_angular_deflection", "0.05")) + sharp_threshold = float(sys_settings.get("sharp_edge_threshold", "20.0")) + + # Hash-based cache check: skip tessellation if file and settings haven't changed + from app.domains.products.cache_service import compute_step_hash as _compute_step_hash_usd + _current_hash_usd = _compute_step_hash_usd(str(step_path)) + + # Composite cache key includes deflection settings so changing them invalidates cache + effective_cache_key = ( + f"{_current_hash_usd}:{linear_deflection}:{angular_deflection}:{sharp_threshold}" + if _current_hash_usd else None + ) + + if effective_cache_key: existing_usd = sess.execute( _sel(MediaAsset).where( MediaAsset.cad_file_id == cad_file.id, MediaAsset.asset_type == MediaAssetType.usd_master, ) ).scalars().first() - if existing_usd: - logger.info("[CACHE] hash match — skipping USD master tessellation for %s", cad_file_id) - pl.step_done("usd_master", result={"cached": True, "asset_id": str(existing_usd.id)}) - _cache_hit_asset_id = str(existing_usd.id) + stored_key = 
(existing_usd.render_config or {}).get("cache_key", "") if existing_usd else "" + if stored_key == effective_cache_key: + _usd_disk_path = _Path(app_settings.upload_dir) / existing_usd.storage_key + if _usd_disk_path.exists(): + logger.info("[CACHE] cache key match — skipping USD master tessellation for %s", cad_file_id) + pl.step_done("usd_master", result={"cached": True, "asset_id": str(existing_usd.id)}) + _cache_hit_asset_id = str(existing_usd.id) + else: + logger.info("[CACHE] cache key match but USD asset missing on disk — re-running tessellation for %s", cad_file_id) + else: + # Cache miss: update stored hash so next run can use it + cad_file.step_file_hash = _current_hash_usd + sess.commit() + else: + # No hash available: update stored hash and proceed + cad_file.step_file_hash = _current_hash_usd + sess.commit() eng.dispose() if _cache_hit_asset_id is not None: @@ -606,10 +651,6 @@ def generate_usd_master_task(self, cad_file_id: str) -> dict: pl.step_error("usd_master", err, None) raise RuntimeError(err) - linear_deflection = float(sys_settings.get("render_linear_deflection", "0.03")) - angular_deflection = float(sys_settings.get("render_angular_deflection", "0.05")) - sharp_threshold = float(sys_settings.get("sharp_edge_threshold", "20.0")) - output_path = step_path.parent / f"{step_path.stem}_master.usd" scripts_dir = _Path(_os.environ.get("RENDER_SCRIPTS_DIR", "/render-scripts")) script_path = scripts_dir / "export_step_to_usd.py" @@ -675,6 +716,7 @@ def generate_usd_master_task(self, cad_file_id: str) -> dict: existing.storage_key = _key existing.mime_type = "model/vnd.usd" existing.file_size_bytes = _file_size + existing.render_config = {"cache_key": effective_cache_key} sess2.commit() asset_id = str(existing.id) else: @@ -684,6 +726,7 @@ def generate_usd_master_task(self, cad_file_id: str) -> dict: storage_key=_key, mime_type="model/vnd.usd", file_size_bytes=_file_size, + render_config={"cache_key": effective_cache_key}, ) sess2.add(asset) 
sess2.commit() diff --git a/backend/app/domains/pipeline/tasks/extract_metadata.py b/backend/app/domains/pipeline/tasks/extract_metadata.py index 1342db0..98df43e 100644 --- a/backend/app/domains/pipeline/tasks/extract_metadata.py +++ b/backend/app/domains/pipeline/tasks/extract_metadata.py @@ -104,7 +104,7 @@ def process_step_file(self, cad_file_id: str): pl.info("process_step_file", f"Processing STEP file (metadata only): {cad_file_id}") try: from app.services.step_processor import extract_cad_metadata - extract_cad_metadata(cad_file_id) + extract_cad_metadata(cad_file_id, tenant_id=_tenant_id) except Exception as exc: pl.step_error("process_step_file", f"STEP metadata extraction failed: {exc}", exc) r.delete(lock_key) # release lock so a retry can proceed @@ -119,7 +119,7 @@ def process_step_file(self, cad_file_id: str): render_step_thumbnail.delay(cad_file_id) -def _auto_populate_materials_for_cad(cad_file_id: str) -> None: +def _auto_populate_materials_for_cad(cad_file_id: str, tenant_id: str | None = None) -> None: """Sync helper: auto-populate cad_part_materials from Excel for newly-processed CAD files. 
Only fills products where cad_part_materials is empty or all-blank, @@ -132,10 +132,12 @@ def _auto_populate_materials_for_cad(cad_file_id: str) -> None: from app.models.product import Product from app.api.routers.products import build_materials_from_excel from app.services.step_processor import build_part_colors + from app.core.tenant_context import set_tenant_context_sync sync_url = app_settings.database_url.replace("+asyncpg", "") eng = create_engine(sync_url) with Session(eng) as session: + set_tenant_context_sync(session, tenant_id) # Load the CAD file to get parsed objects cad_file = session.execute( sql_select(CadFile).where(CadFile.id == cad_file_id) @@ -201,7 +203,7 @@ def _auto_populate_materials_for_cad(cad_file_id: str) -> None: eng.dispose() -@celery_app.task(name="app.tasks.step_tasks.reextract_cad_metadata", queue="thumbnail_rendering") +@celery_app.task(name="app.tasks.step_tasks.reextract_cad_metadata", queue="asset_pipeline") def reextract_cad_metadata(cad_file_id: str): """Re-extract bounding-box dimensions for an already-completed CAD file. diff --git a/backend/app/domains/pipeline/tasks/render_order_line.py b/backend/app/domains/pipeline/tasks/render_order_line.py index ce63679..1b147a6 100644 --- a/backend/app/domains/pipeline/tasks/render_order_line.py +++ b/backend/app/domains/pipeline/tasks/render_order_line.py @@ -20,7 +20,7 @@ def dispatch_order_line_render(order_line_id: str): render_order_line_task.delay(order_line_id) -@celery_app.task(bind=True, name="app.tasks.step_tasks.render_order_line_task", queue="thumbnail_rendering", max_retries=3) +@celery_app.task(bind=True, name="app.tasks.step_tasks.render_order_line_task", queue="asset_pipeline", max_retries=3) def render_order_line_task(self, order_line_id: str): """Render a specific output type for an order line. 
diff --git a/backend/app/domains/pipeline/tasks/render_thumbnail.py b/backend/app/domains/pipeline/tasks/render_thumbnail.py index 8991c44..6b3cd7d 100644 --- a/backend/app/domains/pipeline/tasks/render_thumbnail.py +++ b/backend/app/domains/pipeline/tasks/render_thumbnail.py @@ -14,11 +14,11 @@ from app.core.pipeline_logger import PipelineLogger logger = logging.getLogger(__name__) -@celery_app.task(bind=True, name="app.tasks.step_tasks.render_step_thumbnail", queue="thumbnail_rendering") +@celery_app.task(bind=True, name="app.tasks.step_tasks.render_step_thumbnail", queue="asset_pipeline") def render_step_thumbnail(self, cad_file_id: str): """Render the thumbnail for a freshly-processed STEP file. - Runs on the dedicated thumbnail_rendering queue (concurrency=1) so the + Runs on the dedicated asset_pipeline queue (concurrency=1) so the blender-renderer service is never overwhelmed by concurrent requests. On success, also auto-populates materials and marks the CadFile as completed. """ @@ -139,7 +139,7 @@ def render_step_thumbnail(self, cad_file_id: str): # Auto-populate materials now that parsed_objects are available try: from app.domains.pipeline.tasks.extract_metadata import _auto_populate_materials_for_cad - _auto_populate_materials_for_cad(cad_file_id) + _auto_populate_materials_for_cad(cad_file_id, tenant_id=_tenant_id) except Exception: logger.exception( f"Auto material population failed for cad_file {cad_file_id} (non-fatal)" @@ -180,7 +180,7 @@ def render_step_thumbnail(self, cad_file_id: str): pl.step_done("render_step_thumbnail") -@celery_app.task(bind=True, name="app.tasks.step_tasks.regenerate_thumbnail", queue="thumbnail_rendering") +@celery_app.task(bind=True, name="app.tasks.step_tasks.regenerate_thumbnail", queue="asset_pipeline") def regenerate_thumbnail(self, cad_file_id: str, part_colors: dict): """Regenerate thumbnail with per-part colours.""" pl = PipelineLogger(task_id=self.request.id) diff --git a/backend/app/domains/rendering/tasks.py 
b/backend/app/domains/rendering/tasks.py index 92ccd09..52f9a7d 100644 --- a/backend/app/domains/rendering/tasks.py +++ b/backend/app/domains/rendering/tasks.py @@ -1,6 +1,6 @@ """Rendering domain tasks — Celery tasks for Blender-based rendering. -These tasks run on the `thumbnail_rendering` queue in the render-worker +These tasks run on the `asset_pipeline` queue in the render-worker container, which has Blender and cadquery available. Phase A2: Initial implementation replacing the blender-renderer HTTP service. @@ -48,7 +48,7 @@ def _update_workflow_run_status(order_line_id: str, status: str, error: str | No @celery_app.task( bind=True, name="app.domains.rendering.tasks.render_still_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=2, ) def render_still_task( @@ -150,7 +150,7 @@ def render_still_task( @celery_app.task( bind=True, name="app.domains.rendering.tasks.render_turntable_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=2, ) def render_turntable_task( @@ -391,7 +391,7 @@ def _resolve_step_path_for_order_line(order_line_id: str) -> tuple[str | None, s @celery_app.task( bind=True, name="app.domains.rendering.tasks.render_order_line_still_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=2, ) def render_order_line_still_task(self, order_line_id: str, **params) -> dict: @@ -509,7 +509,7 @@ def render_order_line_still_task(self, order_line_id: str, **params) -> dict: @celery_app.task( bind=True, name="app.domains.rendering.tasks.export_gltf_for_order_line_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=1, ) def export_gltf_for_order_line_task(self, order_line_id: str) -> dict: @@ -555,7 +555,7 @@ def export_gltf_for_order_line_task(self, order_line_id: str) -> dict: @celery_app.task( bind=True, name="app.domains.rendering.tasks.export_blend_for_order_line_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=1, ) def 
export_blend_for_order_line_task(self, order_line_id: str) -> dict: @@ -646,7 +646,7 @@ def export_blend_for_order_line_task(self, order_line_id: str) -> dict: @celery_app.task( bind=True, name="app.domains.rendering.tasks.apply_asset_library_materials_task", - queue="thumbnail_rendering", + queue="asset_pipeline", max_retries=1, ) def apply_asset_library_materials_task(self, order_line_id: str, asset_library_id: str) -> dict: diff --git a/backend/app/services/part_key_service.py b/backend/app/services/part_key_service.py index e933187..3cf7a78 100644 --- a/backend/app/services/part_key_service.py +++ b/backend/app/services/part_key_service.py @@ -105,6 +105,10 @@ def build_scene_manifest(cad_file, usd_asset=None) -> dict: object_names: list[str] = cad_file.parsed_objects.get("objects") or [] seen_keys: set[str] = set() for source_name in object_names: + # Fallback: USD master not yet generated. Use source_name as xcaf_path proxy. + # Note: slugs produced here may differ from what export_step_to_usd.py will + # produce for unnamed parts (which use sha256 of the XCAF hierarchy path). + # Named parts will match once USD master is generated. 
part_key = generate_part_key(source_name, source_name, seen_keys) effective_material, provenance = _resolve_material( part_key, source_name, manual, resolved, source diff --git a/backend/app/services/render_blender.py b/backend/app/services/render_blender.py index 9b6cb9e..db946fa 100644 --- a/backend/app/services/render_blender.py +++ b/backend/app/services/render_blender.py @@ -15,8 +15,8 @@ from pathlib import Path logger = logging.getLogger(__name__) -def _glb_from_step(step_path: Path, glb_path: Path) -> None: - """Convert STEP → GLB via OCC (export_step_to_gltf.py, no Blender needed).""" +def _glb_from_step(step_path: Path, glb_path: Path, tessellation_engine: str = "occ") -> None: + """Convert STEP → GLB via OCC or GMSH (export_step_to_gltf.py, no Blender needed).""" import subprocess import sys as _sys @@ -32,12 +32,13 @@ def _glb_from_step(step_path: Path, glb_path: Path) -> None: "--output_path", str(glb_path), "--linear_deflection", str(linear_deflection), "--angular_deflection", str(angular_deflection), + "--tessellation_engine", tessellation_engine, ] result = subprocess.run(cmd, capture_output=True, text=True, timeout=120) for line in result.stdout.splitlines(): - logger.info("[occ-gltf] %s", line) + logger.info("[export-gltf] %s", line) for line in result.stderr.splitlines(): - logger.warning("[occ-gltf stderr] %s", line) + logger.warning("[export-gltf stderr] %s", line) if result.returncode != 0 or not glb_path.exists() or glb_path.stat().st_size == 0: raise RuntimeError( f"export_step_to_gltf.py failed (exit {result.returncode}).\n" @@ -90,8 +91,9 @@ def render_still( mesh_attributes: dict | None = None, log_callback: "Callable[[str], None] | None" = None, usd_path: "Path | None" = None, + tessellation_engine: str = "occ", ) -> dict: - """Convert STEP → GLB (OCC) → PNG (Blender subprocess). + """Convert STEP → GLB (OCC or GMSH) → PNG (Blender subprocess). 
When usd_path is provided and the file exists, the GLB conversion step is skipped and Blender imports the USD stage directly (--usd-path flag). @@ -125,7 +127,7 @@ def render_still( glb_size_bytes = 0 else: if not glb_path.exists() or glb_path.stat().st_size == 0: - _glb_from_step(step_path, glb_path) + _glb_from_step(step_path, glb_path, tessellation_engine) else: logger.info("GLB local hit: %s (%d KB)", glb_path.name, glb_path.stat().st_size // 1024) glb_size_bytes = glb_path.stat().st_size if glb_path.exists() else 0 @@ -310,6 +312,7 @@ def render_turntable_to_file( rotation_y: float = 0.0, rotation_z: float = 0.0, usd_path: "Path | None" = None, + tessellation_engine: str = "occ", ) -> dict: """Render a turntable animation: STEP → STL → N frames (Blender) → mp4 (ffmpeg). @@ -349,7 +352,7 @@ def render_turntable_to_file( logger.info("[render_blender] turntable using USD path: %s", usd_path) else: if not glb_path.exists() or glb_path.stat().st_size == 0: - _glb_from_step(step_path, glb_path) + _glb_from_step(step_path, glb_path, tessellation_engine) else: logger.info("GLB local hit: %s (%d KB)", glb_path.name, glb_path.stat().st_size // 1024) glb_duration_s = round(time.monotonic() - t_glb, 2) diff --git a/backend/app/services/step_processor.py b/backend/app/services/step_processor.py index 363bbcd..597bbf3 100644 --- a/backend/app/services/step_processor.py +++ b/backend/app/services/step_processor.py @@ -79,7 +79,7 @@ def match_cad_to_items( return matched -def extract_cad_metadata(cad_file_id: str) -> None: +def extract_cad_metadata(cad_file_id: str, tenant_id: str | None = None) -> None: """ Fast metadata extraction for a CAD file (no thumbnail generation). 
@@ -94,9 +94,11 @@ def extract_cad_metadata(cad_file_id: str) -> None: from sqlalchemy import create_engine from sqlalchemy.orm import Session from app.models.cad_file import CadFile, ProcessingStatus + from app.core.tenant_context import set_tenant_context_sync engine = create_engine(settings.database_url_sync) with Session(engine) as session: + set_tenant_context_sync(session, tenant_id) cad_file = session.get(CadFile, uuid.UUID(cad_file_id)) if not cad_file: logger.error(f"CAD file not found: {cad_file_id}") @@ -452,6 +454,7 @@ def _get_all_settings() -> dict[str, str]: "thumbnail_format": "jpg", "blender_smooth_angle": "30", "cycles_device": "auto", + "tessellation_engine": "occ", } try: from app.config import settings as app_settings @@ -533,6 +536,7 @@ def _generate_thumbnail( samples=samples, smooth_angle=int(settings["blender_smooth_angle"]), cycles_device=settings["cycles_device"], + tessellation_engine=settings["tessellation_engine"], ) rendered_png = tmp_png if tmp_png.exists() else None except Exception as exc: @@ -642,6 +646,7 @@ def render_to_file( denoising_use_gpu: str = "", order_line_id: str | None = None, usd_path: "Path | None" = None, + tessellation_engine: str | None = None, ) -> tuple[bool, dict]: """Render a STEP file to a specific output path using current system settings. 
@@ -777,6 +782,7 @@ def render_to_file( denoising_use_gpu=denoising_use_gpu, log_callback=_log_cb, usd_path=usd_path, + tessellation_engine=tessellation_engine or settings["tessellation_engine"], ) rendered_png = tmp_png if tmp_png.exists() else None except Exception as exc: diff --git a/backend/app/tasks/beat_tasks.py b/backend/app/tasks/beat_tasks.py index abaf670..311f138 100644 --- a/backend/app/tasks/beat_tasks.py +++ b/backend/app/tasks/beat_tasks.py @@ -73,7 +73,7 @@ def broadcast_queue_status() -> None: r = sync_redis.from_url(settings.redis_url, decode_responses=True) depths = { "step_processing": r.llen("step_processing"), - "thumbnail_rendering": r.llen("thumbnail_rendering"), + "asset_pipeline": r.llen("asset_pipeline"), } event = {"type": "queue_update", "depths": depths} r.publish("__broadcast__", json.dumps(event)) diff --git a/backend/app/tasks/celery_app.py b/backend/app/tasks/celery_app.py index 6707053..0877dd4 100644 --- a/backend/app/tasks/celery_app.py +++ b/backend/app/tasks/celery_app.py @@ -30,7 +30,7 @@ celery_app.conf.update( enable_utc=True, task_routes={ "app.domains.pipeline.tasks.*": {"queue": "step_processing"}, - "app.domains.rendering.tasks.*": {"queue": "thumbnail_rendering"}, + "app.domains.rendering.tasks.*": {"queue": "asset_pipeline"}, "app.tasks.beat_tasks.*": {"queue": "step_processing"}, "app.tasks.ai_tasks.*": {"queue": "ai_validation"}, # Legacy task names (shim) — keep until old queued tasks drain diff --git a/backend/app/tasks/gpu_tasks.py b/backend/app/tasks/gpu_tasks.py index 53e4eaf..394512d 100644 --- a/backend/app/tasks/gpu_tasks.py +++ b/backend/app/tasks/gpu_tasks.py @@ -5,7 +5,7 @@ from app.tasks.celery_app import celery_app logger = logging.getLogger(__name__) -@celery_app.task(name="app.tasks.gpu_tasks.probe_gpu", queue="thumbnail_rendering") +@celery_app.task(name="app.tasks.gpu_tasks.probe_gpu", queue="asset_pipeline") def probe_gpu() -> dict: """Run Blender GPU probe on the render-worker. 
Stores result in system_settings.""" import subprocess diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py index 36f2258..d894f5e 100644 --- a/backend/tests/conftest.py +++ b/backend/tests/conftest.py @@ -230,7 +230,6 @@ def mock_celery_tasks(monkeypatch): "app.domains.materials.tasks.refresh_asset_library_catalog", "app.tasks.step_tasks.process_step_file", "app.tasks.step_tasks.render_step_thumbnail", - "app.tasks.step_tasks.generate_stl_cache", "app.domains.imports.tasks.validate_excel_import", "app.domains.rendering.tasks.render_still_task", "app.domains.rendering.tasks.render_turntable_task", diff --git a/backend/tests/domains/test_rendering_service.py b/backend/tests/domains/test_rendering_service.py index fce5aac..2bd6e47 100644 --- a/backend/tests/domains/test_rendering_service.py +++ b/backend/tests/domains/test_rendering_service.py @@ -99,14 +99,14 @@ def test_generate_gltf_geometry_task_importable(): def test_render_order_line_still_task_importable(): from app.domains.rendering.tasks import render_order_line_still_task assert render_order_line_still_task.name == "app.domains.rendering.tasks.render_order_line_still_task" - assert render_order_line_still_task.queue == "thumbnail_rendering" + assert render_order_line_still_task.queue == "asset_pipeline" def test_export_gltf_for_order_line_task_importable(): from app.domains.rendering.tasks import export_gltf_for_order_line_task - assert export_gltf_for_order_line_task.queue == "thumbnail_rendering" + assert export_gltf_for_order_line_task.queue == "asset_pipeline" def test_export_blend_for_order_line_task_importable(): from app.domains.rendering.tasks import export_blend_for_order_line_task - assert export_blend_for_order_line_task.queue == "thumbnail_rendering" + assert export_blend_for_order_line_task.queue == "asset_pipeline" diff --git a/docker-compose.yml b/docker-compose.yml index 21aa968..e8df585 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -121,7 +121,7 @@ services: 
dockerfile: render-worker/Dockerfile args: - BLENDER_VERSION=${BLENDER_VERSION:-5.0.1} - command: bash -c "python3 /check_version.py && celery -A app.tasks.celery_app worker --loglevel=info -Q thumbnail_rendering --autoscale=1,1 --concurrency=1" + command: bash -c "python3 /check_version.py && celery -A app.tasks.celery_app worker --loglevel=info -Q asset_pipeline --autoscale=1,1 --concurrency=1" environment: - POSTGRES_DB=${POSTGRES_DB:-schaeffler} - POSTGRES_USER=${POSTGRES_USER:-schaeffler} diff --git a/frontend/src/__tests__/pages/WorkerActivity.test.tsx b/frontend/src/__tests__/pages/WorkerActivity.test.tsx index 619072f..6c7d57b 100644 --- a/frontend/src/__tests__/pages/WorkerActivity.test.tsx +++ b/frontend/src/__tests__/pages/WorkerActivity.test.tsx @@ -25,18 +25,18 @@ describe('worker API types', () => { test('CeleryWorker interface shape', () => { const worker = { name: 'celery@worker1', - queues: ['thumbnail_rendering'], + queues: ['asset_pipeline'], active_task_count: 2, active_tasks: [{ name: 'render_still_task', id: 'abc' }], total_tasks_processed: { render_still_task: 42 }, } - expect(worker.queues).toContain('thumbnail_rendering') + expect(worker.queues).toContain('asset_pipeline') expect(worker.active_tasks).toHaveLength(1) }) test('QueueStatus interface shape', () => { const qs = { - queue_depths: { step_processing: 3, thumbnail_rendering: 0 }, + queue_depths: { step_processing: 3, asset_pipeline: 0 }, pending_count: 3, active: [], reserved: [], diff --git a/frontend/src/api/orders.ts b/frontend/src/api/orders.ts index 8b73596..cb7bf26 100644 --- a/frontend/src/api/orders.ts +++ b/frontend/src/api/orders.ts @@ -276,6 +276,18 @@ export async function generateLinesFromItems( return res.data } +export async function rejectOrderLine( + orderId: string, + lineId: string, + reason: string, +): Promise<{ rejected: boolean; line_id: string; reason: string }> { + const res = await api.post<{ rejected: boolean; line_id: string; reason: string }>( + 
`/orders/${orderId}/lines/${lineId}/reject`, + { reason }, + ) + return res.data +} + export async function rejectOrder(orderId: string, reason: string, notifyClient: boolean = true): Promise { const res = await api.post(`/orders/${orderId}/reject`, { reason, diff --git a/frontend/src/components/cad/ThreeDViewer.tsx b/frontend/src/components/cad/ThreeDViewer.tsx index 9a67b61..f1e810d 100644 --- a/frontend/src/components/cad/ThreeDViewer.tsx +++ b/frontend/src/components/cad/ThreeDViewer.tsx @@ -537,6 +537,17 @@ export default function ThreeDViewer({ const map = glbExtras.partKeyMap as Record | undefined if (map && Object.keys(map).length > 0) { setPartKeyMap(map) + // Task 2: Stamp userData.partKey on every mesh (fallback for meshes whose + // GLB node extras were not populated — e.g. files generated before Task 1). + // For new GLBs, Three.js already set userData.partKey from node extras; + // the guard `if (obj.userData.partKey) return` avoids overwriting it. + sceneRef.current.traverse((obj) => { + if (!(obj instanceof THREE.Mesh)) return + if (obj.userData.partKey) return // already set by GLB node extras + const normalized = normalizeMeshName((obj.userData?.name as string) || obj.name) + const pk = map[normalized] ?? normalized + if (pk) obj.userData.partKey = pk + }) } const names = new Set() @@ -679,8 +690,11 @@ export default function ThreeDViewer({ e.stopPropagation() const mesh = e.object as THREE.Mesh const raw = (mesh?.userData?.name as string) || mesh?.name || (mesh?.parent?.userData?.name as string) || mesh?.parent?.name || '' - const name = normalizeMeshName(raw) || 'Part' - setHoverInfo({ name, x: e.nativeEvent.clientX, y: e.nativeEvent.clientY }) + const normalized = normalizeMeshName(raw) || 'Part' + // Task 3: prefer userData.partKey (set by GLB node extras or Task 2 stamp) over + // raw normalized name so tooltip shows canonical slug (e.g. "ring_outer") not OCC name + const displayName = (mesh?.userData?.partKey as string | undefined) ?? 
resolvePartKey(normalized) + setHoverInfo({ name: displayName, x: e.nativeEvent.clientX, y: e.nativeEvent.clientY }) // Restore previous hovered mesh (array-safe) if (hoveredMeshRef.current && hoveredMeshRef.current !== mesh) { @@ -703,7 +717,7 @@ export default function ThreeDViewer({ const mat = m as THREE.MeshStandardMaterial if (mat && 'emissive' in mat) { mat.emissive.set(0x333333); mat.emissiveIntensity = 0.5 } }) - }, [showUnassigned]) + }, [showUnassigned, resolvePartKey]) const handlePointerOut = useCallback(() => { setHoverInfo(null) diff --git a/frontend/src/components/layout/NotificationCenter.tsx b/frontend/src/components/layout/NotificationCenter.tsx index 9469571..72e4442 100644 --- a/frontend/src/components/layout/NotificationCenter.tsx +++ b/frontend/src/components/layout/NotificationCenter.tsx @@ -59,6 +59,41 @@ const ACTION_CONFIG: Record 5 * 60 * 1000) break + batchIds.push(m.id) + if (m.action === 'render.failed') failed++; else done++ + j++ + } + if (j - i >= 2) { + result.push({ kind: 'batch', count: done, failed, entityId: n.entity_id ?? null, latest: n, ids: batchIds }) + i = j; continue + } + } + result.push({ kind: 'single', item: n }) + i++ + } + return result +} + function relativeTime(ts: string): string { const diff = Date.now() - new Date(ts).getTime() const seconds = Math.floor(diff / 1000) @@ -182,7 +217,44 @@ export default function NotificationCenter() { {!data?.items.length && (
No notifications
)} - {data?.items.map((n) => { + {data?.items && groupNotifications(data.items).map((group) => { + if (group.kind === 'batch') { + const { count, failed, entityId, latest, ids } = group + const BatchIcon = failed > 0 ? AlertTriangle : CheckCircle + const batchColor = failed > 0 ? 'text-red-500' : 'text-status-success-text' + const batchLabel = `Render batch: ${count} done${failed > 0 ? `, ${failed} failed` : ''}` + return ( + + ) + } + const n = group.item const cfg = ACTION_CONFIG[n.action] ?? { icon: Bell, label: () => n.action, diff --git a/frontend/src/help/helpTexts.ts b/frontend/src/help/helpTexts.ts index a4b8059..6a06ba3 100644 --- a/frontend/src/help/helpTexts.ts +++ b/frontend/src/help/helpTexts.ts @@ -37,7 +37,7 @@ export const HELP_TEXTS: Record = { }, 'action.regenerate_thumbnails': { title: 'Regenerate All Thumbnails', - body: 'Re-renders thumbnails for all STEP files using current renderer settings. Queues every file on the thumbnail_rendering worker.', + body: 'Re-renders thumbnails for all STEP files using current renderer settings. Queues every file on the asset_pipeline worker.', warning: 'This queues a large number of tasks. Only run during off-peak hours.', }, 'action.process_unprocessed': { diff --git a/frontend/src/pages/Admin.tsx b/frontend/src/pages/Admin.tsx index d1e0428..acd0802 100644 --- a/frontend/src/pages/Admin.tsx +++ b/frontend/src/pages/Admin.tsx @@ -138,6 +138,7 @@ export default function AdminPage() { const [tessellationDraft, setTessellationDraft] = useState>({}) const tess = { ...settings, ...tessellationDraft } as Settings + const [showAdvancedTess, setShowAdvancedTess] = useState(false) const { data: rendererStatus, refetch: refetchStatus } = useQuery({ queryKey: ['renderer-status'], @@ -1332,6 +1333,7 @@ export default function AdminPage() { /> Apply Laplacian smoothing on export +

Smooths surface normals during GLB export for a less faceted look in the 3D viewer.

@@ -1348,8 +1350,10 @@ export default function AdminPage() { max="10000" value={viewer3d.viewer_max_distance ?? 50} onChange={e => setViewerDraft(d => ({ ...d, viewer_max_distance: parseFloat(e.target.value) }))} + title="Maximum camera distance from the model in the 3D viewer (in metres after mm→m conversion). Default: 50" className="input w-full" /> +

Maximum camera pull-back distance in the 3D viewer (metres).

@@ -1376,11 +1382,13 @@ export default function AdminPage() { +

Material data embedded in exported GLB files.

@@ -1525,10 +1537,19 @@ export default function AdminPage() { + + {/* Manual inputs */} + {showAdvancedTess && (<>
-

Scene / Viewer

+

Scene (USD Master)

rad
-

Used for the 3D viewer (canonical scene). Smaller = smoother surfaces.

+

Used for the USD master + 3D viewer GLB (canonical scene). Smaller = smoother surfaces.

Render output

@@ -1588,6 +1609,7 @@ export default function AdminPage() {

Used for final render output. Smaller = smoother surfaces, larger file sizes.

+ )}
) : items.length === 0 ? ( -
- -

No media assets found.

-

- Renders will appear here once orders are completed. Try adjusting your filters. -

-
+ (() => { + const hasActiveFilters = !!(q || assetType || categoryKey || renderStatus) + return ( +
+ +

+ {hasActiveFilters ? 'No assets match your filters.' : 'No assets yet — upload a STEP file to get started.'} +

+ {hasActiveFilters && ( +

+ Try adjusting your search or filter settings. +

+ )} +
+ ) + })() ) : (
{items.map(asset => ( diff --git a/frontend/src/pages/OrderDetail.tsx b/frontend/src/pages/OrderDetail.tsx index 8da921c..23c7173 100644 --- a/frontend/src/pages/OrderDetail.tsx +++ b/frontend/src/pages/OrderDetail.tsx @@ -1,6 +1,7 @@ import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' import { useParams, useNavigate, Link } from 'react-router-dom' import { useState, useMemo, Fragment } from 'react' +import { createPortal } from 'react-dom' import { ArrowLeft, Send, Trash2, FileBox, AlertTriangle, CheckCircle2, Image as ImageIcon, Unlink, @@ -11,7 +12,7 @@ import { XCircle, RotateCw, Info, } from 'lucide-react' import { toast } from 'sonner' -import { getOrder, submitOrder, deleteOrder, unlinkCadFile, regenerateItemThumbnail, patchOrderItem, removeOrderLine, dispatchRenders, cancelLineRender, cancelOrderRenders, splitMissingStep, generateLinesFromItems, downloadOrderRenders, rejectOrder, resubmitOrder } from '../api/orders' +import { getOrder, submitOrder, deleteOrder, unlinkCadFile, regenerateItemThumbnail, patchOrderItem, removeOrderLine, dispatchRenders, cancelLineRender, cancelOrderRenders, splitMissingStep, generateLinesFromItems, downloadOrderRenders, rejectOrder, resubmitOrder, rejectOrderLine } from '../api/orders' import type { OrderItem, OrderLine } from '../api/orders' import { listOutputTypes } from '../api/outputTypes' import type { OutputType } from '../api/outputTypes' @@ -827,6 +828,8 @@ function OrderLineRow({ }) { const qc = useQueryClient() const [showInfo, setShowInfo] = useState(false) + const [rejectLineModalOpen, setRejectLineModalOpen] = useState(false) + const [rejectLineReason, setRejectLineReason] = useState('') const removeMut = useMutation({ mutationFn: () => removeOrderLine(orderId, line.id), @@ -843,7 +846,19 @@ function OrderLineRow({ onError: (e: any) => toast.error(e.response?.data?.detail || 'Cancel failed'), }) + const rejectLineMut = useMutation({ + mutationFn: () => rejectOrderLine(orderId, line.id, 
rejectLineReason), + onSuccess: () => { + toast.success('Line rejected') + setRejectLineModalOpen(false) + setRejectLineReason('') + qc.invalidateQueries({ queryKey: ['order', orderId] }) + }, + onError: (e: any) => toast.error(e.response?.data?.detail || 'Reject failed'), + }) + const canCancel = isPrivileged && (line.render_status === 'processing' || line.render_status === 'pending') && line.output_type_id + const canRejectLine = isPrivileged && line.item_status !== 'rejected' const renderStatusColor: Record = { pending: 'bg-surface-muted text-content-muted', @@ -1003,7 +1018,21 @@ function OrderLineRow({ {/* Item Status */} - +
+ + {canRejectLine && ( + + )} +
{/* Remove (draft only) */} @@ -1027,6 +1056,61 @@ function OrderLineRow({ renderStartedAt={line.render_started_at} renderCompletedAt={line.render_completed_at} /> + + {rejectLineModalOpen && createPortal( +
{ setRejectLineModalOpen(false); setRejectLineReason('') }} + > +
e.stopPropagation()} + > +
+ +

Reject this item?

+ +
+
+

+ Optionally provide a reason for rejecting this line. +

+