feat: initial commit

2026-03-05 22:12:38 +01:00
commit bce762a783
380 changed files with 51955 additions and 0 deletions
+110
@@ -0,0 +1,110 @@
"""
Azure OpenAI GPT-4o Vision validator for thumbnail orientation.
"""
import base64
import logging
import uuid
from pathlib import Path
logger = logging.getLogger(__name__)
VALIDATION_PROMPT = """You are a quality control expert for Schaeffler bearing product catalog images.
Analyze this thumbnail of a bearing/mechanical component and evaluate:
1. Is the component orientation correct for a standard product catalog? (typically isometric view, 30° elevation, 45° rotation)
2. Are the key features visible? (rolling elements, rings, cage if present)
3. Does it match standard Schaeffler catalog angle conventions?
Respond in JSON with exactly these fields:
{
"passed": true/false,
"confidence": 0.0-1.0,
"feedback": "Brief explanation",
"suggested_rotation": "Description of recommended adjustment if needed"
}"""
def validate_thumbnail(order_item_id: str) -> dict:
"""
Validate thumbnail orientation using Azure GPT-4o Vision.
Updates the order_item AI validation fields in DB.
"""
from app.config import settings
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from app.models.order_item import OrderItem, AIValidationStatus
engine = create_engine(settings.database_url_sync)
with Session(engine) as session:
item = session.get(OrderItem, uuid.UUID(order_item_id))
if not item:
logger.error(f"OrderItem not found: {order_item_id}")
return {}
item.ai_validation_status = AIValidationStatus.pending
session.commit()
try:
result = _call_azure_vision(item.thumbnail_path, settings)
item.ai_validation_status = AIValidationStatus.completed
item.ai_validation_result = result
except Exception as exc:
logger.error(f"AI validation failed for {order_item_id}: {exc}")
item.ai_validation_status = AIValidationStatus.failed
item.ai_validation_result = {"error": str(exc)}
result = {}
session.commit()
return result
def _call_azure_vision(thumbnail_path: str | None, settings) -> dict:
"""Call Azure OpenAI GPT-4o with a base64-encoded thumbnail."""
import json
if not settings.azure_openai_api_key or not settings.azure_openai_endpoint:
raise ValueError("Azure OpenAI credentials not configured")
if not thumbnail_path or not Path(thumbnail_path).exists():
raise FileNotFoundError(f"Thumbnail not found: {thumbnail_path}")
try:
from openai import AzureOpenAI
client = AzureOpenAI(
api_key=settings.azure_openai_api_key,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
)
with open(thumbnail_path, "rb") as f:
image_b64 = base64.b64encode(f.read()).decode("utf-8")
response = client.chat.completions.create(
model=settings.azure_openai_deployment,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": VALIDATION_PROMPT},
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{image_b64}"},
},
],
}
],
max_tokens=500,
temperature=0.1,
)
content = response.choices[0].message.content or ""
# Extract JSON from response
start = content.find("{")
end = content.rfind("}") + 1
if start >= 0 and end > start:
return json.loads(content[start:end])
return {"passed": False, "confidence": 0.0, "feedback": content, "suggested_rotation": ""}
except Exception as exc:
raise RuntimeError(f"Azure OpenAI call failed: {exc}") from exc
+177
@@ -0,0 +1,177 @@
"""Scale Flamenco worker containers via the Docker socket.
Uses the Docker Python SDK (docker>=6.1.0) to list, start, and stop containers.
Requires /var/run/docker.sock to be mounted into the backend container.
"""
import os
import logging
log = logging.getLogger(__name__)
COMPOSE_PROJECT = os.getenv("COMPOSE_PROJECT_NAME", "schaefflerautomat")
SERVICE_NAME = "flamenco-worker"
def _get_client():
import docker
return docker.from_env()
def get_worker_containers(client=None):
"""Return all flamenco-worker containers (running + stopped) sorted by name."""
if client is None:
client = _get_client()
return sorted(
client.containers.list(
all=True,
filters={
"label": [
f"com.docker.compose.project={COMPOSE_PROJECT}",
f"com.docker.compose.service={SERVICE_NAME}",
]
},
),
key=lambda c: c.name,
)
def get_running_worker_count(client=None) -> int:
"""Return how many flamenco-worker containers are currently running."""
try:
if client is None:
client = _get_client()
containers = get_worker_containers(client)
return sum(1 for c in containers if c.status == "running")
except Exception as exc:
log.warning("docker_scaler: could not read worker count: %s", exc)
return -1
def scale_workers(target: int) -> dict:
"""Scale flamenco-worker containers to *target* count.
Returns a dict with keys:
        previous: containers running before
        current:  containers running after
        delta:    change (negative = stopped, positive = started)
        message:  human-readable summary
"""
import docker
from docker.types import Mount
client = _get_client()
all_workers = get_worker_containers(client)
running = [c for c in all_workers if c.status == "running"]
previous = len(running)
if target == previous:
return {"previous": previous, "current": previous, "delta": 0,
"message": f"Already at {previous} worker(s) — no change"}
# ── Scale down ────────────────────────────────────────────────────────────
if target < previous:
# Stop highest-numbered containers first to minimise disruption
to_stop = sorted(running, key=lambda c: c.name, reverse=True)[: previous - target]
for c in to_stop:
log.info("docker_scaler: stopping %s", c.name)
c.stop(timeout=20)
c.remove()
return {
"previous": previous,
"current": target,
"delta": target - previous,
"message": f"Stopped {len(to_stop)} worker(s): {[c.name for c in to_stop]}",
}
# ── Scale up ──────────────────────────────────────────────────────────────
template = running[0] if running else (all_workers[0] if all_workers else None)
if template is None:
raise RuntimeError(
"No existing flamenco-worker container found to clone configuration from. "
"Ensure at least one worker container exists (even if stopped)."
)
attrs = template.attrs
image = attrs["Config"]["Image"]
env = attrs["Config"].get("Env") or []
# Reconstruct mounts from the template container
mounts = []
for m in (attrs.get("Mounts") or []):
mount_type = m.get("Type", "bind")
source = m.get("Name", "") if mount_type == "volume" else m.get("Source", "")
mounts.append(
Mount(
target=m["Destination"],
source=source,
type=mount_type,
read_only=not m.get("RW", True),
)
)
# Reconstruct GPU device requests (nvidia)
device_requests = None
raw_dr = (attrs.get("HostConfig") or {}).get("DeviceRequests") or []
if raw_dr:
device_requests = []
for dr in raw_dr:
device_requests.append(
docker.types.DeviceRequest(
driver=dr.get("Driver", ""),
count=dr.get("Count", -1),
device_ids=dr.get("DeviceIDs") or [],
capabilities=dr.get("Capabilities") or [],
options=dr.get("Options") or {},
)
)
# Network(s) the template is connected to
network_names = list(
(attrs.get("NetworkSettings") or {}).get("Networks", {}).keys()
)
restart_policy_name = (
(attrs.get("HostConfig") or {})
.get("RestartPolicy", {})
.get("Name", "unless-stopped")
) or "unless-stopped"
started = []
for i in range(previous + 1, target + 1):
new_name = f"{COMPOSE_PROJECT}-{SERVICE_NAME}-{i}"
labels = {
"com.docker.compose.project": COMPOSE_PROJECT,
"com.docker.compose.service": SERVICE_NAME,
"com.docker.compose.container-number": str(i),
}
log.info("docker_scaler: creating %s from image %s", new_name, image)
container = client.containers.create(
image=image,
name=new_name,
environment=env,
labels=labels,
mounts=mounts,
restart_policy={"Name": restart_policy_name},
device_requests=device_requests,
)
for net_name in network_names:
try:
net = client.networks.get(net_name)
net.connect(container)
log.info("docker_scaler: connected %s to network %s", new_name, net_name)
except Exception as exc:
log.warning("docker_scaler: could not connect to network %s: %s", net_name, exc)
container.start()
started.append(new_name)
log.info("docker_scaler: started %s", new_name)
return {
"previous": previous,
"current": target,
"delta": target - previous,
"message": f"Started {len(started)} new worker(s): {started}",
}
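# Illustrative only: how a maintenance endpoint might drive the scaler.
# The target count and threshold here are invented for the example.
def example_scale_to(target: int) -> None:
    """Hedged usage sketch for scale_workers (illustration, not API)."""
    current = get_running_worker_count()
    if current < 0:
        # -1 signals the Docker socket could not be read
        log.warning("cannot scale: Docker socket unreadable")
        return
    if current != target:
        outcome = scale_workers(target)
        log.info("scaling: %s", outcome["message"])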
+178
@@ -0,0 +1,178 @@
"""Excel import service — maps parsed rows to Product library."""
from dataclasses import dataclass, field
from sqlalchemy.ext.asyncio import AsyncSession
from app.services.product_service import (
lookup_or_create_product,
lookup_product,
)
@dataclass
class PreviewResult:
"""Read-only preview: annotates rows without creating anything."""
rows: list[dict] = field(default_factory=list)
existing_product_count: int = 0
new_product_count: int = 0
no_pim_id_count: int = 0
has_step_count: int = 0
no_step_count: int = 0
duplicate_count: int = 0
warnings: list[str] = field(default_factory=list)
@dataclass
class ImportResult:
rows: list[dict] = field(default_factory=list)
matched_count: int = 0
created_count: int = 0
no_pim_id_count: int = 0
duplicate_baureihe_count: int = 0
warnings: list[str] = field(default_factory=list)
async def import_excel_to_products(
db: AsyncSession,
parsed_rows: list[dict],
source_excel: str,
category_key: str | None = None,
) -> ImportResult:
"""For each row, look up or create a Product.
Grouping strategy:
1. Primary key: produkt_baureihe (lowercased)
2. Fallback: pim_id (backward compat)
Annotates each row dict with product_id, product_created.
"""
result = ImportResult()
# Track seen produkt_baureihe values to skip duplicates
seen_baureihe: dict[str, str] = {} # lower(baureihe) → first product_id
for row in parsed_rows:
pim_id = row.get("pim_id")
produkt_baureihe = row.get("produkt_baureihe")
row_category = row.get("category_key") or category_key
# Need at least one identifier
if not pim_id and not produkt_baureihe:
row["product_id"] = None
row["product_created"] = False
result.no_pim_id_count += 1
continue
fields = {
"name": produkt_baureihe or row.get("gewaehltes_produkt"),
"category_key": row_category,
"ebene1": row.get("ebene1"),
"ebene2": row.get("ebene2"),
"baureihe": row.get("baureihe"),
"produkt_baureihe": produkt_baureihe,
"lagertyp": row.get("lagertyp"),
"name_cad_modell": row.get("name_cad_modell"),
"gewuenschte_bildnummer": row.get("gewuenschte_bildnummer"),
"medias_rendering": row.get("medias_rendering"),
"components": row.get("components", []),
"arbeitspaket": row.get("arbeitspaket"),
"source_excel": source_excel,
}
product, was_created = await lookup_or_create_product(db, pim_id, fields)
row["product_id"] = str(product.id)
row["product_created"] = was_created
# Carry forward any STEP file already linked to this product
row["product_cad_file_id"] = str(product.cad_file_id) if product.cad_file_id else None
if was_created:
result.created_count += 1
else:
result.matched_count += 1
# Track duplicate baureihe
if produkt_baureihe:
bkey = produkt_baureihe.lower()
if bkey in seen_baureihe:
result.duplicate_baureihe_count += 1
else:
seen_baureihe[bkey] = str(product.id)
result.rows = parsed_rows
# NOTE: caller is responsible for db.commit() — keeps the transaction
# composable with order + line creation in the finalize endpoint.
return result
async def preview_excel_rows(
db: AsyncSession,
parsed_rows: list[dict],
category_key: str | None = None,
) -> PreviewResult:
"""Read-only preview: annotates rows with product_exists / product_id / duplicate flags.
Uses lookup_product (read-only) to check what already exists in the DB.
New-vs-existing is determined per unique produkt_baureihe (or pim_id fallback).
Duplicate rows (same produkt_baureihe seen more than once in this batch) are
annotated with is_duplicate=True and duplicate_of_row=<first_row_index>.
"""
result = PreviewResult()
# Track unique identifiers we've already resolved in this batch
# key = lower(baureihe) or pim_id → (product_exists, product_id_str | None, has_step, first_row_index)
seen: dict[str, tuple[bool, str | None, bool, int]] = {}
for row in parsed_rows:
pim_id = row.get("pim_id")
produkt_baureihe = row.get("produkt_baureihe")
row_index = row.get("row_index", 0)
row["category_key"] = row.get("category_key") or category_key
# Must have at least one identifier
if not pim_id and not produkt_baureihe:
row["product_exists"] = False
row["product_id"] = None
row["has_step"] = False
row["is_duplicate"] = False
result.no_pim_id_count += 1
continue
# Build a cache key
cache_key = (produkt_baureihe or "").lower() or pim_id or ""
if cache_key in seen:
exists, pid, has_step, first_row = seen[cache_key]
row["product_exists"] = exists
row["product_id"] = pid
row["has_step"] = has_step
row["is_duplicate"] = True
row["duplicate_of_row"] = first_row
result.duplicate_count += 1
continue
product = await lookup_product(db, pim_id, produkt_baureihe)
row["is_duplicate"] = False
if product is not None:
has_step = product.cad_file_id is not None
row["product_exists"] = True
row["product_id"] = str(product.id)
row["has_step"] = has_step
seen[cache_key] = (True, str(product.id), has_step, row_index)
result.existing_product_count += 1
if has_step:
result.has_step_count += 1
else:
result.no_step_count += 1
else:
row["product_exists"] = False
row["product_id"] = None
row["has_step"] = False
seen[cache_key] = (False, None, False, row_index)
result.new_product_count += 1
result.no_step_count += 1
result.rows = parsed_rows
if result.duplicate_count > 0:
result.warnings.append(
f"{result.duplicate_count} duplicate Produkt-Baureihe row(s) detected — "
"these are pre-unchecked. Only one row per product will be imported."
)
return result
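# Illustrative only: the intended two-phase flow. The endpoint wiring and
# warning handling are assumptions drawn from the NOTE above about commits.
async def example_preview_then_import(
    db: AsyncSession, parsed_rows: list[dict], source_excel: str
) -> ImportResult:
    """Hedged usage sketch: preview read-only first, then import and commit."""
    preview = await preview_excel_rows(db, parsed_rows)
    for warning in preview.warnings:
        # Surface duplicate/missing-identifier warnings to the UI first.
        ...
    result = await import_excel_to_products(db, parsed_rows, source_excel)
    await db.commit()  # caller commits, per the NOTE in import_excel_to_products
    return result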
+505
@@ -0,0 +1,505 @@
"""
Excel parser for Schaeffler CAD order lists.
Supports two formats:
Old format (per-category files):
Row 1-2: Instruction text (skip)
Row N: Column headers — detected as the first row containing "Ebene1"
Col 0 (A): Ebene1
Col 1 (B): Ebene2
...
Col 11+ : Component pairs alternating (part_name, material)
New format (unified file — TestScope_final layout):
Row 1: Column headers (no instruction rows)
Col 0 (A): Arbeitspaket
Col 1 (B): Ebene1
Col 2 (C): Ebene2
...
Col 12+ : Component pairs
Detection is header-driven: we find "Ebene1" in any column within the first 5 rows
and build a dynamic column_map from that header row.
"""
from __future__ import annotations
import logging
import re
from collections import Counter
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any
import openpyxl
logger = logging.getLogger(__name__)
# ---------------------------------------------------------------------------
# Category detection map: substring in col0 or col2 → category_key
# Priority order matters: more specific entries first.
# ---------------------------------------------------------------------------
CATEGORY_MAP: dict[str, str] = {
# Linear / Anschlagplatten (check Ebene1 = "Linearsysteme")
"endplatten": "Anschlagplatten",
"anschlagplatten": "Anschlagplatten",
"laufrollenführungen": "Anschlagplatten",
"linearsysteme": "Linear_schiene", # Ebene1 value
"profilschienenführungen": "Linear_schiene",
"rollenumlaufeinheit": "Linear_schiene",
"kugelumlaufeinheit": "Linear_schiene",
    # Bearings: most specific first
"zylinderrollenlager": "CRB",
"axial-zylinderrollenlager": "CRB",
"axial-schrägrollenlager": "CRB",
"axiallagerscheiben": "CRB",
"torb": "SRB_TORB",
"radial srb": "SRB_TORB",
"pendelrollenlager": "SRB_TORB",
"kegelrollenlager": "TRB",
"kugellager": "Kugellager",
"axial-rillenkugellager": "Kugellager",
"rillenkugellager": "Kugellager",
"schrägkugellager": "Kugellager",
"gleitlager": "Gleitlager",
"gelenklager": "Gleitlager",
"gleitbuchsen": "Gleitlager",
# Fallback for generic Rollenlager → TRB (only if nothing else matched)
"rollenlager": "TRB",
}
# ---------------------------------------------------------------------------
# Header name normalization map: normalized header text → field name
# Supports multiple alternative column header texts for each field.
# ---------------------------------------------------------------------------
HEADER_FIELD_MAP: dict[str, str] = {
"arbeitspaket": "arbeitspaket",
"ebene1": "ebene1",
"ebene2": "ebene2",
"baureihe": "baureihe",
"pim-id": "pim_id",
"pim-id (klasse)": "pim_id",
"produkt (baureihe)": "produkt_baureihe",
"produkt": "produkt_baureihe",
"gewähltes produkt": "gewaehltes_produkt",
"gewaehltes produkt": "gewaehltes_produkt",
"name cad-modell": "name_cad_modell",
"name cad modell": "name_cad_modell",
"gewünschte bildnummer": "gewuenschte_bildnummer",
"gewuenschte bildnummer": "gewuenschte_bildnummer",
"lagertyp": "lagertyp",
"medias-rendering": "medias_rendering",
"medias": "medias_rendering",
}
@dataclass
class ParsedComponent:
part_name: str | None
material: str | None
component_type: str | None
column_index: int
@dataclass
class ParsedRow:
row_index: int
ebene1: str | None = None
ebene2: str | None = None
baureihe: str | None = None
pim_id: str | None = None
produkt_baureihe: str | None = None
gewaehltes_produkt: str | None = None
name_cad_modell: str | None = None
gewuenschte_bildnummer: str | None = None
lagertyp: str | None = None
medias_rendering: bool | None = None
components: list[ParsedComponent] = field(default_factory=list)
category_key: str | None = None
arbeitspaket: str | None = None
@dataclass
class ParsedExcel:
filename: str
category_key: str | None
template_name: str | None
column_headers: list[str]
rows: list[ParsedRow]
warnings: list[str] = field(default_factory=list)
material_mappings: list[dict] = field(default_factory=list)
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _clean(value: Any) -> str | None:
"""Strip whitespace, return None for empty values."""
if value is None:
return None
s = str(value).strip()
return s if s else None
def _normalize_filename(name: str | None) -> str | None:
"""Lowercase and strip trailing spaces from filenames. Returns None for empty strings."""
if name is None:
return None
stripped = name.strip()
if not stripped:
return None
return stripped.lower()
def _to_bool(value: Any) -> bool | None:
"""Convert Excel 1/0, 'ja'/'nein', True/False to Python bool."""
if value is None:
return None
if isinstance(value, bool):
return value
s = str(value).strip().lower()
if s in ("1", "true", "ja", "yes", "x"):
return True
if s in ("0", "false", "nein", "no", ""):
return False
return None
def _normalize_header(text: str) -> str:
"""Normalize a header cell value for matching."""
    # Normalize en/em dashes to an ASCII hyphen (written as escapes so the
    # characters cannot be lost to encoding again).
    return text.strip().lower().replace("_", " ").replace("\u2013", "-").replace("\u2014", "-")
def _detect_row_category(ebene1: str | None, ebene2: str | None, baureihe: str | None) -> str | None:
"""Detect category for a single row from its Ebene1, Ebene2, Baureihe values."""
candidates = []
for val in (ebene1, ebene2, baureihe):
if val:
candidates.append(val.lower())
for keyword, cat in CATEGORY_MAP.items():
for cand in candidates:
if keyword in cand:
return cat
return None
def _detect_category(rows: list[list[Any]], column_map: dict[str, int]) -> str | None:
"""
Detect category by scanning Ebene1, Ebene2, and Baureihe columns
across all data rows. Priority: more specific keywords first (as ordered in map).
"""
ebene1_col = column_map.get("ebene1")
ebene2_col = column_map.get("ebene2")
baureihe_col = column_map.get("baureihe")
candidates: list[str] = []
for row in rows:
for col in (ebene1_col, ebene2_col, baureihe_col):
if col is not None and col < len(row):
val = _clean(row[col])
if val:
candidates.append(val.lower())
for keyword, cat in CATEGORY_MAP.items():
for cand in candidates:
if keyword in cand:
return cat
return None
def _build_column_map(headers: list[str]) -> dict[str, int]:
"""Build field_name → column_index mapping from header row."""
column_map: dict[str, int] = {}
for idx, raw_header in enumerate(headers):
if not raw_header:
continue
normalized = _normalize_header(raw_header)
field_name = HEADER_FIELD_MAP.get(normalized)
if field_name and field_name not in column_map:
column_map[field_name] = idx
return column_map
def _find_component_start(column_map: dict[str, int]) -> int:
"""Find the first column after medias_rendering for component pairs."""
medias_col = column_map.get("medias_rendering")
if medias_col is not None:
return medias_col + 1
# Fallback: find the highest mapped column and start after it
if column_map:
return max(column_map.values()) + 1
return 11 # Legacy default
def _get_cell(row: list[Any], col: int | None) -> Any:
"""Safely get a cell value by column index."""
if col is None or col >= len(row):
return None
return row[col]
# ---------------------------------------------------------------------------
# Material mapping sheet parser
# ---------------------------------------------------------------------------
def _parse_material_mapping(wb) -> list[dict]:
"""Parse 'materialmapping' sheet if it exists.
Expected columns: display_name (col A), render_name (col B).
Returns list of {"display_name": str, "render_name": str}.
"""
# Case-insensitive sheet name search
target_name = None
for name in wb.sheetnames:
if name.lower().replace(" ", "").replace("_", "") == "materialmapping":
target_name = name
break
if target_name is None:
return []
ws = wb[target_name]
mappings = []
rows = list(ws.iter_rows(values_only=True))
if not rows:
return []
# Detect header row — look for "display" or "anzeige" in first few rows
data_start = 0
for i, row in enumerate(rows[:3]):
if row and any(
_clean(cell) and ("display" in str(cell).lower() or "anzeige" in str(cell).lower() or "material" in str(cell).lower())
for cell in row[:3]
if cell is not None
):
data_start = i + 1
break
for row in rows[data_start:]:
if len(row) < 2:
continue
display = _clean(row[0])
render = _clean(row[1])
if display and render:
mappings.append({"display_name": display, "render_name": render})
return mappings
# ---------------------------------------------------------------------------
# Main parser
# ---------------------------------------------------------------------------
def parse_excel(file_path: str | Path) -> ParsedExcel:
"""
Parse a Schaeffler order list Excel file.
Returns a ParsedExcel with all data rows extracted.
Header-driven: finds "Ebene1" in any column within first 5 rows,
then builds column map dynamically.
"""
file_path = Path(file_path)
warnings: list[str] = []
try:
wb = openpyxl.load_workbook(file_path, data_only=True)
except Exception as exc:
raise ValueError(f"Cannot open Excel file: {exc}") from exc
ws = wb.active
# Collect all rows as raw values
all_rows: list[list[Any]] = []
for row in ws.iter_rows(values_only=True):
all_rows.append(list(row))
if len(all_rows) < 2:
raise ValueError("Excel file has fewer than 2 rows cannot find header row")
# Auto-detect header row: first row (within first 5) where ANY column == "Ebene1"
header_idx: int | None = None
for i, row in enumerate(all_rows[:5]):
for col_idx, cell in enumerate(row):
val = _clean(cell)
if val and val.lower() == "ebene1":
header_idx = i
break
if header_idx is not None:
break
if header_idx is None:
# Fallback: assume row 3 (index 2) is headers
header_idx = 2
warnings.append(
"Could not auto-detect header row (expected 'Ebene1' in any column); "
"falling back to row 3 as headers"
)
if len(all_rows) <= header_idx:
raise ValueError("Excel file has no data rows after the detected header row")
headers_raw = list(all_rows[header_idx])
# Remove trailing None from headers
while headers_raw and headers_raw[-1] is None:
headers_raw.pop()
max_col = len(headers_raw)
column_headers = [_clean(h) or "" for h in headers_raw]
# Build dynamic column map from headers
column_map = _build_column_map(column_headers)
# Data rows start immediately after the header row
data_rows_raw = all_rows[header_idx + 1:]
# Detect file-level category (backward compat)
category_key = _detect_category(data_rows_raw, column_map)
template_name = _category_to_template_name(category_key)
# Determine component column start
comp_start = _find_component_start(column_map)
# Build component header info (paired columns from comp_start)
component_col_info: list[tuple[int, int, str]] = [] # (part_col, material_col, component_type)
col = comp_start
while col < max_col:
part_type = column_headers[col] if col < len(column_headers) else f"part_{col}"
mat_col = col + 1
component_col_info.append((col, mat_col, part_type))
col += 2
# Parse data rows
parsed_rows: list[ParsedRow] = []
for row_idx, raw_row in enumerate(data_rows_raw):
# Pad row to max_col
while len(raw_row) < max_col:
raw_row.append(None)
# Check if the row is completely empty (check all mapped columns)
check_end = min(comp_start, max_col)
if all(v is None or str(v).strip() == "" for v in raw_row[:check_end]):
continue
ebene1 = _clean(_get_cell(raw_row, column_map.get("ebene1")))
ebene2 = _clean(_get_cell(raw_row, column_map.get("ebene2")))
baureihe = _clean(_get_cell(raw_row, column_map.get("baureihe")))
pr = ParsedRow(
row_index=row_idx + header_idx + 2, # 1-based Excel row number
ebene1=ebene1,
ebene2=ebene2,
baureihe=baureihe,
pim_id=_clean(_get_cell(raw_row, column_map.get("pim_id"))),
produkt_baureihe=_clean(_get_cell(raw_row, column_map.get("produkt_baureihe"))),
gewaehltes_produkt=_clean(_get_cell(raw_row, column_map.get("gewaehltes_produkt"))),
name_cad_modell=_normalize_filename(
_clean(_get_cell(raw_row, column_map.get("name_cad_modell")))
),
gewuenschte_bildnummer=_clean(
_get_cell(raw_row, column_map.get("gewuenschte_bildnummer"))
),
lagertyp=_clean(_get_cell(raw_row, column_map.get("lagertyp"))),
medias_rendering=_to_bool(_get_cell(raw_row, column_map.get("medias_rendering"))),
arbeitspaket=_clean(_get_cell(raw_row, column_map.get("arbeitspaket"))),
category_key=_detect_row_category(ebene1, ebene2, baureihe),
)
# Parse component pairs
for part_col, mat_col, comp_type in component_col_info:
part_name = _normalize_filename(_clean(raw_row[part_col] if part_col < len(raw_row) else None))
material = _clean(raw_row[mat_col] if mat_col < len(raw_row) else None)
if part_name or material:
pr.components.append(
ParsedComponent(
part_name=part_name,
material=material,
component_type=comp_type,
column_index=part_col,
)
)
parsed_rows.append(pr)
if not parsed_rows:
warnings.append("No data rows found (all rows empty after header)")
# Determine file-level category from most common row category
if parsed_rows:
row_cats = [r.category_key for r in parsed_rows if r.category_key]
if row_cats:
most_common = Counter(row_cats).most_common(1)[0][0]
category_key = most_common
template_name = _category_to_template_name(category_key)
# Parse material mapping sheet if present
material_mappings = _parse_material_mapping(wb)
return ParsedExcel(
filename=file_path.name,
category_key=category_key,
template_name=template_name,
column_headers=column_headers,
rows=parsed_rows,
warnings=warnings,
material_mappings=material_mappings,
)
def _category_to_template_name(category_key: str | None) -> str | None:
names = {
"TRB": "Tapered Roller Bearings (TRB)",
"Kugellager": "Kugellager (Ball Bearings)",
"Gleitlager": "Gleitlager (Plain Bearings)",
"CRB": "Cylindrical Roller Bearings (CRB)",
"Linear_schiene": "Linear Guide Rails",
"Anschlagplatten": "End Plates (Anschlagplatten)",
"SRB_TORB": "Spherical / Toroidal Roller Bearings (SRB/TORB)",
}
return names.get(category_key) if category_key else None
# ---------------------------------------------------------------------------
# Serialisation helpers (convert dataclasses → plain dicts for API)
# ---------------------------------------------------------------------------
def parsed_row_to_dict(pr: ParsedRow) -> dict:
return {
"row_index": pr.row_index,
"ebene1": pr.ebene1,
"ebene2": pr.ebene2,
"baureihe": pr.baureihe,
"pim_id": pr.pim_id,
"produkt_baureihe": pr.produkt_baureihe,
"gewaehltes_produkt": pr.gewaehltes_produkt,
"name_cad_modell": pr.name_cad_modell,
"gewuenschte_bildnummer": pr.gewuenschte_bildnummer,
"lagertyp": pr.lagertyp,
"medias_rendering": pr.medias_rendering,
"category_key": pr.category_key,
"arbeitspaket": pr.arbeitspaket,
"components": [
{
"part_name": c.part_name,
"material": c.material,
"component_type": c.component_type,
"column_index": c.column_index,
}
for c in pr.components
],
}
def parsed_excel_to_dict(pe: ParsedExcel) -> dict:
return {
"filename": pe.filename,
"category_key": pe.category_key,
"template_name": pe.template_name,
"row_count": len(pe.rows),
"column_headers": pe.column_headers,
"rows": [parsed_row_to_dict(r) for r in pe.rows],
"warnings": pe.warnings,
"material_mappings": pe.material_mappings,
}
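# Illustrative only: a round-trip through the parser and serializer.
# The file path is a placeholder.
if __name__ == "__main__":
    import json
    parsed = parse_excel("orderlist.xlsx")  # placeholder path
    print(f"{parsed.filename}: {len(parsed.rows)} row(s), category={parsed.category_key}")
    for warning in parsed.warnings:
        print("warning:", warning)
    # Serialize for an API response, truncated for display
    print(json.dumps(parsed_excel_to_dict(parsed), ensure_ascii=False)[:400])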
+121
@@ -0,0 +1,121 @@
"""Flamenco Manager REST API client.
Uses httpx (sync) for compatibility with Celery tasks and FastAPI endpoints.
"""
import logging
from typing import Any
import httpx
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10.0
class FlamencoClient:
"""Thin wrapper around the Flamenco Manager v3 REST API."""
def __init__(self, manager_url: str):
self.base_url = manager_url.rstrip("/")
def _url(self, path: str) -> str:
return f"{self.base_url}{path}"
# ── Job management ──────────────────────────────────────────────────────
def submit_job(
self,
name: str,
job_type: str,
settings: dict[str, Any],
metadata: dict[str, str] | None = None,
priority: int = 50,
) -> dict:
"""Submit a new render job to Flamenco Manager.
Returns the created job dict (includes 'id').
"""
payload = {
"name": name,
"type": job_type,
"submitter_platform": "linux",
"settings": settings,
"metadata": metadata or {},
"priority": priority,
}
resp = httpx.post(
self._url("/api/v3/jobs"),
json=payload,
timeout=DEFAULT_TIMEOUT,
)
resp.raise_for_status()
return resp.json()
def get_job(self, job_id: str) -> dict:
"""Get job details by ID."""
resp = httpx.get(
self._url(f"/api/v3/jobs/{job_id}"),
timeout=DEFAULT_TIMEOUT,
)
resp.raise_for_status()
return resp.json()
def cancel_job(self, job_id: str) -> None:
"""Request cancellation of a job."""
resp = httpx.post(
self._url(f"/api/v3/jobs/{job_id}/setstatus"),
json={"status": "cancel-requested"},
timeout=DEFAULT_TIMEOUT,
)
resp.raise_for_status()
# ── Workers ─────────────────────────────────────────────────────────────
def list_workers(self) -> list[dict]:
"""List all registered workers."""
resp = httpx.get(
self._url("/api/v3/worker-mgt/workers"),
timeout=DEFAULT_TIMEOUT,
)
resp.raise_for_status()
data = resp.json()
return data.get("workers", data) if isinstance(data, dict) else data
# ── Farm status ─────────────────────────────────────────────────────────
def get_farm_status(self) -> dict:
"""Get overall farm status from the Manager."""
resp = httpx.get(
self._url("/api/v3/configuration"),
timeout=DEFAULT_TIMEOUT,
)
resp.raise_for_status()
return resp.json()
def health_check(self) -> dict:
"""Check if the Flamenco Manager is reachable and return version info."""
try:
resp = httpx.get(
self._url("/api/v3/version"),
timeout=5.0,
)
resp.raise_for_status()
data = resp.json()
return {
"available": True,
"version": data.get("version", "unknown"),
"name": data.get("name", "Flamenco"),
}
except Exception as exc:
logger.warning(f"Flamenco health check failed: {exc}")
return {
"available": False,
"version": None,
"name": None,
"error": str(exc)[:200],
}
def get_flamenco_client(manager_url: str) -> FlamencoClient:
"""Factory that creates a FlamencoClient from a manager URL."""
return FlamencoClient(manager_url)
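# Illustrative only: submitting a job and checking farm health. The manager
# URL, job type, and settings keys below are assumptions; consult the
# project's Flamenco job definitions for the real values.
if __name__ == "__main__":
    client = get_flamenco_client("http://flamenco-manager:8080")  # assumed URL
    health = client.health_check()
    print("manager available:", health["available"], "version:", health["version"])
    if health["available"]:
        job = client.submit_job(
            name="example-render",
            job_type="simple-blender-render",  # assumed job type
            settings={"blendfile": "/shared/example.blend"},  # assumed keys
            metadata={"order_number": "SA-2026-00001"},
        )
        print("submitted job:", job.get("id"))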
+471
@@ -0,0 +1,471 @@
"""KPI / analytics query functions.
All functions return plain dicts or lists of dicts.
Uses text() for raw SQL to avoid ORM lazy-loading surprises.
Every function accepts date_from / date_to ISO-date strings to scope metrics.
"""
from datetime import date as _date
from sqlalchemy import text
from sqlalchemy.ext.asyncio import AsyncSession
def _parse_date(s: str) -> _date:
"""Convert ISO date string to datetime.date for asyncpg compatibility."""
return _date.fromisoformat(s)
async def order_throughput_by_week(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Weekly order creation + completion counts within the date range."""
sql = text(
"""
SELECT
TO_CHAR(DATE_TRUNC('week', created_at), 'IYYY-"W"IW') AS week,
COUNT(*) AS count,
COUNT(*) FILTER (WHERE status = 'completed') AS completed
FROM orders
WHERE created_at >= CAST(:date_from AS date)
AND created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY DATE_TRUNC('week', created_at)
ORDER BY DATE_TRUNC('week', created_at)
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
rows = result.fetchall()
return [{"week": r[0], "count": r[1], "completed": r[2]} for r in rows]
async def processing_time_stats(
db: AsyncSession, date_from: str, date_to: str,
) -> dict:
"""Average and percentile processing times for completed orders in range."""
sql = text(
"""
SELECT
EXTRACT(EPOCH FROM AVG(completed_at - submitted_at))::FLOAT
AS avg_submit_to_complete_s,
EXTRACT(EPOCH FROM AVG(processing_started_at - submitted_at))::FLOAT
AS avg_submit_to_processing_s,
EXTRACT(EPOCH FROM PERCENTILE_CONT(0.5) WITHIN GROUP (
ORDER BY completed_at - submitted_at
))::FLOAT AS p50_s,
EXTRACT(EPOCH FROM PERCENTILE_CONT(0.95) WITHIN GROUP (
ORDER BY completed_at - submitted_at
))::FLOAT AS p95_s
FROM orders
WHERE status = 'completed'
AND submitted_at IS NOT NULL
AND completed_at IS NOT NULL
AND submitted_at >= CAST(:date_from AS date)
AND submitted_at < CAST(:date_to AS date) + INTERVAL '1 day'
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
row = result.fetchone()
if row is None:
return {
"avg_submit_to_complete_s": None,
"avg_submit_to_processing_s": None,
"p50_s": None,
"p95_s": None,
}
return {
"avg_submit_to_complete_s": row[0],
"avg_submit_to_processing_s": row[1],
"p50_s": row[2],
"p95_s": row[3],
}
async def revenue_overview(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Monthly revenue (sum of estimated_price for completed orders) in range."""
sql = text(
"""
SELECT
TO_CHAR(DATE_TRUNC('month', completed_at), 'YYYY-MM') AS month,
COALESCE(SUM(estimated_price), 0)::FLOAT AS revenue,
COUNT(*) AS order_count
FROM orders
WHERE status = 'completed'
AND completed_at >= CAST(:date_from AS date)
AND completed_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY DATE_TRUNC('month', completed_at)
ORDER BY DATE_TRUNC('month', completed_at)
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
rows = result.fetchall()
return [{"month": r[0], "revenue": r[1], "order_count": r[2]} for r in rows]
async def item_status_breakdown(
db: AsyncSession, date_from: str, date_to: str,
) -> dict:
"""Count of order lines grouped by item_status, scoped to orders in range."""
sql = text(
"""
SELECT ol.item_status, COUNT(*) AS cnt
FROM order_lines ol
JOIN orders o ON o.id = ol.order_id
WHERE o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY ol.item_status
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
rows = result.fetchall()
out: dict = {"pending": 0, "approved": 0, "rejected": 0}
for row in rows:
key = str(row[0])
out[key] = int(row[1])
return out
async def render_time_breakdown(
db: AsyncSession, date_from: str, date_to: str,
) -> dict:
"""Average render duration from completed order lines, scoped to date range.
Uses render_started_at / render_completed_at on order_lines (added in migration 015).
avg_stl_s is not tracked at order-line level, so only avg_render_s and sample_count
are meaningful here; avg_stl_s is left None for UI compatibility.
"""
sql = text(
"""
SELECT
AVG(EXTRACT(EPOCH FROM (render_completed_at - render_started_at))) AS avg_render_s,
COUNT(*) AS sample_count
FROM order_lines
WHERE render_status = 'completed'
AND render_started_at IS NOT NULL
AND render_completed_at IS NOT NULL
AND render_completed_at >= CAST(:date_from AS date)
AND render_completed_at < CAST(:date_to AS date) + INTERVAL '1 day'
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
row = result.fetchone()
if row is None or row[1] == 0:
return {"avg_stl_s": None, "avg_render_s": None, "avg_total_s": None, "sample_count": 0}
return {
"avg_stl_s": None,
"avg_render_s": float(row[0]) if row[0] is not None else None,
"avg_total_s": float(row[0]) if row[0] is not None else None,
"sample_count": int(row[1]),
}
async def render_time_by_output_type(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Render time statistics per output type for completed order lines."""
sql = text(
"""
SELECT
COALESCE(ot.name, 'Unknown') AS output_type,
COUNT(*) AS job_count,
AVG(EXTRACT(EPOCH FROM (ol.render_completed_at - ol.render_started_at))) AS avg_render_s,
MIN(EXTRACT(EPOCH FROM (ol.render_completed_at - ol.render_started_at))) AS min_render_s,
MAX(EXTRACT(EPOCH FROM (ol.render_completed_at - ol.render_started_at))) AS max_render_s,
PERCENTILE_CONT(0.5) WITHIN GROUP (
ORDER BY EXTRACT(EPOCH FROM (ol.render_completed_at - ol.render_started_at))
) AS p50_render_s
FROM order_lines ol
LEFT JOIN output_types ot ON ot.id = ol.output_type_id
WHERE ol.render_status = 'completed'
AND ol.render_started_at IS NOT NULL
AND ol.render_completed_at IS NOT NULL
AND ol.render_completed_at >= CAST(:date_from AS date)
AND ol.render_completed_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY ot.id, ot.name
ORDER BY avg_render_s DESC NULLS LAST
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
return [
{
"output_type": r[0],
"job_count": int(r[1]),
"avg_render_s": float(r[2]) if r[2] is not None else None,
"min_render_s": float(r[3]) if r[3] is not None else None,
"max_render_s": float(r[4]) if r[4] is not None else None,
"p50_render_s": float(r[5]) if r[5] is not None else None,
}
for r in result.fetchall()
]
async def top_level_summary(
db: AsyncSession, date_from: str, date_to: str,
) -> dict:
"""High-level summary counts and totals within the date range."""
sql = text(
"""
SELECT
COUNT(*) AS total_orders,
COUNT(*) FILTER (WHERE status = 'completed') AS completed_orders,
COALESCE(SUM(estimated_price) FILTER (WHERE status = 'completed'), 0)::FLOAT
AS total_revenue
FROM orders
WHERE created_at >= CAST(:date_from AS date)
AND created_at < CAST(:date_to AS date) + INTERVAL '1 day'
"""
)
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
row = result.fetchone()
items_sql = text(
"""
SELECT COUNT(*)
FROM order_lines ol
JOIN orders o ON o.id = ol.order_id
WHERE ol.output_type_id IS NOT NULL
AND o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
"""
)
items_result = await db.execute(items_sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
items_count = items_result.scalar() or 0
return {
"total_orders": int(row[0]) if row else 0,
"completed_orders": int(row[1]) if row else 0,
"total_revenue": float(row[2]) if row else 0.0,
"total_rendering_items": int(items_count),
}
async def product_and_category_stats(
db: AsyncSession, date_from: str, date_to: str,
) -> dict:
"""Product-level stats: unique rendered, total products, CAD coverage, by category."""
params = {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)}
rendered_sql = text(
"""
SELECT COUNT(DISTINCT ol.product_id)
FROM order_lines ol
JOIN orders o ON o.id = ol.order_id
WHERE ol.render_status = 'completed'
AND o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
"""
)
rendered = (await db.execute(rendered_sql, params)).scalar() or 0
totals_sql = text(
"""
SELECT COUNT(*) AS total, COUNT(cad_file_id) AS with_cad
FROM products
"""
)
totals_row = (await db.execute(totals_sql)).fetchone()
total_products = int(totals_row[0]) if totals_row else 0
products_with_cad = int(totals_row[1]) if totals_row else 0
cat_sql = text(
"""
SELECT COALESCE(category_key, 'unknown') AS category, COUNT(*) AS cnt
FROM products
GROUP BY category_key
ORDER BY cnt DESC
"""
)
cat_rows = (await db.execute(cat_sql)).fetchall()
return {
"unique_products_rendered": int(rendered),
"total_products": total_products,
"products_with_cad": products_with_cad,
"products_by_category": [
{"category": r[0], "count": int(r[1])} for r in cat_rows
],
}
async def output_type_usage(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Order lines grouped by output type name."""
sql = text(
"""
SELECT ot.name AS output_type, COUNT(*) AS cnt
FROM order_lines ol
JOIN output_types ot ON ot.id = ol.output_type_id
JOIN orders o ON o.id = ol.order_id
WHERE o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY ot.name
ORDER BY cnt DESC
"""
)
rows = (await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})).fetchall()
return [{"output_type": r[0], "count": int(r[1])} for r in rows]
async def render_status_distribution(
db: AsyncSession, date_from: str, date_to: str,
) -> tuple[dict, list[dict]]:
"""(a) order_lines by render_status, (b) cad_files by renderer from render_log."""
params = {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)}
status_sql = text(
"""
SELECT ol.render_status, COUNT(*) AS cnt
FROM order_lines ol
JOIN orders o ON o.id = ol.order_id
WHERE o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY ol.render_status
"""
)
status_rows = (await db.execute(status_sql, params)).fetchall()
status_map: dict = {"pending": 0, "processing": 0, "completed": 0, "failed": 0}
for row in status_rows:
key = str(row[0])
status_map[key] = int(row[1])
renderer_sql = text(
"""
SELECT render_log->>'renderer' AS renderer, COUNT(*) AS cnt
FROM cad_files
WHERE render_log IS NOT NULL
AND render_log->>'renderer' IS NOT NULL
AND created_at >= CAST(:date_from AS date)
AND created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY render_log->>'renderer'
ORDER BY cnt DESC
"""
)
renderer_rows = (await db.execute(renderer_sql, params)).fetchall()
renderer_usage = [{"renderer": r[0], "count": int(r[1])} for r in renderer_rows]
return status_map, renderer_usage
async def top_products(
db: AsyncSession, date_from: str, date_to: str,
limit: int = 10,
) -> list[dict]:
"""Top N most-ordered products by order line count."""
sql = text(
"""
SELECT p.pim_id, p.name AS product_name,
COALESCE(p.category_key, 'unknown') AS category,
COUNT(*) AS order_count
FROM order_lines ol
JOIN products p ON p.id = ol.product_id
JOIN orders o ON o.id = ol.order_id
WHERE o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY p.id, p.pim_id, p.name, p.category_key
ORDER BY order_count DESC
LIMIT :lim
"""
)
rows = (await db.execute(sql, {
"date_from": _parse_date(date_from),
"date_to": _parse_date(date_to),
"lim": limit,
})).fetchall()
return [
{"pim_id": r[0], "product_name": r[1], "category": r[2], "order_count": int(r[3])}
for r in rows
]
async def category_revenue(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Proportional revenue by category: order price / line count, summed per category."""
sql = text(
"""
        WITH order_share AS (
            -- One row per order line; each line carries an equal share of the
            -- order price (price / total lines in the order). Grouping by
            -- category here would divide by the per-category count instead,
            -- which over-counts mixed-category orders.
            SELECT o.id AS order_id,
                   COALESCE(o.estimated_price, 0)
                       / GREATEST(COUNT(ol.id) OVER (PARTITION BY o.id), 1) AS per_line_price,
                   COALESCE(p.category_key, 'unknown') AS category
            FROM orders o
            JOIN order_lines ol ON ol.order_id = o.id
            JOIN products p ON p.id = ol.product_id
            WHERE o.status = 'completed'
              AND o.completed_at >= CAST(:date_from AS date)
              AND o.completed_at < CAST(:date_to AS date) + INTERVAL '1 day'
        )
SELECT category,
COUNT(DISTINCT order_id) AS order_count,
COALESCE(SUM(per_line_price), 0)::FLOAT AS revenue
FROM order_share
GROUP BY category
ORDER BY revenue DESC
"""
)
rows = (await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})).fetchall()
return [{"category": r[0], "order_count": int(r[1]), "revenue": r[2]} for r in rows]
async def render_backend_stats(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Render time + count by backend (celery vs flamenco)."""
sql = text(
"""
SELECT
COALESCE(render_backend_used, 'unknown') AS backend,
COUNT(*) AS total,
COUNT(*) FILTER (WHERE render_status = 'completed') AS completed,
COUNT(*) FILTER (WHERE render_status = 'failed') AS failed,
EXTRACT(EPOCH FROM AVG(
render_completed_at - render_started_at
) FILTER (WHERE render_status = 'completed'))::FLOAT AS avg_render_s,
EXTRACT(EPOCH FROM PERCENTILE_CONT(0.5) WITHIN GROUP (
ORDER BY render_completed_at - render_started_at
) FILTER (WHERE render_status = 'completed'))::FLOAT AS p50_render_s
FROM order_lines ol
JOIN orders o ON o.id = ol.order_id
WHERE render_backend_used IS NOT NULL
AND o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY render_backend_used
ORDER BY total DESC
"""
)
rows = (await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})).fetchall()
return [
{
"backend": r[0],
"total": int(r[1]),
"completed": int(r[2]),
"failed": int(r[3]),
"avg_render_s": r[4],
"p50_render_s": r[5],
}
for r in rows
]
async def orders_by_user(
db: AsyncSession, date_from: str, date_to: str,
) -> list[dict]:
"""Orders grouped by user with counts and revenue."""
sql = text(
"""
SELECT u.full_name, u.email, u.role,
COUNT(*) AS order_count,
COALESCE(SUM(o.estimated_price) FILTER (WHERE o.status = 'completed'), 0)::FLOAT AS revenue
FROM orders o
JOIN users u ON u.id = o.created_by
WHERE o.created_at >= CAST(:date_from AS date)
AND o.created_at < CAST(:date_to AS date) + INTERVAL '1 day'
GROUP BY u.id, u.full_name, u.email, u.role
ORDER BY order_count DESC
"""
)
rows = (await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})).fetchall()
return [
{"full_name": r[0], "email": r[1], "role": r[2], "order_count": int(r[3]), "revenue": r[4]}
for r in rows
]
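# Illustrative only: one way a dashboard endpoint could aggregate several of
# these queries into a single payload. The route wiring around this helper
# is assumed, not shown in this commit.
async def example_dashboard_payload(
    db: AsyncSession, date_from: str, date_to: str
) -> dict:
    """Hedged usage sketch combining a few KPI queries into one response."""
    status_map, renderer_usage = await render_status_distribution(db, date_from, date_to)
    return {
        "summary": await top_level_summary(db, date_from, date_to),
        "throughput": await order_throughput_by_week(db, date_from, date_to),
        "render_status": status_map,
        "renderer_usage": renderer_usage,
        "backends": await render_backend_stats(db, date_from, date_to),
    }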
+143
@@ -0,0 +1,143 @@
"""Material alias resolution service.
Used from Celery tasks (sync context) to resolve raw material names
(from Excel / user input) to SCHAEFFLER library material names via aliases.
Resolution chain:
1. MaterialAlias lookup (case-insensitive) → use alias.material.name
2. Exact Material.name match (case-insensitive) → use the canonical name
3. Pass through unchanged → Blender will show the magenta FailedMaterial
"""
import logging
from sqlalchemy import create_engine, select, func
from sqlalchemy.orm import Session, selectinload
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.material import Material
from app.models.material_alias import MaterialAlias
logger = logging.getLogger(__name__)
_engine = None
def _get_engine():
global _engine
if _engine is None:
from app.config import settings as app_settings
_engine = create_engine(app_settings.database_url_sync)
return _engine
def resolve_material_map(raw_map: dict[str, str]) -> dict[str, str]:
"""Resolve raw material names to SCHAEFFLER library names via aliases.
    For each value in raw_map:
    1. Look up MaterialAlias.alias (case-insensitive) → use alias.material.name
    2. Else, if it matches a Material.name (case-insensitive) → use the canonical name
    3. Else keep the original (Blender will use the FailedMaterial fallback)
    Returns a new dict with the same keys but resolved material names.
"""
if not raw_map:
return raw_map
engine = _get_engine()
with Session(engine) as session:
# Load all materials
materials = session.execute(
select(Material).options(selectinload(Material.aliases))
).scalars().all()
# Build lookup dicts (case-insensitive)
# material name (lower) → canonical Material.name
name_lookup: dict[str, str] = {}
# alias (lower) → Material.name
alias_lookup: dict[str, str] = {}
for mat in materials:
name_lookup[mat.name.lower()] = mat.name
for a in mat.aliases:
alias_lookup[a.alias.lower()] = mat.name
resolved = {}
for part_name, raw_material in raw_map.items():
raw_lower = raw_material.lower()
# 1. Alias lookup first — aliases explicitly map intermediate/display names
# to the canonical SCHAEFFLER library names (e.g. "Steel--Stahl" →
# "SCHAEFFLER_010101_Steel-Bare"). This must take priority over the
# direct name match so that intermediate names are properly redirected.
if raw_lower in alias_lookup:
target = alias_lookup[raw_lower]
logger.info("resolved '%s''%s' (alias match)", raw_material, target)
resolved[part_name] = target
continue
# 2. Exact material name match (canonical name used as-is)
if raw_lower in name_lookup:
canonical = name_lookup[raw_lower]
if canonical != raw_material:
logger.info("resolved '%s''%s' (exact name match)", raw_material, canonical)
resolved[part_name] = canonical
continue
# 3. Pass through unchanged
logger.warning("no material match for '%s' — will use FailedMaterial fallback", raw_material)
resolved[part_name] = raw_material
return resolved
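# Illustrative only: the expected shape of one resolution pass. The alias
# example "Steel--Stahl" → "SCHAEFFLER_010101_Steel-Bare" comes from the
# comment above; the other names are invented.
def example_resolution() -> None:
    """Hedged usage sketch for resolve_material_map (illustration, not API)."""
    raw = {
        "inner_ring": "Steel--Stahl",  # alias hit → canonical library name
        "seal": "Totally unknown",     # no match → passed through unchanged
    }
    resolved = resolve_material_map(raw)
    # resolved["inner_ring"] == "SCHAEFFLER_010101_Steel-Bare" (if that alias is seeded)
    # resolved["seal"] == "Totally unknown" (Blender shows FailedMaterial)
    logger.debug("resolved map: %s", resolved)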
async def seed_material_aliases_from_mappings(
db: AsyncSession, mappings: list[dict]
) -> dict:
"""Seed material aliases from Excel materialmapping sheet.
For each {display_name, render_name}:
- Find or create Material by render_name
- Add display_name as alias if not already present
Returns {"created": N, "skipped": N}.
"""
created = 0
skipped = 0
for mapping in mappings:
display_name = mapping.get("display_name", "").strip()
render_name = mapping.get("render_name", "").strip()
if not display_name or not render_name:
skipped += 1
continue
# Find or create Material by render_name
result = await db.execute(
select(Material).where(func.lower(Material.name) == render_name.lower())
)
material = result.scalar_one_or_none()
if material is None:
material = Material(name=render_name, source="excel_mapping")
db.add(material)
await db.flush()
# Check if alias already exists
alias_result = await db.execute(
select(MaterialAlias).where(
func.lower(MaterialAlias.alias) == display_name.lower()
)
)
existing_alias = alias_result.scalar_one_or_none()
if existing_alias:
skipped += 1
continue
# Create alias
alias = MaterialAlias(material_id=material.id, alias=display_name)
db.add(alias)
created += 1
if created > 0:
await db.flush()
return {"created": created, "skipped": skipped}
@@ -0,0 +1,84 @@
"""Notification emission helpers.
Provides async (for routers) and sync (for Celery tasks) entry points
to create notification rows in the audit_log table.
"""
import logging
import uuid
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.audit_log import AuditLog
logger = logging.getLogger(__name__)
_engine = None
def _get_engine():
global _engine
if _engine is None:
from app.config import settings as app_settings
_engine = create_engine(app_settings.database_url_sync)
return _engine
async def emit_notification(
db: AsyncSession,
*,
actor_user_id: str | uuid.UUID | None = None,
target_user_id: str | uuid.UUID | None = None,
action: str,
entity_type: str | None = None,
entity_id: str | None = None,
details: dict | None = None,
) -> None:
"""Create a notification (async — for use inside FastAPI routers)."""
try:
entry = AuditLog(
user_id=str(actor_user_id) if actor_user_id else None,
target_user_id=str(target_user_id) if target_user_id else None,
action=action,
entity_type=entity_type,
entity_id=str(entity_id) if entity_id else None,
details=details,
notification=True,
timestamp=datetime.utcnow(),
)
db.add(entry)
await db.commit()
except Exception:
logger.exception("Failed to emit notification (async)")
await db.rollback()
def emit_notification_sync(
*,
actor_user_id: str | uuid.UUID | None = None,
target_user_id: str | uuid.UUID | None = None,
action: str,
entity_type: str | None = None,
entity_id: str | None = None,
details: dict | None = None,
) -> None:
"""Create a notification (sync — for use inside Celery tasks)."""
engine = _get_engine()
try:
with Session(engine) as session:
entry = AuditLog(
user_id=str(actor_user_id) if actor_user_id else None,
target_user_id=str(target_user_id) if target_user_id else None,
action=action,
entity_type=entity_type,
entity_id=str(entity_id) if entity_id else None,
details=details,
notification=True,
timestamp=datetime.utcnow(),
)
session.add(entry)
session.commit()
except Exception:
logger.exception("Failed to emit notification (sync)")
+22
@@ -0,0 +1,22 @@
"""Order number generation and business logic."""
from datetime import datetime
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select, func
from app.models.order import Order
async def generate_order_number(db: AsyncSession) -> str:
"""Generate next sequential order number: SA-2026-XXXXX."""
year = datetime.utcnow().year
prefix = f"SA-{year}-"
# Use MAX to find the highest existing sequence number this year.
# COUNT-based approach breaks when orders are deleted (produces duplicates).
result = await db.execute(
select(func.max(Order.order_number)).where(Order.order_number.like(f"{prefix}%"))
)
max_num = result.scalar()
if max_num:
last_seq = int(max_num.split("-")[-1])
return f"{prefix}{last_seq + 1:05d}"
return f"{prefix}00001"
@@ -0,0 +1,86 @@
"""Service to auto-advance order status when all renders complete."""
import logging
from datetime import datetime
from sqlalchemy import create_engine, select, update as sql_update
from sqlalchemy.orm import Session
from app.models.order import Order, OrderStatus
from app.models.order_line import OrderLine
logger = logging.getLogger(__name__)
def check_order_completion(order_id: str) -> bool:
"""If all renderable lines are done, auto-advance order to completed.
Called from Celery tasks (sync context).
Returns True if the order was advanced to completed.
"""
from app.config import settings as app_settings
sync_url = app_settings.database_url.replace("+asyncpg", "")
engine = create_engine(sync_url)
try:
with Session(engine) as session:
# Get all lines that have an output type (i.e. renderable)
lines = session.execute(
select(OrderLine).where(
OrderLine.order_id == order_id,
OrderLine.output_type_id.isnot(None),
)
).scalars().all()
if not lines:
return False
# Check if all renderable lines are in a terminal state
all_terminal = all(
line.render_status in ("completed", "failed", "cancelled")
for line in lines
)
if not all_terminal:
return False
# Check order is still in processing state
order = session.execute(
select(Order).where(Order.id == order_id)
).scalar_one_or_none()
if order is None or order.status != OrderStatus.processing:
return False
# Auto-advance to completed
now = datetime.utcnow()
session.execute(
sql_update(Order)
.where(Order.id == order_id)
.values(
status=OrderStatus.completed,
completed_at=now,
updated_at=now,
)
)
session.commit()
logger.info(f"Order {order_id} auto-advanced to completed (all {len(lines)} lines done)")
# Notify order creator
try:
from app.services.notification_service import emit_notification_sync
emit_notification_sync(
actor_user_id=None,
target_user_id=str(order.created_by),
action="order.completed",
entity_type="order",
entity_id=str(order_id),
details={"order_number": order.order_number},
)
except Exception:
logger.exception("Failed to emit order.completed notification")
return True
finally:
engine.dispose()
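# Illustrative only: where a render task might call this after marking its
# own order line terminal. The task naming and wiring are assumptions.
def example_after_render(order_id: str) -> None:
    """Hedged usage sketch for check_order_completion (illustration, not API)."""
    if check_order_completion(order_id):
        logger.info("order %s auto-completed after final render", order_id)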
+232
@@ -0,0 +1,232 @@
"""Pricing service — price lookup and order price computation.
Price resolution cascade for order lines:
1. OutputType's linked pricing_tier (if active) → use its price_per_item
2. Product's category_key → look up PricingTier by category
3. "default" category tier → global fallback
4. None if nothing configured
"""
from decimal import Decimal
from typing import Any
from sqlalchemy import select, update as sql_update
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from app.models.pricing_tier import PricingTier
async def get_price_for(
db: AsyncSession,
category_key: str,
quality_level: str = "Normal",
) -> Decimal | None:
"""Return price_per_item for the given category + quality level.
Falls back to category_key='default' if no exact match is found.
Returns None if nothing is configured.
"""
# 1. Exact match
result = await db.execute(
select(PricingTier).where(
PricingTier.category_key == category_key,
PricingTier.quality_level == quality_level,
PricingTier.is_active.is_(True),
)
)
tier = result.scalar_one_or_none()
if tier is not None:
return tier.price_per_item
if category_key == "default":
return None
# 2. Fallback: default category
result = await db.execute(
select(PricingTier).where(
PricingTier.category_key == "default",
PricingTier.quality_level == quality_level,
PricingTier.is_active.is_(True),
)
)
tier = result.scalar_one_or_none()
return tier.price_per_item if tier is not None else None
async def resolve_line_price(
db: AsyncSession,
output_type_id: str | None,
product_category_key: str | None,
) -> Decimal | None:
"""Resolve the unit price for a single order line using the cascade.
1. OutputType's linked pricing_tier (if active)
2. Product's category_key → PricingTier by category
3. "default" category tier → global fallback
4. None
"""
if output_type_id is not None:
from app.models.output_type import OutputType
result = await db.execute(
select(OutputType)
.options(selectinload(OutputType.pricing_tier))
.where(OutputType.id == output_type_id)
)
ot = result.scalar_one_or_none()
if ot and ot.pricing_tier and ot.pricing_tier.is_active:
return ot.pricing_tier.price_per_item
# Step 2+3: category lookup with default fallback
cat = product_category_key or "default"
return await get_price_for(db, cat)
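# Hedged walk-through of the cascade above for a line without an output type
# (the category value is illustrative):
#
#     price = await resolve_line_price(db, output_type_id=None,
#                                      product_category_key="axial")
#     # 1. no output_type_id -> skip the OT pricing-tier lookup
#     # 2. active PricingTier(category_key="axial", quality_level="Normal")
#     # 3. else active PricingTier(category_key="default", ...)
#     # 4. else None -> the caller treats the line as unpriced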
async def estimate_order_price(
db: AsyncSession,
lines: list[dict[str, Any]],
) -> dict:
"""Estimate price for a list of prospective order lines.
Each line dict should have: product_id, output_type_id.
Returns {total, line_count, breakdown: [{output_type_id, product_id, unit_price}], has_unpriced}.
"""
from app.models.product import Product
breakdown: list[dict] = []
total = Decimal("0.00")
has_unpriced = False
for line in lines:
product_id = line.get("product_id")
output_type_id = line.get("output_type_id")
# Get product category
cat = None
if product_id:
prod_result = await db.execute(
select(Product).where(Product.id == product_id)
)
prod = prod_result.scalar_one_or_none()
if prod:
cat = prod.category_key
price = await resolve_line_price(db, output_type_id, cat)
breakdown.append({
"output_type_id": str(output_type_id) if output_type_id else None,
"product_id": str(product_id) if product_id else None,
"unit_price": float(price) if price is not None else None,
})
if price is not None:
total += price
else:
has_unpriced = True
return {
"total": float(total),
"line_count": len(lines),
"breakdown": breakdown,
"has_unpriced": has_unpriced,
}
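# Hedged call shape (IDs and prices are illustrative):
#
#     estimate = await estimate_order_price(db, [
#         {"product_id": prod.id, "output_type_id": ot.id},
#     ])
#     # -> {"total": 12.5, "line_count": 1, "has_unpriced": False,
#     #     "breakdown": [{"output_type_id": "...", "product_id": "...",
#     #                    "unit_price": 12.5}]}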
async def compute_order_estimated_price(
db: AsyncSession,
order,
items,
quality_level: str = "Normal",
) -> Decimal | None:
"""Compute estimated price for an order based on rendering items.
Returns None if no pricing is configured, or Decimal('0.00') if there
are no rendering items.
"""
rendering_count = sum(1 for i in items if i.medias_rendering)
if rendering_count == 0:
return Decimal("0.00")
# Resolve category from template
category_key = "default"
if order.template_id is not None:
from app.models.template import Template
tmpl_result = await db.execute(
select(Template).where(Template.id == order.template_id)
)
tmpl = tmpl_result.scalar_one_or_none()
if tmpl and tmpl.category_key:
category_key = tmpl.category_key
unit_price = await get_price_for(db, category_key, quality_level)
if unit_price is None:
return None
return unit_price * rendering_count
async def refresh_order_price(db: AsyncSession, order_id) -> Decimal | None:
"""Re-fetch order + lines, resolve per-line prices, snapshot to unit_price, update order total."""
from app.models.order import Order
from app.models.order_line import OrderLine
from app.models.output_type import OutputType
from app.models.product import Product
order_result = await db.execute(select(Order).where(Order.id == order_id))
order = order_result.scalar_one_or_none()
if order is None:
return None
lines_result = await db.execute(
select(OrderLine)
.options(
selectinload(OrderLine.output_type).selectinload(OutputType.pricing_tier),
selectinload(OrderLine.product),
)
.where(
OrderLine.order_id == order_id,
OrderLine.output_type_id.is_not(None),
)
)
lines = lines_result.scalars().all()
if not lines:
await db.execute(
sql_update(Order)
.where(Order.id == order_id)
.values(estimated_price=Decimal("0.00"))
)
await db.commit()
return Decimal("0.00")
total = Decimal("0.00")
any_priced = False
for line in lines:
# Cascade: 1) OT pricing tier, 2) product category, 3) default
price = None
if line.output_type and line.output_type.pricing_tier and line.output_type.pricing_tier.is_active:
price = line.output_type.pricing_tier.price_per_item
else:
cat = line.product.category_key if line.product else None
price = await get_price_for(db, cat or "default")
# Snapshot to line
await db.execute(
sql_update(OrderLine)
.where(OrderLine.id == line.id)
.values(unit_price=price)
)
if price is not None:
total += price
any_priced = True
new_price = total if any_priced else None
await db.execute(
sql_update(Order)
.where(Order.id == order_id)
.values(estimated_price=new_price)
)
await db.commit()
return new_price
@@ -0,0 +1,143 @@
"""Product service — lookup/create products, link CAD files."""
import uuid
from sqlalchemy import select, func, update as sql_update
from sqlalchemy.ext.asyncio import AsyncSession
from app.models.product import Product
# Default render positions added to every newly created product.
DEFAULT_RENDER_POSITIONS = [
{"name": "3/4 Front", "rotation_x": -15.0, "rotation_y": 45.0, "rotation_z": 0.0, "is_default": True, "sort_order": 0},
{"name": "3/4 Rear", "rotation_x": -15.0, "rotation_y": -135.0, "rotation_z": 0.0, "is_default": False, "sort_order": 1},
{"name": "Default", "rotation_x": 0.0, "rotation_y": 0.0, "rotation_z": 0.0, "is_default": False, "sort_order": 2},
]
async def create_default_positions(db: AsyncSession, product_id: uuid.UUID) -> None:
"""Insert the default render positions for a newly created product."""
from app.models.render_position import ProductRenderPosition
for pos_data in DEFAULT_RENDER_POSITIONS:
db.add(ProductRenderPosition(product_id=product_id, **pos_data))
await db.flush()
def _fill_missing_fields(product: Product, pim_id: str | None, fields: dict) -> None:
"""Fill in null/empty fields on an existing product without overwriting manual edits."""
if pim_id and not product.pim_id:
product.pim_id = pim_id
for attr in (
"name", "category_key", "ebene1", "ebene2", "baureihe",
"lagertyp", "name_cad_modell", "arbeitspaket",
):
if fields.get(attr) and not getattr(product, attr, None):
setattr(product, attr, fields[attr])
# Update medias_rendering if not set
if fields.get("medias_rendering") is not None and product.medias_rendering is None:
product.medias_rendering = fields["medias_rendering"]
# Always update components from the latest Excel import (needed for auto-reassign)
if fields.get("components"):
product.components = fields["components"]
async def lookup_product(
db: AsyncSession, pim_id: str | None, produkt_baureihe: str | None
) -> Product | None:
"""Read-only lookup: produkt_baureihe (primary), then pim_id (fallback).
Same cascade as lookup_or_create_product but never creates or mutates.
"""
if produkt_baureihe:
result = await db.execute(
select(Product).where(
func.lower(Product.produkt_baureihe) == produkt_baureihe.lower(),
Product.is_active.is_(True),
)
)
product = result.scalar_one_or_none()
if product is not None:
return product
        # baureihe provided but not found — skip the pim_id fallback
        # (mirrors lookup_or_create_product, just without creating)
return None
if pim_id:
result = await db.execute(
select(Product).where(Product.pim_id == pim_id, Product.is_active.is_(True))
)
return result.scalar_one_or_none()
return None
async def lookup_or_create_product(
db: AsyncSession, pim_id: str | None, fields: dict
) -> tuple[Product, bool]:
"""Look up by produkt_baureihe (primary), then pim_id (fallback). Create if not found.
Returns (product, was_created).
Does NOT overwrite existing fields — preserves manual edits.
"""
produkt_baureihe = fields.get("produkt_baureihe")
# Primary lookup: by produkt_baureihe (case-insensitive)
if produkt_baureihe:
result = await db.execute(
select(Product).where(
func.lower(Product.produkt_baureihe) == produkt_baureihe.lower(),
Product.is_active.is_(True),
)
)
product = result.scalar_one_or_none()
if product is not None:
_fill_missing_fields(product, pim_id, fields)
await db.flush()
return product, False
# produkt_baureihe was provided but not found — each baureihe is a
# distinct product, so skip the pim_id fallback and create a new one.
# Fallback lookup: by pim_id (only when produkt_baureihe is absent,
# e.g. old per-category Excel files that don't have a Baureihe column).
if not produkt_baureihe and pim_id:
result = await db.execute(
select(Product).where(Product.pim_id == pim_id, Product.is_active.is_(True))
)
product = result.scalar_one_or_none()
if product is not None:
_fill_missing_fields(product, pim_id, fields)
await db.flush()
return product, False
product = Product(
pim_id=pim_id or f"auto-{uuid.uuid4().hex[:8]}",
name=fields.get("name"),
category_key=fields.get("category_key"),
ebene1=fields.get("ebene1"),
ebene2=fields.get("ebene2"),
baureihe=fields.get("baureihe"),
produkt_baureihe=produkt_baureihe,
lagertyp=fields.get("lagertyp"),
name_cad_modell=fields.get("name_cad_modell"),
arbeitspaket=fields.get("arbeitspaket"),
components=fields.get("components", []),
cad_part_materials=fields.get("cad_part_materials", []),
source_excel=fields.get("source_excel"),
)
db.add(product)
await db.flush()
await create_default_positions(db, product.id)
return product, True
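# Hedged example of an Excel-import row hitting the cascade above
# (field values are illustrative):
#
#     product, created = await lookup_or_create_product(db, pim_id="PIM-123", fields={
#         "produkt_baureihe": "81113-TV",  # primary, case-insensitive match
#         "name": "Axial cylindrical roller bearing",
#         "category_key": "axial",
#     })
#     # match on "81113-tv" -> existing product, gaps filled, created == False
#     # no match            -> new Product + default render positions, created == True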
async def link_cad_to_product(
db: AsyncSession, product_id: uuid.UUID, cad_file_id: uuid.UUID
) -> Product:
"""Set product.cad_file_id via direct SQL UPDATE."""
await db.execute(
sql_update(Product)
.where(Product.id == product_id)
.values(cad_file_id=cad_file_id)
)
await db.commit()
result = await db.execute(select(Product).where(Product.id == product_id))
return result.scalar_one()
@@ -0,0 +1,374 @@
"""Render dispatcher — routes render jobs to Celery or Flamenco.
Backend selection priority:
1. OutputType.render_backend per-type override ("celery" / "flamenco")
2. System setting render_backend — explicit global default ("celery" / "flamenco")
3. "auto" (or unset): animations → Flamenco, stills → Celery
"""
import json
import logging
from datetime import datetime
from sqlalchemy import select, update as sql_update
from sqlalchemy.orm import Session, joinedload
from app.models.order_line import OrderLine
from app.models.output_type import OutputType
from app.models.product import Product
from app.models.system_setting import SystemSetting
logger = logging.getLogger(__name__)
def _load_setting(session: Session, key: str, default: str = "") -> str:
"""Load a single system setting (sync)."""
row = session.execute(
select(SystemSetting).where(SystemSetting.key == key)
).scalar_one_or_none()
return row.value if row else default
def resolve_backend(output_type: OutputType | None, system_backend: str) -> str:
"""Determine which backend to use for a given output type.
Returns "celery" or "flamenco".
"""
if output_type is None:
return "celery"
# Priority 1: explicit per-type override
ot_backend = output_type.render_backend
if ot_backend in ("celery", "flamenco"):
return ot_backend
    # Priority 2: explicit system-wide backend setting
if system_backend in ("celery", "flamenco"):
return system_backend
    # Priority 3: "auto" (or unset) — animations → Flamenco, stills → Celery
if output_type.is_animation:
return "flamenco"
return "celery"
def build_flamenco_job_settings(
output_type: OutputType,
product: Product,
step_path: str,
output_dir: str,
system_settings: dict[str, str],
lighting_only: bool = False,
shadow_catcher: bool = False,
camera_orbit: bool = True,
cycles_device: str = "auto",
rotation_x: float = 0.0,
rotation_y: float = 0.0,
rotation_z: float = 0.0,
) -> dict:
"""Build Flamenco job settings from output type and product metadata."""
render_settings = output_type.render_settings or {}
engine = render_settings.get("engine", system_settings.get("blender_engine", "cycles"))
samples_key = f"blender_{engine}_samples"
samples = render_settings.get("samples", int(system_settings.get(samples_key, "256")))
stl_quality = render_settings.get("stl_quality", system_settings.get("stl_quality", "low"))
width = render_settings.get("width", 1920 if output_type.is_animation else 1024)
height = render_settings.get("height", 1080 if output_type.is_animation else 1024)
part_colors = {}
part_names_ordered = []
if product.cad_file and product.cad_file.parsed_objects:
part_names_ordered = product.cad_file.parsed_objects.get("objects", [])
materials_source = product.cad_part_materials
if materials_source:
from app.services.step_processor import build_part_colors
part_colors = build_part_colors(part_names_ordered, materials_source)
    transparent_bg = bool(getattr(output_type, "transparent_bg", False))
settings = {
"step_path": step_path,
"engine": engine,
"samples": samples,
"stl_quality": stl_quality,
"width": width,
"height": height,
"part_colors_json": json.dumps(part_colors),
"transparent_bg": transparent_bg,
"template_path": "",
"target_collection": "Product",
"material_library_path": "",
"material_map_json": "{}",
"part_names_ordered_json": json.dumps(part_names_ordered),
"lighting_only": lighting_only,
"shadow_catcher": shadow_catcher,
"cycles_device": cycles_device,
"rotation_x": rotation_x,
"rotation_y": rotation_y,
"rotation_z": rotation_z,
}
for dk in ('noise_threshold', 'denoiser', 'denoising_input_passes',
'denoising_prefilter', 'denoising_quality', 'denoising_use_gpu'):
settings[dk] = str(render_settings.get(dk, ""))
if output_type.is_animation:
# Turntable-specific settings
output_name = render_settings.get("output_name", "turntable")
settings["output_dir"] = output_dir
settings["output_name"] = output_name
settings["frame_count"] = render_settings.get("frame_count", 120)
settings["fps"] = render_settings.get("fps", 30)
settings["turntable_degrees"] = render_settings.get("turntable_degrees", 360)
settings["turntable_axis"] = render_settings.get("turntable_axis", "world_z")
settings["bg_color"] = render_settings.get("bg_color", "")
settings["camera_orbit"] = camera_orbit
else:
# Still-specific settings
ext = output_type.output_format or "png"
settings["output_path"] = f"{output_dir}/render.{ext}"
return settings
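# Hedged shape of the returned settings for a still (non-animation) output
# type — keys as assigned above, values illustrative:
#
#     {
#         "step_path": "/shared/cad/81113.stp",
#         "engine": "cycles", "samples": 256, "stl_quality": "low",
#         "width": 1024, "height": 1024,
#         "part_colors_json": "{...}", "transparent_bg": False,
#         "template_path": "", "target_collection": "Product",
#         "material_library_path": "", "material_map_json": "{}",
#         "noise_threshold": "", "denoiser": "",  # denoise keys as strings
#         "output_path": "/shared/renders/<line-id>/render.png",
#     }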
def dispatch_render(order_line_id: str) -> dict:
"""Route a render job to Celery or Flamenco based on configuration.
Must be called from a sync context (Celery task or sync wrapper).
Returns {"backend": "celery"|"flamenco", "job_ref": str}.
"""
from app.config import settings as app_settings
from app.services.render_log import emit, clear
clear(order_line_id)
emit(order_line_id, "Dispatch started — loading order line data")
sync_url = app_settings.database_url.replace("+asyncpg", "")
from sqlalchemy import create_engine
engine_db = create_engine(sync_url)
with Session(engine_db) as session:
line = session.execute(
select(OrderLine)
.where(OrderLine.id == order_line_id)
.options(
joinedload(OrderLine.product).joinedload(Product.cad_file),
joinedload(OrderLine.output_type),
)
).scalar_one_or_none()
if line is None:
emit(order_line_id, "Order line not found", "error")
logger.error(f"OrderLine {order_line_id} not found")
return {"backend": "none", "job_ref": "", "error": "not_found"}
product_name = line.product.name or line.product.pim_id or "unknown"
output_name = line.output_type.name if line.output_type else "default"
emit(order_line_id, f"Product: {product_name} | Output: {output_name}")
if line.product.cad_file_id is None:
emit(order_line_id, "Product has no CAD file — marking as failed", "error")
logger.warning(f"OrderLine {order_line_id}: product has no CAD file")
session.execute(
sql_update(OrderLine)
.where(OrderLine.id == line.id)
.values(render_status="failed")
)
session.commit()
return {"backend": "none", "job_ref": "", "error": "no_cad_file"}
cad_name = line.product.cad_file.original_name if line.product.cad_file else "?"
emit(order_line_id, f"CAD file: {cad_name}")
# Load system settings
system_backend = _load_setting(session, "render_backend", "celery")
flamenco_url = _load_setting(session, "flamenco_manager_url", "http://flamenco-manager:8080")
backend = resolve_backend(line.output_type, system_backend)
emit(order_line_id, f"Resolved backend: {backend}")
# Mark as processing
now = datetime.utcnow()
session.execute(
sql_update(OrderLine)
.where(OrderLine.id == line.id)
.values(
render_status="processing",
render_backend_used=backend,
render_started_at=now,
)
)
session.commit()
if backend == "flamenco":
emit(order_line_id, f"Submitting job to Flamenco Manager ({flamenco_url})")
result = _dispatch_flamenco(session, line, flamenco_url)
if result.get("error"):
emit(order_line_id, f"Flamenco submit failed: {result['error']}", "error")
else:
emit(order_line_id, f"Flamenco job submitted: {result.get('job_ref', '?')}")
return result
else:
emit(order_line_id, "Dispatching to Celery render worker")
return _dispatch_celery(order_line_id)
engine_db.dispose()
def _dispatch_celery(order_line_id: str) -> dict:
"""Dispatch to the existing Celery render task."""
from app.tasks.step_tasks import render_order_line_task
result = render_order_line_task.delay(order_line_id)
return {"backend": "celery", "job_ref": result.id}
def _dispatch_flamenco(session: Session, line: OrderLine, flamenco_url: str) -> dict:
"""Submit a job to Flamenco Manager."""
import re
from app.services.flamenco_client import get_flamenco_client
# Load all needed system settings
all_keys = ["blender_engine", "blender_cycles_samples", "blender_eevee_samples", "stl_quality", "cycles_device"]
sys_settings = {}
for key in all_keys:
sys_settings[key] = _load_setting(session, key, "")
output_type = line.output_type
product = line.product
cad_file = product.cad_file
# Load render_position for rotation values
rotation_x = rotation_y = rotation_z = 0.0
if line.render_position_id:
from app.models.render_position import ProductRenderPosition
rp = session.get(ProductRenderPosition, line.render_position_id)
if rp:
rotation_x, rotation_y, rotation_z = rp.rotation_x, rp.rotation_y, rp.rotation_z
# Flamenco mounts the uploads volume at /shared, backend uses /app/uploads
raw_path = cad_file.stored_path if cad_file else ""
step_path = raw_path.replace("/app/uploads/", "/shared/") if raw_path else ""
output_dir = f"/shared/renders/{line.id}"
job_type = "schaeffler-turntable" if (output_type and output_type.is_animation) else "schaeffler-still"
# Resolve render template + material library BEFORE building job settings
# (template.lighting_only is needed by build_flamenco_job_settings)
from app.services.template_service import resolve_template, get_material_library_path
category_key = product.category_key if product else None
ot_id = str(line.output_type_id) if line.output_type_id else None
template = resolve_template(category_key=category_key, output_type_id=ot_id)
material_library = get_material_library_path()
# Resolve cycles_device: per-output-type override wins, fall back to system setting
ot_cycles_device = output_type.cycles_device if output_type else None
effective_cycles_device = ot_cycles_device or sys_settings.get("cycles_device", "gpu") or "gpu"
settings = build_flamenco_job_settings(
output_type=output_type,
product=product,
step_path=step_path,
output_dir=output_dir,
system_settings=sys_settings,
lighting_only=bool(template.lighting_only) if template else False,
shadow_catcher=bool(template.shadow_catcher_enabled) if template else False,
camera_orbit=bool(template.camera_orbit) if template else True,
cycles_device=effective_cycles_device,
rotation_x=rotation_x,
rotation_y=rotation_y,
rotation_z=rotation_z,
)
if template:
# Remap path for Flamenco shared volume
tmpl_path = template.blend_file_path.replace("/app/uploads/", "/shared/")
settings["template_path"] = tmpl_path
settings["target_collection"] = template.target_collection
logger.info(
f"Flamenco job: using render template '{template.name}' "
f"(id={template.id}, path={tmpl_path}, collection={template.target_collection})"
)
else:
logger.info(
f"Flamenco job: no render template found for "
f"category_key={category_key!r}, output_type_id={ot_id!r} — using factory settings"
)
# Material library + material map: send whenever library exists and product
# has material assignments — works with or without a render template.
# When a template is present, only apply if material_replace_enabled is set.
materials_source = product.cad_part_materials
use_materials = bool(material_library and materials_source)
if template and not template.material_replace_enabled:
use_materials = False
if use_materials:
mat_lib_path = material_library.replace("/app/uploads/", "/shared/")
settings["material_library_path"] = mat_lib_path
mat_map = {
m["part_name"]: m["material"]
for m in materials_source
if m.get("part_name") and m.get("material")
}
# Resolve raw material names to SCHAEFFLER library names via aliases
from app.services.material_service import resolve_material_map
mat_map = resolve_material_map(mat_map)
settings["material_map_json"] = json.dumps(mat_map)
# Output naming: meaningful filename instead of generic render.ext
def _sanitize(s: str) -> str:
return re.sub(r'[^\w\-.]', '_', s.strip())[:100]
product_name = product.name or product.pim_id or "product"
ot_name = output_type.name if output_type else "render"
if not (output_type and output_type.is_animation):
        ext = (output_type.output_format or "png") if output_type else "png"
filename = f"{_sanitize(product_name)}_{_sanitize(ot_name)}.{ext}"
settings["output_path"] = f"{output_dir}/{filename}"
metadata = {
"order_line_id": str(line.id),
"order_id": str(line.order_id),
"product_name": product.name or "",
"output_type": output_type.name if output_type else "",
"category": product.category_key or "",
}
job_name = f"{product.name or product.pim_id} - {output_type.name if output_type else 'render'}"
try:
client = get_flamenco_client(flamenco_url)
job = client.submit_job(
name=job_name[:200],
job_type=job_type,
settings=settings,
metadata=metadata,
)
job_id = job.get("id", "")
# Save flamenco_job_id
session.execute(
sql_update(OrderLine)
.where(OrderLine.id == line.id)
.values(flamenco_job_id=job_id)
)
session.commit()
logger.info(f"Flamenco job submitted: {job_id} for OrderLine {line.id}")
return {"backend": "flamenco", "job_ref": job_id}
except Exception as exc:
logger.error(f"Flamenco submit failed for OrderLine {line.id}: {exc}")
session.execute(
sql_update(OrderLine)
.where(OrderLine.id == line.id)
.values(
render_status="failed",
render_completed_at=datetime.utcnow(),
render_log={"error": f"Flamenco submit failed: {str(exc)[:500]}"},
)
)
session.commit()
return {"backend": "flamenco", "job_ref": "", "error": str(exc)}
@@ -0,0 +1,72 @@
"""Redis-backed live render log for streaming task progress.
Each order line gets a Redis list keyed by render:log:{order_line_id}.
Entries are JSON objects with timestamp, level, and message.
Lists auto-expire after 1 hour.
"""
import json
import time
import logging
import redis
from app.config import settings
logger = logging.getLogger(__name__)
_LOG_TTL = 3600 # 1 hour
_MAX_ENTRIES = 500
def _redis() -> redis.Redis:
return redis.from_url(settings.redis_url, decode_responses=True)
def _key(order_line_id: str) -> str:
return f"render:log:{order_line_id}"
def emit(order_line_id: str, message: str, level: str = "info") -> None:
"""Push a log entry for a render job."""
entry = json.dumps({
"ts": time.time(),
"t": time.strftime("%H:%M:%S", time.gmtime()),
"level": level,
"msg": message,
})
try:
r = _redis()
key = _key(order_line_id)
r.rpush(key, entry)
r.ltrim(key, -_MAX_ENTRIES, -1)
r.expire(key, _LOG_TTL)
except Exception as exc:
logger.debug(f"render_log emit failed: {exc}")
def get_entries(order_line_id: str, after_index: int = 0) -> list[dict]:
"""Get log entries starting from after_index."""
try:
r = _redis()
raw = r.lrange(_key(order_line_id), after_index, -1)
return [json.loads(e) for e in raw]
except Exception:
return []
def count(order_line_id: str) -> int:
"""Get the number of log entries."""
try:
r = _redis()
return r.llen(_key(order_line_id))
except Exception:
return 0
def clear(order_line_id: str) -> None:
"""Clear log entries for a render job."""
try:
r = _redis()
r.delete(_key(order_line_id))
except Exception:
pass
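# Hedged polling sketch (e.g. behind a long-poll endpoint; the loop and sleep
# are illustrative — only emit/get_entries/count/clear above are real):
#
#     import time
#     last = 0
#     while True:
#         entries = get_entries(order_line_id, after_index=last)
#         last += len(entries)  # lrange start index is inclusive
#         for e in entries:
#             print(e["t"], e["level"], e["msg"])
#         time.sleep(1.0)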
@@ -0,0 +1,726 @@
"""
STEP file processor — Phase 3 implementation.
Extracts object names from STEP files using pythonocc-core (OCC),
generates thumbnails via the configured renderer services (Blender / Three.js,
with a Pillow placeholder fallback), and converts to glTF using trimesh.
This module is invoked from the Celery worker (step_tasks.py).
"""
import hashlib
import logging
import uuid
from pathlib import Path
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from app.models.cad_file import CadFile
logger = logging.getLogger(__name__)
MATERIAL_PALETTE = [
"#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
"#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]
def _material_to_color(material_name: str | None, index: int) -> str:
    """Return a deterministic hex color: stable-hash the material name, or use the palette by index."""
    if material_name and material_name.strip():
        # hash() is salted per process (PYTHONHASHSEED), so use a stable digest
        # to keep colours identical across worker restarts.
        digest = hashlib.md5(material_name.strip().lower().encode("utf-8")).digest()
        return MATERIAL_PALETTE[digest[0] % len(MATERIAL_PALETTE)]
    return MATERIAL_PALETTE[index % len(MATERIAL_PALETTE)]
def build_part_colors(
cad_parsed_objects: list[str],
cad_part_materials: list[dict],
) -> dict[str, str]:
"""
Build {part_name: hex_color} for thumbnail rendering.
Args:
cad_parsed_objects: List of part names from cad_file.parsed_objects["objects"].
cad_part_materials: List of {part_name, material} dicts from order_item.cad_part_materials.
"""
mat_map = {
m["part_name"].lower(): m.get("material")
for m in cad_part_materials
if m.get("part_name")
}
return {
name: _material_to_color(mat_map.get(name.lower()), i)
for i, name in enumerate(cad_parsed_objects)
}
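# Hedged example — two parts share a material (and therefore a colour), the
# third falls back to its palette index:
#
#     build_part_colors(
#         ["InnerRing", "OuterRing", "Cage"],
#         [{"part_name": "InnerRing", "material": "Steel"},
#          {"part_name": "OuterRing", "material": "Steel"}],
#     )
#     # -> {"InnerRing": <stable colour for "steel">,
#     #     "OuterRing": <same colour>,
#     #     "Cage": MATERIAL_PALETTE[2]}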
def _normalize_stem(name: str) -> str:
"""Normalize a filename stem for comparison: lowercase, strip .stp/.step extension."""
stem = name.strip()
for ext in (".step", ".stp"):
if stem.lower().endswith(ext):
stem = stem[: -len(ext)]
break
return stem.lower()
def match_cad_to_items(
cad_file: "CadFile",
item_names: list[str],
) -> list[str]:
"""
Match a CadFile to a list of OrderItem name_cad_modell values.
Matching is case-insensitive and normalizes .stp/.step extensions so that
a file named '81113-L_cut.stp' matches an item named '81113-l_cut' or
'81113-L_cut.step'.
Args:
cad_file: A CadFile ORM object (needs .original_name).
item_names: List of name_cad_modell strings from OrderItems.
Returns:
List of matched item names (subset of item_names).
"""
cad_stem = _normalize_stem(cad_file.original_name or "")
matched = []
for name in item_names:
if not name:
continue
if _normalize_stem(name) == cad_stem:
matched.append(name)
return matched
def extract_cad_metadata(cad_file_id: str) -> None:
"""
Fast metadata extraction for a CAD file (no thumbnail generation).
Does everything process_cad_file() does EXCEPT thumbnail rendering:
- Sets status to processing
- Extracts STEP object names
- Converts to glTF
- Leaves status as processing (render_step_thumbnail task will complete it)
- On exception: sets status to failed
"""
from app.config import settings
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from app.models.cad_file import CadFile, ProcessingStatus
engine = create_engine(settings.database_url_sync)
with Session(engine) as session:
cad_file = session.get(CadFile, uuid.UUID(cad_file_id))
if not cad_file:
logger.error(f"CAD file not found: {cad_file_id}")
return
cad_file.processing_status = ProcessingStatus.processing
session.commit()
try:
step_path = Path(cad_file.stored_path)
if not step_path.exists():
raise FileNotFoundError(f"STEP file not found: {step_path}")
objects = _extract_step_objects(step_path)
cad_file.parsed_objects = {"objects": objects}
gltf_path = _convert_to_gltf(step_path, cad_file_id, settings.upload_dir)
if gltf_path:
cad_file.gltf_path = str(gltf_path)
# Leave status as processing — render_step_thumbnail will complete it
logger.info(f"CAD metadata extracted: {cad_file_id} ({len(objects)} objects)")
except Exception as exc:
logger.error(f"CAD metadata extraction failed for {cad_file_id}: {exc}")
cad_file.processing_status = ProcessingStatus.failed
cad_file.error_message = str(exc)[:2000]
session.commit()
def process_cad_file(cad_file_id: str) -> None:
"""
Full processing pipeline for a CAD file:
1. Load STEP file with pythonocc
2. Extract part/object names
3. Generate thumbnail PNG
4. Convert to glTF for browser viewer
5. Update DB record
"""
from app.config import settings
# Synchronous DB access for Celery worker
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from app.models.cad_file import CadFile, ProcessingStatus
engine = create_engine(settings.database_url_sync)
with Session(engine) as session:
cad_file = session.get(CadFile, uuid.UUID(cad_file_id))
if not cad_file:
logger.error(f"CAD file not found: {cad_file_id}")
return
cad_file.processing_status = ProcessingStatus.processing
session.commit()
try:
step_path = Path(cad_file.stored_path)
if not step_path.exists():
raise FileNotFoundError(f"STEP file not found: {step_path}")
# Step 1: Extract object names
objects = _extract_step_objects(step_path)
cad_file.parsed_objects = {"objects": objects}
# Step 2: Generate thumbnail — pass empty part_colors so the Three.js
# renderer extracts named parts and auto-assigns palette colours.
# Other renderers (Blender, Pillow) ignore the part_colors argument.
thumb_path, render_log = _generate_thumbnail(step_path, cad_file_id, settings.upload_dir, part_colors={})
if thumb_path:
cad_file.thumbnail_path = str(thumb_path)
cad_file.render_log = render_log
# Step 3: Convert to glTF
gltf_path = _convert_to_gltf(step_path, cad_file_id, settings.upload_dir)
if gltf_path:
cad_file.gltf_path = str(gltf_path)
cad_file.processing_status = ProcessingStatus.completed
logger.info(f"CAD file processed successfully: {cad_file_id}")
except Exception as exc:
logger.error(f"CAD processing failed for {cad_file_id}: {exc}")
cad_file.processing_status = ProcessingStatus.failed
cad_file.error_message = str(exc)[:2000]
session.commit()
def _extract_step_objects(step_path: Path) -> list[str]:
"""Extract part names from STEP file using pythonocc."""
    try:
        from OCC.Core.IFSelect import IFSelect_RetDone
        from OCC.Core.STEPCAFControl import STEPCAFControl_Reader
        from OCC.Core.TCollection import TCollection_ExtendedString
        from OCC.Core.TDataStd import TDataStd_Name
        from OCC.Core.TDF import TDF_LabelSequence
        from OCC.Core.TDocStd import TDocStd_Document
        from OCC.Core.XCAFDoc import XCAFDoc_DocumentTool
        doc = TDocStd_Document(TCollection_ExtendedString("MDTV-CAF"))
        reader = STEPCAFControl_Reader()
        reader.SetColorMode(True)
        reader.SetNameMode(True)
        status = reader.ReadFile(str(step_path))
        if status != IFSelect_RetDone:
            return []
        if not reader.Transfer(doc):
            return []
        shape_tool = XCAFDoc_DocumentTool.ShapeTool(doc.Main())
        # GetFreeShapes fills an OCC TDF_LabelSequence (1-indexed), not a Python list
        labels = TDF_LabelSequence()
        shape_tool.GetFreeShapes(labels)
        names = []
        for i in range(1, labels.Length() + 1):
            name_attr = TDataStd_Name()
            if labels.Value(i).FindAttribute(TDataStd_Name.GetID(), name_attr):
                names.append(name_attr.Get().ToExtString())
        return names
except ImportError:
logger.warning("pythonocc-core not available; skipping object extraction")
return _extract_step_objects_fallback(step_path)
except Exception as exc:
logger.warning(f"OCC extraction failed: {exc}")
return _extract_step_objects_fallback(step_path)
def _extract_step_objects_fallback(step_path: Path) -> list[str]:
"""Simple text-based extraction of part names from STEP file."""
names = []
try:
with open(step_path, "r", encoding="utf-8", errors="replace") as f:
for line in f:
# STEP format: PRODUCT('name','description',...
if "PRODUCT(" in line:
parts = line.split("PRODUCT(")
for part in parts[1:]:
if "'" in part:
name = part.split("'")[1]
if name and name not in names:
names.append(name)
except Exception:
pass
return names
def _get_all_settings() -> dict[str, str]:
"""Read all system settings from the database."""
defaults = {
"thumbnail_renderer": "pillow",
"blender_engine": "cycles",
"blender_cycles_samples": "256",
"blender_eevee_samples": "64",
"threejs_render_size": "1024",
"thumbnail_format": "jpg",
"stl_quality": "low",
"blender_smooth_angle": "30",
"cycles_device": "auto",
}
try:
from app.config import settings as app_settings
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session
engine = create_engine(app_settings.database_url_sync)
with Session(engine) as session:
result = session.execute(text("SELECT key, value FROM system_settings"))
stored = {row[0]: row[1] for row in result.fetchall()}
return {k: stored.get(k, v) for k, v in defaults.items()}
except Exception as exc:
logger.warning(f"Could not read settings: {exc}; using defaults")
return defaults
def _generate_thumbnail(
step_path: Path,
cad_file_id: str,
upload_dir: str,
part_colors: dict[str, str] | None = None,
) -> tuple[Path | None, dict]:
"""Generate thumbnail using the configured renderer.
Returns (thumb_path, render_log_dict).
render_log_dict contains all settings + timing + blender output.
"""
import time
out_dir = Path(upload_dir) / "thumbnails"
out_dir.mkdir(parents=True, exist_ok=True)
settings = _get_all_settings()
renderer = settings["thumbnail_renderer"]
fmt = settings["thumbnail_format"] # "jpg" or "png"
ext = "jpg" if fmt == "jpg" else "png"
# Clean up any existing thumbnail for this cad_file_id (either extension)
for old_ext in ("png", "jpg"):
old = out_dir / f"{cad_file_id}.{old_ext}"
if old.exists():
old.unlink(missing_ok=True)
final_path = out_dir / f"{cad_file_id}.{ext}"
# Intermediate PNG used when a service renderer produces PNG before conversion
tmp_png = out_dir / f"{cad_file_id}_tmp.png"
# Build the base render_log with the settings snapshot
render_log: dict = {
"renderer": renderer,
"format": fmt,
"started_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
}
if renderer == "blender":
engine = settings["blender_engine"]
render_log.update({
"engine": engine,
"samples": int(settings[f"blender_{engine}_samples"]),
"stl_quality": settings["stl_quality"],
"smooth_angle": int(settings["blender_smooth_angle"]),
"cycles_device": settings["cycles_device"],
"width": 512,
"height": 512,
})
elif renderer == "threejs":
size = int(settings["threejs_render_size"])
render_log.update({"width": size, "height": size})
logger.info(f"Thumbnail renderer={renderer}, format={fmt}")
rendered_png: Path | None = None
service_data: dict = {}
if renderer == "blender":
engine = settings["blender_engine"]
samples = int(settings[f"blender_{engine}_samples"])
extra = {
"engine": engine,
"samples": samples,
"stl_quality": settings["stl_quality"],
"smooth_angle": int(settings["blender_smooth_angle"]),
"cycles_device": settings["cycles_device"],
}
rendered_png, service_data = _render_via_service(
"http://blender-renderer:8100/render", step_path, tmp_png, extra
)
if not rendered_png:
logger.warning("Blender renderer failed; falling back to Pillow placeholder")
elif renderer == "threejs":
size = int(settings["threejs_render_size"])
extra2: dict = {"width": size, "height": size}
if part_colors is not None:
extra2["part_colors"] = part_colors
rendered_png, service_data = _render_via_service(
"http://threejs-renderer:8101/render", step_path, tmp_png, extra2
)
if not rendered_png:
logger.warning("Three.js renderer failed; falling back to Pillow placeholder")
# Merge rich service response data into render_log
if service_data:
for key in ("total_duration_s", "stl_duration_s", "render_duration_s",
"stl_size_bytes", "output_size_bytes", "parts_count",
"engine_used", "log_lines"):
if key in service_data:
render_log[key] = service_data[key]
render_log["completed_at"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
if rendered_png:
result = _finalise_image(rendered_png, final_path, fmt)
tmp_png.unlink(missing_ok=True)
render_log["fallback"] = False
return result, render_log
# Pillow placeholder
render_log["fallback"] = True
return _generate_thumbnail_placeholder(step_path, final_path, fmt), render_log
def _finalise_image(src: Path, dst: Path, fmt: str) -> Path | None:
"""Convert src image to dst using the requested format (jpg or png)."""
if fmt == "jpg":
try:
from PIL import Image
img = Image.open(src).convert("RGB")
img.save(str(dst), "JPEG", quality=92, optimize=True)
return dst
except Exception as exc:
logger.warning(f"JPG conversion failed: {exc}; keeping PNG")
src.rename(dst.with_suffix(".png"))
return dst.with_suffix(".png")
else:
src.rename(dst)
return dst
def _render_via_service(
url: str, step_path: Path, out_path: Path, extra: dict | None = None,
job_id: str | None = None,
) -> tuple[Path | None, dict]:
"""Call an external renderer microservice to generate a thumbnail.
Returns (path_or_None, response_data_dict).
job_id, when provided, is forwarded to the renderer so the render process
can be cancelled via the renderer's /cancel/{job_id} endpoint.
"""
try:
import httpx
payload = {
"step_path": str(step_path),
"output_path": str(out_path),
"width": 512,
"height": 512,
**(extra or {}),
}
if job_id:
payload["job_id"] = job_id
resp = httpx.post(url, json=payload, timeout=300.0)
data = {}
try:
data = resp.json()
except Exception:
pass
if resp.status_code == 200 and out_path.exists():
return out_path, data
logger.warning(f"Renderer service {url} returned {resp.status_code}: {resp.text[:500]}")
except Exception as exc:
logger.warning(f"Renderer service {url} unreachable: {exc}")
return None, {}
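# Hedged wire format for the renderer microservices — the response keys shown
# are the ones the callers merge into render_log; services may send more:
#
#     POST /render
#     {"step_path": "...", "output_path": "...", "width": 512, "height": 512,
#      "engine": "cycles", "samples": 256, "job_id": "<order-line-id>"}
#
#     200 -> {"total_duration_s": 4.2, "render_duration_s": 3.1,
#             "parts_count": 3, "engine_used": "CYCLES", "log_lines": [...]}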
def _generate_thumbnail_placeholder(step_path: Path, out_path: Path, fmt: str = "png") -> Path | None:
"""Generate a simple placeholder thumbnail using Pillow."""
try:
from PIL import Image, ImageDraw, ImageFont
W, H = 512, 512
img = Image.new("RGB", (W, H), color=(245, 246, 248))
draw = ImageDraw.Draw(img)
# Subtle grid
for i in range(0, W, 32):
draw.line([(i, 0), (i, H)], fill=(228, 230, 235), width=1)
draw.line([(0, i), (W, i)], fill=(228, 230, 235), width=1)
# Isometric box (front / top / right faces)
cx, cy = 256, 260
s = 110 # half-size
# Front face
draw.polygon(
[(cx - s, cy), (cx, cy + s // 2), (cx + s, cy), (cx, cy - s // 2)],
fill=(195, 208, 220), outline=(90, 110, 130), width=2,
)
# Top face
draw.polygon(
[(cx - s, cy - s), (cx, cy - s - s // 2), (cx + s, cy - s), (cx, cy - s + s // 2)],
fill=(220, 230, 240), outline=(90, 110, 130), width=2,
)
# Right pillar
draw.polygon(
[(cx + s, cy - s), (cx + s, cy), (cx, cy + s // 2), (cx, cy - s + s // 2)],
fill=(160, 178, 196), outline=(90, 110, 130), width=2,
)
# Schaeffler green top bar
draw.rectangle([0, 0, W, 10], fill=(0, 137, 61))
# Model name strip at bottom
name = step_path.stem
draw.rectangle([0, H - 52, W, H], fill=(30, 50, 70))
try:
font = ImageFont.load_default(size=15)
draw.text((W // 2, H - 26), name, fill=(255, 255, 255), anchor="mm", font=font)
except Exception:
draw.text((10, H - 38), name, fill=(255, 255, 255))
if fmt == "jpg":
img = img.convert("RGB")
img.save(str(out_path), "JPEG", quality=92, optimize=True)
else:
img.save(str(out_path), "PNG")
return out_path
except Exception as exc:
logger.warning(f"Pillow placeholder thumbnail failed: {exc}")
return None
def regenerate_cad_thumbnail(cad_file_id: str, part_colors: dict[str, str]) -> bool:
"""
Regenerate a thumbnail with per-part colours for an existing CAD file.
Called from the `regenerate_thumbnail` Celery task.
Returns True on success.
"""
from app.config import settings as app_settings
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from app.models.cad_file import CadFile, ProcessingStatus
db_engine = create_engine(app_settings.database_url_sync)
with Session(db_engine) as session:
cad_file = session.get(CadFile, uuid.UUID(cad_file_id))
if not cad_file:
logger.error(f"CAD file not found: {cad_file_id}")
return False
step_path = Path(cad_file.stored_path)
if not step_path.exists():
logger.error(f"STEP file not found: {step_path}")
return False
# Mark as processing so the activity page shows it as active
cad_file.processing_status = ProcessingStatus.processing
session.commit()
try:
thumb_path, render_log = _generate_thumbnail(
step_path, cad_file_id, app_settings.upload_dir, part_colors=part_colors
)
if thumb_path:
cad_file.thumbnail_path = str(thumb_path)
cad_file.render_log = render_log
cad_file.processing_status = ProcessingStatus.completed
session.commit()
logger.info(f"Thumbnail regenerated for CAD file {cad_file_id}")
return True
except Exception as exc:
logger.error(f"Thumbnail regeneration failed for {cad_file_id}: {exc}")
cad_file.processing_status = ProcessingStatus.failed
cad_file.error_message = str(exc)[:2000]
session.commit()
return False
def render_to_file(
step_path: str,
output_path: str,
part_colors: dict[str, str] | None = None,
width: int | None = None,
height: int | None = None,
transparent_bg: bool = False,
engine: str | None = None,
samples: int | None = None,
template_path: str | None = None,
target_collection: str = "Product",
material_library_path: str | None = None,
material_map: dict | None = None,
part_names_ordered: list | None = None,
lighting_only: bool = False,
shadow_catcher: bool = False,
cycles_device: str | None = None,
rotation_x: float = 0.0,
rotation_y: float = 0.0,
rotation_z: float = 0.0,
job_id: str | None = None,
noise_threshold: str = "",
denoiser: str = "",
denoising_input_passes: str = "",
denoising_prefilter: str = "",
denoising_quality: str = "",
denoising_use_gpu: str = "",
) -> tuple[bool, dict]:
"""Render a STEP file to a specific output path using current system settings.
Unlike regenerate_cad_thumbnail, this does NOT modify the shared CadFile record.
Used by render_order_line_task for per-order-line render outputs.
Args:
step_path: Absolute path to the STEP file on disk.
output_path: Absolute path for the rendered output file.
part_colors: Optional {part_name: hex_color} map.
width: Optional render width (overrides system default).
height: Optional render height (overrides system default).
transparent_bg: If True and renderer=blender+PNG, render with transparent background.
engine: Optional per-OT engine override ("cycles" | "eevee"), or None for system default.
samples: Optional per-OT samples override, or None for system default.
template_path: Optional path to a .blend template file.
target_collection: Blender collection name to import geometry into.
material_library_path: Optional path to material library .blend file.
material_map: Optional {part_name: material_name} for material replacement.
Returns:
(success: bool, render_log: dict)
"""
import time
step = Path(step_path)
out = Path(output_path)
out.parent.mkdir(parents=True, exist_ok=True)
settings = _get_all_settings()
renderer = settings["thumbnail_renderer"]
fmt = out.suffix.lstrip(".") or settings.get("thumbnail_format", "jpg")
if fmt not in ("jpg", "png"):
fmt = "jpg"
# Temporary PNG for service renderers
tmp_png = out.parent / f"_tmp_{out.stem}.png"
render_log: dict = {
"renderer": renderer,
"format": fmt,
"started_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
}
rendered_png: Path | None = None
service_data: dict = {}
if renderer == "blender":
actual_engine = engine or settings["blender_engine"]
actual_samples = samples or int(settings[f"blender_{actual_engine}_samples"])
actual_cycles_device = cycles_device or settings["cycles_device"]
w = width or 512
h = height or 512
render_log.update({
"engine": actual_engine, "samples": actual_samples,
"stl_quality": settings["stl_quality"],
"smooth_angle": int(settings["blender_smooth_angle"]),
"cycles_device": actual_cycles_device,
"width": w, "height": h,
})
extra = {
"engine": actual_engine, "samples": actual_samples,
"stl_quality": settings["stl_quality"],
"smooth_angle": int(settings["blender_smooth_angle"]),
"cycles_device": actual_cycles_device,
"width": w, "height": h,
"transparent_bg": transparent_bg,
}
if part_colors is not None:
extra["part_colors"] = part_colors
if template_path:
extra["template_path"] = template_path
extra["target_collection"] = target_collection
extra["lighting_only"] = lighting_only
extra["shadow_catcher"] = shadow_catcher
render_log["template"] = template_path
render_log["target_collection"] = target_collection
if lighting_only:
render_log["lighting_only"] = True
if shadow_catcher:
render_log["shadow_catcher"] = True
if material_library_path and material_map:
extra["material_library_path"] = material_library_path
extra["material_map"] = material_map
render_log["material_replace"] = True
if part_names_ordered:
extra["part_names_ordered"] = part_names_ordered
if rotation_x or rotation_y or rotation_z:
extra["rotation_x"] = rotation_x
extra["rotation_y"] = rotation_y
extra["rotation_z"] = rotation_z
if noise_threshold:
extra["noise_threshold"] = noise_threshold
if denoiser:
extra["denoiser"] = denoiser
if denoising_input_passes:
extra["denoising_input_passes"] = denoising_input_passes
if denoising_prefilter:
extra["denoising_prefilter"] = denoising_prefilter
if denoising_quality:
extra["denoising_quality"] = denoising_quality
if denoising_use_gpu:
extra["denoising_use_gpu"] = denoising_use_gpu
rendered_png, service_data = _render_via_service(
"http://blender-renderer:8100/render", step, tmp_png, extra, job_id=job_id
)
elif renderer == "threejs":
default_size = int(settings["threejs_render_size"])
w = width or default_size
h = height or default_size
render_log.update({"width": w, "height": h})
extra2: dict = {"width": w, "height": h}
if part_colors is not None:
extra2["part_colors"] = part_colors
rendered_png, service_data = _render_via_service(
"http://threejs-renderer:8101/render", step, tmp_png, extra2
)
if service_data:
for key in ("total_duration_s", "stl_duration_s", "render_duration_s",
"stl_size_bytes", "output_size_bytes", "parts_count",
"engine_used", "log_lines"):
if key in service_data:
render_log[key] = service_data[key]
render_log["completed_at"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
if rendered_png:
result = _finalise_image(rendered_png, out, fmt)
tmp_png.unlink(missing_ok=True)
render_log["fallback"] = False
return result is not None, render_log
# Pillow placeholder fallback
render_log["fallback"] = True
result = _generate_thumbnail_placeholder(step, out, fmt)
return result is not None, render_log
def _convert_to_gltf(step_path: Path, cad_file_id: str, upload_dir: str) -> Path | None:
"""Convert STEP to glTF for browser 3D viewer."""
out_dir = Path(upload_dir) / "gltf"
out_dir.mkdir(parents=True, exist_ok=True)
out_path = out_dir / f"{cad_file_id}.gltf"
    try:
        import trimesh
        # trimesh needs a STEP-capable loader backend (e.g. the optional
        # cascadio package) to read .stp/.step; without one this raises and
        # the warning below is logged.
        mesh = trimesh.load(str(step_path))
        if not isinstance(mesh, trimesh.Scene):
            mesh = trimesh.Scene(mesh)
        mesh.export(str(out_path))
        return out_path if out_path.exists() else None
except ImportError:
logger.warning("trimesh not available; skipping glTF conversion")
except Exception as exc:
logger.warning(f"glTF conversion failed: {exc}")
return None
@@ -0,0 +1,102 @@
"""Render template resolution service.
Used from Celery tasks (sync context) to find the best matching .blend template
for a given category + output type combination.
Cascade priority (first active match wins):
1. Exact: category_key + output_type_id
2. Category only: category_key + output_type_id IS NULL
3. OT only: category_key IS NULL + output_type_id
4. Global: both NULL
5. No template → caller falls back to factory-settings behavior
"""
import logging
from sqlalchemy import create_engine, select, and_
from sqlalchemy.orm import Session
from app.models.render_template import RenderTemplate
from app.models.system_setting import SystemSetting
logger = logging.getLogger(__name__)
_engine = None
def _get_engine():
global _engine
if _engine is None:
from app.config import settings as app_settings
_engine = create_engine(app_settings.database_url_sync)
return _engine
def resolve_template(
category_key: str | None = None,
output_type_id: str | None = None,
) -> RenderTemplate | None:
"""Find the best matching active render template.
Uses sync SQLAlchemy — safe for Celery tasks.
"""
engine = _get_engine()
with Session(engine) as session:
        active = RenderTemplate.is_active.is_(True)
# 1. Exact match
if category_key and output_type_id:
row = session.execute(
select(RenderTemplate).where(and_(
active,
RenderTemplate.category_key == category_key,
RenderTemplate.output_type_id == output_type_id,
))
).scalar_one_or_none()
if row:
return row
# 2. Category only
if category_key:
row = session.execute(
select(RenderTemplate).where(and_(
active,
RenderTemplate.category_key == category_key,
RenderTemplate.output_type_id.is_(None),
))
).scalar_one_or_none()
if row:
return row
# 3. OT only
if output_type_id:
row = session.execute(
select(RenderTemplate).where(and_(
active,
RenderTemplate.category_key.is_(None),
RenderTemplate.output_type_id == output_type_id,
))
).scalar_one_or_none()
if row:
return row
# 4. Global fallback (both NULL)
row = session.execute(
select(RenderTemplate).where(and_(
active,
RenderTemplate.category_key.is_(None),
RenderTemplate.output_type_id.is_(None),
))
).scalar_one_or_none()
return row
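# Hedged walk-through of the cascade for category_key="axial",
# output_type_id=None (steps match the numbered queries above):
#
#     resolve_template(category_key="axial")
#     # 1. skipped (no output_type_id)
#     # 2. active template with category_key="axial", output_type_id NULL
#     # 3. skipped (no output_type_id)
#     # 4. else the global template (both NULL), else None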
def get_material_library_path() -> str | None:
"""Read material_library_path from system_settings. Returns None if empty."""
engine = _get_engine()
with Session(engine) as session:
row = session.execute(
select(SystemSetting).where(SystemSetting.key == "material_library_path")
).scalar_one_or_none()
if row and row.value and row.value.strip():
return row.value.strip()
return None