refactor(P11+P12): codebase hygiene — CLAUDE.md rewrite, type safety, dead code removal

- Rewrite CLAUDE.md to match current 8-service architecture (was 11, 5 deleted)
- Remove all as-any casts in OrderDetail.tsx (9 casts → 0)
- Add cad_parsed_objects/cad_part_materials to OrderItem interface
- Rename require_admin → require_global_admin across 6 router files (22 calls)
- Remove EXPORT_GLB_PRODUCTION enum + generate_gltf_production_task (dead code)
- Remove worker-thumbnail from ALLOWED_SERVICES, replace Flamenco link
- Delete obsolete PLAN.md (1455 lines) and PLAN_REFACTOR.md (1174 lines)
- Fix digit-only USD prim names with p_ prefix

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-13 07:22:04 +01:00
parent 3dcfa7c0bd
commit 577dd1ca7e
21 changed files with 303 additions and 3229 deletions
-28
View File
@@ -303,34 +303,6 @@ async def generate_gltf_geometry(
return {"status": "queued", "task_id": task.id, "cad_file_id": str(id)}
@router.post("/{id}/generate-gltf-production", status_code=status.HTTP_202_ACCEPTED)
async def generate_gltf_production(
    id: uuid.UUID,
    user: User = Depends(get_current_user),
    db: AsyncSession = Depends(get_db),
):
    """Queue production GLB export (Blender + PBR materials) from a geometry GLB.

    Requires a gltf_geometry MediaAsset to already exist (run generate-gltf-geometry
    first). Stores result as a MediaAsset with asset_type='gltf_production'.

    Raises:
        HTTPException 403: caller is not privileged.
        HTTPException 404: no STEP file has been uploaded for this CAD file.

    Returns:
        dict with queue status, the Celery task id, and the CAD file id.
    """
    if not is_privileged(user):
        raise HTTPException(status_code=403, detail="Insufficient permissions")
    cad = await _get_cad_file(id, db)
    if not cad.stored_path:
        raise HTTPException(status_code=404, detail="STEP file not uploaded for this CAD file")
    # Bug fix: the two adjacent literals were implicitly concatenated without a
    # separator, logging "...cad %sdeprecated: ..." — add an explicit separator.
    logger.warning(
        "generate_gltf_production called for cad %s "
        "- deprecated: renders now consume usd_master directly",
        id,
    )
    # Local import keeps Celery task modules out of router import time.
    from app.tasks.step_tasks import generate_gltf_production_task

    task = generate_gltf_production_task.delay(str(id))
    return {"status": "queued", "task_id": task.id, "cad_file_id": str(id)}
@router.post(
"/{id}/regenerate-thumbnail",
status_code=status.HTTP_202_ACCEPTED,
@@ -10,7 +10,7 @@ from app.domains.rendering.schemas import (
GlobalRenderPositionPatch,
GlobalRenderPositionOut,
)
from app.utils.auth import require_admin, get_current_user
from app.utils.auth import require_global_admin, get_current_user
router = APIRouter(prefix="/render-positions/global", tags=["global-render-positions"])
@@ -31,7 +31,7 @@ async def list_global_render_positions(
async def create_global_render_position(
body: GlobalRenderPositionCreate,
db: AsyncSession = Depends(get_db),
_user=Depends(require_admin),
_user=Depends(require_global_admin),
):
"""Create a new global render position (admin only)."""
pos = GlobalRenderPosition(**body.model_dump())
@@ -46,7 +46,7 @@ async def update_global_render_position(
pos_id: uuid.UUID,
body: GlobalRenderPositionPatch,
db: AsyncSession = Depends(get_db),
_user=Depends(require_admin),
_user=Depends(require_global_admin),
):
"""Update a global render position (admin only)."""
result = await db.execute(select(GlobalRenderPosition).where(GlobalRenderPosition.id == pos_id))
@@ -64,7 +64,7 @@ async def update_global_render_position(
async def delete_global_render_position(
pos_id: uuid.UUID,
db: AsyncSession = Depends(get_db),
_user=Depends(require_admin),
_user=Depends(require_global_admin),
):
"""Delete a global render position (admin only)."""
result = await db.execute(select(GlobalRenderPosition).where(GlobalRenderPosition.id == pos_id))
+2 -2
View File
@@ -6,7 +6,7 @@ from sqlalchemy import select
from pydantic import BaseModel
from app.database import get_db
from app.models.template import Template
from app.utils.auth import get_current_user, require_admin
from app.utils.auth import get_current_user, require_global_admin
from app.models.user import User
router = APIRouter(prefix="/templates", tags=["templates"])
@@ -63,7 +63,7 @@ async def get_template(
async def update_template(
template_id: uuid.UUID,
body: TemplateUpdate,
user: User = Depends(require_admin),
user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
result = await db.execute(select(Template).where(Template.id == template_id))
+8 -8
View File
@@ -17,7 +17,7 @@ from app.models.product import Product
from app.models.user import User
from app.models.worker_config import WorkerConfig
from app.models.system_setting import SystemSetting
from app.utils.auth import get_current_user, require_admin_or_pm, require_admin
from app.utils.auth import get_current_user, require_admin_or_pm, require_global_admin
router = APIRouter(prefix="/worker", tags=["worker"])
@@ -364,7 +364,7 @@ async def cancel_task(task_id: str, user: User = Depends(require_admin_or_pm)):
# ---------------------------------------------------------------------------
class ScaleRequest(BaseModel):
service: str # "render-worker" | "worker" | "worker-thumbnail"
service: str # "render-worker" | "worker"
count: int # 0–20
@@ -411,7 +411,7 @@ async def scale_workers(
body: ScaleRequest,
user: User = Depends(require_admin_or_pm),
):
"""Scale a Compose service (render-worker, worker, worker-thumbnail) up or down.
"""Scale a Compose service (render-worker, worker) up or down.
Requires the docker socket and compose file to be accessible inside the container
(see docker-compose.yml COMPOSE_PROJECT_DIR env var).
@@ -421,7 +421,7 @@ async def scale_workers(
import subprocess
from fastapi import HTTPException
ALLOWED_SERVICES = {"render-worker", "worker", "worker-thumbnail"}
ALLOWED_SERVICES = {"render-worker", "worker"}
if body.service not in ALLOWED_SERVICES:
raise HTTPException(400, detail=f"service must be one of {ALLOWED_SERVICES}")
if not (0 <= body.count <= 20):
@@ -462,7 +462,7 @@ async def scale_workers(
# ---------------------------------------------------------------------------
@router.post("/probe/gpu", status_code=http_status.HTTP_202_ACCEPTED)
async def trigger_gpu_probe(current_user: User = Depends(require_admin)):
async def trigger_gpu_probe(current_user: User = Depends(require_global_admin)):
"""Queue a GPU probe task on the render-worker."""
from app.tasks.gpu_tasks import probe_gpu
result = probe_gpu.delay()
@@ -471,7 +471,7 @@ async def trigger_gpu_probe(current_user: User = Depends(require_admin)):
@router.get("/probe/gpu/result")
async def get_gpu_probe_result(
current_user: User = Depends(require_admin),
current_user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
"""Return the last GPU probe result from system_settings."""
@@ -622,7 +622,7 @@ class WorkerConfigUpdate(BaseModel):
@router.get("/configs", response_model=list[WorkerConfigOut])
async def list_worker_configs(
user: User = Depends(require_admin),
user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
"""List all worker concurrency configurations (admin only)."""
@@ -644,7 +644,7 @@ async def list_worker_configs(
async def update_worker_config(
queue_name: str,
body: WorkerConfigUpdate,
user: User = Depends(require_admin),
user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
"""Update concurrency settings for a specific queue (admin only)."""
-1
View File
@@ -29,7 +29,6 @@ class StepName(StrEnum):
# ── GLB / asset export ────────────────────────────────────────────
EXPORT_GLB_GEOMETRY = "export_glb_geometry"
EXPORT_GLB_PRODUCTION = "export_glb_production"
EXPORT_BLEND = "export_blend"
# ── STL cache ────────────────────────────────────────────────────
@@ -13,7 +13,7 @@ from app.domains.admin.dashboard_service import (
upsert_user_dashboard_config,
upsert_tenant_default,
)
from app.utils.auth import get_current_user, require_admin
from app.utils.auth import get_current_user, require_global_admin
from app.models.user import User
logger = logging.getLogger(__name__)
@@ -107,7 +107,7 @@ async def update_config(
@router.get("/tenant-default", response_model=DashboardConfigResponse)
async def get_tenant_default(
current_user: User = Depends(require_admin),
current_user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
) -> DashboardConfigResponse:
"""Load the tenant-default dashboard widget config (admin only)."""
@@ -132,7 +132,7 @@ async def get_tenant_default(
@router.put("/tenant-default", response_model=DashboardConfigResponse)
async def update_tenant_default(
payload: DashboardConfigPayload,
current_user: User = Depends(require_admin),
current_user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
) -> DashboardConfigResponse:
"""Set the tenant-default widget config (admin only)."""
@@ -1,8 +1,8 @@
"""GLB/GLTF export tasks.
"""GLB/GLTF and USD export tasks.
Covers:
- generate_gltf_geometry_task — OCC STEP → geometry GLB (fast preview)
- generate_gltf_production_task — OCC STEP → production GLB (Blender PBR materials)
- generate_usd_master_task — OCC STEP → USD canonical scene (pxr authoring)
"""
import logging
@@ -251,283 +251,6 @@ def generate_gltf_geometry_task(self, cad_file_id: str):
_r.delete(_lock_key)
@celery_app.task(
    bind=True,
    name="app.tasks.step_tasks.generate_gltf_production_task",
    queue="asset_pipeline",
    max_retries=2,
)
def generate_gltf_production_task(self, cad_file_id: str, product_id: str | None = None) -> dict:
    """Generate a production GLB (Blender + PBR materials) from a geometry GLB via export_gltf.py.

    1. Ensures a gltf_geometry MediaAsset exists (runs OCC export inline if not).
    2. Resolves SCHAEFFLER material map for the CadFile's product.
    3. Runs Blender headless with export_gltf.py → production GLB.
    4. Stores result as gltf_production MediaAsset.

    Args:
        cad_file_id: UUID (string form) of the CadFile whose STEP file is exported.
        product_id: optional UUID (string form) narrowing which Product's
            cad_part_materials are used; when omitted, the first Product owning
            this CadFile is used.

    Returns:
        dict with "glb_path" (output file path) and "asset_id" (MediaAsset UUID).

    Raises:
        RuntimeError: CadFile/STEP file/script missing, or Blender unavailable.
        celery.exceptions.Retry: on OCC or Blender subprocess failure (max_retries=2).
    """
    # Deferred, aliased imports keep worker import time light and avoid clashing
    # with module-level names.
    import json as _json
    import os as _os
    import subprocess as _subprocess
    import sys as _sys
    import uuid as _uuid
    from pathlib import Path as _Path
    # NOTE(review): _del and _upd appear unused in this task body — confirm before removing.
    from sqlalchemy import create_engine as _ce, delete as _del, select as _sel, update as _upd
    from sqlalchemy.orm import Session as _Session
    from app.config import settings as app_settings
    from app.domains.media.models import MediaAsset, MediaAssetType
    from app.services.render_blender import find_blender, is_blender_available

    pl = PipelineLogger(task_id=self.request.id)
    pl.step_start("export_glb_production", {"cad_file_id": cad_file_id})
    log_task_event(self.request.id, f"generate_gltf_production_task started for cad {cad_file_id}", "info")

    # Resolve and log tenant context at task start (required for RLS)
    from app.core.tenant_context import resolve_tenant_id_for_cad, set_tenant_context_sync

    _tenant_id = resolve_tenant_id_for_cad(cad_file_id)
    # Celery tasks run synchronously — strip the asyncpg driver suffix for a sync engine.
    _sync_url = app_settings.database_url.replace("+asyncpg", "")
    _eng = _ce(_sync_url)

    # --- 1. Resolve STEP file path and system settings ---
    from app.models.cad_file import CadFile as _CF
    from app.models.system_setting import SystemSetting

    with _Session(_eng) as _sess:
        set_tenant_context_sync(_sess, _tenant_id)
        _cad = _sess.execute(
            _sel(_CF).where(_CF.id == _uuid.UUID(cad_file_id))
        ).scalar_one_or_none()
        step_path_str = _cad.stored_path if _cad else None
        cad_mesh_attributes: dict = (_cad.mesh_attributes or {}) if _cad else {}
        settings_rows = _sess.execute(_sel(SystemSetting)).scalars().all()
        sys_settings = {s.key: s.value for s in settings_rows}
    if not step_path_str:
        raise RuntimeError(f"CadFile {cad_file_id} not found in DB")
    step_path = _Path(step_path_str)
    if not step_path.exists():
        raise RuntimeError(f"STEP file not found: {step_path}")

    # System-setting knobs (stored as strings) with hard-coded fallbacks.
    smooth_angle = float(sys_settings.get("blender_smooth_angle", "30"))
    prod_linear = float(sys_settings.get("render_linear_deflection", "0.03"))
    prod_angular = float(sys_settings.get("render_angular_deflection", "0.05"))
    tessellation_engine = sys_settings.get("tessellation_engine", "occ")
    scripts_dir = _Path(_os.environ.get("RENDER_SCRIPTS_DIR", "/render-scripts"))
    occ_script = scripts_dir / "export_step_to_gltf.py"
    if not occ_script.exists():
        raise RuntimeError(f"export_step_to_gltf.py not found at {occ_script}")
    prod_geom_glb = step_path.parent / f"{step_path.stem}_production_geom.glb"
    python_bin = _sys.executable
    sharp_threshold = float(sys_settings.get("sharp_edge_threshold", "20.0"))

    # --- Geometry GLB selection strategy ---
    # When GMSH is enabled, the geometry GLB (_geometry.glb) is already a conforming
    # mesh with correct seam topology — GMSH quality comes from the algorithm, not density.
    # Re-tessellating at finer production settings only wastes time and RAM on large assemblies.
    # → For GMSH: reuse the existing _geometry.glb if it is newer than the STEP file.
    # → For OCC: generate a separate _production_geom.glb at finer settings (density matters).
    step_mtime = step_path.stat().st_mtime if step_path.exists() else 0
    preview_glb = step_path.parent / f"{step_path.stem}_geometry.glb"
    # "Valid" = exists, non-empty, and at least as new as the STEP file (mtime check).
    preview_glb_valid = (
        preview_glb.exists()
        and preview_glb.stat().st_size > 0
        and preview_glb.stat().st_mtime >= step_mtime
    )
    prod_geom_cache_valid = (
        prod_geom_glb.exists()
        and prod_geom_glb.stat().st_size > 0
        and prod_geom_glb.stat().st_mtime >= step_mtime
    )
    if tessellation_engine == "gmsh" and preview_glb_valid:
        # Fast path: reuse geometry GLB — GMSH topology is already correct at preview quality
        geom_glb_path = preview_glb
        log_task_event(
            self.request.id,
            f"GMSH: reusing geometry GLB as Blender input ({preview_glb.stat().st_size // 1024}KB, "
            f"no re-tessellation needed)",
            "info",
        )
    elif prod_geom_cache_valid:
        # Cache hit: production_geom.glb exists and is up-to-date
        geom_glb_path = prod_geom_glb
        log_task_event(
            self.request.id,
            f"Cache hit: reusing production geometry GLB ({prod_geom_glb.stat().st_size // 1024}KB)",
            "info",
        )
    else:
        # No usable cache: run tessellation from STEP.
        # When GMSH is selected, force preview-quality settings (0.1mm / 0.1rad) even here.
        # Fine production settings (e.g. 0.03mm) combined with GMSH OOM-kill on large assemblies
        # because CharacteristicLengthMax becomes too small. GMSH quality is algorithmic
        # (conforming seams) not density-based — a denser GMSH mesh adds no UV-unwrap benefit.
        if tessellation_engine == "gmsh":
            eff_linear = float(sys_settings.get("scene_linear_deflection", "0.1"))
            eff_angular = float(sys_settings.get("scene_angular_deflection", "0.1"))
        else:
            eff_linear = prod_linear
            eff_angular = prod_angular
        occ_cmd = [
            python_bin, str(occ_script),
            "--step_path", str(step_path),
            "--output_path", str(prod_geom_glb),
            "--linear_deflection", str(eff_linear),
            "--angular_deflection", str(eff_angular),
            "--sharp_threshold", str(sharp_threshold),
            "--tessellation_engine", tessellation_engine,
        ]
        log_task_event(
            self.request.id,
            f"Tessellating STEP for production ({tessellation_engine}, "
            f"linear={eff_linear}mm, angular={eff_angular}rad)",
            "info",
        )
        try:
            occ_result = _subprocess.run(occ_cmd, capture_output=True, text=True, timeout=600)
            for line in occ_result.stdout.splitlines():
                logger.info("[occ-prod] %s", line)
            # Treat a missing or empty output file as failure even on exit code 0.
            if occ_result.returncode != 0 or not prod_geom_glb.exists() or prod_geom_glb.stat().st_size == 0:
                raise RuntimeError(
                    f"OCC export failed (exit {occ_result.returncode}): {occ_result.stderr[-500:]}"
                )
        except Exception as exc:
            log_task_event(self.request.id, f"OCC re-export failed: {exc}", "error")
            pl.step_error("export_glb_production", f"OCC re-export failed: {exc}", exc)
            # Requeue via Celery retry (max_retries=2) with a 30s back-off.
            raise self.retry(exc=exc, countdown=30)
        geom_glb_path = prod_geom_glb

    # --- 2. Resolve material map from Product.cad_part_materials (SCHAEFFLER library names) ---
    # cad_part_materials lives on Product (list[dict]), NOT on CadFile.
    # We look up the Product that owns this CadFile (prefer product_id arg if given).
    from app.services.material_service import resolve_material_map
    from app.domains.products.models import Product as _Product

    with _Session(_eng) as _sess:
        set_tenant_context_sync(_sess, _tenant_id)
        _prod_query = _sel(_Product).where(_Product.cad_file_id == _uuid.UUID(cad_file_id))
        if product_id:
            _prod_query = _prod_query.where(_Product.id == _uuid.UUID(product_id))
        _product = _sess.execute(_prod_query).scalars().first()
        raw_materials: list[dict] = _product.cad_part_materials if _product else []
    # Convert list[{"part_name": X, "material": Y}] → dict[str, str] for resolve_material_map
    raw_mat_map: dict[str, str] = {
        m["part_name"]: m["material"]
        for m in raw_materials
        if m.get("part_name") and m.get("material")
    }
    mat_map = resolve_material_map(raw_mat_map)
    logger.info(
        "generate_gltf_production_task: resolved %d material(s) for cad %s (product: %s)",
        len(mat_map), cad_file_id, _product.id if _product else "none",
    )

    # --- 3. Run Blender: apply materials + smooth shading + export production GLB ---
    # Use get_material_library_path() which checks active AssetLibrary first,
    # then falls back to the legacy material_library_path system setting.
    from app.services.template_service import get_material_library_path

    asset_library_blend = get_material_library_path() or ""
    _eng.dispose()
    output_path = step_path.parent / f"{step_path.stem}_production.glb"
    export_script = scripts_dir / "export_gltf.py"
    if not is_blender_available():
        raise RuntimeError("Blender is not available — cannot generate production GLB")
    if not export_script.exists():
        raise RuntimeError(f"export_gltf.py not found at {export_script}")
    blender_bin = find_blender()
    cmd = [
        blender_bin, "--background",
        "--python", str(export_script),
        "--",
        "--glb_path", str(geom_glb_path),
        "--output_path", str(output_path),
        "--material_map", _json.dumps(mat_map),
        "--smooth_angle", str(smooth_angle),
        "--mesh_attributes", _json.dumps(cad_mesh_attributes),
    ]
    if asset_library_blend:
        cmd += ["--asset_library_blend", asset_library_blend]
    log_task_event(
        self.request.id,
        f"Running Blender export_gltf.py — {len(mat_map)} material(s), smooth={smooth_angle}°",
        "info",
    )
    try:
        result = _subprocess.run(cmd, capture_output=True, text=True, timeout=300)
        for line in result.stdout.splitlines():
            logger.info("[export-gltf] %s", line)
        if result.returncode != 0:
            raise RuntimeError(
                f"export_gltf.py exited {result.returncode}:\n{result.stderr[-500:]}"
            )
    except Exception as exc:
        log_task_event(self.request.id, f"Blender production GLB failed: {exc}", "error")
        pl.step_error("export_glb_production", f"Blender production GLB failed: {exc}", exc)
        logger.error("generate_gltf_production_task Blender failed for cad %s: %s", cad_file_id, exc)
        raise self.retry(exc=exc, countdown=30)

    # Note: _production_geom.glb is intentionally kept on disk as a tessellation cache.
    # It is reused on subsequent runs when the STEP file hasn't changed.
    log_task_event(self.request.id, f"Production GLB exported: {output_path.name}", "done")

    # --- 4. Store MediaAsset (upsert: update existing record to keep stable ID/URL) ---
    # Updating in-place (not DELETE+INSERT) preserves the existing asset UUID so that
    # any frontend page holding a stale download_url continues to resolve correctly.
    _eng2 = _ce(_sync_url)
    with _Session(_eng2) as _sess:
        set_tenant_context_sync(_sess, _tenant_id)
        # storage_key is relative to upload_dir when the output lives under it.
        _key = str(output_path)
        _prefix = str(app_settings.upload_dir).rstrip("/") + "/"
        if _key.startswith(_prefix):
            _key = _key[len(_prefix):]
        _file_size = output_path.stat().st_size if output_path.exists() else None
        existing = _sess.execute(
            _sel(MediaAsset).where(
                MediaAsset.cad_file_id == _uuid.UUID(cad_file_id),
                MediaAsset.asset_type == MediaAssetType.gltf_production,
            )
        ).scalars().first()
        if existing:
            existing.storage_key = _key
            existing.mime_type = "model/gltf-binary"
            existing.file_size_bytes = _file_size
            if product_id:
                existing.product_id = _uuid.UUID(product_id)
            _sess.commit()
            asset_id = str(existing.id)
        else:
            asset = MediaAsset(
                cad_file_id=_uuid.UUID(cad_file_id),
                product_id=_uuid.UUID(product_id) if product_id else None,
                asset_type=MediaAssetType.gltf_production,
                storage_key=_key,
                mime_type="model/gltf-binary",
                file_size_bytes=_file_size,
            )
            _sess.add(asset)
            _sess.commit()
            asset_id = str(asset.id)
    _eng2.dispose()
    pl.step_done("export_glb_production", result={"glb_path": str(output_path), "asset_id": asset_id})
    logger.info("generate_gltf_production_task: MediaAsset %s created for cad %s", asset_id, cad_file_id)
    return {"glb_path": str(output_path), "asset_id": asset_id}
@celery_app.task(
bind=True,
name="app.tasks.step_tasks.generate_usd_master_task",
@@ -56,8 +56,6 @@ STEP_TASK_MAP: dict[StepName, str] = {
# StepName.ORDER_LINE_SETUP — computed inline inside render_order_line_task
# StepName.RESOLVE_TEMPLATE — computed inline inside render_order_line_task
# StepName.OUTPUT_SAVE — handled via publish_asset after render tasks
# StepName.EXPORT_GLB_PRODUCTION — app.tasks.step_tasks.generate_gltf_production_task
StepName.EXPORT_GLB_PRODUCTION: "app.tasks.step_tasks.generate_gltf_production_task",
# StepName.NOTIFY — emitted inline via notification_service
}
@@ -20,7 +20,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
from app.database import get_db
from app.domains.auth.models import User
from app.utils.auth import get_current_user, require_admin, require_admin_or_pm, require_pm_or_above
from app.utils.auth import get_current_user, require_global_admin, require_admin_or_pm, require_pm_or_above
from app.domains.rendering.models import WorkflowDefinition, WorkflowRun
from app.domains.rendering.schemas import (
WorkflowDefinitionCreate,
@@ -52,7 +52,6 @@ _STEP_CATEGORIES: dict[StepName, StepCategory] = {
StepName.BLENDER_TURNTABLE: "rendering",
StepName.OUTPUT_SAVE: "output",
StepName.EXPORT_GLB_GEOMETRY: "output",
StepName.EXPORT_GLB_PRODUCTION: "output",
StepName.EXPORT_BLEND: "output",
StepName.STL_CACHE_GENERATE: "processing",
StepName.NOTIFY: "output",
@@ -74,7 +73,6 @@ _STEP_DESCRIPTIONS: dict[StepName, str] = {
StepName.BLENDER_TURNTABLE: "Render all turntable animation frames via Blender HTTP micro-service",
StepName.OUTPUT_SAVE: "Upload the rendered output file to storage and create a MediaAsset record",
StepName.EXPORT_GLB_GEOMETRY: "Export a geometry-only GLB for the 3-D viewer (no materials)",
StepName.EXPORT_GLB_PRODUCTION: "Export a production GLB with full materials from the .blend template",
StepName.EXPORT_BLEND: "Save the production .blend file as a downloadable MediaAsset",
StepName.STL_CACHE_GENERATE: "Convert STEP → STL (low + high quality) and cache next to the STEP file",
StepName.NOTIFY: "Emit a user notification via the audit-log notification channel",
@@ -140,7 +138,7 @@ async def get_workflow(
@router.post("", response_model=WorkflowDefinitionOut, status_code=201)
async def create_workflow(
body: WorkflowDefinitionCreate,
_user: User = Depends(require_admin),
_user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
if body.config:
@@ -164,7 +162,7 @@ async def create_workflow(
async def update_workflow(
workflow_id: uuid.UUID,
body: WorkflowDefinitionUpdate,
_user: User = Depends(require_admin),
_user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
result = await db.execute(
@@ -193,7 +191,7 @@ async def update_workflow(
@router.delete("/{workflow_id}", status_code=204)
async def delete_workflow(
workflow_id: uuid.UUID,
_user: User = Depends(require_admin),
_user: User = Depends(require_global_admin),
db: AsyncSession = Depends(get_db),
):
result = await db.execute(
+9 -9
View File
@@ -4,7 +4,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import update
from app.database import get_db
from app.utils.auth import require_admin
from app.utils.auth import require_global_admin
from app.domains.tenants.schemas import (
TenantCreate, TenantUpdate, TenantOut,
TenantAIConfigUpdate, TenantAIConfigOut,
@@ -18,7 +18,7 @@ router = APIRouter(prefix="/tenants", tags=["tenants"])
@router.get("/", response_model=list[TenantOut])
async def list_tenants(
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
rows = await service.list_tenants(db)
result = []
@@ -34,7 +34,7 @@ async def list_tenants(
async def get_tenant(
tenant_id: uuid.UUID,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
tenant = await service.get_tenant(db, tenant_id)
if not tenant:
@@ -46,7 +46,7 @@ async def get_tenant(
async def create_tenant(
body: TenantCreate,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
tenant = await service.create_tenant(db, name=body.name, slug=body.slug, is_active=body.is_active)
return TenantOut.model_validate(tenant)
@@ -57,7 +57,7 @@ async def update_tenant(
tenant_id: uuid.UUID,
body: TenantUpdate,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
tenant = await service.update_tenant(
db, tenant_id,
@@ -74,7 +74,7 @@ async def update_tenant(
async def delete_tenant(
tenant_id: uuid.UUID,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
ok = await service.delete_tenant(db, tenant_id)
if not ok:
@@ -107,7 +107,7 @@ def _tenant_ai_config_out(tenant: Tenant) -> TenantAIConfigOut:
async def get_tenant_ai_config(
tenant_id: uuid.UUID,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
"""Return AI config for a tenant (without the raw api_key)."""
tenant = await service.get_tenant(db, tenant_id)
@@ -121,7 +121,7 @@ async def update_tenant_ai_config(
tenant_id: uuid.UUID,
body: TenantAIConfigUpdate,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
"""Merge AI configuration into tenant_config JSONB.
If ai_api_key is None in the request body, the existing key is preserved.
@@ -160,7 +160,7 @@ async def update_tenant_ai_config(
async def test_tenant_ai_config(
tenant_id: uuid.UUID,
db: AsyncSession = Depends(get_db),
_: object = Depends(require_admin),
_: object = Depends(require_global_admin),
):
"""Send a minimal ping to Azure OpenAI using the tenant's stored credentials.
Returns {"ok": true} or {"ok": false, "error": "human readable message"}.
-1
View File
@@ -19,6 +19,5 @@ from app.domains.pipeline.tasks.render_order_line import ( # noqa: F401
)
from app.domains.pipeline.tasks.export_glb import ( # noqa: F401
generate_gltf_geometry_task,
generate_gltf_production_task,
generate_usd_master_task,
)