refactor(phase3): remove dead services + STL remnant cleanup

Phase 3.2 — Delete orphaned service directories:
  - blender-renderer/ (HTTP microservice replaced by render-worker subprocess)
  - threejs-renderer/ (replaced by render-worker)
  - flamenco/ (removed in migration 032, directory still existed on disk)

Phase 3.2 — Remove STL workflow remnants:
  - analytics.py: remove avg_stl_s from RenderTimeBreakdown schema (always None)
  - kpi_service.py: remove avg_stl_s from return dicts + update docstring
  - frontend/src/api/analytics.ts: remove avg_stl_s from RenderTimeBreakdown interface
  - admin.py: remove dead blender-renderer HTTP configure call (service gone)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-08 19:30:52 +01:00
parent 966c3aed57
commit 121fbdafd3
22 changed files with 3 additions and 4674 deletions
+2 -11
View File
@@ -366,17 +366,8 @@ async def update_settings(
await _save_setting(db, k, v)
await db.commit()
# Propagate concurrency limit to blender-renderer immediately (no restart needed)
if body.blender_max_concurrent_renders is not None:
try:
import httpx
async with httpx.AsyncClient(timeout=3.0) as client:
await client.post(
"http://blender-renderer:8100/configure",
params={"max_concurrent": body.blender_max_concurrent_renders},
)
except Exception:
pass # best-effort; setting is persisted in DB regardless
# Note: blender-renderer HTTP service removed; concurrency is now controlled
# via render-worker Docker concurrency setting (thumbnail_rendering queue).
return _settings_to_out(await _load_settings(db))
-1
View File
@@ -41,7 +41,6 @@ class ItemStatusBreakdown(BaseModel):
class RenderTimeBreakdown(BaseModel):
avg_stl_s: Optional[float]
avg_render_s: Optional[float]
avg_total_s: Optional[float]
sample_count: int
+1 -4
View File
@@ -130,8 +130,6 @@ async def render_time_breakdown(
"""Average render duration from completed order lines, scoped to date range.
Uses render_started_at / render_completed_at on order_lines (added in migration 015).
avg_stl_s is not tracked at order-line level, so only avg_render_s and sample_count
are meaningful here; avg_stl_s is left None for UI compatibility.
"""
sql = text(
"""
@@ -149,9 +147,8 @@ async def render_time_breakdown(
result = await db.execute(sql, {"date_from": _parse_date(date_from), "date_to": _parse_date(date_to)})
row = result.fetchone()
if row is None or row[1] == 0:
return {"avg_stl_s": None, "avg_render_s": None, "avg_total_s": None, "sample_count": 0}
return {"avg_render_s": None, "avg_total_s": None, "sample_count": 0}
return {
"avg_stl_s": None,
"avg_render_s": float(row[0]) if row[0] is not None else None,
"avg_total_s": float(row[0]) if row[0] is not None else None,
"sample_count": int(row[1]),
-47
View File
@@ -1,47 +0,0 @@
FROM ubuntu:22.04
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
# OSMesa for headless cadquery/VTK (no display needed)
ENV PYOPENGL_PLATFORM=osmesa
ENV VTK_DEFAULT_EGL=0
# Runtime libraries for cadquery/VTK + Blender 5.x
RUN apt-get update && apt-get install -y \
python3-pip \
python3-dev \
libxrender1 \
libxi6 \
libxkbcommon-x11-0 \
libsm6 \
libglib2.0-0 \
libgl1-mesa-glx \
libosmesa6 \
libgomp1 \
libxfixes3 \
libxrandr2 \
libxcursor1 \
libxinerama1 \
libwayland-client0 \
libwayland-cursor0 \
libwayland-egl1 \
libvulkan1 \
mesa-vulkan-drivers \
libegl1 \
libegl-mesa0 \
libgbm1 \
&& rm -rf /var/lib/apt/lists/*
# Blender 5.0.1 is mounted from the host at /opt/blender (see docker-compose.yml)
ENV BLENDER_BIN=/opt/blender/blender
WORKDIR /app
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
COPY . .
EXPOSE 8100
CMD ["python3", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8100"]
-581
View File
@@ -1,581 +0,0 @@
"""
Blender renderer service — FastAPI microservice.
Accepts a STEP file path (on shared uploads volume) and renders a thumbnail PNG
using the pipeline: STEP → STL (via cadquery) → PNG (via Blender headless).
"""
import asyncio
import json as _json_mod
import logging
import os
import signal
import shutil
import subprocess
import tempfile
import threading
import time
from pathlib import Path
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
logger = logging.getLogger(__name__)
app = FastAPI(title="Blender Renderer", version="1.0.0")
# Active render subprocesses keyed by job_id for cancellation support
_active_procs: dict[str, subprocess.Popen] = {}
_procs_lock = threading.Lock()
# Limit concurrent Blender renders to avoid memory exhaustion from parallel threads
# (each thread loads cadquery/OCC, ~300-500 MB each).
# Resizable at runtime via POST /configure without restart.
_max_concurrent: int = 3
_render_semaphore = threading.Semaphore(_max_concurrent)
_config_lock = threading.Lock()
def _set_max_concurrent(n: int) -> None:
    """Replace the global render semaphore with a new one sized to ``n``.

    In-flight renders hold a reference to the old semaphore object and will
    release it normally; only renders started after this call observe the
    new limit. ``_max_concurrent`` is kept in sync for /status reporting.
    """
    global _render_semaphore, _max_concurrent
    # _config_lock serialises concurrent /configure calls so the counter and
    # the semaphore are swapped atomically with respect to each other.
    with _config_lock:
        _max_concurrent = n
        _render_semaphore = threading.Semaphore(n)
class RenderRequest(BaseModel):
    """Payload for POST /render — all knobs for one STEP→PNG render job."""
    step_path: str
    output_path: str
    width: int = 512
    height: int = 512
    engine: str = "cycles"  # "cycles" or "eevee"
    samples: int = 256
    stl_quality: str = "low"  # "low" or "high"
    smooth_angle: int = 30  # degrees; 0 = shade_flat, >0 = shade_smooth_by_angle
    cycles_device: str = "auto"  # "auto", "gpu", or "cpu"
    transparent_bg: bool = False  # render with transparent background (PNG only)
    part_colors: dict | None = None  # optional {part_name: hex_color}
    template_path: str | None = None  # Path to .blend template file
    target_collection: str = "Product"  # Collection to import geometry into
    material_library_path: str | None = None  # Path to material library .blend
    material_map: dict | None = None  # {part_name: material_name} from Excel
    part_names_ordered: list | None = None  # ordered STEP part names for index matching
    lighting_only: bool = False  # use template World/HDRI only; force auto-camera
    shadow_catcher: bool = False  # enable Shadowcatcher collection + position plane at bbox min Z
    rotation_x: float = 0.0  # Euler X rotation in degrees (applied to imported STL)
    rotation_y: float = 0.0  # Euler Y rotation in degrees
    rotation_z: float = 0.0  # Euler Z rotation in degrees
    job_id: str | None = None  # Optional ID for cancellation tracking
    noise_threshold: str = ""  # Adaptive sampling noise threshold (empty = Blender default)
    denoiser: str = ""  # "OPTIX" | "OPENIMAGEDENOISE" (empty = auto)
    denoising_input_passes: str = ""  # "RGB" | "RGB_ALBEDO" | "RGB_ALBEDO_NORMAL"
    denoising_prefilter: str = ""  # "NONE" | "FAST" | "ACCURATE"
    denoising_quality: str = ""  # "HIGH" | "BALANCED" | "FAST" (Blender 4.2+)
    denoising_use_gpu: str = ""  # "1" = GPU, "0" = CPU, "" = auto
def _find_blender() -> str:
"""Locate the Blender binary: prefer $BLENDER_BIN, then PATH."""
import os, shutil
env_bin = os.environ.get("BLENDER_BIN", "")
if env_bin and Path(env_bin).exists():
return env_bin
return shutil.which("blender") or "blender"
@app.get("/health")
async def health():
blender_bin = _find_blender()
version = "unknown"
try:
result = subprocess.run(
[blender_bin, "--version"], capture_output=True, text=True, timeout=10
)
first_line = (result.stdout or result.stderr or "").splitlines()
version = first_line[0].strip() if first_line else "unknown"
except Exception:
pass
return {
"status": "ok",
"renderer": "blender",
"blender_path": blender_bin,
"blender_version": version,
}
class ConvertStlRequest(BaseModel):
    """Payload for POST /convert-stl: which STEP file to tessellate, and how finely."""
    step_path: str
    quality: str = "low"  # "low" or "high"
@app.post("/convert-stl")
async def convert_stl(req: ConvertStlRequest):
"""Convert a STEP file to STL and cache it — no Blender render."""
if req.quality not in ("low", "high"):
raise HTTPException(400, detail="quality must be 'low' or 'high'")
step_path = Path(req.step_path)
if not step_path.exists():
raise HTTPException(404, detail=f"STEP file not found: {step_path}")
stl_path = step_path.parent / f"{step_path.stem}_{req.quality}.stl"
parts_dir = step_path.parent / f"{step_path.stem}_{req.quality}_parts"
t0 = time.monotonic()
try:
if not stl_path.exists() or stl_path.stat().st_size == 0:
await asyncio.to_thread(_convert_step_to_stl, step_path, stl_path, req.quality)
logger.info("STL generated: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
else:
logger.info("STL cache hit: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
except Exception as e:
logger.error("STEP→STL conversion failed: %s", e)
raise HTTPException(500, detail=f"STEP conversion failed: {e}")
try:
if not (parts_dir / "manifest.json").exists():
await asyncio.to_thread(_export_per_part_stls, step_path, parts_dir, req.quality)
except Exception as e:
logger.warning("per-part STL export failed (non-fatal): %s", e)
return {
"stl_path": str(stl_path),
"size_bytes": stl_path.stat().st_size if stl_path.exists() else 0,
"duration_s": round(time.monotonic() - t0, 2),
}
@app.post("/cancel/{job_id}")
async def cancel_render(job_id: str):
"""Kill the Blender subprocess for a running job (best-effort)."""
with _procs_lock:
proc = _active_procs.pop(job_id, None)
if proc is None:
return {"status": "not_found", "job_id": job_id}
try:
pgid = os.getpgid(proc.pid)
os.killpg(pgid, signal.SIGTERM)
logger.info("Sent SIGTERM to process group %d for job %s", pgid, job_id)
except (ProcessLookupError, OSError):
pass # process already finished
return {"status": "cancelled", "job_id": job_id}
@app.get("/status")
async def status():
"""Return current render queue depth and concurrency setting."""
with _procs_lock:
active = len(_active_procs)
with _config_lock:
current_max = _max_concurrent
return {"active_jobs": active, "max_concurrent": current_max}
@app.post("/configure")
async def configure(max_concurrent: int):
"""Dynamically update the maximum number of concurrent Blender renders."""
if not (1 <= max_concurrent <= 16):
from fastapi import HTTPException
raise HTTPException(400, detail="max_concurrent must be between 1 and 16")
_set_max_concurrent(max_concurrent)
logger.info("max_concurrent_renders updated to %d", max_concurrent)
return {"max_concurrent": max_concurrent}
@app.post("/render")
async def render(req: RenderRequest):
step_path = Path(req.step_path)
output_path = Path(req.output_path)
if not step_path.exists():
raise HTTPException(404, detail=f"STEP file not found: {step_path}")
output_path.parent.mkdir(parents=True, exist_ok=True)
t_start = time.monotonic()
# Acquire render slot — blocks if 3 renders are already running.
# asyncio.to_thread is used so the semaphore acquire doesn't block the event loop.
acquired = await asyncio.to_thread(_render_semaphore.acquire)
# 1. Get/create STL cache — persistent next to STEP file so re-renders skip conversion
stl_path = step_path.parent / f"{step_path.stem}_{req.stl_quality}.stl"
parts_dir = step_path.parent / f"{step_path.stem}_{req.stl_quality}_parts"
stl_size_bytes = 0
t_stl_start = time.monotonic()
try:
if not stl_path.exists() or stl_path.stat().st_size == 0:
logger.info("STL cache miss — converting: %s", step_path.name)
_convert_step_to_stl(step_path, stl_path, req.stl_quality)
else:
logger.info("STL cache hit: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
stl_size_bytes = stl_path.stat().st_size if stl_path.exists() else 0
except Exception as e:
_render_semaphore.release()
logger.error(f"STEP→STL conversion failed: {e}")
raise HTTPException(500, detail=f"STEP conversion failed: {e}")
# Per-part export (non-fatal — Blender falls back to combined STL)
try:
if not (parts_dir / "manifest.json").exists():
_export_per_part_stls(step_path, parts_dir, req.stl_quality)
except Exception as e:
logger.warning("per-part STL export failed (non-fatal): %s", e)
stl_duration_s = round(time.monotonic() - t_stl_start, 2)
# 2. Render STL → PNG via Blender
render_log_lines: list[str] = []
parts_count = 0
engine_used = req.engine
t_render_start = time.monotonic()
try:
render_log_lines, parts_count, engine_used = _render_stl_with_blender(
stl_path, output_path, req.width, req.height,
req.engine, req.samples, req.smooth_angle, req.cycles_device,
req.transparent_bg,
template_path=req.template_path,
target_collection=req.target_collection,
material_library_path=req.material_library_path,
material_map=req.material_map,
part_names_ordered=req.part_names_ordered,
lighting_only=req.lighting_only,
shadow_catcher=req.shadow_catcher,
rotation_x=req.rotation_x,
rotation_y=req.rotation_y,
rotation_z=req.rotation_z,
job_id=req.job_id,
noise_threshold=req.noise_threshold,
denoiser=req.denoiser,
denoising_input_passes=req.denoising_input_passes,
denoising_prefilter=req.denoising_prefilter,
denoising_quality=req.denoising_quality,
denoising_use_gpu=req.denoising_use_gpu,
)
except Exception as e:
logger.error(f"Blender render failed: {e}")
raise HTTPException(500, detail=f"Blender render failed: {e}")
finally:
_render_semaphore.release()
# STL cache is persistent — do NOT delete stl_path or parts_dir
render_duration_s = round(time.monotonic() - t_render_start, 2)
if not output_path.exists():
raise HTTPException(500, detail="Render produced no output file")
total_duration_s = round(time.monotonic() - t_start, 2)
output_size_bytes = output_path.stat().st_size
return {
"output_path": str(output_path),
"status": "ok",
"renderer": "blender",
# Timing
"total_duration_s": total_duration_s,
"stl_duration_s": stl_duration_s,
"render_duration_s": render_duration_s,
# Mesh info
"stl_size_bytes": stl_size_bytes,
"output_size_bytes": output_size_bytes,
"parts_count": parts_count,
# Effective settings (engine may differ from requested if EEVEE fell back)
"engine_used": engine_used,
# Blender log lines (filtered to [blender_render] prefix lines)
"log_lines": render_log_lines,
}
def _convert_step_to_stl(step_path: Path, stl_path: Path, quality: str = "low") -> None:
    """Tessellate a STEP file into an STL via cadquery.

    quality="low"  → tolerance 0.3 / angularTolerance 0.3 (fast, coarse mesh)
    quality="high" → tolerance 0.01 / angularTolerance 0.02 (slow, fine mesh)

    Raises RuntimeError when cadquery writes nothing (missing or empty STL).
    """
    import cadquery as cq

    tolerance, angular = (0.01, 0.02) if quality == "high" else (0.3, 0.3)
    shape = cq.importers.importStep(str(step_path))
    cq.exporters.export(shape, str(stl_path), tolerance=tolerance, angularTolerance=angular)
    if not stl_path.exists() or stl_path.stat().st_size == 0:
        raise RuntimeError("cadquery produced empty STL")
def _export_per_part_stls(step_path: Path, parts_dir: Path, quality: str = "low") -> list:
    """Export one STL per named STEP leaf shape using OCP XCAF.

    Creates parts_dir with individual STL files and a manifest.json mapping
    index → part name → filename. Returns the manifest list, or an empty list
    on any recoverable failure (OCP missing, unreadable STEP, no leaf shapes).
    """
    # Same quality knob as the combined-STL path, with a slightly tighter
    # angular tolerance for "high".
    tol = 0.01 if quality == "high" else 0.3
    angular_tol = 0.05 if quality == "high" else 0.3
    try:
        from OCP.STEPCAFControl import STEPCAFControl_Reader
        from OCP.XCAFDoc import XCAFDoc_DocumentTool, XCAFDoc_ShapeTool
        from OCP.TDataStd import TDataStd_Name
        from OCP.TDF import TDF_Label as TDF_Label_cls, TDF_LabelSequence
        from OCP.XCAFApp import XCAFApp_Application
        from OCP.TDocStd import TDocStd_Document
        from OCP.TCollection import TCollection_ExtendedString
        from OCP.IFSelect import IFSelect_RetDone
        import cadquery as cq
    except ImportError as e:
        logger.warning("per-part export skipped (import error): %s", e)
        return []
    # Read the STEP file through XCAF so per-part names survive the transfer.
    app = XCAFApp_Application.GetApplication_s()
    doc = TDocStd_Document(TCollection_ExtendedString("XmlOcaf"))
    app.InitDocument(doc)
    reader = STEPCAFControl_Reader()
    reader.SetNameMode(True)
    status = reader.ReadFile(str(step_path))
    if status != IFSelect_RetDone:
        logger.warning("XCAF reader failed with status %s", status)
        return []
    if not reader.Transfer(doc):
        logger.warning("XCAF transfer failed")
        return []
    shape_tool = XCAFDoc_DocumentTool.ShapeTool_s(doc.Main())
    name_id = TDataStd_Name.GetID_s()
    leaves = []

    def _get_label_name(label):
        # Read the TDataStd_Name attribute off a label, if present.
        name_attr = TDataStd_Name()
        if label.FindAttribute(name_id, name_attr):
            return name_attr.Get().ToExtString()
        return ""

    def _collect_leaves(label):
        # Depth-first walk of the assembly tree, accumulating (name, shape)
        # pairs for every simple (non-assembly) shape into `leaves`.
        if XCAFDoc_ShapeTool.IsAssembly_s(label):
            components = TDF_LabelSequence()
            XCAFDoc_ShapeTool.GetComponents_s(label, components)
            for i in range(1, components.Length() + 1):
                comp_label = components.Value(i)
                if XCAFDoc_ShapeTool.IsReference_s(comp_label):
                    ref_label = TDF_Label_cls()
                    XCAFDoc_ShapeTool.GetReferredShape_s(comp_label, ref_label)
                    comp_name = _get_label_name(comp_label)
                    ref_name = _get_label_name(ref_label)
                    # Prefer referred shape name — matches material_map keys
                    name = ref_name or comp_name
                    if XCAFDoc_ShapeTool.IsAssembly_s(ref_label):
                        _collect_leaves(ref_label)
                    elif XCAFDoc_ShapeTool.IsSimpleShape_s(ref_label):
                        # Use comp_label shape — includes instance transform (position)
                        shape = XCAFDoc_ShapeTool.GetShape_s(comp_label)
                        leaves.append((name or f"unnamed_{len(leaves)}", shape))
                else:
                    _collect_leaves(comp_label)
        elif XCAFDoc_ShapeTool.IsSimpleShape_s(label):
            name = _get_label_name(label)
            shape = XCAFDoc_ShapeTool.GetShape_s(label)
            leaves.append((name or f"unnamed_{len(leaves)}", shape))

    top_labels = TDF_LabelSequence()
    shape_tool.GetFreeShapes(top_labels)
    for i in range(1, top_labels.Length() + 1):
        _collect_leaves(top_labels.Value(i))
    if not leaves:
        logger.warning("no leaf shapes found via XCAF")
        return []
    parts_dir.mkdir(parents=True, exist_ok=True)
    manifest = []
    for idx, (name, shape) in enumerate(leaves):
        # File names are index-prefixed and sanitised for the filesystem.
        safe_name = name.replace("/", "_").replace("\\", "_").replace(" ", "_")
        filename = f"{idx:02d}_{safe_name}.stl"
        filepath = str(parts_dir / filename)
        try:
            import cadquery as cq
            cq_shape = cq.Shape(shape)
            cq_shape.exportStl(filepath, tolerance=tol, angularTolerance=angular_tol)
            manifest.append({"index": idx, "name": name, "file": filename})
        except Exception as e:
            # One bad part does not abort the whole export.
            logger.warning("failed to export part '%s': %s", name, e)
    manifest_path = parts_dir / "manifest.json"
    with open(manifest_path, "w") as f:
        _json_mod.dump({"parts": manifest}, f, indent=2)
    total_size = sum(
        os.path.getsize(str(parts_dir / p["file"]))
        for p in manifest
        if (parts_dir / p["file"]).exists()
    )
    logger.info("exported %d per-part STLs (%d KB) to %s", len(manifest), total_size // 1024, parts_dir)
    return manifest
def _parse_blender_log(stdout: str) -> tuple[list[str], int]:
"""Extract [blender_render] lines and parts count from Blender stdout."""
lines = []
parts_count = 0
for line in (stdout or "").splitlines():
stripped = line.strip()
if "[blender_render]" in stripped or "[blender_render" in stripped:
lines.append(stripped)
if "separated into" in stripped:
try:
parts_count = int(stripped.split("separated into")[1].split("part")[0].strip())
except Exception:
pass
elif "imported" in stripped and "named parts" in stripped:
try:
parts_count = int(stripped.split("imported")[1].split("named")[0].strip())
except Exception:
pass
elif stripped.startswith("Saved:") or stripped.startswith("Fra:"):
lines.append(stripped)
return lines, parts_count
def _render_stl_with_blender(
    stl_path: Path, output_path: Path, width: int, height: int,
    engine: str = "cycles", samples: int = 256, smooth_angle: int = 30,
    cycles_device: str = "auto", transparent_bg: bool = False,
    template_path: str | None = None, target_collection: str = "Product",
    material_library_path: str | None = None, material_map: dict | None = None,
    part_names_ordered: list | None = None, lighting_only: bool = False,
    shadow_catcher: bool = False,
    rotation_x: float = 0.0, rotation_y: float = 0.0, rotation_z: float = 0.0,
    job_id: str | None = None,
    noise_threshold: str = "",
    denoiser: str = "",
    denoising_input_passes: str = "",
    denoising_prefilter: str = "",
    denoising_quality: str = "",
    denoising_use_gpu: str = "",
) -> tuple[list[str], int, str]:
    """Render STL to PNG using Blender in background mode.

    Returns (log_lines, parts_count, engine_used).

    Blender is launched in its own process group (start_new_session=True) so
    that SIGTERM from a cancel request kills the entire Blender tree. A failed
    EEVEE run with a positive exit code is retried once with Cycles; a
    negative return code means the process was killed by a signal (e.g. a
    cancel request) and is NOT retried.

    Raises RuntimeError when the final Blender run exits non-zero.
    """
    import json as _json
    blender_bin = _find_blender()
    script_path = Path(__file__).parent / "blender_render.py"
    env = dict(os.environ)
    if engine == "eevee":
        # EEVEE needs a software GL/Vulkan stack inside the headless container.
        env.update({
            "VK_ICD_FILENAMES": "/usr/share/vulkan/icd.d/lvp_icd.x86_64.json",
            "LIBGL_ALWAYS_SOFTWARE": "1",
            "MESA_GL_VERSION_OVERRIDE": "4.5",
            "EGL_PLATFORM": "surfaceless",
        })
    else:
        env.update({
            "EGL_PLATFORM": "surfaceless",
        })

    def _build_cmd(eng: str) -> list:
        # Positional argv consumed by blender_render.py after the "--" marker;
        # order must match the parser in that script.
        return [
            blender_bin,
            "--background",
            "--python", str(script_path),
            "--",
            str(stl_path),
            str(output_path),
            str(width),
            str(height),
            eng,
            str(samples),
            str(smooth_angle),
            cycles_device,
            "1" if transparent_bg else "0",
            template_path or "",
            target_collection,
            material_library_path or "",
            _json.dumps(material_map) if material_map else "{}",
            _json.dumps(part_names_ordered) if part_names_ordered else "[]",
            "1" if lighting_only else "0",
            "1" if shadow_catcher else "0",
            str(rotation_x),
            str(rotation_y),
            str(rotation_z),
            noise_threshold or "",
            denoiser or "",
            denoising_input_passes or "",
            denoising_prefilter or "",
            denoising_quality or "",
            denoising_use_gpu or "",
        ]

    def _run_blender(eng: str) -> subprocess.CompletedProcess:
        """Launch Blender in an isolated process group and wait for completion."""
        cmd = _build_cmd(eng)
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            env=env,
            start_new_session=True,  # new process group → SIGTERM kills entire tree
        )
        if job_id:
            # Register so /cancel/{job_id} can find and kill this process.
            with _procs_lock:
                _active_procs[job_id] = proc
        try:
            stdout, stderr = proc.communicate(timeout=300)
        except subprocess.TimeoutExpired:
            # Hard 5-minute cap: kill the whole group, then collect output.
            try:
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            except (ProcessLookupError, OSError):
                pass
            stdout, stderr = proc.communicate()
        finally:
            if job_id:
                with _procs_lock:
                    _active_procs.pop(job_id, None)
        return subprocess.CompletedProcess(cmd, proc.returncode, stdout, stderr)

    result = _run_blender(engine)
    engine_used = engine
    # Log to uvicorn output
    if result.stdout:
        for line in result.stdout.splitlines():
            logger.info("[blender] %s", line)
    if result.stderr:
        for line in result.stderr.splitlines():
            logger.warning("[blender stderr] %s", line)
    # If EEVEE fails with a non-signal error, automatically retry with Cycles.
    # A negative returncode means the process was killed by a signal (e.g. cancel)
    # — do NOT retry in that case.
    if result.returncode > 0 and engine == "eevee":
        logger.warning(
            "EEVEE render failed (exit %d) retrying with Cycles (CPU).",
            result.returncode,
        )
        result = _run_blender("cycles")
        engine_used = "cycles (eevee fallback)"
        if result.stdout:
            for line in result.stdout.splitlines():
                logger.info("[blender-cycles-fallback] %s", line)
        if result.stderr:
            for line in result.stderr.splitlines():
                logger.warning("[blender-cycles-fallback stderr] %s", line)
    if result.returncode != 0:
        stdout_tail = result.stdout[-2000:] if result.stdout else ""
        stderr_tail = result.stderr[-2000:] if result.stderr else ""
        raise RuntimeError(
            f"Blender exited {result.returncode}.\n"
            f"STDOUT: {stdout_tail}\nSTDERR: {stderr_tail}"
        )
    log_lines, parts_count = _parse_blender_log(result.stdout)
    return log_lines, parts_count, engine_used
-679
View File
@@ -1,679 +0,0 @@
"""
Blender Python script for rendering a GLB file to PNG.
Targets Blender 5.0+ (EEVEE / Cycles).
Called by Blender:
blender --background --python blender_render.py -- \
<glb_path> <output_path> <width> <height> [engine] [samples]
engine: "cycles" (default) | "eevee"
Features:
- OCC-generated GLB: one mesh per STEP part, already in metres.
- Bounding-box-aware camera: object fills ~85 % of the frame.
- Isometric-style angle (elevation 28°, azimuth 40°).
- Dynamic clip planes.
- Standard (non-Filmic) colour management → no grey tint.
- Schaeffler green top bar + model name label via Pillow post-processing.
"""
import sys
import os
import math
import bpy
from mathutils import Vector, Matrix
# ── Colour palette (matches Three.js renderer) ───────────────────────────────
PALETTE_HEX = [
"#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
"#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]
def _srgb_to_linear(c: int) -> float:
"""Convert 0-255 sRGB integer to linear float."""
v = c / 255.0
return v / 12.92 if v <= 0.04045 else ((v + 0.055) / 1.055) ** 2.4
def _hex_to_linear(hex_color: str) -> tuple:
"""Return (r, g, b, 1.0) in Blender linear colour space."""
h = hex_color.lstrip('#')
return (
_srgb_to_linear(int(h[0:2], 16)),
_srgb_to_linear(int(h[2:4], 16)),
_srgb_to_linear(int(h[4:6], 16)),
1.0,
)
PALETTE_LINEAR = [_hex_to_linear(h) for h in PALETTE_HEX]
# ── Parse arguments ───────────────────────────────────────────────────────────
argv = sys.argv
if "--" in argv:
argv = argv[argv.index("--") + 1:]
else:
argv = []
if len(argv) < 4:
print("Usage: blender --background --python blender_render.py -- "
"<glb_path> <output_path> <width> <height> [engine] [samples] [smooth_angle] [cycles_device] [transparent_bg]")
sys.exit(1)
import json as _json
glb_path = argv[0]
output_path = argv[1]
width = int(argv[2])
height = int(argv[3])
engine = argv[4].lower() if len(argv) > 4 else "cycles"
samples = int(argv[5]) if len(argv) > 5 else (64 if engine == "eevee" else 256)
smooth_angle = int(argv[6]) if len(argv) > 6 else 30 # degrees; 0 = flat shading
cycles_device = argv[7].lower() if len(argv) > 7 else "auto" # "auto", "gpu", "cpu"
transparent_bg = argv[8] == "1" if len(argv) > 8 else False
template_path = argv[9] if len(argv) > 9 and argv[9] else ""
target_collection = argv[10] if len(argv) > 10 else "Product"
material_library_path = argv[11] if len(argv) > 11 and argv[11] else ""
material_map_raw = argv[12] if len(argv) > 12 else "{}"
try:
material_map = _json.loads(material_map_raw) if material_map_raw else {}
except _json.JSONDecodeError:
material_map = {}
part_names_ordered_raw = argv[13] if len(argv) > 13 else "[]"
try:
part_names_ordered = _json.loads(part_names_ordered_raw) if part_names_ordered_raw else []
except _json.JSONDecodeError:
part_names_ordered = []
lighting_only = argv[14] == "1" if len(argv) > 14 else False
shadow_catcher = argv[15] == "1" if len(argv) > 15 else False
rotation_x = float(argv[16]) if len(argv) > 16 else 0.0
rotation_y = float(argv[17]) if len(argv) > 17 else 0.0
rotation_z = float(argv[18]) if len(argv) > 18 else 0.0
noise_threshold_arg = argv[19] if len(argv) > 19 else ""
denoiser_arg = argv[20] if len(argv) > 20 else ""
denoising_input_passes_arg = argv[21] if len(argv) > 21 else ""
denoising_prefilter_arg = argv[22] if len(argv) > 22 else ""
denoising_quality_arg = argv[23] if len(argv) > 23 else ""
denoising_use_gpu_arg = argv[24] if len(argv) > 24 else ""
# Validate template path: if provided it MUST exist on disk.
# Fail loudly rather than silently rendering with factory settings.
if template_path and not os.path.isfile(template_path):
print(f"[blender_render] ERROR: template_path was provided but file not found: {template_path}")
print("[blender_render] Check that the blend-templates directory is on the shared volume.")
sys.exit(1)
use_template = bool(template_path)
print(f"[blender_render] engine={engine}, samples={samples}, size={width}x{height}, smooth_angle={smooth_angle}°, device={cycles_device}, transparent={transparent_bg}")
print(f"[blender_render] part_names_ordered: {len(part_names_ordered)} entries")
if use_template:
print(f"[blender_render] template={template_path}, collection={target_collection}, lighting_only={lighting_only}")
else:
print("[blender_render] no template — using factory settings (Mode A)")
if material_library_path:
print(f"[blender_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
# ── Helper: find or create collection by name ────────────────────────────────
def _ensure_collection(name: str):
    """Return the collection named *name*, creating it if it does not exist.

    A newly created collection is linked under the scene's root collection so
    it is visible in the current scene.
    """
    if name in bpy.data.collections:
        return bpy.data.collections[name]
    col = bpy.data.collections.new(name)
    bpy.context.scene.collection.children.link(col)
    return col
def _apply_smooth(part_obj, angle_deg: int) -> None:
    """Apply smooth (angle-limited) or flat shading to one mesh object.

    angle_deg > 0 → shade_smooth_by_angle with that angle; on Blender builds
    where that operator is missing (AttributeError), fall back to plain
    shade_smooth plus the legacy auto-smooth settings at the same angle.
    angle_deg == 0 → flat shading.
    """
    # Operators act on the active, selected object.
    bpy.context.view_layer.objects.active = part_obj
    part_obj.select_set(True)
    if angle_deg > 0:
        try:
            bpy.ops.object.shade_smooth_by_angle(angle=math.radians(angle_deg))
        except AttributeError:
            # Older Blender without shade_smooth_by_angle — emulate it.
            bpy.ops.object.shade_smooth()
            part_obj.data.use_auto_smooth = True
            part_obj.data.auto_smooth_angle = math.radians(angle_deg)
    else:
        bpy.ops.object.shade_flat()
def _assign_palette_material(part_obj, index: int) -> None:
    """Create a Principled-BSDF material in the palette colour for *index*
    (cycling through PALETTE_LINEAR) and make it the part's only material."""
    color = PALETTE_LINEAR[index % len(PALETTE_LINEAR)]
    mat = bpy.data.materials.new(name=f"Part_{index}")
    mat.use_nodes = True
    bsdf = mat.node_tree.nodes.get("Principled BSDF")
    if bsdf:
        bsdf.inputs["Base Color"].default_value = color
        bsdf.inputs["Metallic"].default_value = 0.35
        bsdf.inputs["Roughness"].default_value = 0.40
        try:
            # Input name varies across Blender versions; skip when absent.
            bsdf.inputs["Specular IOR Level"].default_value = 0.5
        except KeyError:
            pass
    # Replace whatever material slots the import created.
    part_obj.data.materials.clear()
    part_obj.data.materials.append(mat)
import re as _re
def _apply_rotation(parts, rx: float, ry: float, rz: float) -> None:
    """Apply Euler rotation (degrees, XYZ order) to all parts around world origin.

    After _import_glb the combined bbox center is at world origin,
    so rotating around origin is equivalent to rotating around the assembly
    center. No-op when *parts* is empty or all three angles are zero.
    """
    if not parts or (rx == 0.0 and ry == 0.0 and rz == 0.0):
        return
    from mathutils import Euler
    rot_mat = Euler((math.radians(rx), math.radians(ry), math.radians(rz)), 'XYZ').to_matrix().to_4x4()
    # Pre-multiply so the rotation happens in world space, about the origin.
    for p in parts:
        p.matrix_world = rot_mat @ p.matrix_world
    # Bake rotation into mesh data so camera bbox calculations see the rotated geometry
    bpy.ops.object.select_all(action='DESELECT')
    for p in parts:
        p.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    print(f"[blender_render] applied rotation ({rx}°, {ry}°, {rz}°) to {len(parts)} parts")
def _import_glb(glb_file):
    """Import an OCC-generated GLB into Blender.

    OCC exports one mesh object per STEP part, already in metres. Returns the
    list of imported Blender mesh objects with their combined bounding box
    re-centred at the world origin. Exits the process (code 1) when the GLB
    yields no mesh objects.
    """
    bpy.ops.object.select_all(action='DESELECT')
    bpy.ops.import_scene.gltf(filepath=glb_file)
    # The glTF importer leaves the newly created objects selected.
    parts = [o for o in bpy.context.selected_objects if o.type == 'MESH']
    if not parts:
        print(f"ERROR: No mesh objects imported from {glb_file}")
        sys.exit(1)
    print(f"[blender_render] imported {len(parts)} part(s) from GLB: "
          f"{[p.name for p in parts[:5]]}")
    # Centre combined bbox at world origin
    all_corners = []
    for p in parts:
        all_corners.extend(p.matrix_world @ Vector(c) for c in p.bound_box)
    if all_corners:
        mins = Vector((min(v.x for v in all_corners),
                       min(v.y for v in all_corners),
                       min(v.z for v in all_corners)))
        maxs = Vector((max(v.x for v in all_corners),
                       max(v.y for v in all_corners),
                       max(v.z for v in all_corners)))
        center = (mins + maxs) * 0.5
        for p in parts:
            p.location -= center
    return parts
def _resolve_part_name(index, part_obj):
    """Resolve the STEP part name for a Blender object at position *index*.

    Precedence as implemented: when the module-level part_names_ordered list
    covers *index*, that entry wins; otherwise fall back to the object's own
    name with Blender's duplicate suffix (.001, .002, …) stripped — with GLB
    import the object name is the STEP name.
    """
    # Strip Blender auto-suffix (.001, .002, etc.)
    base_name = _re.sub(r'\.\d{3}$', '', part_obj.name)
    # NOTE(review): the original comment suggested name-based lookup first,
    # but the index mapping below actually takes precedence — confirm intent.
    if part_names_ordered and index < len(part_names_ordered):
        return part_names_ordered[index]
    return base_name
def _apply_material_library(parts, mat_lib_path, mat_map):
    """Append materials from library .blend and assign to parts via material_map.

    GLB-imported objects are named after STEP parts, so matching is by name
    (stripping Blender .NNN suffix for duplicates). Falls back to
    part_names_ordered index-based matching.

    Args:
        parts: Blender mesh objects to assign materials to.
        mat_lib_path: path to the material library .blend file.
        mat_map: {part_name_lower: material_name}.

    Parts without a match keep their current material. Every failure is
    logged and non-fatal — the render proceeds with whatever matched.
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        print(f"[blender_render] material library not found: {mat_lib_path}")
        return
    # Collect unique material names needed
    needed = set(mat_map.values())
    if not needed:
        return
    # Append (link=False) each needed material from the library so it
    # becomes local to this session's data.
    appended = {}
    for mat_name in needed:
        inner_path = f"{mat_lib_path}/Material/{mat_name}"
        try:
            bpy.ops.wm.append(
                filepath=inner_path,
                directory=f"{mat_lib_path}/Material/",
                filename=mat_name,
                link=False,
            )
            if mat_name in bpy.data.materials:
                appended[mat_name] = bpy.data.materials[mat_name]
                print(f"[blender_render] appended material: {mat_name}")
            else:
                print(f"[blender_render] WARNING: material '{mat_name}' not found after append")
        except Exception as exc:
            print(f"[blender_render] WARNING: failed to append material '{mat_name}': {exc}")
    if not appended:
        return
    # Assign materials to parts — primary: name-based (GLB object names),
    # secondary: index-based via part_names_ordered
    assigned_count = 0
    for i, part in enumerate(parts):
        # Try name-based matching first (strip Blender .NNN suffix)
        base_name = _re.sub(r'\.\d{3}$', '', part.name)
        part_key = base_name.lower().strip()
        mat_name = mat_map.get(part_key)
        # Fall back to index-based matching via part_names_ordered
        if not mat_name and part_names_ordered and i < len(part_names_ordered):
            step_name = part_names_ordered[i]
            part_key = step_name.lower().strip()
            mat_name = mat_map.get(part_key)
        if mat_name and mat_name in appended:
            # Replace whatever material(s) the part had with the library one.
            part.data.materials.clear()
            part.data.materials.append(appended[mat_name])
            assigned_count += 1
            print(f"[blender_render] assigned '{mat_name}' to part '{part.name}'")
    print(f"[blender_render] material assignment: {assigned_count}/{len(parts)} parts matched")
# ── SCENE SETUP ──────────────────────────────────────────────────────────────
if use_template:
    # ── MODE B: Template-based render ────────────────────────────────────────
    print(f"[blender_render] Opening template: {template_path}")
    bpy.ops.wm.open_mainfile(filepath=template_path)
    # Find or create target collection
    target_col = _ensure_collection(target_collection)
    # Import GLB (already in metres from OCC export)
    parts = _import_glb(glb_path)
    # Apply render position rotation (before camera/bbox calculations)
    _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
    # Move imported parts into target collection
    for part in parts:
        # Remove from all existing collections
        for col in list(part.users_collection):
            col.objects.unlink(part)
        target_col.objects.link(part)
    # Apply smooth shading
    for part in parts:
        _apply_smooth(part, smooth_angle)
    # Material assignment: library materials if available, otherwise palette
    if material_library_path and material_map:
        # Build lowercased material_map for matching
        mat_map_lower = {k.lower(): v for k, v in material_map.items()}
        _apply_material_library(parts, material_library_path, mat_map_lower)
        # Parts not matched by library get palette fallback
        for i, part in enumerate(parts):
            if not part.data.materials or len(part.data.materials) == 0:
                _assign_palette_material(part, i)
    else:
        for i, part in enumerate(parts):
            _assign_palette_material(part, i)
    # ── Shadow catcher (Cycles only, template mode only) ─────────────────────
    if shadow_catcher:
        sc_col_name = "Shadowcatcher"
        sc_obj_name = "Shadowcatcher"
        # Enable the Shadowcatcher collection in all view layers
        for vl in bpy.context.scene.view_layers:
            def _enable_col_recursive(layer_col):
                # Walk the layer-collection tree; un-hide the first collection
                # named sc_col_name. Returns True once found.
                if layer_col.collection.name == sc_col_name:
                    layer_col.exclude = False
                    layer_col.collection.hide_render = False
                    layer_col.collection.hide_viewport = False
                    return True
                for child in layer_col.children:
                    if _enable_col_recursive(child):
                        return True
                return False
            _enable_col_recursive(vl.layer_collection)
        sc_obj = bpy.data.objects.get(sc_obj_name)
        if sc_obj:
            # Calculate product bbox min Z (world space) and drop the
            # catcher plane to sit exactly under the product.
            all_world_corners = []
            for part in parts:
                for corner in part.bound_box:
                    all_world_corners.append((part.matrix_world @ Vector(corner)).z)
            if all_world_corners:
                sc_obj.location.z = min(all_world_corners)
            print(f"[blender_render] shadow catcher enabled, plane Z={sc_obj.location.z:.4f}")
        else:
            print(f"[blender_render] WARNING: shadow catcher object '{sc_obj_name}' not found in template")
    # lighting_only: use template World/HDRI but force auto-camera UNLESS the shadow
    # catcher is enabled — in that case the template camera is already positioned to
    # show both the product and its shadow on the ground plane.
    needs_auto_camera = (lighting_only and not shadow_catcher) or not bpy.context.scene.camera
    if lighting_only and not shadow_catcher:
        print("[blender_render] lighting_only mode: using template World/HDRI, forcing auto-camera")
    elif needs_auto_camera:
        print("[blender_render] WARNING: template has no camera — will create auto-camera")
    # Set very close near clip on template camera for mm-scale parts (now in metres)
    if not needs_auto_camera and bpy.context.scene.camera:
        bpy.context.scene.camera.data.clip_start = 0.001
    print(f"[blender_render] template mode: {len(parts)} parts imported into collection '{target_collection}'")
else:
    # ── MODE A: Factory settings (original behavior) ─────────────────────────
    needs_auto_camera = True
    bpy.ops.wm.read_factory_settings(use_empty=True)
    parts = _import_glb(glb_path)
    # Apply render position rotation (before camera/bbox calculations)
    _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
    for i, part in enumerate(parts):
        _apply_smooth(part, smooth_angle)
        _assign_palette_material(part, i)
    # Apply material library on top of palette colours (same logic as Mode B).
    # material_library_path / material_map are parsed from argv even in Mode A
    # but were previously never used here — that was the bug.
    if material_library_path and material_map:
        mat_map_lower = {k.lower(): v for k, v in material_map.items()}
        _apply_material_library(parts, material_library_path, mat_map_lower)
    # Parts not matched by the library keep their palette material (already set above)
if needs_auto_camera:
    # ── Combined bounding box / bounding sphere ──────────────────────────────
    # World-space corners of every part; parts is guaranteed non-empty
    # (_import_glb exits the process otherwise), so min/max below are safe.
    all_corners = []
    for part in parts:
        all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
    bbox_min = Vector((
        min(v.x for v in all_corners),
        min(v.y for v in all_corners),
        min(v.z for v in all_corners),
    ))
    bbox_max = Vector((
        max(v.x for v in all_corners),
        max(v.y for v in all_corners),
        max(v.z for v in all_corners),
    ))
    bbox_center = (bbox_min + bbox_max) * 0.5
    bbox_dims = bbox_max - bbox_min
    # Half the bbox diagonal, clamped so degenerate (flat/point) geometry
    # still yields a usable camera distance.
    bsphere_radius = max(bbox_dims.length * 0.5, 0.001)
    print(f"[blender_render] bbox_dims={tuple(round(d,4) for d in bbox_dims)}, "
          f"bsphere_radius={bsphere_radius:.4f}, center={tuple(round(c,4) for c in bbox_center)}")
    # ── Lighting — only in Mode A (factory settings) ─────────────────────────
    # In template mode the .blend file provides its own World/HDRI lighting.
    # Adding auto-lights would overpower the template's intended look.
    if not use_template:
        light_dist = bsphere_radius * 6.0
        # Key light: sun placed above and to the side of the product.
        bpy.ops.object.light_add(type='SUN', location=(
            bbox_center.x + light_dist * 0.5,
            bbox_center.y - light_dist * 0.35,
            bbox_center.z + light_dist,
        ))
        sun = bpy.context.active_object
        sun.data.energy = 4.0
        sun.rotation_euler = (math.radians(45), 0, math.radians(30))
        # Fill light: large area lamp on the opposite side, scaled with the
        # product so small parts aren't blown out and large ones aren't dark.
        bpy.ops.object.light_add(type='AREA', location=(
            bbox_center.x - light_dist * 0.4,
            bbox_center.y + light_dist * 0.4,
            bbox_center.z + light_dist * 0.7,
        ))
        fill = bpy.context.active_object
        fill.data.energy = max(800.0, bsphere_radius ** 2 * 2000.0)
        fill.data.size = max(4.0, bsphere_radius * 4.0)
    # ── Camera ───────────────────────────────────────────────────────────────
    ELEVATION_DEG = 28.0       # three-quarter view above the horizon
    AZIMUTH_DEG = 40.0
    LENS_MM = 50.0
    SENSOR_WIDTH_MM = 36.0
    FILL_FACTOR = 0.85         # fraction of the frame the bounding sphere fills
    elevation_rad = math.radians(ELEVATION_DEG)
    azimuth_rad = math.radians(AZIMUTH_DEG)
    # Unit vector from the product centre towards the camera position.
    cam_dir = Vector((
        math.cos(elevation_rad) * math.cos(azimuth_rad),
        math.cos(elevation_rad) * math.sin(azimuth_rad),
        math.sin(elevation_rad),
    )).normalized()
    # Fit against the tighter of the horizontal/vertical half-FOV so the
    # bounding sphere fits regardless of aspect ratio.
    fov_h = math.atan(SENSOR_WIDTH_MM / (2.0 * LENS_MM))
    fov_v = math.atan(SENSOR_WIDTH_MM * (height / width) / (2.0 * LENS_MM))
    fov_used = min(fov_h, fov_v)
    dist = (bsphere_radius / math.tan(fov_used)) / FILL_FACTOR
    dist = max(dist, bsphere_radius * 1.5)  # never closer than 1.5 radii
    print(f"[blender_render] camera dist={dist:.4f}, fov={math.degrees(fov_used):.2f}°")
    cam_location = bbox_center + cam_dir * dist
    bpy.ops.object.camera_add(location=cam_location)
    cam_obj = bpy.context.active_object
    cam_obj.data.lens = LENS_MM
    bpy.context.scene.camera = cam_obj
    # Build a look-at rotation: camera -Z points at the product, +Y is "up".
    look_dir = (bbox_center - cam_location).normalized()
    up_world = Vector((0.0, 0.0, 1.0))
    right = look_dir.cross(up_world)
    if right.length < 1e-6:
        # Looking straight up/down — pick an arbitrary horizontal right axis.
        right = Vector((1.0, 0.0, 0.0))
    right.normalize()
    cam_up = right.cross(look_dir).normalized()
    rot_mat = Matrix((
        (   right.x,    right.y,    right.z),
        (  cam_up.x,   cam_up.y,   cam_up.z),
        (-look_dir.x, -look_dir.y, -look_dir.z),
    )).transposed()
    cam_obj.rotation_euler = rot_mat.to_euler('XYZ')
    # Clip range scaled to the scene so mm-scale parts don't get clipped.
    cam_obj.data.clip_start = max(dist * 0.001, 0.0001)
    cam_obj.data.clip_end = dist + bsphere_radius * 3.0
    # FIX: the log message previously concatenated the two clip values with
    # no separator ("0.0012343.4567"); add an explicit dash for readability.
    print(f"[blender_render] clip {cam_obj.data.clip_start:.6f}–{cam_obj.data.clip_end:.4f}")
# ── World background — only in Mode A ────────────────────────────────────
# In template mode the .blend file owns its World (HDRI, sky texture, studio
# lighting). Overwriting it would destroy the HDR look the template was
# designed to use (e.g. Alpha-HDR output types with Filmic tonemapping).
if not use_template:
    # Near-white, very dim backdrop: a neutral studio background.
    world = bpy.data.worlds.new("World")
    bpy.context.scene.world = world
    world.use_nodes = True
    bg = world.node_tree.nodes["Background"]
    bg.inputs["Color"].default_value = (0.96, 0.96, 0.97, 1.0)
    bg.inputs["Strength"].default_value = 0.15
# ── Render engine ─────────────────────────────────────────────────────────────
scene = bpy.context.scene
if engine == "eevee":
    # Blender 4.x used 'BLENDER_EEVEE_NEXT'; Blender 5.x reverted to 'BLENDER_EEVEE'.
    # Try both names so the script works across versions.
    set_ok = False
    for eevee_id in ('BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'):
        try:
            scene.render.engine = eevee_id
            set_ok = True
            print(f"[blender_render] EEVEE engine id: {eevee_id}")
            break
        except TypeError:
            # Invalid enum value for this Blender version — try the next id.
            continue
    if not set_ok:
        # FIX: restored the missing separator in the warning message.
        print("[blender_render] WARNING: could not set EEVEE engine — falling back to Cycles")
        engine = "cycles"
if engine == "eevee":
    # Sample attribute name changed across minor versions
    for attr in ('taa_render_samples', 'samples'):
        try:
            setattr(scene.eevee, attr, samples)
            print(f"[blender_render] EEVEE samples: scene.eevee.{attr}={samples}")
            break
        except AttributeError:
            continue
if engine != "eevee":  # covers both explicit Cycles and EEVEE-fallback
    scene.render.engine = 'CYCLES'
    scene.cycles.samples = samples
    scene.cycles.use_denoising = True
    scene.cycles.denoiser = denoiser_arg if denoiser_arg else 'OPENIMAGEDENOISE'
    # Optional denoising tuning — each attribute may not exist on every
    # Blender version, so each assignment is individually best-effort.
    # (Expanded from single-line try/except statements; behavior unchanged.)
    if denoising_input_passes_arg:
        try:
            scene.cycles.denoising_input_passes = denoising_input_passes_arg
        except Exception:
            pass
    if denoising_prefilter_arg:
        try:
            scene.cycles.denoising_prefilter = denoising_prefilter_arg
        except Exception:
            pass
    if denoising_quality_arg:
        try:
            scene.cycles.denoising_quality = denoising_quality_arg
        except Exception:
            pass
    if denoising_use_gpu_arg:
        try:
            scene.cycles.denoising_use_gpu = (denoising_use_gpu_arg == "1")
        except AttributeError:
            pass
    if noise_threshold_arg:
        scene.cycles.use_adaptive_sampling = True
        scene.cycles.adaptive_threshold = float(noise_threshold_arg)
    # ── Device selection: "cpu" forces CPU, "gpu" forces GPU (fail if unavailable),
    #    "auto" tries GPU first and falls back to CPU.
    gpu_type_found = None
    if cycles_device != "cpu":
        try:
            cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
            # Probe backends in preference order; the first one exposing a
            # usable non-CPU device wins.
            for device_type in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI'):
                try:
                    cycles_prefs.compute_device_type = device_type
                    cycles_prefs.get_devices()
                    gpu_devs = [d for d in cycles_prefs.devices if d.type != 'CPU']
                    if gpu_devs:
                        for d in gpu_devs:
                            d.use = True
                        gpu_type_found = device_type
                        break
                except Exception as e:
                    print(f"[blender_render] {device_type} not available: {e}")
        except Exception as e:
            print(f"[blender_render] GPU probe failed: {e}")
    if gpu_type_found:
        scene.cycles.device = 'GPU'
        print(f"[blender_render] Cycles GPU ({gpu_type_found}), samples={samples}")
    else:
        scene.cycles.device = 'CPU'
        print(f"[blender_render] WARNING: GPU not found — falling back to CPU, samples={samples}")
# ── Colour management ─────────────────────────────────────────────────────────
# In template mode the .blend file owns its colour management (e.g. Filmic/
# AgX for HDR, custom exposure for Alpha-HDR output types). Overwriting it
# would destroy the look the template was designed for.
# In factory-settings mode (Mode A) force Standard to avoid the grey Filmic
# tint that Blender applies by default.
if not use_template:
    scene.view_settings.view_transform = 'Standard'
    scene.view_settings.exposure = 0.0
    scene.view_settings.gamma = 1.0
    try:
        # 'look' may not accept 'None' on every OCIO config — best-effort.
        scene.view_settings.look = 'None'
    except Exception:
        pass
# ── Render settings ───────────────────────────────────────────────────────────
scene.render.resolution_x = width
scene.render.resolution_y = height
scene.render.resolution_percentage = 100
scene.render.image_settings.file_format = 'PNG'
scene.render.filepath = output_path
scene.render.film_transparent = transparent_bg
# ── Render ────────────────────────────────────────────────────────────────────
print(f"[blender_render] Rendering → {output_path} (Blender {bpy.app.version_string})")
bpy.ops.render.render(write_still=True)
print("[blender_render] render done.")
# ── Pillow post-processing: green bar + model name label ─────────────────────
# Skip overlay for transparent renders to keep clean alpha channel
if transparent_bg:
    print("[blender_render] Transparent mode — skipping Pillow overlay.")
else:
    try:
        from PIL import Image, ImageDraw, ImageFont
        img = Image.open(output_path).convert("RGBA")
        draw = ImageDraw.Draw(img)
        W, H = img.size
        # Schaeffler green top bar
        bar_h = max(8, H // 32)
        draw.rectangle([0, 0, W - 1, bar_h - 1], fill=(0, 137, 61, 255))
        # Model name strip at bottom (semi-transparent dark band)
        model_name = os.path.splitext(os.path.basename(glb_path))[0]
        label_h = max(20, H // 20)
        img.alpha_composite(
            Image.new("RGBA", (W, label_h), (30, 30, 30, 180)),
            dest=(0, H - label_h),
        )
        font_size = max(10, label_h - 6)
        font = None
        # First available bold system font wins; bitmap default as last resort.
        for fp in [
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
            "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
            "/usr/share/fonts/truetype/freefont/FreeSansBold.ttf",
        ]:
            if os.path.exists(fp):
                try:
                    font = ImageFont.truetype(fp, font_size)
                    break
                except Exception:
                    pass
        if font is None:
            font = ImageFont.load_default()
        # Centre the label horizontally and vertically within the strip.
        tb = draw.textbbox((0, 0), model_name, font=font)
        text_w = tb[2] - tb[0]
        draw.text(
            ((W - text_w) // 2, H - label_h + (label_h - (tb[3] - tb[1])) // 2),
            model_name, font=font, fill=(255, 255, 255, 255),
        )
        img.convert("RGB").save(output_path, format="PNG")
        # FIX: was an f-string with no placeholders (ruff F541).
        print("[blender_render] Pillow overlay applied.")
    except ImportError:
        # FIX: restored the missing separator in the log message.
        print("[blender_render] Pillow not in Blender Python — skipping overlay.")
    except Exception as exc:
        # The overlay is cosmetic — never fail the render because of it.
        print(f"[blender_render] Pillow overlay failed (non-fatal): {exc}")
print("[blender_render] Done.")
-4
View File
@@ -1,4 +0,0 @@
fastapi>=0.110.0
uvicorn[standard]>=0.27.0
cadquery>=2.4.0
pillow>=10.2.0
-37
View File
@@ -1,37 +0,0 @@
# Flamenco render-farm image: runs as Manager or Worker depending on
# FLAMENCO_MODE (see entrypoint.sh).
FROM ubuntu:24.04
ENV DEBIAN_FRONTEND=noninteractive
ENV FLAMENCO_VERSION=3.8
# ffmpeg for video composition; the libgl/libegl set is presumably for
# headless Blender rendering on workers — confirm before trimming.
RUN apt-get update && apt-get install -y --no-install-recommends \
    wget ca-certificates ffmpeg python3 python3-pip python3-venv \
    libgl1 libglib2.0-0 libxrender1 libsm6 libxext6 \
    libegl1 libgles2 \
    && rm -rf /var/lib/apt/lists/*
# Install Flamenco binary
RUN mkdir -p /opt/flamenco && \
    wget -qO /tmp/flamenco.tar.gz \
    "https://flamenco.blender.org/downloads/flamenco-${FLAMENCO_VERSION}-linux-amd64.tar.gz" && \
    tar -xzf /tmp/flamenco.tar.gz -C /opt/flamenco --strip-components=1 && \
    rm /tmp/flamenco.tar.gz
# Install cadquery for STEP→STL conversion
RUN python3 -m venv /opt/venv && \
    /opt/venv/bin/pip install --no-cache-dir cadquery
WORKDIR /opt/flamenco
COPY entrypoint.sh /opt/flamenco/entrypoint.sh
COPY manager-config.yaml /opt/flamenco/flamenco-manager.yaml
COPY worker-config.yaml /opt/flamenco/flamenco-worker.yaml
COPY scripts/ /opt/flamenco/scripts/
RUN chmod +x /opt/flamenco/entrypoint.sh
# Shared storage for render outputs
VOLUME ["/shared", "/data"]
EXPOSE 8080
ENTRYPOINT ["/opt/flamenco/entrypoint.sh"]
-18
View File
@@ -1,18 +0,0 @@
#!/bin/bash
# Launch Flamenco as either Manager or Worker, selected via FLAMENCO_MODE.
set -e

FLAMENCO_MODE="${FLAMENCO_MODE:-manager}"

case "$FLAMENCO_MODE" in
  manager)
    echo "Starting Flamenco Manager..."
    # Flamenco 3.x reads flamenco-manager.yaml from the working directory
    exec /opt/flamenco/flamenco-manager
    ;;
  worker)
    echo "Starting Flamenco Worker..."
    echo "Manager URL: ${FLAMENCO_MANAGER_URL:-http://flamenco-manager:8080}"
    # Flamenco 3.x reads flamenco-worker.yaml from the working directory
    exec /opt/flamenco/flamenco-worker
    ;;
  *)
    echo "ERROR: Unknown FLAMENCO_MODE='${FLAMENCO_MODE}'. Use 'manager' or 'worker'."
    exit 1
    ;;
esac
-29
View File
@@ -1,29 +0,0 @@
# Flamenco Manager configuration (Flamenco 3.x schema, _meta.version 3).
_meta:
  version: 3
manager_name: Schaeffler Render Farm
database: /data/flamenco-manager.sqlite
listen: :8080
autodiscoverable: true
# Storage
local_manager_storage_path: /data/manager-storage
shared_storage_path: /shared
shaman:
  enabled: false
# Timeouts
task_timeout: 30m
worker_timeout: 1m
# Variables available to job scripts
variables:
  blender:
    values:
      - platform: linux
        value: /opt/blender/blender
  python:
    values:
      - platform: linux
        value: /opt/venv/bin/python3
-216
View File
@@ -1,216 +0,0 @@
"""STEP to STL converter for Flamenco tasks.
Usage: python convert_step.py <step_path> <stl_path> <quality>
quality: 'low' or 'high'
Produces:
- Combined STL at <stl_path> (for fallback)
- Per-part STLs in <stl_path_without_ext>_parts/ with manifest.json
"""
import sys
import os
import json
import time
def _export_per_part_stls(step_path, parts_dir, quality):
    """Export one STL per named STEP leaf shape using OCP XCAF.

    Creates parts_dir with individual STL files and a manifest.json:
    {"parts": [{"index": 0, "name": "PartName", "file": "00_PartName.stl"}, ...]}

    Args:
        step_path: source STEP file.
        parts_dir: output directory for per-part STLs (created if missing).
        quality: 'high' for tight tessellation tolerances, anything else coarse.

    Returns the manifest list, or empty list on failure. All failures are
    logged; this function never raises for missing OCP/cadquery.
    """
    # Tessellation tolerances per quality preset.
    tol = 0.01 if quality == "high" else 0.3
    angular_tol = 0.05 if quality == "high" else 0.3
    try:
        from OCP.STEPCAFControl import STEPCAFControl_Reader
        from OCP.XCAFDoc import XCAFDoc_DocumentTool, XCAFDoc_ShapeTool
        from OCP.TDataStd import TDataStd_Name
        from OCP.TDF import TDF_Label as TDF_Label_cls, TDF_LabelSequence
        from OCP.XCAFApp import XCAFApp_Application
        from OCP.TDocStd import TDocStd_Document
        from OCP.TCollection import TCollection_ExtendedString
        from OCP.IFSelect import IFSelect_RetDone
        import cadquery as cq
    except ImportError as e:
        print(f"[convert_step] per-part export skipped (import error): {e}")
        return []
    # Read STEP with XCAF so part names survive the transfer.
    app = XCAFApp_Application.GetApplication_s()
    doc = TDocStd_Document(TCollection_ExtendedString("XmlOcaf"))
    app.InitDocument(doc)
    reader = STEPCAFControl_Reader()
    reader.SetNameMode(True)
    status = reader.ReadFile(str(step_path))
    if status != IFSelect_RetDone:
        print(f"[convert_step] XCAF reader failed with status {status}")
        return []
    if not reader.Transfer(doc):
        print("[convert_step] XCAF transfer failed")
        return []
    shape_tool = XCAFDoc_DocumentTool.ShapeTool_s(doc.Main())
    name_id = TDataStd_Name.GetID_s()
    # Recursively collect leaf shapes with their names
    leaves = []  # list of (name, TopoDS_Shape)
    def _get_label_name(label):
        """Extract name string from a TDF_Label (empty string if unnamed)."""
        name_attr = TDataStd_Name()
        if label.FindAttribute(name_id, name_attr):
            return name_attr.Get().ToExtString()
        return ""
    def _collect_leaves(label):
        """Recursively collect leaf (simple shape) labels into `leaves`."""
        if XCAFDoc_ShapeTool.IsAssembly_s(label):
            # Get components of this assembly
            components = TDF_LabelSequence()
            XCAFDoc_ShapeTool.GetComponents_s(label, components)
            # TDF_LabelSequence is 1-indexed.
            for i in range(1, components.Length() + 1):
                comp_label = components.Value(i)
                if XCAFDoc_ShapeTool.IsReference_s(comp_label):
                    ref_label = TDF_Label_cls()
                    XCAFDoc_ShapeTool.GetReferredShape_s(comp_label, ref_label)
                    # Use the component name (instance name), fall back to referred shape name
                    comp_name = _get_label_name(comp_label)
                    ref_name = _get_label_name(ref_label)
                    # Prefer referred shape name — matches material_map keys
                    name = ref_name or comp_name
                    if XCAFDoc_ShapeTool.IsAssembly_s(ref_label):
                        _collect_leaves(ref_label)
                    elif XCAFDoc_ShapeTool.IsSimpleShape_s(ref_label):
                        # Use comp_label shape — includes instance transform (position)
                        shape = XCAFDoc_ShapeTool.GetShape_s(comp_label)
                        leaves.append((name or f"unnamed_{len(leaves)}", shape))
                else:
                    _collect_leaves(comp_label)
        elif XCAFDoc_ShapeTool.IsSimpleShape_s(label):
            name = _get_label_name(label)
            shape = XCAFDoc_ShapeTool.GetShape_s(label)
            leaves.append((name or f"unnamed_{len(leaves)}", shape))
    # Get top-level free shapes
    top_labels = TDF_LabelSequence()
    shape_tool.GetFreeShapes(top_labels)
    for i in range(1, top_labels.Length() + 1):
        _collect_leaves(top_labels.Value(i))
    if not leaves:
        print("[convert_step] no leaf shapes found via XCAF")
        return []
    # Export each leaf shape as individual STL
    os.makedirs(parts_dir, exist_ok=True)
    manifest = []
    for idx, (name, shape) in enumerate(leaves):
        # Sanitize filename: replace problematic chars
        safe_name = name.replace("/", "_").replace("\\", "_").replace(" ", "_")
        filename = f"{idx:02d}_{safe_name}.stl"
        filepath = os.path.join(parts_dir, filename)
        try:
            cq_shape = cq.Shape(shape)
            cq_shape.exportStl(filepath, tolerance=tol, angularTolerance=angular_tol)
            manifest.append({"index": idx, "name": name, "file": filename})
        except Exception as e:
            # A single bad part must not sink the whole export.
            print(f"[convert_step] WARNING: failed to export part '{name}': {e}")
    # Write manifest
    manifest_path = os.path.join(parts_dir, "manifest.json")
    with open(manifest_path, "w") as f:
        json.dump({"parts": manifest}, f, indent=2)
    total_size = sum(
        os.path.getsize(os.path.join(parts_dir, p["file"]))
        for p in manifest
        if os.path.exists(os.path.join(parts_dir, p["file"]))
    )
    print(f"[convert_step] exported {len(manifest)} per-part STLs "
          f"({total_size / 1024:.0f} KB total) to {parts_dir}")
    return manifest
def main():
    """CLI entry point: convert a STEP file to STL (combined + per-part).

    argv: <step_path> <stl_path> <quality>   (quality: 'low' or 'high')
    Exits with status 1 on bad usage or a missing STEP file.
    """
    if len(sys.argv) < 4:
        print("Usage: convert_step.py <step_path> <stl_path> <quality>")
        sys.exit(1)
    step_path = sys.argv[1]
    stl_path = sys.argv[2]
    quality = sys.argv[3]
    if not os.path.isfile(step_path):
        print(f"ERROR: STEP file not found: {step_path}")
        sys.exit(1)
    os.makedirs(os.path.dirname(stl_path), exist_ok=True)
    # Cache hit: skip re-conversion if STL already exists and is non-empty
    if os.path.isfile(stl_path) and os.path.getsize(stl_path) > 0:
        size_kb = os.path.getsize(stl_path) / 1024
        print(f"[convert_step] Cache hit: {stl_path} ({size_kb:.0f} KB) — skipping STEP conversion")
        stl_stem = os.path.splitext(stl_path)[0]
        parts_dir = stl_stem + "_parts"
        manifest_path = os.path.join(parts_dir, "manifest.json")
        # The combined STL may be cached while per-part STLs are missing —
        # backfill them from the STEP file in that case.
        if not os.path.isfile(manifest_path):
            print("[convert_step] Per-part STLs missing — exporting from STEP")
            t1 = time.time()
            try:
                manifest = _export_per_part_stls(step_path, parts_dir, quality)
                if manifest:
                    print(f"[convert_step] per-part export took {time.time() - t1:.1f}s")
                else:
                    print("[convert_step] per-part export empty — combined STL only")
            except Exception as e:
                print(f"[convert_step] per-part export failed (non-fatal): {e}")
        else:
            print(f"[convert_step] Per-part STLs exist: {parts_dir}")
        return
    print(f"Converting STEP -> STL: {step_path}")
    print(f"Quality: {quality}")
    t0 = time.time()
    # Import is function-local: not needed on the cache-hit path above.
    import cadquery as cq
    # Tessellation tolerances per quality preset (tighter for 'high').
    tol = 0.01 if quality == "high" else 0.3
    angular_tol = 0.05 if quality == "high" else 0.3
    result = cq.importers.importStep(step_path)
    cq.exporters.export(
        result,
        stl_path,
        exportType="STL",
        tolerance=tol,
        angularTolerance=angular_tol,
    )
    elapsed = time.time() - t0
    size_kb = os.path.getsize(stl_path) / 1024
    print(f"STL written: {stl_path} ({size_kb:.0f} KB, {elapsed:.1f}s)")
    # Export per-part STLs alongside the combined STL (non-fatal)
    stl_stem = os.path.splitext(stl_path)[0]
    parts_dir = stl_stem + "_parts"
    t1 = time.time()
    try:
        manifest = _export_per_part_stls(step_path, parts_dir, quality)
        if manifest:
            print(f"[convert_step] per-part export took {time.time() - t1:.1f}s")
        else:
            print("[convert_step] per-part export failed or empty — combined STL only")
    except Exception as e:
        print(f"[convert_step] per-part export failed (non-fatal): {e}")


if __name__ == "__main__":
    main()
-121
View File
@@ -1,121 +0,0 @@
// Schaeffler Still Render job type for Flamenco 3.x
// Pipeline: STEP -> STL (cadquery) -> Blender single-frame render
// Settings below are passed positionally to still_render.py by compileJob,
// so their names must stay in sync with that script's argv parsing.
const JOB_TYPE = {
  label: "Schaeffler Still",
  settings: [
    // Required inputs
    { key: "step_path", type: "string", required: true,
      description: "Absolute path to STEP file" },
    { key: "output_path", type: "string", required: true,
      description: "Full path for output image (e.g. /shared/render.png)" },
    // Image / engine options
    { key: "width", type: "int32", default: 1024,
      description: "Output width in pixels" },
    { key: "height", type: "int32", default: 1024,
      description: "Output height in pixels" },
    { key: "engine", type: "string", default: "cycles",
      description: "Blender render engine: cycles or eevee" },
    { key: "samples", type: "int32", default: 256,
      description: "Render samples" },
    { key: "stl_quality", type: "string", default: "low",
      description: "STL mesh quality: low or high" },
    { key: "part_colors_json", type: "string", default: "{}",
      description: "JSON dict mapping part names to hex colors" },
    { key: "transparent_bg", type: "bool", default: false,
      description: "Render with transparent background (PNG alpha)" },
    // Template / material options
    { key: "template_path", type: "string", default: "",
      description: "Path to .blend template file (empty = factory settings)" },
    { key: "target_collection", type: "string", default: "Product",
      description: "Blender collection name to import geometry into" },
    { key: "material_library_path", type: "string", default: "",
      description: "Path to material library .blend file" },
    { key: "material_map_json", type: "string", default: "{}",
      description: "JSON dict mapping part names to material names" },
    { key: "part_names_ordered_json", type: "string", default: "[]",
      description: "JSON array of STEP part names in solid order (for index-based matching)" },
    { key: "lighting_only", type: "bool", default: false,
      description: "Use template only for World/HDRI lighting; always auto-frame with computed camera" },
    { key: "cycles_device", type: "string", default: "auto",
      description: "Cycles compute device: auto (try GPU, fall back to CPU), gpu (force GPU), cpu (force CPU)" },
    { key: "shadow_catcher", type: "bool", default: false,
      description: "Enable Shadowcatcher collection from template and position plane under product (Cycles only)" },
    // Render-position rotation
    { key: "rotation_x", type: "float", default: 0.0,
      description: "Product rotation around X axis in degrees (render position)" },
    { key: "rotation_y", type: "float", default: 0.0,
      description: "Product rotation around Y axis in degrees (render position)" },
    { key: "rotation_z", type: "float", default: 0.0,
      description: "Product rotation around Z axis in degrees (render position)" },
    // Sampling / denoising overrides (empty string = Blender defaults)
    { key: "noise_threshold", type: "string", default: "",
      description: "Adaptive sampling noise threshold (empty = Blender default 0.01)" },
    { key: "denoiser", type: "string", default: "",
      description: "Cycles denoiser: OPTIX, OPENIMAGEDENOISE, or empty for auto" },
    { key: "denoising_input_passes", type: "string", default: "",
      description: "Denoising input passes: RGB, RGB_ALBEDO, RGB_ALBEDO_NORMAL, or empty for default" },
    { key: "denoising_prefilter", type: "string", default: "",
      description: "Denoising prefilter: NONE, FAST, ACCURATE, or empty for default" },
    { key: "denoising_quality", type: "string", default: "",
      description: "Denoising quality: HIGH, BALANCED, FAST, or empty for default (Blender 4.2+)" },
    { key: "denoising_use_gpu", type: "string", default: "",
      description: "Route OIDN denoising through GPU: 1, 0, or empty for auto" },
  ],
};
// Compile a still-render job into two sequential Flamenco tasks:
//   1) convert-step (worker type "misc"):    STEP → STL via cadquery
//   2) render-image (worker type "blender"): single-frame render of the STL
function compileJob(job) {
  const settings = job.settings;
  // Cache STL next to STEP file: {step_dir}/{step_stem}_{quality}.stl
  // This allows re-renders to skip the STEP→STL conversion step.
  const stepDir = settings.step_path.replace(/\/[^/]+$/, "");
  const stepBasename = settings.step_path.replace(/.*\//, "");
  const stepStem = stepBasename.replace(/\.[^.]+$/, "");
  const stlPath = stepDir + "/" + stepStem + "_" + settings.stl_quality + ".stl";
  // Task 1: Convert STEP to STL
  const convertTask = author.Task("convert-step", "misc");
  convertTask.addCommand(author.Command("exec", {
    exe: "{python}",
    args: [
      "/opt/flamenco/scripts/convert_step.py",
      settings.step_path,
      stlPath,
      settings.stl_quality,
    ],
  }));
  job.addTask(convertTask);
  // Task 2: Render single image with Blender.
  // NOTE: these args are positional — their order must match the argv
  // parsing in still_render.py exactly.
  const renderTask = author.Task("render-image", "blender");
  renderTask.addCommand(author.Command("exec", {
    exe: "{blender}",
    args: [
      "--background", "--python",
      "/opt/flamenco/scripts/still_render.py",
      "--",
      stlPath,
      settings.output_path,
      String(settings.width),
      String(settings.height),
      settings.engine,
      String(settings.samples),
      settings.part_colors_json,
      settings.transparent_bg ? "1" : "0",
      settings.template_path || "",
      settings.target_collection || "Product",
      settings.material_library_path || "",
      settings.material_map_json || "{}",
      settings.part_names_ordered_json || "[]",
      settings.lighting_only ? "1" : "0",
      settings.cycles_device || "auto",
      settings.shadow_catcher ? "1" : "0",
      String(settings.rotation_x || 0),
      String(settings.rotation_y || 0),
      String(settings.rotation_z || 0),
      settings.noise_threshold || "",
      settings.denoiser || "",
      settings.denoising_input_passes || "",
      settings.denoising_prefilter || "",
      settings.denoising_quality || "",
      settings.denoising_use_gpu || "",
    ],
  }));
  // Render must wait for the STL to exist.
  renderTask.addDependency(convertTask);
  job.addTask(renderTask);
}
-211
View File
@@ -1,211 +0,0 @@
// Schaeffler Turntable Animation job type for Flamenco 3.x
// Pipeline: STEP -> STL (cadquery) -> Blender scene setup -> Blender -a render -> FFmpeg video
//
// Task flow:
// 1. convert-step : STEP → STL via cadquery
// 2. setup-scene : turntable_setup.py imports STL, applies materials/camera/animation,
// saves a ready-to-render .blend to output_dir/scene.blend
// 3. render-frames : blender --background scene.blend --python turntable_gpu_setup.py -a
// Blender's native -a keeps GPU scene (BVH, textures) loaded for ALL
// frames — no per-frame re-upload overhead.
// 4. compose-video : FFmpeg encodes frame PNGs → MP4
// Job parameter schema exposed to Flamenco for the turntable job type.
// compileJob() reads every value via job.settings.
// FIX: compileJob forwards `settings.transparent_bg` to turntable_setup.py,
// but the key was never declared here, so it had no UI entry and no default.
// It is now declared (default false) — a backward-compatible addition.
const JOB_TYPE = {
  label: "Schaeffler Turntable",
  settings: [
    { key: "step_path", type: "string", required: true,
      description: "Absolute path to STEP file" },
    { key: "output_dir", type: "string", required: true,
      description: "Directory for rendered frames and final video" },
    { key: "output_name", type: "string", required: true, default: "turntable",
      description: "Base name for output files" },
    { key: "frame_count", type: "int32", default: 120,
      description: "Number of frames to render" },
    { key: "fps", type: "int32", default: 30,
      description: "Frames per second for output video" },
    { key: "turntable_degrees", type: "int32", default: 360,
      description: "Total rotation in degrees" },
    { key: "width", type: "int32", default: 1920,
      description: "Output width in pixels" },
    { key: "height", type: "int32", default: 1080,
      description: "Output height in pixels" },
    { key: "engine", type: "string", default: "cycles",
      description: "Blender render engine: cycles or eevee" },
    { key: "samples", type: "int32", default: 128,
      description: "Render samples" },
    { key: "stl_quality", type: "string", default: "low",
      description: "STL mesh quality: low or high" },
    { key: "part_colors_json", type: "string", default: "{}",
      description: "JSON dict mapping part names to hex colors" },
    { key: "template_path", type: "string", default: "",
      description: "Path to .blend template file (empty = factory settings)" },
    { key: "target_collection", type: "string", default: "Product",
      description: "Blender collection name to import geometry into" },
    { key: "material_library_path", type: "string", default: "",
      description: "Path to material library .blend file" },
    { key: "material_map_json", type: "string", default: "{}",
      description: "JSON dict mapping part names to material names" },
    { key: "part_names_ordered_json", type: "string", default: "[]",
      description: "JSON array of STEP part names in solid order (for index-based matching)" },
    { key: "lighting_only", type: "bool", default: false,
      description: "Use template only for World/HDRI lighting; always auto-frame with computed camera" },
    { key: "cycles_device", type: "string", default: "auto",
      description: "Cycles compute device: auto (try GPU, fall back to CPU), gpu (force GPU), cpu (force CPU)" },
    { key: "shadow_catcher", type: "bool", default: false,
      description: "Enable Shadowcatcher collection from template and position plane under product (Cycles only)" },
    { key: "rotation_x", type: "float", default: 0.0,
      description: "Product rotation around X axis in degrees (render position)" },
    { key: "rotation_y", type: "float", default: 0.0,
      description: "Product rotation around Y axis in degrees (render position)" },
    { key: "rotation_z", type: "float", default: 0.0,
      description: "Product rotation around Z axis in degrees (render position)" },
    { key: "turntable_axis", type: "string", default: "world_z",
      description: "Turntable rotation axis: world_z (default), world_x, or world_y" },
    { key: "bg_color", type: "string", default: "",
      description: "Solid background hex color for compositing (e.g. #1a1a2e); empty = HDR visible as background" },
    { key: "transparent_bg", type: "bool", default: false,
      description: "Render frames with transparent film (FFmpeg composites them over bg_color)" },
    { key: "camera_orbit", type: "bool", default: true,
      description: "Rotate camera around product instead of rotating product (true = better GPU performance, BVH cached)" },
    { key: "noise_threshold", type: "string", default: "",
      description: "Adaptive sampling noise threshold (empty = Blender default 0.01)" },
    { key: "denoiser", type: "string", default: "",
      description: "Cycles denoiser: OPTIX, OPENIMAGEDENOISE, or empty for auto" },
    { key: "denoising_input_passes", type: "string", default: "",
      description: "Denoising input passes: RGB, RGB_ALBEDO, RGB_ALBEDO_NORMAL, or empty for default" },
    { key: "denoising_prefilter", type: "string", default: "",
      description: "Denoising prefilter: NONE, FAST, ACCURATE, or empty for default" },
    { key: "denoising_quality", type: "string", default: "",
      description: "Denoising quality: HIGH, BALANCED, FAST, or empty for default (Blender 4.2+)" },
    { key: "denoising_use_gpu", type: "string", default: "",
      description: "Route OIDN denoising through GPU: 1, 0, or empty for auto" },
  ],
};
// Compile the turntable job into its 4-task Flamenco task graph:
// convert-step -> setup-scene -> render-frames -> compose-video.
// NOTE: the args arrays below are positional CLI contracts consumed by the
// Python scripts' sys.argv parsing — never reorder or remove entries.
function compileJob(job) {
  const settings = job.settings;
  // Cache STL next to STEP file: {step_dir}/{step_stem}_{quality}.stl
  const stepDir = settings.step_path.replace(/\/[^/]+$/, "");
  const stepBasename = settings.step_path.replace(/.*\//, "");
  const stepStem = stepBasename.replace(/\.[^.]+$/, "");
  const stlPath = stepDir + "/" + stepStem + "_" + settings.stl_quality + ".stl";
  const framesDir = settings.output_dir + "/frames";
  const scenePath = settings.output_dir + "/scene.blend";
  const videoPath = settings.output_dir + "/" + settings.output_name + ".mp4";
  // Task 1: Convert STEP to STL
  const convertTask = author.Task("convert-step", "misc");
  convertTask.addCommand(author.Command("exec", {
    exe: "{python}",
    args: [
      "/opt/flamenco/scripts/convert_step.py",
      settings.step_path,
      stlPath,
      settings.stl_quality,
    ],
  }));
  job.addTask(convertTask);
  // Task 2: Setup Blender scene and save to scene.blend
  // turntable_setup.py imports the STL, assigns materials, sets up the
  // camera rig and pivot animation, configures the compositor (bg_color),
  // and saves the complete scene — ready for native -a rendering.
  const setupTask = author.Task("setup-scene", "blender");
  setupTask.addCommand(author.Command("exec", {
    exe: "{blender}",
    args: [
      "--background", "--python",
      "/opt/flamenco/scripts/turntable_setup.py",
      "--",
      stlPath,
      framesDir,
      String(settings.frame_count),
      String(settings.turntable_degrees),
      String(settings.width),
      String(settings.height),
      settings.engine,
      String(settings.samples),
      settings.part_colors_json,
      settings.template_path || "",
      settings.target_collection || "Product",
      settings.material_library_path || "",
      settings.material_map_json || "{}",
      settings.part_names_ordered_json || "[]",
      settings.lighting_only ? "1" : "0",
      // NOTE(review): default here is "gpu" while the JOB_TYPE declares
      // "auto" — the setting default normally wins; confirm intent.
      settings.cycles_device || "gpu",
      settings.shadow_catcher ? "1" : "0",
      String(settings.rotation_x || 0),
      String(settings.rotation_y || 0),
      String(settings.rotation_z || 0),
      settings.turntable_axis || "world_z",
      settings.bg_color || "",
      settings.transparent_bg ? "1" : "0",
      scenePath,
      settings.camera_orbit !== false ? "1" : "0",
      settings.noise_threshold || "",
      settings.denoiser || "",
      settings.denoising_input_passes || "",
      settings.denoising_prefilter || "",
      settings.denoising_quality || "",
      settings.denoising_use_gpu || "",
    ],
  }));
  setupTask.addDependency(convertTask);
  job.addTask(setupTask);
  // Task 3: Render all frames using Blender's native -a (--render-anim)
  // turntable_gpu_setup.py re-applies GPU preferences (user-level, not stored
  // in .blend), then -a renders all frames in one process — GPU scene stays
  // loaded between frames, no per-frame BVH re-upload.
  const renderTask = author.Task("render-frames", "blender");
  renderTask.addCommand(author.Command("exec", {
    exe: "{blender}",
    args: [
      "--background",
      scenePath,
      "--python",
      "/opt/flamenco/scripts/turntable_gpu_setup.py",
      "-a",
    ],
  }));
  renderTask.addDependency(setupTask);
  job.addTask(renderTask);
  // Task 4: Compose video with FFmpeg
  // Blender writes transparent PNG frames (film_transparent=True) when bg_color is set.
  // FFmpeg composites them over a solid colour background using the lavfi color source.
  // Without bg_color, frames are opaque and encoded directly.
  const composeTask = author.Task("compose-video", "misc");
  const bgHex = (settings.bg_color || "").replace(/^#/, "");
  const ffmpegArgs = bgHex
    ? [
      "-y",
      // Background: solid colour at video resolution and frame rate
      "-f", "lavfi",
      "-i", "color=c=0x" + bgHex + ":size=" + String(settings.width) + "x" + String(settings.height) + ":rate=" + String(settings.fps),
      // Foreground: transparent PNG frame sequence
      "-framerate", String(settings.fps),
      "-i", framesDir + "/frame_%04d.png",
      // Composite foreground over background
      "-filter_complex", "[0:v][1:v]overlay=0:0:shortest=1",
      "-c:v", "libx264",
      "-pix_fmt", "yuv420p",
      "-preset", "medium",
      "-crf", "18",
      videoPath,
    ]
    : [
      "-y",
      "-framerate", String(settings.fps),
      "-i", framesDir + "/frame_%04d.png",
      "-c:v", "libx264",
      "-pix_fmt", "yuv420p",
      "-preset", "medium",
      "-crf", "18",
      videoPath,
    ];
  composeTask.addCommand(author.Command("exec", {
    exe: "ffmpeg",
    args: ffmpegArgs,
  }));
  composeTask.addDependency(renderTask);
  job.addTask(composeTask);
}
-781
View File
@@ -1,781 +0,0 @@
"""Blender Python script: single-frame still render for Flamenco.
Matches the lighting, camera, materials, and post-processing of the
Celery blender_render.py so that LQ and HQ renders look consistent.
Usage (from Blender):
blender --background --python still_render.py -- \
<stl_path> <output_path> <width> <height> <engine> <samples> \
<part_colors_json> <transparent_bg> \
[template_path] [target_collection] [material_library_path] [material_map_json]
"""
import bpy
import sys
import os
import json
import math
from mathutils import Vector, Matrix
# ── Colour palette (matches blender_render.py / Three.js renderer) ───────────
# 10-colour fallback palette; parts without an explicit colour or library
# material cycle through it by index (see _assign_palette_material).
PALETTE_HEX = [
    "#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
    "#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]
def _srgb_to_linear(c: int) -> float:
v = c / 255.0
return v / 12.92 if v <= 0.04045 else ((v + 0.055) / 1.055) ** 2.4
def _hex_to_linear(hex_color: str) -> tuple:
    """Parse '#RRGGBB' (leading '#' optional) into a linear-space RGBA tuple.

    Alpha is always 1.0; each channel goes through _srgb_to_linear.
    """
    digits = hex_color.lstrip('#')
    r, g, b = (_srgb_to_linear(int(digits[i:i + 2], 16)) for i in (0, 2, 4))
    return (r, g, b, 1.0)
# Palette pre-converted to linear colour space once at import time.
PALETTE_LINEAR = [_hex_to_linear(h) for h in PALETTE_HEX]
SMOOTH_ANGLE = 30 # degrees — auto-smooth threshold passed to _apply_smooth
# ── Helper functions ─────────────────────────────────────────────────────────
def _ensure_collection(name: str):
    """Return the collection called *name*, creating and linking it if absent."""
    if name not in bpy.data.collections:
        fresh = bpy.data.collections.new(name)
        # New collections must be linked under the scene root to be visible.
        bpy.context.scene.collection.children.link(fresh)
        return fresh
    return bpy.data.collections[name]
def _assign_palette_material(part_obj, index):
    """Give *part_obj* a fresh node-based material coloured from the palette.

    The colour is picked from PALETTE_LINEAR by index (wrapping around), and
    any materials previously on the mesh are discarded.
    """
    material = bpy.data.materials.new(name=f"Part_{index}")
    material.use_nodes = True
    principled = material.node_tree.nodes.get("Principled BSDF")
    if principled:
        rgba = PALETTE_LINEAR[index % len(PALETTE_LINEAR)]
        principled.inputs["Base Color"].default_value = rgba
        principled.inputs["Metallic"].default_value = 0.35
        principled.inputs["Roughness"].default_value = 0.40
        try:
            # "Specular IOR Level" may be absent depending on Blender version.
            principled.inputs["Specular IOR Level"].default_value = 0.5
        except KeyError:
            pass
    part_obj.data.materials.clear()
    part_obj.data.materials.append(material)
def _apply_smooth(part_obj, angle_deg):
    """Shade *part_obj* smooth with the given angle threshold (degrees).

    A non-positive angle yields flat shading instead.
    """
    bpy.context.view_layer.objects.active = part_obj
    part_obj.select_set(True)
    if angle_deg <= 0:
        bpy.ops.object.shade_flat()
        return
    threshold = math.radians(angle_deg)
    try:
        bpy.ops.object.shade_smooth_by_angle(angle=threshold)
    except AttributeError:
        # Older Blender builds lack the angle-aware operator; emulate it
        # with plain smooth shading plus the auto-smooth mesh settings.
        bpy.ops.object.shade_smooth()
        part_obj.data.use_auto_smooth = True
        part_obj.data.auto_smooth_angle = threshold
import re as _re
def _scale_mm_to_m(parts):
    """Shrink imported STL objects from millimetres to metres (factor 0.001).

    STEP/STL geometry is authored in mm while Blender's base unit is the
    metre; without this pass a 50 mm part would span 50 m in the scene,
    dwarfing any template environment designed in metric units.
    """
    if not parts:
        return
    bpy.ops.object.select_all(action='DESELECT')
    factor = 0.001
    for obj in parts:
        obj.scale = (factor, factor, factor)
        obj.location *= factor
        obj.select_set(True)
    # transform_apply needs an active object; any selected part will do.
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(scale=True, location=False, rotation=False)
    print(f"[still_render] scaled {len(parts)} parts mm→m (×0.001)")
def _apply_rotation(parts, rx, ry, rz):
    """Apply an Euler rotation (degrees, XYZ order) to all parts around the world origin.

    The rotation is baked into the meshes via transform_apply so later
    bounding-box and camera computations see the rotated geometry.
    No-op for an empty part list or an all-zero rotation.
    """
    if not parts or (rx == 0.0 and ry == 0.0 and rz == 0.0):
        return
    # FIX: removed redundant local `import math` — math is already imported
    # at module level. Euler is not, so it stays a local import.
    from mathutils import Euler
    rot_mat = Euler((math.radians(rx), math.radians(ry), math.radians(rz)), 'XYZ').to_matrix().to_4x4()
    for p in parts:
        p.matrix_world = rot_mat @ p.matrix_world
    bpy.ops.object.select_all(action='DESELECT')
    for p in parts:
        p.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    print(f"[still_render] applied rotation ({rx}°, {ry}°, {rz}°) to {len(parts)} parts")
def _import_stl(stl_file):
    """Import STL into Blender, using per-part STLs if available.

    Checks for {stl_stem}_parts/manifest.json next to the STL file.
    - Per-part mode: imports each part STL, names Blender object after STEP part name.
    - Fallback: imports combined STL and splits by loose geometry.
    Returns list of Blender mesh objects, centred at origin.
    """
    stl_dir = os.path.dirname(stl_file)
    stl_stem = os.path.splitext(os.path.basename(stl_file))[0]
    parts_dir = os.path.join(stl_dir, stl_stem + "_parts")
    manifest_path = os.path.join(parts_dir, "manifest.json")
    parts = []
    if os.path.isfile(manifest_path):
        # ── Per-part mode ────────────────────────────────────────────────
        try:
            with open(manifest_path, "r") as f:
                manifest = json.loads(f.read())
            # manifest["parts"] entries are {"file": ..., "name": ...}
            part_entries = manifest.get("parts", [])
        except Exception as e:
            # Unreadable manifest demotes us to combined-STL fallback below.
            print(f"[still_render] WARNING: failed to read manifest: {e}")
            part_entries = []
        if part_entries:
            for entry in part_entries:
                part_file = os.path.join(parts_dir, entry["file"])
                part_name = entry["name"]
                if not os.path.isfile(part_file):
                    print(f"[still_render] WARNING: part STL missing: {part_file}")
                    continue
                bpy.ops.object.select_all(action='DESELECT')
                bpy.ops.wm.stl_import(filepath=part_file)
                imported = bpy.context.selected_objects
                if imported:
                    # Rename object AND mesh so downstream name matching works.
                    obj = imported[0]
                    obj.name = part_name
                    if obj.data:
                        obj.data.name = part_name
                    parts.append(obj)
            if parts:
                print(f"[still_render] imported {len(parts)} named parts from per-part STLs")
    # ── Fallback: combined STL + separate by loose ───────────────────────
    if not parts:
        bpy.ops.wm.stl_import(filepath=stl_file)
        obj = bpy.context.selected_objects[0] if bpy.context.selected_objects else None
        if obj is None:
            # Nothing imported — abort the whole render job.
            print(f"ERROR: No objects imported from {stl_file}")
            sys.exit(1)
        bpy.context.view_layer.objects.active = obj
        # Centre the combined mesh at the origin before splitting.
        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0.0, 0.0, 0.0)
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.separate(type='LOOSE')
        bpy.ops.object.mode_set(mode='OBJECT')
        parts = list(bpy.context.selected_objects)
        print(f"[still_render] fallback: separated into {len(parts)} part(s)")
        return parts
    # ── Centre per-part imports at origin (combined bbox) ────────────────
    all_corners = []
    for p in parts:
        all_corners.extend(p.matrix_world @ Vector(c) for c in p.bound_box)
    if all_corners:
        mins = Vector((min(v.x for v in all_corners),
                       min(v.y for v in all_corners),
                       min(v.z for v in all_corners)))
        maxs = Vector((max(v.x for v in all_corners),
                       max(v.y for v in all_corners),
                       max(v.z for v in all_corners)))
        center = (mins + maxs) * 0.5
        for p in parts:
            p.location -= center
    return parts
def _resolve_part_name(index, part_obj, part_names_ordered):
"""Get the STEP part name for a Blender part by index.
With per-part import, part_obj.name IS the STEP name (possibly with
Blender .NNN suffix). Falls back to part_names_ordered for combined-STL mode.
"""
base_name = _re.sub(r'\.\d{3}$', '', part_obj.name)
if part_names_ordered and index < len(part_names_ordered):
return part_names_ordered[index]
return base_name
def _apply_material_library(parts, mat_lib_path, mat_map, part_names_ordered=None):
    """Append materials from library .blend and assign to parts via material_map.

    With per-part STL import, Blender objects are named after STEP parts,
    so matching is by name (stripping Blender .NNN suffix for duplicates).
    Falls back to part_names_ordered index-based matching for combined-STL mode.
    mat_map: {part_name_lower: material_name}
    Parts without a match keep their current material.
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        # Best-effort: missing library just leaves existing materials in place.
        print(f"[still_render] material library not found: {mat_lib_path}")
        return
    # Collect unique material names needed
    needed = set(mat_map.values())
    if not needed:
        return
    # Append materials from library
    appended = {}
    for mat_name in needed:
        inner_path = f"{mat_lib_path}/Material/{mat_name}"
        try:
            bpy.ops.wm.append(
                filepath=inner_path,
                directory=f"{mat_lib_path}/Material/",
                filename=mat_name,
                link=False,
            )
            # Append succeeding does not guarantee the datablock exists —
            # verify it actually arrived before recording it.
            if mat_name in bpy.data.materials:
                appended[mat_name] = bpy.data.materials[mat_name]
                print(f"[still_render] appended material: {mat_name}")
            else:
                print(f"[still_render] WARNING: material '{mat_name}' not found after append")
        except Exception as exc:
            print(f"[still_render] WARNING: failed to append material '{mat_name}': {exc}")
    if not appended:
        return
    # Assign materials to parts — primary: name-based (per-part STL mode),
    # secondary: index-based via part_names_ordered (combined STL fallback)
    assigned_count = 0
    for i, part in enumerate(parts):
        # Try name-based matching first (strip Blender .NNN suffix)
        base_name = _re.sub(r'\.\d{3}$', '', part.name)
        part_key = base_name.lower().strip()
        mat_name = mat_map.get(part_key)
        # Fall back to index-based matching via part_names_ordered
        if not mat_name and part_names_ordered and i < len(part_names_ordered):
            step_name = part_names_ordered[i]
            part_key = step_name.lower().strip()
            mat_name = mat_map.get(part_key)
        if mat_name and mat_name in appended:
            part.data.materials.clear()
            part.data.materials.append(appended[mat_name])
            assigned_count += 1
            print(f"[still_render] assigned '{mat_name}' to part '{part.name}'")
    print(f"[still_render] material assignment: {assigned_count}/{len(parts)} parts matched")
def main():
    """Entry point: render a single still from an STL, driven by CLI args.

    Positional args (after ``--``): stl_path, output_path, width, height,
    engine, samples, then optional: part_colors_json, transparent_bg,
    template_path, target_collection, material_library_path,
    material_map_json, part_names_ordered_json, lighting_only,
    cycles_device, shadow_catcher, rotation_x/y/z, noise_threshold,
    denoiser, denoising_input_passes, denoising_prefilter,
    denoising_quality, denoising_use_gpu. Exits 1 on a missing template
    or an empty STL import.
    """
    argv = sys.argv
    # Blender passes script args after a "--" separator.
    args = argv[argv.index("--") + 1:]
    stl_path = args[0]
    output_path = args[1]
    width = int(args[2])
    height = int(args[3])
    engine = args[4]
    samples = int(args[5])
    part_colors_json = args[6] if len(args) > 6 else "{}"
    transparent_bg = args[7] == "1" if len(args) > 7 else False
    # Template + material library args (passed by schaeffler-still.js)
    template_path = args[8] if len(args) > 8 and args[8] else ""
    target_collection = args[9] if len(args) > 9 else "Product"
    material_library_path = args[10] if len(args) > 10 and args[10] else ""
    material_map_raw = args[11] if len(args) > 11 else "{}"
    part_names_ordered_raw = args[12] if len(args) > 12 else "[]"
    lighting_only = args[13] == "1" if len(args) > 13 else False
    cycles_device = args[14].lower() if len(args) > 14 else "auto"  # "auto", "gpu", "cpu"
    shadow_catcher = args[15] == "1" if len(args) > 15 else False
    rotation_x = float(args[16]) if len(args) > 16 else 0.0
    rotation_y = float(args[17]) if len(args) > 17 else 0.0
    rotation_z = float(args[18]) if len(args) > 18 else 0.0
    noise_threshold_arg = args[19] if len(args) > 19 else ""
    denoiser_arg = args[20] if len(args) > 20 else ""
    denoising_input_passes_arg = args[21] if len(args) > 21 else ""
    denoising_prefilter_arg = args[22] if len(args) > 22 else ""
    denoising_quality_arg = args[23] if len(args) > 23 else ""
    denoising_use_gpu_arg = args[24] if len(args) > 24 else ""
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    # Malformed JSON inputs degrade to empty containers rather than aborting.
    try:
        part_colors = json.loads(part_colors_json)
    except json.JSONDecodeError:
        part_colors = {}
    try:
        material_map = json.loads(material_map_raw) if material_map_raw else {}
    except json.JSONDecodeError:
        material_map = {}
    try:
        part_names_ordered = json.loads(part_names_ordered_raw) if part_names_ordered_raw else []
    except json.JSONDecodeError:
        part_names_ordered = []
    # Validate template path: if provided it MUST exist on disk.
    # A missing template is a configuration error — fail loudly rather than
    # silently falling back to factory-settings mode which produces renders that
    # look completely wrong.
    if template_path and not os.path.isfile(template_path):
        print(f"[still_render] ERROR: template_path was provided but file not found: {template_path}")
        print("[still_render] Ensure the blend-templates directory is accessible on this worker.")
        sys.exit(1)
    use_template = bool(template_path)
    print(f"[still_render] engine={engine}, samples={samples}, size={width}x{height}, transparent={transparent_bg}")
    print(f"[still_render] part_names_ordered: {len(part_names_ordered)} entries")
    if use_template:
        print(f"[still_render] template={template_path}, collection={target_collection}, lighting_only={lighting_only}")
    else:
        print("[still_render] no template — using factory settings (Mode A)")
    if material_library_path:
        print(f"[still_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
    # ── SCENE SETUP ──────────────────────────────────────────────────────────
    if use_template:
        # ── MODE B: Template-based render ────────────────────────────────────
        print(f"[still_render] Opening template: {template_path}")
        bpy.ops.wm.open_mainfile(filepath=template_path)
        # Find or create target collection
        target_col = _ensure_collection(target_collection)
        # Import and split STL
        parts = _import_stl(stl_path)
        # Scale mm→m: STEP coords are mm, Blender default unit is metres
        _scale_mm_to_m(parts)
        # Apply render position rotation (before camera/bbox calculations)
        _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
        # Move imported parts into target collection
        for part in parts:
            for col in list(part.users_collection):
                col.objects.unlink(part)
            target_col.objects.link(part)
        # Apply smooth shading
        for part in parts:
            _apply_smooth(part, SMOOTH_ANGLE)
        # Material assignment: library materials if available, otherwise palette
        if material_library_path and material_map:
            mat_map_lower = {k.lower(): v for k, v in material_map.items()}
            _apply_material_library(parts, material_library_path, mat_map_lower, part_names_ordered)
            # Parts not matched by library get palette fallback
            for i, part in enumerate(parts):
                if not part.data.materials or len(part.data.materials) == 0:
                    _assign_palette_material(part, i)
        else:
            for i, part in enumerate(parts):
                step_name = _resolve_part_name(i, part, part_names_ordered)
                color_hex = part_colors.get(step_name)
                if color_hex:
                    color = _hex_to_linear(color_hex)
                    mat = bpy.data.materials.new(name=f"Part_{i}")
                    mat.use_nodes = True
                    bsdf = mat.node_tree.nodes.get("Principled BSDF")
                    if bsdf:
                        bsdf.inputs["Base Color"].default_value = color
                        bsdf.inputs["Metallic"].default_value = 0.35
                        bsdf.inputs["Roughness"].default_value = 0.40
                        try:
                            bsdf.inputs["Specular IOR Level"].default_value = 0.5
                        except KeyError:
                            pass
                    part.data.materials.clear()
                    part.data.materials.append(mat)
                else:
                    _assign_palette_material(part, i)
        # ── Shadow catcher (Cycles only, template mode only) ─────────────────
        if shadow_catcher:
            sc_col_name = "Shadowcatcher"
            sc_obj_name = "Shadowcatcher"
            # Un-exclude the Shadowcatcher collection in every view layer —
            # templates ship with it disabled so non-shadow renders ignore it.
            for vl in bpy.context.scene.view_layers:
                def _enable_col_recursive(layer_col):
                    if layer_col.collection.name == sc_col_name:
                        layer_col.exclude = False
                        layer_col.collection.hide_render = False
                        layer_col.collection.hide_viewport = False
                        return True
                    for child in layer_col.children:
                        if _enable_col_recursive(child):
                            return True
                    return False
                _enable_col_recursive(vl.layer_collection)
            sc_obj = bpy.data.objects.get(sc_obj_name)
            if sc_obj:
                # Drop the catcher plane to the lowest point of the product.
                all_world_z = []
                for part in parts:
                    for corner in part.bound_box:
                        all_world_z.append((part.matrix_world @ Vector(corner)).z)
                if all_world_z:
                    sc_obj.location.z = min(all_world_z)
                print(f"[still_render] shadow catcher enabled, plane Z={sc_obj.location.z:.4f}")
            else:
                print(f"[still_render] WARNING: shadow catcher object '{sc_obj_name}' not found in template")
        # lighting_only: use template World/HDRI but force auto-camera UNLESS the shadow
        # catcher is enabled — in that case the template camera is already positioned to
        # show both the product and its shadow on the ground plane.
        needs_auto_camera = (lighting_only and not shadow_catcher) or not bpy.context.scene.camera
        if lighting_only and not shadow_catcher:
            print("[still_render] lighting_only mode: using template World/HDRI, forcing auto-camera")
        elif needs_auto_camera:
            print("[still_render] WARNING: template has no camera — will create auto-camera")
        # Set very close near clip on template camera for mm-scale parts (now in metres)
        if not needs_auto_camera and bpy.context.scene.camera:
            bpy.context.scene.camera.data.clip_start = 0.001
        print(f"[still_render] template mode: {len(parts)} parts imported into collection '{target_collection}'")
    else:
        # ── MODE A: Factory settings (original behavior) ─────────────────────
        needs_auto_camera = True
        bpy.ops.wm.read_factory_settings(use_empty=True)
        parts = _import_stl(stl_path)
        # Scale mm→m: STEP coords are mm, Blender default unit is metres
        _scale_mm_to_m(parts)
        # Apply render position rotation (before camera/bbox calculations)
        _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
        for i, part in enumerate(parts):
            _apply_smooth(part, SMOOTH_ANGLE)
        # Material assignment: library materials if available, else part_colors/palette
        if material_library_path and material_map:
            mat_map_lower = {k.lower(): v for k, v in material_map.items()}
            _apply_material_library(parts, material_library_path, mat_map_lower, part_names_ordered)
            # Palette fallback for unmatched parts
            for i, part in enumerate(parts):
                if not part.data.materials or len(part.data.materials) == 0:
                    _assign_palette_material(part, i)
        else:
            # part_colors or palette — use index-based lookup via part_names_ordered
            for i, part in enumerate(parts):
                step_name = _resolve_part_name(i, part, part_names_ordered)
                color_hex = part_colors.get(step_name)
                if color_hex:
                    color = _hex_to_linear(color_hex)
                else:
                    color = PALETTE_LINEAR[i % len(PALETTE_LINEAR)]
                mat = bpy.data.materials.new(name=f"Part_{i}")
                mat.use_nodes = True
                bsdf = mat.node_tree.nodes.get("Principled BSDF")
                if bsdf:
                    bsdf.inputs["Base Color"].default_value = color
                    bsdf.inputs["Metallic"].default_value = 0.35
                    bsdf.inputs["Roughness"].default_value = 0.40
                    try:
                        bsdf.inputs["Specular IOR Level"].default_value = 0.5
                    except KeyError:
                        pass
                part.data.materials.clear()
                part.data.materials.append(mat)
    if needs_auto_camera:
        # ── Combined bounding box / bounding sphere ──────────────────────────
        all_corners = []
        for part in parts:
            all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
        bbox_min = Vector((
            min(v.x for v in all_corners),
            min(v.y for v in all_corners),
            min(v.z for v in all_corners),
        ))
        bbox_max = Vector((
            max(v.x for v in all_corners),
            max(v.y for v in all_corners),
            max(v.z for v in all_corners),
        ))
        bbox_center = (bbox_min + bbox_max) * 0.5
        bbox_dims = bbox_max - bbox_min
        # Clamp radius so degenerate (flat/empty) geometry can't divide by zero.
        bsphere_radius = max(bbox_dims.length * 0.5, 0.001)
        print(f"[still_render] bbox_dims={tuple(round(d, 4) for d in bbox_dims)}, "
              f"bsphere_radius={bsphere_radius:.4f}")
        # ── Lighting — only in Mode A (factory settings) ─────────────────────
        # In template mode the .blend file provides its own World/HDRI lighting.
        # Adding auto-lights would overpower the template's intended look.
        if not use_template:
            light_dist = bsphere_radius * 6.0
            bpy.ops.object.light_add(type='SUN', location=(
                bbox_center.x + light_dist * 0.5,
                bbox_center.y - light_dist * 0.35,
                bbox_center.z + light_dist,
            ))
            sun = bpy.context.active_object
            sun.data.energy = 4.0
            sun.rotation_euler = (math.radians(45), 0, math.radians(30))
            bpy.ops.object.light_add(type='AREA', location=(
                bbox_center.x - light_dist * 0.4,
                bbox_center.y + light_dist * 0.4,
                bbox_center.z + light_dist * 0.7,
            ))
            fill = bpy.context.active_object
            fill.data.energy = max(800.0, bsphere_radius ** 2 * 2000.0)
            fill.data.size = max(4.0, bsphere_radius * 4.0)
        # ── Camera (isometric-style, matches blender_render.py) ──────────────
        ELEVATION_DEG = 28.0
        AZIMUTH_DEG = 40.0
        LENS_MM = 50.0
        SENSOR_WIDTH_MM = 36.0
        FILL_FACTOR = 0.85
        elevation_rad = math.radians(ELEVATION_DEG)
        azimuth_rad = math.radians(AZIMUTH_DEG)
        cam_dir = Vector((
            math.cos(elevation_rad) * math.cos(azimuth_rad),
            math.cos(elevation_rad) * math.sin(azimuth_rad),
            math.sin(elevation_rad),
        )).normalized()
        # Fit the bounding sphere inside the narrower of the two FOV axes.
        fov_h = math.atan(SENSOR_WIDTH_MM / (2.0 * LENS_MM))
        fov_v = math.atan(SENSOR_WIDTH_MM * (height / width) / (2.0 * LENS_MM))
        fov_used = min(fov_h, fov_v)
        dist = (bsphere_radius / math.tan(fov_used)) / FILL_FACTOR
        dist = max(dist, bsphere_radius * 1.5)
        cam_location = bbox_center + cam_dir * dist
        bpy.ops.object.camera_add(location=cam_location)
        cam_obj = bpy.context.active_object
        cam_obj.data.lens = LENS_MM
        bpy.context.scene.camera = cam_obj
        # Look-at rotation
        look_dir = (bbox_center - cam_location).normalized()
        up_world = Vector((0.0, 0.0, 1.0))
        right = look_dir.cross(up_world)
        if right.length < 1e-6:
            # Looking straight up/down: pick an arbitrary horizontal right axis.
            right = Vector((1.0, 0.0, 0.0))
        right.normalize()
        cam_up = right.cross(look_dir).normalized()
        rot_mat = Matrix((
            (right.x, right.y, right.z),
            (cam_up.x, cam_up.y, cam_up.z),
            (-look_dir.x, -look_dir.y, -look_dir.z),
        )).transposed()
        cam_obj.rotation_euler = rot_mat.to_euler('XYZ')
        cam_obj.data.clip_start = max(dist * 0.001, 0.0001)
        cam_obj.data.clip_end = dist + bsphere_radius * 3.0
    # ── World background — only in Mode A ───────────────────────────────
    # In template mode the .blend file owns its World (HDRI, sky texture,
    # studio lighting). Overwriting it would destroy the HDR look the
    # template was designed to use (e.g. Alpha-HDR output types).
    if not use_template:
        world = bpy.data.worlds.new("World")
        bpy.context.scene.world = world
        world.use_nodes = True
        bg = world.node_tree.nodes["Background"]
        bg.inputs["Color"].default_value = (0.96, 0.96, 0.97, 1.0)
        bg.inputs["Strength"].default_value = 0.15
    # ── Colour management ────────────────────────────────────────────────────
    # In template mode the .blend file owns its colour management settings
    # (e.g. Filmic/AgX for HDR, custom exposure for Alpha-HDR output types).
    # Overwriting them would destroy the look the template was designed for.
    # In factory-settings mode (Mode A) we force Standard to avoid the grey
    # Filmic tint that Blender applies by default.
    scene = bpy.context.scene
    if not use_template:
        scene.view_settings.view_transform = 'Standard'
        scene.view_settings.exposure = 0.0
        scene.view_settings.gamma = 1.0
        try:
            scene.view_settings.look = 'None'
        except Exception:
            pass
    # ── Render engine ────────────────────────────────────────────────────────
    if engine == "eevee":
        # Engine identifier differs across Blender versions; try both.
        eevee_ok = False
        for eevee_id in ('BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'):
            try:
                scene.render.engine = eevee_id
                eevee_ok = True
                print(f"[still_render] EEVEE engine id: {eevee_id}")
                break
            except TypeError:
                continue
        if eevee_ok:
            for attr in ('taa_render_samples', 'samples'):
                try:
                    setattr(scene.eevee, attr, samples)
                    break
                except AttributeError:
                    continue
        else:
            print("[still_render] WARNING: EEVEE unavailable, falling back to Cycles")
            engine = "cycles"
    if engine != "eevee":
        scene.render.engine = 'CYCLES'
        scene.cycles.samples = samples
        scene.cycles.use_denoising = True
        scene.cycles.denoiser = denoiser_arg if denoiser_arg else 'OPENIMAGEDENOISE'
        # Optional denoiser tuning — each attribute may not exist on every
        # Blender version, hence the per-setting try/except guards.
        if denoising_input_passes_arg:
            try: scene.cycles.denoising_input_passes = denoising_input_passes_arg
            except Exception: pass
        if denoising_prefilter_arg:
            try: scene.cycles.denoising_prefilter = denoising_prefilter_arg
            except Exception: pass
        if denoising_quality_arg:
            try: scene.cycles.denoising_quality = denoising_quality_arg
            except Exception: pass
        if denoising_use_gpu_arg:
            try: scene.cycles.denoising_use_gpu = (denoising_use_gpu_arg == "1")
            except AttributeError: pass
        if noise_threshold_arg:
            scene.cycles.use_adaptive_sampling = True
            scene.cycles.adaptive_threshold = float(noise_threshold_arg)
        # Device selection: "cpu" forces CPU, "gpu" forces GPU (warns if unavailable),
        # "auto" (default) tries GPU first and falls back to CPU.
        print(f"[still_render] cycles_device={cycles_device}")
        gpu_found = False
        if cycles_device != "cpu":
            try:
                cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
                for device_type in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI'):
                    try:
                        cycles_prefs.compute_device_type = device_type
                        cycles_prefs.get_devices()
                        gpu_devs = [d for d in cycles_prefs.devices if d.type != 'CPU']
                        if gpu_devs:
                            for d in gpu_devs:
                                d.use = True
                            scene.cycles.device = 'GPU'
                            gpu_found = True
                            print(f"[still_render] Cycles GPU ({device_type})")
                            break
                    except Exception:
                        continue
            except Exception:
                pass
        if not gpu_found:
            scene.cycles.device = 'CPU'
            print("[still_render] WARNING: GPU not found — falling back to CPU")
    # ── Render settings ──────────────────────────────────────────────────────
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.render.film_transparent = transparent_bg
    # Output format follows the requested file extension.
    ext = os.path.splitext(output_path)[1].lower()
    if ext in ('.jpg', '.jpeg'):
        scene.render.image_settings.file_format = 'JPEG'
        scene.render.image_settings.quality = 92
    else:
        scene.render.image_settings.file_format = 'PNG'
    scene.render.filepath = output_path
    # ── Render ───────────────────────────────────────────────────────────────
    print(f"[still_render] Rendering -> {output_path} (Blender {bpy.app.version_string})")
    bpy.ops.render.render(write_still=True)
    print("[still_render] render done.")
    # ── Pillow post-processing: green bar + model name label ─────────────────
    # Skip overlay for transparent renders to keep clean alpha channel
    if transparent_bg:
        print("[still_render] Transparent mode — skipping Pillow overlay.")
    else:
        try:
            from PIL import Image, ImageDraw, ImageFont
            img = Image.open(output_path).convert("RGBA")
            draw = ImageDraw.Draw(img)
            W, H = img.size
            # Schaeffler green top bar
            bar_h = max(8, H // 32)
            draw.rectangle([0, 0, W - 1, bar_h - 1], fill=(0, 137, 61, 255))
            # Model name strip at bottom
            model_name = os.path.splitext(os.path.basename(stl_path))[0]
            label_h = max(20, H // 20)
            img.alpha_composite(
                Image.new("RGBA", (W, label_h), (30, 30, 30, 180)),
                dest=(0, H - label_h),
            )
            font_size = max(10, label_h - 6)
            font = None
            # Probe common Linux font locations; fall back to PIL's default.
            for fp in [
                "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
                "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
                "/usr/share/fonts/truetype/freefont/FreeSansBold.ttf",
            ]:
                if os.path.exists(fp):
                    try:
                        font = ImageFont.truetype(fp, font_size)
                        break
                    except Exception:
                        pass
            if font is None:
                font = ImageFont.load_default()
            tb = draw.textbbox((0, 0), model_name, font=font)
            text_w = tb[2] - tb[0]
            draw.text(
                ((W - text_w) // 2, H - label_h + (label_h - (tb[3] - tb[1])) // 2),
                model_name, font=font, fill=(255, 255, 255, 255),
            )
            # Save in original format
            if ext in ('.jpg', '.jpeg'):
                img.convert("RGB").save(output_path, format="JPEG", quality=92)
            else:
                img.convert("RGB").save(output_path, format="PNG")
            print("[still_render] Pillow overlay applied.")
        except ImportError:
            print("[still_render] Pillow not available - skipping overlay.")
        except Exception as exc:
            print(f"[still_render] Pillow overlay failed (non-fatal): {exc}")
    print("[still_render] Done.")
if __name__ == "__main__":
    main()
-74
View File
@@ -1,74 +0,0 @@
"""Blender GPU preferences setup for native animation render (-a).
Called as:
blender --background scene.blend --python turntable_gpu_setup.py -a
Reads the intended cycles_device from the scene custom property set by
turntable_setup.py, then applies the matching GPU compute device preferences.
GPU preferences are user-level and not stored in .blend, so they must be
re-applied at render time.
After this script runs, Blender processes -a and renders all animation frames
natively — keeping the GPU scene (BVH, textures) loaded across all frames.
"""
import bpy
scene = bpy.context.scene
cycles_device = scene.get("_cycles_device", "gpu")
denoiser_override = scene.get("_denoiser_override", "")
if scene.render.engine != 'CYCLES':
# EEVEE or other engine — no Cycles GPU preferences needed
print(f"[turntable_gpu] engine={scene.render.engine} — no Cycles GPU setup needed")
elif cycles_device == "cpu":
scene.cycles.device = 'CPU'
print("[turntable_gpu] Using CPU (explicit override)")
else:
gpu_found = False
try:
cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
for device_type in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI'):
try:
cycles_prefs.compute_device_type = device_type
cycles_prefs.get_devices()
gpu_devs = [d for d in cycles_prefs.devices if d.type != 'CPU']
if gpu_devs:
for d in gpu_devs:
d.use = True
scene.cycles.device = 'GPU'
gpu_found = True
# OptiX denoiser is fully GPU-native and faster than OIDN on NVIDIA.
# Fall back to OIDN (also GPU-accelerated) on CUDA/HIP.
if not denoiser_override:
if device_type == 'OPTIX':
try:
scene.cycles.denoiser = 'OPTIX'
print("[turntable_gpu] OptiX denoiser active (GPU-native)")
except Exception:
pass # Keep OIDN
else:
try:
scene.cycles.denoiser = denoiser_override
print(f"[turntable_gpu] Denoiser override: {denoiser_override}")
except Exception:
pass
# Blender 4.x+: explicitly route OIDN through GPU path
try:
scene.cycles.denoising_use_gpu = True
except AttributeError:
pass # Older Blender — OIDN uses GPU automatically when device=GPU
print(f"[turntable_gpu] Cycles GPU ({device_type}) — rendering {scene.frame_end - scene.frame_start + 1} frames")
break
except Exception:
continue
except Exception:
pass
if not gpu_found:
scene.cycles.device = 'CPU'
print("[turntable_gpu] WARNING: GPU not found — falling back to CPU")
print(f"[turntable_gpu] Output: {scene.render.filepath}#### (frames {scene.frame_start}{scene.frame_end})")
-762
View File
@@ -1,762 +0,0 @@
"""Blender Python script: turntable animation render for Flamenco.
Usage (from Blender):
blender --background --python turntable_render.py -- \
<stl_path> <frames_dir> <frame_count> <degrees> <width> <height> \
<engine> <samples> <part_colors_json> \
[template_path] [target_collection] [material_library_path] [material_map_json]
"""
import bpy
import sys
import os
import json
import math
from mathutils import Vector, Matrix
# ── Colour palette (matches blender_render.py / Three.js renderer) ───────────
PALETTE_HEX = [
    "#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
    "#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]
def _srgb_to_linear(c: int) -> float:
    """Convert one 0-255 sRGB channel value to linear light."""
    normalised = c / 255.0
    if normalised <= 0.04045:
        return normalised / 12.92
    return ((normalised + 0.055) / 1.055) ** 2.4
def _hex_to_linear(hex_color: str) -> tuple:
    """Turn a '#RRGGBB' string into a linear RGBA 4-tuple (alpha fixed at 1.0)."""
    digits = hex_color.lstrip('#')
    channels = tuple(
        _srgb_to_linear(int(digits[i:i + 2], 16)) for i in (0, 2, 4)
    )
    return channels + (1.0,)
PALETTE_LINEAR = [_hex_to_linear(code) for code in PALETTE_HEX]
SMOOTH_ANGLE = 30  # degrees
# ── Helper functions ─────────────────────────────────────────────────────────
def _ensure_collection(name: str):
    """Look up a collection by name, creating and linking it on first use."""
    existing = bpy.data.collections.get(name)
    if existing is not None:
        return existing
    created = bpy.data.collections.new(name)
    bpy.context.scene.collection.children.link(created)
    return created
def _assign_palette_material(part_obj, index):
    """Create a fresh node material from the colour palette and attach it."""
    material = bpy.data.materials.new(name=f"Part_{index}")
    material.use_nodes = True
    shader = material.node_tree.nodes.get("Principled BSDF")
    if shader:
        palette_rgba = PALETTE_LINEAR[index % len(PALETTE_LINEAR)]
        shader.inputs["Base Color"].default_value = palette_rgba
        shader.inputs["Metallic"].default_value = 0.35
        shader.inputs["Roughness"].default_value = 0.40
        try:
            # This input name only exists on newer Principled BSDF versions.
            shader.inputs["Specular IOR Level"].default_value = 0.5
        except KeyError:
            pass
    slots = part_obj.data.materials
    slots.clear()
    slots.append(material)
def _apply_smooth(part_obj, angle_deg):
    """Apply smooth or flat shading to a mesh object.

    angle_deg > 0 enables angle-limited smooth shading; otherwise flat.
    """
    # Mesh operators act on the active + selected object, so set both first.
    bpy.context.view_layer.objects.active = part_obj
    part_obj.select_set(True)
    if angle_deg > 0:
        try:
            # Newer Blender exposes a dedicated angle-limited smooth operator.
            bpy.ops.object.shade_smooth_by_angle(angle=math.radians(angle_deg))
        except AttributeError:
            # Older Blender: plain smooth shading + legacy auto-smooth fields.
            bpy.ops.object.shade_smooth()
            part_obj.data.use_auto_smooth = True
            part_obj.data.auto_smooth_angle = math.radians(angle_deg)
    else:
        bpy.ops.object.shade_flat()
import re as _re
def _apply_rotation(parts, rx, ry, rz):
    """Apply Euler XYZ rotation (degrees) to all parts by modifying matrix_world.
    Rotates around world origin, which equals the assembly centre because
    _import_stl already centres parts there. Applied before material assignment
    and camera/bbox calculations so everything downstream sees the final pose.
    """
    # No-op for empty input or an identity rotation.
    if not parts or (rx == 0.0 and ry == 0.0 and rz == 0.0):
        return
    from mathutils import Euler
    rot_mat = Euler((math.radians(rx), math.radians(ry), math.radians(rz)), 'XYZ').to_matrix().to_4x4()
    # Pre-multiply so the rotation happens in world space, about the origin.
    for p in parts:
        p.matrix_world = rot_mat @ p.matrix_world
    # Bake the rotation into the mesh data so later bbox/camera maths and the
    # keyframed turntable pivot see untransformed objects.
    bpy.ops.object.select_all(action='DESELECT')
    for p in parts:
        p.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    print(f"[turntable_render] applied rotation ({rx}°, {ry}°, {rz}°) to {len(parts)} parts")
def _axis_rotation(axis: str, degrees: float) -> tuple:
    """Map turntable axis name to Euler (x, y, z) rotation in radians."""
    angle = math.radians(degrees)
    euler_by_axis = {
        "world_x": (angle, 0.0, 0.0),
        "world_y": (0.0, angle, 0.0),
    }
    # Any unrecognised axis name falls back to the default Z-axis spin.
    return euler_by_axis.get(axis, (0.0, 0.0, angle))
def _set_fcurves_linear(action):
    """Set LINEAR interpolation on all fcurves.
    Handles both the legacy Blender < 4.4 API (action.fcurves) and the new
    Baklava layered-action API introduced in Blender 4.4 / 5.x
    (action.layers[*].strips[*].channelbags[*].fcurves).
    """
    try:
        # New layered-action API (Blender 4.4+ / 5.x)
        for layer in action.layers:
            for strip in layer.strips:
                for channelbag in strip.channelbags:
                    for fc in channelbag.fcurves:
                        for kp in fc.keyframe_points:
                            kp.interpolation = 'LINEAR'
    except AttributeError:
        # Legacy API (Blender < 4.4): flat fcurves list directly on the action.
        for fc in action.fcurves:
            for kp in fc.keyframe_points:
                kp.interpolation = 'LINEAR'
def _scale_mm_to_m(parts):
    """Scale imported STL objects from mm to Blender metres (×0.001).
    STEP/STL coordinates are in mm; Blender's default unit is metres.
    Without scaling a 50 mm part appears as 50 m inside Blender — way too large
    relative to any template environment designed in metric units.
    """
    if not parts:
        return
    bpy.ops.object.select_all(action='DESELECT')
    for p in parts:
        # Scale both the object and its offset from the world origin.
        p.scale = (0.001, 0.001, 0.001)
        p.location *= 0.001
        p.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    # Bake the scale into mesh data so downstream bbox maths sees unit scale.
    bpy.ops.object.transform_apply(scale=True, location=False, rotation=False)
    print(f"[turntable_render] scaled {len(parts)} parts mm→m (×0.001)")
def _import_stl(stl_file):
    """Import STL into Blender, using per-part STLs if available.
    Checks for {stl_stem}_parts/manifest.json next to the STL file.
    - Per-part mode: imports each part STL, names Blender object after STEP part name.
    - Fallback: imports combined STL and splits by loose geometry.
    Returns list of Blender mesh objects, centred at origin.
    """
    stl_dir = os.path.dirname(stl_file)
    stl_stem = os.path.splitext(os.path.basename(stl_file))[0]
    parts_dir = os.path.join(stl_dir, stl_stem + "_parts")
    manifest_path = os.path.join(parts_dir, "manifest.json")
    parts = []
    if os.path.isfile(manifest_path):
        # ── Per-part mode ────────────────────────────────────────────────
        try:
            with open(manifest_path, "r") as f:
                manifest = json.loads(f.read())
            part_entries = manifest.get("parts", [])
        except Exception as e:
            # Unreadable manifest degrades to the combined-STL fallback below.
            print(f"[turntable_render] WARNING: failed to read manifest: {e}")
            part_entries = []
        if part_entries:
            for entry in part_entries:
                part_file = os.path.join(parts_dir, entry["file"])
                part_name = entry["name"]
                if not os.path.isfile(part_file):
                    print(f"[turntable_render] WARNING: part STL missing: {part_file}")
                    continue
                # Deselect first so selected_objects contains only this import.
                bpy.ops.object.select_all(action='DESELECT')
                bpy.ops.wm.stl_import(filepath=part_file)
                imported = bpy.context.selected_objects
                if imported:
                    obj = imported[0]
                    obj.name = part_name
                    if obj.data:
                        obj.data.name = part_name
                    parts.append(obj)
            if parts:
                print(f"[turntable_render] imported {len(parts)} named parts from per-part STLs")
    # ── Fallback: combined STL + separate by loose ───────────────────────
    if not parts:
        bpy.ops.wm.stl_import(filepath=stl_file)
        obj = bpy.context.selected_objects[0] if bpy.context.selected_objects else None
        if obj is None:
            print(f"ERROR: No objects imported from {stl_file}")
            sys.exit(1)
        bpy.context.view_layer.objects.active = obj
        # Centre on bounds BEFORE splitting so all loose pieces share the centre.
        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0.0, 0.0, 0.0)
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.separate(type='LOOSE')
        bpy.ops.object.mode_set(mode='OBJECT')
        parts = list(bpy.context.selected_objects)
        print(f"[turntable_render] fallback: separated into {len(parts)} part(s)")
        # Fallback path is already centred — return without the bbox pass below.
        return parts
    # ── Centre per-part imports at origin (combined bbox) ────────────────
    all_corners = []
    for p in parts:
        all_corners.extend(p.matrix_world @ Vector(c) for c in p.bound_box)
    if all_corners:
        mins = Vector((min(v.x for v in all_corners),
                       min(v.y for v in all_corners),
                       min(v.z for v in all_corners)))
        maxs = Vector((max(v.x for v in all_corners),
                       max(v.y for v in all_corners),
                       max(v.z for v in all_corners)))
        center = (mins + maxs) * 0.5
        for p in parts:
            p.location -= center
    return parts
def _resolve_part_name(index, part_obj, part_names_ordered):
    """Get the STEP part name for a Blender part by index.

    Precedence (matches the actual lookup order): the ordered STEP-name
    list wins whenever it covers this index (combined-STL mode); only
    otherwise is the Blender object name used, with any duplicate suffix
    (".001", ".002", ...) stripped — in per-part import mode that object
    name IS the STEP name.
    """
    if part_names_ordered and index < len(part_names_ordered):
        return part_names_ordered[index]
    # Strip Blender's three-digit duplicate suffix, e.g. "Gear.001" -> "Gear".
    return _re.sub(r'\.\d{3}$', '', part_obj.name)
def _apply_material_library(parts, mat_lib_path, mat_map, part_names_ordered=None):
    """Append materials from library .blend and assign to parts via material_map.
    With per-part STL import, Blender objects are named after STEP parts,
    so matching is by name (stripping Blender .NNN suffix for duplicates).
    Falls back to part_names_ordered index-based matching for combined-STL mode.
    mat_map: {part_name_lower: material_name}
    Parts without a match keep their current material.
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        print(f"[turntable_render] material library not found: {mat_lib_path}")
        return
    # Collect unique material names needed
    needed = set(mat_map.values())
    if not needed:
        return
    # Append materials from library (link=False makes a local copy).
    appended = {}
    for mat_name in needed:
        inner_path = f"{mat_lib_path}/Material/{mat_name}"
        try:
            bpy.ops.wm.append(
                filepath=inner_path,
                directory=f"{mat_lib_path}/Material/",
                filename=mat_name,
                link=False,
            )
            if mat_name in bpy.data.materials:
                appended[mat_name] = bpy.data.materials[mat_name]
                print(f"[turntable_render] appended material: {mat_name}")
            else:
                print(f"[turntable_render] WARNING: material '{mat_name}' not found after append")
        except Exception as exc:
            # One failed append must not abort the rest of the material list.
            print(f"[turntable_render] WARNING: failed to append material '{mat_name}': {exc}")
    if not appended:
        return
    # Assign materials to parts — primary: name-based (per-part STL mode),
    # secondary: index-based via part_names_ordered (combined STL fallback)
    assigned_count = 0
    for i, part in enumerate(parts):
        # Try name-based matching first (strip Blender .NNN suffix)
        base_name = _re.sub(r'\.\d{3}$', '', part.name)
        part_key = base_name.lower().strip()
        mat_name = mat_map.get(part_key)
        # Fall back to index-based matching via part_names_ordered
        if not mat_name and part_names_ordered and i < len(part_names_ordered):
            step_name = part_names_ordered[i]
            part_key = step_name.lower().strip()
            mat_name = mat_map.get(part_key)
        if mat_name and mat_name in appended:
            part.data.materials.clear()
            part.data.materials.append(appended[mat_name])
            assigned_count += 1
            print(f"[turntable_render] assigned '{mat_name}' to part '{part.name}'")
    print(f"[turntable_render] material assignment: {assigned_count}/{len(parts)} parts matched")
def main():
    """CLI entry point: parse args after "--", build the scene, render frames.

    Positional args (see module docstring): STL path, frames dir, frame
    count, sweep degrees, width/height, engine, samples, part-colour JSON,
    plus optional template / material-library / rotation / background args.
    Writes frame_0001.png ... frame_NNNN.png into frames_dir.
    """
    argv = sys.argv
    # Everything after "--" is our args
    args = argv[argv.index("--") + 1:]
    stl_path = args[0]
    frames_dir = args[1]
    frame_count = int(args[2])
    degrees = int(args[3])
    width = int(args[4])
    height = int(args[5])
    engine = args[6]
    samples = int(args[7])
    part_colors_json = args[8] if len(args) > 8 else "{}"
    # Template + material library args (passed by schaeffler-turntable.js)
    template_path = args[9] if len(args) > 9 and args[9] else ""
    target_collection = args[10] if len(args) > 10 else "Product"
    material_library_path = args[11] if len(args) > 11 and args[11] else ""
    material_map_raw = args[12] if len(args) > 12 else "{}"
    part_names_ordered_raw = args[13] if len(args) > 13 else "[]"
    lighting_only = args[14] == "1" if len(args) > 14 else False
    cycles_device = args[15].lower() if len(args) > 15 else "auto"  # "auto", "gpu", "cpu"
    shadow_catcher = args[16] == "1" if len(args) > 16 else False
    rotation_x = float(args[17]) if len(args) > 17 else 0.0
    rotation_y = float(args[18]) if len(args) > 18 else 0.0
    rotation_z = float(args[19]) if len(args) > 19 else 0.0
    turntable_axis = args[20] if len(args) > 20 else "world_z"
    bg_color = args[21] if len(args) > 21 else ""
    transparent_bg = args[22] == "1" if len(args) > 22 else False
    os.makedirs(frames_dir, exist_ok=True)
    # Malformed JSON args degrade to empty defaults rather than aborting.
    try:
        part_colors = json.loads(part_colors_json)
    except json.JSONDecodeError:
        part_colors = {}
    try:
        material_map = json.loads(material_map_raw) if material_map_raw else {}
    except json.JSONDecodeError:
        material_map = {}
    try:
        part_names_ordered = json.loads(part_names_ordered_raw) if part_names_ordered_raw else []
    except json.JSONDecodeError:
        part_names_ordered = []
    # Validate template path: if provided it MUST exist on disk.
    if template_path and not os.path.isfile(template_path):
        print(f"[turntable_render] ERROR: template_path was provided but file not found: {template_path}")
        print("[turntable_render] Ensure the blend-templates directory is accessible on this worker.")
        sys.exit(1)
    use_template = bool(template_path)
    print(f"[turntable_render] engine={engine}, samples={samples}, size={width}x{height}, "
          f"frames={frame_count}, degrees={degrees}")
    print(f"[turntable_render] part_names_ordered: {len(part_names_ordered)} entries")
    if use_template:
        print(f"[turntable_render] template={template_path}, collection={target_collection}, lighting_only={lighting_only}")
    else:
        print("[turntable_render] no template — using factory settings (Mode A)")
    if material_library_path:
        print(f"[turntable_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
    # ── SCENE SETUP ──────────────────────────────────────────────────────────
    if use_template:
        # ── MODE B: Template-based render ────────────────────────────────────
        print(f"[turntable_render] Opening template: {template_path}")
        bpy.ops.wm.open_mainfile(filepath=template_path)
        # Find or create target collection
        target_col = _ensure_collection(target_collection)
        # Import and split STL
        parts = _import_stl(stl_path)
        # Scale mm→m: STEP coords are mm, Blender default unit is metres
        _scale_mm_to_m(parts)
        # Apply render position rotation before material/camera setup
        _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
        # Move imported parts into target collection
        for part in parts:
            for col in list(part.users_collection):
                col.objects.unlink(part)
            target_col.objects.link(part)
        # Apply smooth shading
        for part in parts:
            _apply_smooth(part, SMOOTH_ANGLE)
        # Material assignment: library materials if available, otherwise palette
        if material_library_path and material_map:
            mat_map_lower = {k.lower(): v for k, v in material_map.items()}
            _apply_material_library(parts, material_library_path, mat_map_lower, part_names_ordered)
            # Parts not matched by library get palette fallback
            for i, part in enumerate(parts):
                if not part.data.materials or len(part.data.materials) == 0:
                    _assign_palette_material(part, i)
        else:
            # NOTE(review): in template mode a part WITH a part_colors entry is
            # left untouched (keeps its import material) — only colourless
            # parts get the palette. Confirm this asymmetry vs Mode A is intended.
            for i, part in enumerate(parts):
                step_name = _resolve_part_name(i, part, part_names_ordered)
                color_hex = part_colors.get(step_name)
                if not color_hex:
                    _assign_palette_material(part, i)
        # ── Shadow catcher (Cycles only, template mode only) ─────────────────
        if shadow_catcher:
            sc_col_name = "Shadowcatcher"
            sc_obj_name = "Shadowcatcher"
            # Un-exclude the shadow-catcher collection on every view layer.
            for vl in bpy.context.scene.view_layers:
                def _enable_col_recursive(layer_col):
                    if layer_col.collection.name == sc_col_name:
                        layer_col.exclude = False
                        layer_col.collection.hide_render = False
                        layer_col.collection.hide_viewport = False
                        return True
                    for child in layer_col.children:
                        if _enable_col_recursive(child):
                            return True
                    return False
                _enable_col_recursive(vl.layer_collection)
            sc_obj = bpy.data.objects.get(sc_obj_name)
            if sc_obj:
                # Drop the catcher plane to the lowest point of the model.
                all_world_z = []
                for part in parts:
                    for corner in part.bound_box:
                        all_world_z.append((part.matrix_world @ Vector(corner)).z)
                if all_world_z:
                    sc_obj.location.z = min(all_world_z)
                print(f"[turntable_render] shadow catcher enabled, plane Z={sc_obj.location.z:.4f}")
            else:
                print(f"[turntable_render] WARNING: shadow catcher object '{sc_obj_name}' not found in template")
        # lighting_only: always use auto-framing; normal template: use camera if present
        needs_auto_camera = (lighting_only and not shadow_catcher) or not bpy.context.scene.camera
        if lighting_only and not shadow_catcher:
            print("[turntable_render] lighting_only mode: using template World/HDRI, forcing auto-camera")
        elif needs_auto_camera:
            print("[turntable_render] WARNING: template has no camera — will create auto-camera")
        # Set very close near clip on template camera for mm-scale parts (now in metres)
        if not needs_auto_camera and bpy.context.scene.camera:
            bpy.context.scene.camera.data.clip_start = 0.001
        print(f"[turntable_render] template mode: {len(parts)} parts imported into collection '{target_collection}'")
    else:
        # ── MODE A: Factory settings ─────────────────────────────────────────
        needs_auto_camera = True
        bpy.ops.wm.read_factory_settings(use_empty=True)
        parts = _import_stl(stl_path)
        # Scale mm→m: STEP coords are mm, Blender default unit is metres
        _scale_mm_to_m(parts)
        # Apply render position rotation before material/camera setup
        _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
        for i, part in enumerate(parts):
            _apply_smooth(part, SMOOTH_ANGLE)
        # Material assignment: library materials if available, else part_colors/palette
        if material_library_path and material_map:
            mat_map_lower = {k.lower(): v for k, v in material_map.items()}
            _apply_material_library(parts, material_library_path, mat_map_lower, part_names_ordered)
            # Palette fallback for unmatched parts
            for i, part in enumerate(parts):
                if not part.data.materials or len(part.data.materials) == 0:
                    _assign_palette_material(part, i)
        else:
            # part_colors or palette — use index-based lookup via part_names_ordered
            for i, part in enumerate(parts):
                step_name = _resolve_part_name(i, part, part_names_ordered)
                color_hex = part_colors.get(step_name)
                if color_hex:
                    mat = bpy.data.materials.new(name=f"mat_{part.name}")
                    mat.use_nodes = True
                    bsdf = mat.node_tree.nodes.get("Principled BSDF")
                    if bsdf:
                        color = _hex_to_linear(color_hex)
                        bsdf.inputs["Base Color"].default_value = color
                        bsdf.inputs["Metallic"].default_value = 0.35
                        bsdf.inputs["Roughness"].default_value = 0.40
                        try:
                            # Input only exists on newer Principled BSDF versions.
                            bsdf.inputs["Specular IOR Level"].default_value = 0.5
                        except KeyError:
                            pass
                    part.data.materials.clear()
                    part.data.materials.append(mat)
                else:
                    _assign_palette_material(part, i)
    if needs_auto_camera:
        # ── Combined bounding box / bounding sphere ──────────────────────────
        all_corners = []
        for part in parts:
            all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
        bbox_min = Vector((
            min(v.x for v in all_corners),
            min(v.y for v in all_corners),
            min(v.z for v in all_corners),
        ))
        bbox_max = Vector((
            max(v.x for v in all_corners),
            max(v.y for v in all_corners),
            max(v.z for v in all_corners),
        ))
        bbox_center = (bbox_min + bbox_max) * 0.5
        bbox_dims = bbox_max - bbox_min
        # Floor avoids a zero radius (and downstream division) for flat models.
        bsphere_radius = max(bbox_dims.length * 0.5, 0.001)
        print(f"[turntable_render] bbox_dims={tuple(round(d, 4) for d in bbox_dims)}, "
              f"bsphere_radius={bsphere_radius:.4f}")
        # ── Lighting — only in Mode A (factory settings) ─────────────────────
        # In template mode the .blend file provides its own World/HDRI lighting.
        # Adding auto-lights would overpower the template's intended look.
        if not use_template:
            light_dist = bsphere_radius * 6.0
            bpy.ops.object.light_add(type='SUN', location=(
                bbox_center.x + light_dist * 0.5,
                bbox_center.y - light_dist * 0.35,
                bbox_center.z + light_dist,
            ))
            sun = bpy.context.active_object
            sun.data.energy = 4.0
            sun.rotation_euler = (math.radians(45), 0, math.radians(30))
            bpy.ops.object.light_add(type='AREA', location=(
                bbox_center.x - light_dist * 0.4,
                bbox_center.y + light_dist * 0.4,
                bbox_center.z + light_dist * 0.7,
            ))
            fill = bpy.context.active_object
            # Fill light scales with model size so exposure stays consistent.
            fill.data.energy = max(800.0, bsphere_radius ** 2 * 2000.0)
            fill.data.size = max(4.0, bsphere_radius * 4.0)
        # ── Camera ───────────────────────────────────────────────────────────
        cam_dist = bsphere_radius * 2.5
        cam_location = Vector((
            bbox_center.x + cam_dist,
            bbox_center.y,
            bbox_center.z + bsphere_radius * 0.5,
        ))
        bpy.ops.object.camera_add(location=cam_location)
        camera = bpy.context.active_object
        bpy.context.scene.camera = camera
        camera.data.clip_start = max(cam_dist * 0.001, 0.0001)
        camera.data.clip_end = cam_dist * 10.0
        # Track-to constraint for look-at
        empty = bpy.data.objects.new("target", None)
        bpy.context.collection.objects.link(empty)
        empty.location = bbox_center
        track = camera.constraints.new(type='TRACK_TO')
        track.target = empty
        track.track_axis = 'TRACK_NEGATIVE_Z'
        track.up_axis = 'UP_Y'
        # ── World background — only in Mode A ───────────────────────────────
        # In template mode the .blend file owns its World (HDRI, sky texture,
        # studio lighting). Overwriting it would destroy the HDR look.
        if not use_template:
            world = bpy.data.worlds.new("World")
            bpy.context.scene.world = world
            world.use_nodes = True
            bg = world.node_tree.nodes["Background"]
            bg.inputs["Color"].default_value = (0.96, 0.96, 0.97, 1.0)
            bg.inputs["Strength"].default_value = 0.15
        # ── Turntable pivot ──────────────────────────────────────────────────
        # Auto-camera variant: the CAMERA orbits a pivot empty at the model centre.
        pivot = bpy.data.objects.new("pivot", None)
        bpy.context.collection.objects.link(pivot)
        pivot.location = bbox_center
        # Parent camera to pivot
        camera.parent = pivot
        camera.location = (cam_dist, 0, bsphere_radius * 0.5)
        # Keyframe pivot rotation
        scene = bpy.context.scene
        scene.frame_start = 1
        scene.frame_end = frame_count
        pivot.rotation_euler = (0, 0, 0)
        pivot.keyframe_insert(data_path="rotation_euler", frame=1)
        pivot.rotation_euler = _axis_rotation(turntable_axis, degrees)
        pivot.keyframe_insert(data_path="rotation_euler", frame=frame_count + 1)
        # Linear interpolation — frame N+1 is never rendered, giving N uniform steps
        _set_fcurves_linear(pivot.animation_data.action)
    else:
        # Template has camera — set up turntable on the model parts instead
        scene = bpy.context.scene
        scene.frame_start = 1
        scene.frame_end = frame_count
        # Calculate model center for pivot
        all_corners = []
        for part in parts:
            all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
        bbox_center = Vector((
            (min(v.x for v in all_corners) + max(v.x for v in all_corners)) * 0.5,
            (min(v.y for v in all_corners) + max(v.y for v in all_corners)) * 0.5,
            (min(v.z for v in all_corners) + max(v.z for v in all_corners)) * 0.5,
        ))
        # Create a pivot empty and parent all parts to it
        pivot = bpy.data.objects.new("turntable_pivot", None)
        bpy.context.collection.objects.link(pivot)
        pivot.location = bbox_center
        for part in parts:
            part.parent = pivot
        # Keyframe pivot rotation
        pivot.rotation_euler = (0, 0, 0)
        pivot.keyframe_insert(data_path="rotation_euler", frame=1)
        pivot.rotation_euler = _axis_rotation(turntable_axis, degrees)
        pivot.keyframe_insert(data_path="rotation_euler", frame=frame_count + 1)
        # Linear interpolation — frame N+1 is never rendered, giving N uniform steps
        _set_fcurves_linear(pivot.animation_data.action)
    # ── Colour management ────────────────────────────────────────────────────
    # In template mode the .blend file owns its colour management settings.
    # Overwriting them would destroy the intended HDR/tonemapping look.
    # In factory-settings mode force Standard to avoid the grey Filmic tint.
    scene = bpy.context.scene
    if not use_template:
        scene.view_settings.view_transform = 'Standard'
        scene.view_settings.exposure = 0.0
        scene.view_settings.gamma = 1.0
        try:
            scene.view_settings.look = 'None'
        except Exception:
            pass
    # ── Render engine ────────────────────────────────────────────────────────
    if engine == "eevee":
        eevee_ok = False
        # Engine id was renamed across Blender versions; try both spellings.
        for eevee_id in ('BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'):
            try:
                scene.render.engine = eevee_id
                eevee_ok = True
                print(f"[turntable_render] EEVEE engine id: {eevee_id}")
                break
            except TypeError:
                continue
        if eevee_ok:
            # Sample-count attribute name also differs between versions.
            for attr in ('taa_render_samples', 'samples'):
                try:
                    setattr(scene.eevee, attr, samples)
                    break
                except AttributeError:
                    continue
        else:
            print("[turntable_render] WARNING: EEVEE not available, falling back to Cycles")
            engine = "cycles"
    if engine != "eevee":
        scene.render.engine = 'CYCLES'
        scene.cycles.samples = samples
        scene.cycles.use_denoising = True
        scene.cycles.denoiser = 'OPENIMAGEDENOISE'  # GPU-accelerated when CUDA/OptiX active
        # Device selection: "cpu" forces CPU, "gpu" forces GPU (warns if unavailable),
        # "auto" (default) tries GPU first and falls back to CPU.
        print(f"[turntable_render] cycles_device={cycles_device}")
        gpu_found = False
        if cycles_device != "cpu":
            try:
                cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
                for device_type in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI'):
                    try:
                        cycles_prefs.compute_device_type = device_type
                        cycles_prefs.get_devices()
                        gpu_devs = [d for d in cycles_prefs.devices if d.type != 'CPU']
                        if gpu_devs:
                            for d in gpu_devs:
                                d.use = True
                            scene.cycles.device = 'GPU'
                            gpu_found = True
                            print(f"[turntable_render] Cycles GPU ({device_type})")
                            break
                    except Exception:
                        continue
            except Exception:
                pass
        if not gpu_found:
            scene.cycles.device = 'CPU'
            print("[turntable_render] WARNING: GPU not found — falling back to CPU")
    # ── Render settings ──────────────────────────────────────────────────────
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.render.image_settings.file_format = 'PNG'
    # ── Transparent background ────────────────────────────────────────────────
    # bg_color compositing is handled by FFmpeg in the compose-video task.
    # Blender renders transparent PNG frames when bg_color is set.
    if bg_color or transparent_bg:
        scene.render.film_transparent = True
        if bg_color:
            print(f"[turntable_render] film_transparent=True for FFmpeg bg_color compositing ({bg_color})")
        else:
            print("[turntable_render] transparent_bg enabled (alpha PNG frames)")
    # ── Render all frames ────────────────────────────────────────────────────
    # Per-frame loop with write_still=True. In a single Blender session,
    # Cycles keeps the GPU scene (BVH, textures, material graph) loaded
    # between frames — only the animated pivot transform is updated each step.
    # bpy.ops.render.render(animation=True) does NOT work reliably in
    # background mode after wm.open_mainfile() in Blender 5.x (silently
    # writes no files), so we use the explicit per-frame approach.
    import time as _time
    _render_start = _time.time()
    for frame in range(1, frame_count + 1):
        scene.frame_set(frame)
        scene.render.filepath = os.path.join(frames_dir, f"frame_{frame:04d}")
        bpy.ops.render.render(write_still=True)
        elapsed = _time.time() - _render_start
        fps_so_far = frame / elapsed
        print(f"[turntable_render] Frame {frame}/{frame_count}{elapsed:.1f}s elapsed ({fps_so_far:.2f} fps)")
    total = _time.time() - _render_start
    print(f"[turntable_render] Turntable render complete: {frame_count} frames in {total:.1f}s ({frame_count/total:.2f} fps avg)")
if __name__ == "__main__":
main()
-688
View File
@@ -1,688 +0,0 @@
"""Blender Python script: scene setup for turntable animation (Flamenco).
Performs all scene preparation — STL import, materials, camera, pivot animation,
compositor — then SAVES the resulting .blend file to <scene_path>.
The saved .blend is then rendered by a separate Flamenco task:
blender --background <scene_path> --python turntable_gpu_setup.py -a
Using Blender's native -a (--render-anim) keeps the GPU scene (BVH, textures)
loaded for ALL frames in one process, avoiding per-frame GPU re-upload overhead.
Usage (from Blender):
blender --background --python turntable_setup.py -- \\
<stl_path> <frames_dir> <frame_count> <degrees> <width> <height> \\
<engine> <samples> <part_colors_json> \\
[template_path] [target_collection] [material_library_path] \\
[material_map_json] [part_names_ordered_json] [lighting_only] \\
[cycles_device] [shadow_catcher] [rotation_x] [rotation_y] [rotation_z] \\
[turntable_axis] [bg_color] [transparent_bg] [scene_path] [camera_orbit]
"""
import bpy
import sys
import os
import json
import math
from mathutils import Vector, Matrix
# ── Colour palette ────────────────────────────────────────────────────────────
PALETTE_HEX = [
    "#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
    "#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]
def _srgb_to_linear(c: int) -> float:
    """Convert one 0-255 sRGB channel value to linear light."""
    normalised = c / 255.0
    if normalised <= 0.04045:
        return normalised / 12.92
    return ((normalised + 0.055) / 1.055) ** 2.4
def _hex_to_linear(hex_color: str) -> tuple:
    """Turn a '#RRGGBB' string into a linear RGBA 4-tuple (alpha fixed at 1.0)."""
    digits = hex_color.lstrip('#')
    channels = tuple(
        _srgb_to_linear(int(digits[i:i + 2], 16)) for i in (0, 2, 4)
    )
    return channels + (1.0,)
PALETTE_LINEAR = [_hex_to_linear(code) for code in PALETTE_HEX]
SMOOTH_ANGLE = 30
# ── Helpers (kept in sync with turntable_render.py) ──────────────────────────
def _ensure_collection(name: str):
    """Fetch the named collection, creating and linking it when absent."""
    existing = bpy.data.collections.get(name)
    if existing is not None:
        return existing
    fresh = bpy.data.collections.new(name)
    bpy.context.scene.collection.children.link(fresh)
    return fresh
def _assign_palette_material(part_obj, index):
    """Create a fresh Principled-BSDF material from the palette and assign it to *part_obj*.

    The colour cycles through PALETTE_LINEAR by *index*; any materials already
    on the object are replaced.
    """
    color = PALETTE_LINEAR[index % len(PALETTE_LINEAR)]
    mat = bpy.data.materials.new(name=f"Part_{index}")
    mat.use_nodes = True
    bsdf = mat.node_tree.nodes.get("Principled BSDF")
    if bsdf:
        bsdf.inputs["Base Color"].default_value = color
        bsdf.inputs["Metallic"].default_value = 0.35
        bsdf.inputs["Roughness"].default_value = 0.40
        try:
            # "Specular IOR Level" exists in newer Blender node layouts;
            # older builds raise KeyError, which we deliberately ignore.
            bsdf.inputs["Specular IOR Level"].default_value = 0.5
        except KeyError:
            pass
    part_obj.data.materials.clear()
    part_obj.data.materials.append(mat)
def _apply_smooth(part_obj, angle_deg):
    """Apply angle-based smooth shading to *part_obj* (flat shading when angle <= 0).

    Prefers the ``shade_smooth_by_angle`` operator (newer Blender); if that
    operator is missing (AttributeError), falls back to plain smooth shading
    plus the legacy ``use_auto_smooth`` mesh flags.
    """
    # The object must be active *and* selected for shade operators to apply.
    bpy.context.view_layer.objects.active = part_obj
    part_obj.select_set(True)
    if angle_deg > 0:
        try:
            bpy.ops.object.shade_smooth_by_angle(angle=math.radians(angle_deg))
        except AttributeError:
            # Legacy path for Blender builds without shade_smooth_by_angle.
            bpy.ops.object.shade_smooth()
            part_obj.data.use_auto_smooth = True
            part_obj.data.auto_smooth_angle = math.radians(angle_deg)
    else:
        bpy.ops.object.shade_flat()
import re as _re
def _apply_rotation(parts, rx, ry, rz):
    """Rotate every object in *parts* by XYZ Euler angles (degrees) and bake the result.

    The rotation is applied to each object's world matrix and then made
    permanent via ``transform_apply`` so later turntable keyframes start from
    identity rotation. No-op when parts is empty or all angles are zero.
    """
    if not parts or (rx == 0.0 and ry == 0.0 and rz == 0.0):
        return
    from mathutils import Euler
    rot_mat = Euler((math.radians(rx), math.radians(ry), math.radians(rz)), 'XYZ').to_matrix().to_4x4()
    for p in parts:
        p.matrix_world = rot_mat @ p.matrix_world
    # transform_apply operates on the selection, so select all parts first.
    bpy.ops.object.select_all(action='DESELECT')
    for p in parts:
        p.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    print(f"[turntable_setup] applied rotation ({rx}°, {ry}°, {rz}°) to {len(parts)} parts")
def _axis_rotation(axis: str, degrees: float) -> tuple:
rad = math.radians(degrees)
if axis == "world_x":
return (rad, 0.0, 0.0)
elif axis == "world_y":
return (0.0, rad, 0.0)
else:
return (0.0, 0.0, rad)
def _set_fcurves_linear(action):
try:
for layer in action.layers:
for strip in layer.strips:
for channelbag in strip.channelbags:
for fc in channelbag.fcurves:
for kp in fc.keyframe_points:
kp.interpolation = 'LINEAR'
except AttributeError:
for fc in action.fcurves:
for kp in fc.keyframe_points:
kp.interpolation = 'LINEAR'
def _scale_mm_to_m(parts):
    """Scale all *parts* from millimetres to metres (×0.001) and bake the scale.

    STEP/STL exports are typically in mm while Blender scenes work in metres;
    both object scale and location are shrunk so the assembly keeps its
    relative layout. ``transform_apply`` bakes the scale into the mesh data.
    """
    if not parts:
        return
    bpy.ops.object.select_all(action='DESELECT')
    for p in parts:
        p.scale = (0.001, 0.001, 0.001)
        p.location *= 0.001
        p.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(scale=True, location=False, rotation=False)
    print(f"[turntable_setup] scaled {len(parts)} parts mm→m (×0.001)")
def _import_stl(stl_file):
    """Import the product geometry and return the list of imported part objects.

    Two strategies, tried in order:
      1. Per-part STLs: if ``<stem>_parts/manifest.json`` exists next to the
         STL, each listed part file is imported individually and named after
         its manifest entry (preserves real part names for material mapping).
      2. Fallback: import the monolithic STL, centre it, and split it into
         loose mesh islands via ``mesh.separate(type='LOOSE')``.

    In the manifest path the whole assembly is re-centred around its combined
    bounding-box midpoint before returning; the fallback path centres the
    single object and returns early.
    """
    stl_dir = os.path.dirname(stl_file)
    stl_stem = os.path.splitext(os.path.basename(stl_file))[0]
    parts_dir = os.path.join(stl_dir, stl_stem + "_parts")
    manifest_path = os.path.join(parts_dir, "manifest.json")
    parts = []
    if os.path.isfile(manifest_path):
        try:
            with open(manifest_path, "r") as f:
                manifest = json.loads(f.read())
            part_entries = manifest.get("parts", [])
        except Exception as e:
            # Unreadable/corrupt manifest → fall through to monolithic import.
            print(f"[turntable_setup] WARNING: failed to read manifest: {e}")
            part_entries = []
        if part_entries:
            for entry in part_entries:
                part_file = os.path.join(parts_dir, entry["file"])
                part_name = entry["name"]
                if not os.path.isfile(part_file):
                    print(f"[turntable_setup] WARNING: part STL missing: {part_file}")
                    continue
                bpy.ops.object.select_all(action='DESELECT')
                bpy.ops.wm.stl_import(filepath=part_file)
                imported = bpy.context.selected_objects
                if imported:
                    obj = imported[0]
                    # Rename object + mesh datablock to the manifest part name.
                    obj.name = part_name
                    if obj.data:
                        obj.data.name = part_name
                    parts.append(obj)
            if parts:
                print(f"[turntable_setup] imported {len(parts)} named parts from per-part STLs")
    if not parts:
        # Fallback: monolithic STL, split into disconnected islands.
        bpy.ops.wm.stl_import(filepath=stl_file)
        obj = bpy.context.selected_objects[0] if bpy.context.selected_objects else None
        if obj is None:
            print(f"ERROR: No objects imported from {stl_file}")
            sys.exit(1)
        bpy.context.view_layer.objects.active = obj
        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0.0, 0.0, 0.0)
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.separate(type='LOOSE')
        bpy.ops.object.mode_set(mode='OBJECT')
        parts = list(bpy.context.selected_objects)
        print(f"[turntable_setup] fallback: separated into {len(parts)} part(s)")
        return parts
    # Re-centre the whole assembly around its combined bounding-box midpoint.
    all_corners = []
    for p in parts:
        all_corners.extend(p.matrix_world @ Vector(c) for c in p.bound_box)
    if all_corners:
        mins = Vector((min(v.x for v in all_corners),
                       min(v.y for v in all_corners),
                       min(v.z for v in all_corners)))
        maxs = Vector((max(v.x for v in all_corners),
                       max(v.y for v in all_corners),
                       max(v.z for v in all_corners)))
        center = (mins + maxs) * 0.5
        for p in parts:
            p.location -= center
    return parts
def _resolve_part_name(index, part_obj, part_names_ordered):
base_name = _re.sub(r'\.\d{3}$', '', part_obj.name)
if part_names_ordered and index < len(part_names_ordered):
return part_names_ordered[index]
return base_name
def _apply_material_library(parts, mat_lib_path, mat_map, part_names_ordered=None):
    """Append materials from a .blend library and assign them to matching parts.

    Args:
        parts: imported part objects.
        mat_lib_path: path to the material-library .blend file.
        mat_map: mapping of lower-cased part name → material name (callers
            lower-case the keys; see main()).
        part_names_ordered: optional STEP-ordered part names used as a second
            lookup key when the Blender object name does not match.

    Best-effort: missing library, empty map, or individual append failures are
    logged and skipped rather than raised. Parts without a match keep their
    current materials.
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        print(f"[turntable_setup] material library not found: {mat_lib_path}")
        return
    needed = set(mat_map.values())
    if not needed:
        return
    # Append each required material once from the library .blend.
    appended = {}
    for mat_name in needed:
        inner_path = f"{mat_lib_path}/Material/{mat_name}"
        try:
            bpy.ops.wm.append(
                filepath=inner_path,
                directory=f"{mat_lib_path}/Material/",
                filename=mat_name,
                link=False,
            )
            if mat_name in bpy.data.materials:
                appended[mat_name] = bpy.data.materials[mat_name]
                print(f"[turntable_setup] appended material: {mat_name}")
            else:
                print(f"[turntable_setup] WARNING: material '{mat_name}' not found after append")
        except Exception as exc:
            print(f"[turntable_setup] WARNING: failed to append material '{mat_name}': {exc}")
    if not appended:
        return
    # Assign: match by object name first, then by STEP-ordered name.
    assigned_count = 0
    for i, part in enumerate(parts):
        base_name = _re.sub(r'\.\d{3}$', '', part.name)
        part_key = base_name.lower().strip()
        mat_name = mat_map.get(part_key)
        if not mat_name and part_names_ordered and i < len(part_names_ordered):
            step_name = part_names_ordered[i]
            part_key = step_name.lower().strip()
            mat_name = mat_map.get(part_key)
        if mat_name and mat_name in appended:
            part.data.materials.clear()
            part.data.materials.append(appended[mat_name])
            assigned_count += 1
            print(f"[turntable_setup] assigned '{mat_name}' to part '{part.name}'")
    print(f"[turntable_setup] material assignment: {assigned_count}/{len(parts)} parts matched")
def main():
    """Build the turntable scene and save it as a .blend for later rendering.

    Parses the positional CLI args after ``--`` (see module docstring),
    imports the STL (per-part or monolithic), applies scale/rotation and
    materials, sets up camera + turntable animation (camera-orbit or
    product-rotation), configures the render engine, then saves the scene to
    ``scene_path``. Exits non-zero on fatal errors.
    """
    # ── CLI argument parsing (positional, everything after "--") ─────────────
    argv = sys.argv
    args = argv[argv.index("--") + 1:]
    stl_path = args[0]
    frames_dir = args[1]
    frame_count = int(args[2])
    degrees = int(args[3])
    width = int(args[4])
    height = int(args[5])
    engine = args[6]
    samples = int(args[7])
    # Optional args (positions 8+) all have defaults.
    part_colors_json = args[8] if len(args) > 8 else "{}"
    template_path = args[9] if len(args) > 9 and args[9] else ""
    target_collection = args[10] if len(args) > 10 else "Product"
    material_library_path = args[11] if len(args) > 11 and args[11] else ""
    material_map_raw = args[12] if len(args) > 12 else "{}"
    part_names_ordered_raw = args[13] if len(args) > 13 else "[]"
    lighting_only = args[14] == "1" if len(args) > 14 else False
    cycles_device = args[15].lower() if len(args) > 15 else "auto"
    shadow_catcher = args[16] == "1" if len(args) > 16 else False
    rotation_x = float(args[17]) if len(args) > 17 else 0.0
    rotation_y = float(args[18]) if len(args) > 18 else 0.0
    rotation_z = float(args[19]) if len(args) > 19 else 0.0
    turntable_axis = args[20] if len(args) > 20 else "world_z"
    bg_color = args[21] if len(args) > 21 else ""
    transparent_bg = args[22] == "1" if len(args) > 22 else False
    scene_path = args[23] if len(args) > 23 else os.path.join(os.path.dirname(frames_dir), "scene.blend")
    camera_orbit = args[24] != "0" if len(args) > 24 else True
    noise_threshold_arg = args[25] if len(args) > 25 else ""
    denoiser_arg = args[26] if len(args) > 26 else ""
    denoising_input_passes_arg = args[27] if len(args) > 27 else ""
    denoising_prefilter_arg = args[28] if len(args) > 28 else ""
    denoising_quality_arg = args[29] if len(args) > 29 else ""
    denoising_use_gpu_arg = args[30] if len(args) > 30 else ""
    os.makedirs(frames_dir, exist_ok=True)
    os.makedirs(os.path.dirname(scene_path), exist_ok=True)
    # Malformed JSON args degrade to empty containers rather than aborting.
    try:
        part_colors = json.loads(part_colors_json)
    except json.JSONDecodeError:
        part_colors = {}
    try:
        material_map = json.loads(material_map_raw) if material_map_raw else {}
    except json.JSONDecodeError:
        material_map = {}
    try:
        part_names_ordered = json.loads(part_names_ordered_raw) if part_names_ordered_raw else []
    except json.JSONDecodeError:
        part_names_ordered = []
    if template_path and not os.path.isfile(template_path):
        print(f"[turntable_setup] ERROR: template_path not found: {template_path}")
        sys.exit(1)
    use_template = bool(template_path)
    print(f"[turntable_setup] engine={engine}, samples={samples}, size={width}x{height}, "
          f"frames={frame_count}, degrees={degrees}")
    print(f"[turntable_setup] part_names_ordered: {len(part_names_ordered)} entries")
    if use_template:
        print(f"[turntable_setup] template={template_path}, collection={target_collection}, lighting_only={lighting_only}")
    else:
        print("[turntable_setup] no template — using factory settings (Mode A)")
    if material_library_path:
        print(f"[turntable_setup] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
    # ── SCENE SETUP ──────────────────────────────────────────────────────────
    if use_template:
        # Mode B: open the studio template .blend and import parts into its
        # designated collection; lighting/world come from the template.
        print(f"[turntable_setup] Opening template: {template_path}")
        bpy.ops.wm.open_mainfile(filepath=template_path)
        target_col = _ensure_collection(target_collection)
        parts = _import_stl(stl_path)
        _scale_mm_to_m(parts)
        _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
        for part in parts:
            for col in list(part.users_collection):
                col.objects.unlink(part)
            target_col.objects.link(part)
        for part in parts:
            _apply_smooth(part, SMOOTH_ANGLE)
        if material_library_path and material_map:
            mat_map_lower = {k.lower(): v for k, v in material_map.items()}
            _apply_material_library(parts, material_library_path, mat_map_lower, part_names_ordered)
            # Parts the library didn't match fall back to palette colours.
            for i, part in enumerate(parts):
                if not part.data.materials or len(part.data.materials) == 0:
                    _assign_palette_material(part, i)
        else:
            for i, part in enumerate(parts):
                step_name = _resolve_part_name(i, part, part_names_ordered)
                color_hex = part_colors.get(step_name)
                if not color_hex:
                    _assign_palette_material(part, i)
        if shadow_catcher:
            # Enable the template's "Shadowcatcher" collection in every view
            # layer, then drop its plane to the lowest point of the product.
            sc_col_name = "Shadowcatcher"
            sc_obj_name = "Shadowcatcher"
            for vl in bpy.context.scene.view_layers:
                def _enable_col_recursive(layer_col):
                    if layer_col.collection.name == sc_col_name:
                        layer_col.exclude = False
                        layer_col.collection.hide_render = False
                        layer_col.collection.hide_viewport = False
                        return True
                    for child in layer_col.children:
                        if _enable_col_recursive(child):
                            return True
                    return False
                _enable_col_recursive(vl.layer_collection)
            sc_obj = bpy.data.objects.get(sc_obj_name)
            if sc_obj:
                all_world_z = []
                for part in parts:
                    for corner in part.bound_box:
                        all_world_z.append((part.matrix_world @ Vector(corner)).z)
                if all_world_z:
                    sc_obj.location.z = min(all_world_z)
                print(f"[turntable_setup] shadow catcher enabled, plane Z={sc_obj.location.z:.4f}")
            else:
                print(f"[turntable_setup] WARNING: shadow catcher object '{sc_obj_name}' not found")
        needs_auto_camera = (lighting_only and not shadow_catcher) or not bpy.context.scene.camera
        if not needs_auto_camera and bpy.context.scene.camera:
            bpy.context.scene.camera.data.clip_start = 0.001
        print(f"[turntable_setup] template mode: {len(parts)} parts imported into '{target_collection}'")
    else:
        # Mode A: empty factory scene; lights/camera/world are created below.
        needs_auto_camera = True
        bpy.ops.wm.read_factory_settings(use_empty=True)
        parts = _import_stl(stl_path)
        _scale_mm_to_m(parts)
        _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
        for i, part in enumerate(parts):
            _apply_smooth(part, SMOOTH_ANGLE)
        if material_library_path and material_map:
            mat_map_lower = {k.lower(): v for k, v in material_map.items()}
            _apply_material_library(parts, material_library_path, mat_map_lower, part_names_ordered)
            for i, part in enumerate(parts):
                if not part.data.materials or len(part.data.materials) == 0:
                    _assign_palette_material(part, i)
        else:
            for i, part in enumerate(parts):
                step_name = _resolve_part_name(i, part, part_names_ordered)
                color_hex = part_colors.get(step_name)
                if color_hex:
                    # Explicit per-part colour from the request.
                    mat = bpy.data.materials.new(name=f"mat_{part.name}")
                    mat.use_nodes = True
                    bsdf = mat.node_tree.nodes.get("Principled BSDF")
                    if bsdf:
                        color = _hex_to_linear(color_hex)
                        bsdf.inputs["Base Color"].default_value = color
                        bsdf.inputs["Metallic"].default_value = 0.35
                        bsdf.inputs["Roughness"].default_value = 0.40
                        try:
                            bsdf.inputs["Specular IOR Level"].default_value = 0.5
                        except KeyError:
                            pass
                    part.data.materials.clear()
                    part.data.materials.append(mat)
                else:
                    _assign_palette_material(part, i)
    if needs_auto_camera:
        # Auto-frame a camera (and, in Mode A, lights/world) around the
        # combined bounding sphere of all parts.
        all_corners = []
        for part in parts:
            all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
        bbox_min = Vector((min(v.x for v in all_corners), min(v.y for v in all_corners), min(v.z for v in all_corners)))
        bbox_max = Vector((max(v.x for v in all_corners), max(v.y for v in all_corners), max(v.z for v in all_corners)))
        bbox_center = (bbox_min + bbox_max) * 0.5
        bbox_dims = bbox_max - bbox_min
        bsphere_radius = max(bbox_dims.length * 0.5, 0.001)
        print(f"[turntable_setup] bbox_dims={tuple(round(d, 4) for d in bbox_dims)}, bsphere_radius={bsphere_radius:.4f}")
        if not use_template:
            # Simple key + fill lighting scaled to the product size.
            light_dist = bsphere_radius * 6.0
            bpy.ops.object.light_add(type='SUN', location=(
                bbox_center.x + light_dist * 0.5,
                bbox_center.y - light_dist * 0.35,
                bbox_center.z + light_dist,
            ))
            sun = bpy.context.active_object
            sun.data.energy = 4.0
            sun.rotation_euler = (math.radians(45), 0, math.radians(30))
            bpy.ops.object.light_add(type='AREA', location=(
                bbox_center.x - light_dist * 0.4,
                bbox_center.y + light_dist * 0.4,
                bbox_center.z + light_dist * 0.7,
            ))
            fill = bpy.context.active_object
            fill.data.energy = max(800.0, bsphere_radius ** 2 * 2000.0)
            fill.data.size = max(4.0, bsphere_radius * 4.0)
        cam_dist = bsphere_radius * 2.5
        cam_location = Vector((bbox_center.x + cam_dist, bbox_center.y, bbox_center.z + bsphere_radius * 0.5))
        bpy.ops.object.camera_add(location=cam_location)
        camera = bpy.context.active_object
        bpy.context.scene.camera = camera
        camera.data.clip_start = max(cam_dist * 0.001, 0.0001)
        camera.data.clip_end = cam_dist * 10.0
        # Track-to empty keeps the camera aimed at the product centre.
        empty = bpy.data.objects.new("target", None)
        bpy.context.collection.objects.link(empty)
        empty.location = bbox_center
        track = camera.constraints.new(type='TRACK_TO')
        track.target = empty
        track.track_axis = 'TRACK_NEGATIVE_Z'
        track.up_axis = 'UP_Y'
        if not use_template:
            world = bpy.data.worlds.new("World")
            bpy.context.scene.world = world
            world.use_nodes = True
            bg = world.node_tree.nodes["Background"]
            bg.inputs["Color"].default_value = (0.96, 0.96, 0.97, 1.0)
            bg.inputs["Strength"].default_value = 0.15
        # Animate the camera around the product via a parent pivot empty.
        pivot = bpy.data.objects.new("pivot", None)
        bpy.context.collection.objects.link(pivot)
        pivot.location = bbox_center
        camera.parent = pivot
        camera.location = (cam_dist, 0, bsphere_radius * 0.5)
        scene = bpy.context.scene
        scene.frame_start = 1
        scene.frame_end = frame_count
        pivot.rotation_euler = (0, 0, 0)
        pivot.keyframe_insert(data_path="rotation_euler", frame=1)
        pivot.rotation_euler = _axis_rotation(turntable_axis, degrees)
        # frame_count + 1 so the last rendered frame stops one step short of a
        # full revolution (seamless loop).
        pivot.keyframe_insert(data_path="rotation_euler", frame=frame_count + 1)
        _set_fcurves_linear(pivot.animation_data.action)
    else:
        scene = bpy.context.scene
        scene.frame_start = 1
        scene.frame_end = frame_count
        all_corners = []
        for part in parts:
            all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
        bbox_center = Vector((
            (min(v.x for v in all_corners) + max(v.x for v in all_corners)) * 0.5,
            (min(v.y for v in all_corners) + max(v.y for v in all_corners)) * 0.5,
            (min(v.z for v in all_corners) + max(v.z for v in all_corners)) * 0.5,
        ))
        if camera_orbit and bpy.context.scene.camera:
            # Camera-orbit mode: rotate camera around static product.
            # Parts stay stationary → Cycles BVH cached across all frames → ~40% speedup.
            camera = bpy.context.scene.camera
            cam_world = camera.matrix_world.copy()
            cam_pivot = bpy.data.objects.new("cam_pivot", None)
            bpy.context.collection.objects.link(cam_pivot)
            cam_pivot.location = bbox_center
            camera.parent = cam_pivot
            # Restore world-space transform after parenting (Blender recomputes local matrix)
            camera.matrix_world = cam_world
            cam_pivot.rotation_euler = (0, 0, 0)
            cam_pivot.keyframe_insert(data_path="rotation_euler", frame=1)
            cam_pivot.rotation_euler = _axis_rotation(turntable_axis, degrees)
            cam_pivot.keyframe_insert(data_path="rotation_euler", frame=frame_count + 1)
            _set_fcurves_linear(cam_pivot.animation_data.action)
            print(f"[turntable_setup] camera-orbit mode: cam_pivot at {tuple(round(c, 4) for c in bbox_center)}")
        else:
            # Product-rotation mode: parts parent to pivot (default fallback when no camera)
            pivot = bpy.data.objects.new("turntable_pivot", None)
            bpy.context.collection.objects.link(pivot)
            pivot.location = bbox_center
            for part in parts:
                part.parent = pivot
            pivot.rotation_euler = (0, 0, 0)
            pivot.keyframe_insert(data_path="rotation_euler", frame=1)
            pivot.rotation_euler = _axis_rotation(turntable_axis, degrees)
            pivot.keyframe_insert(data_path="rotation_euler", frame=frame_count + 1)
            _set_fcurves_linear(pivot.animation_data.action)
            print(f"[turntable_setup] product-rotation mode: {len(parts)} parts parented to turntable_pivot")
    # ── Colour management ────────────────────────────────────────────────────
    scene = bpy.context.scene
    if not use_template:
        scene.view_settings.view_transform = 'Standard'
        scene.view_settings.exposure = 0.0
        scene.view_settings.gamma = 1.0
        try:
            scene.view_settings.look = 'None'
        except Exception:
            pass
    # ── Render engine ────────────────────────────────────────────────────────
    if engine == "eevee":
        # Engine enum id differs across Blender versions; try both.
        eevee_ok = False
        for eevee_id in ('BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'):
            try:
                scene.render.engine = eevee_id
                eevee_ok = True
                print(f"[turntable_setup] EEVEE engine id: {eevee_id}")
                break
            except TypeError:
                continue
        if eevee_ok:
            # Sample-count attribute also renamed between versions.
            for attr in ('taa_render_samples', 'samples'):
                try:
                    setattr(scene.eevee, attr, samples)
                    break
                except AttributeError:
                    continue
        else:
            print("[turntable_setup] WARNING: EEVEE not available, falling back to Cycles")
            engine = "cycles"
    if engine != "eevee":
        scene.render.engine = 'CYCLES'
        scene.cycles.samples = samples
        scene.cycles.use_denoising = True
        scene.cycles.denoiser = denoiser_arg if denoiser_arg else 'OPENIMAGEDENOISE'
        # Optional denoiser tuning; each attribute may not exist on older
        # Blender builds, hence the per-setting try/except.
        if denoising_input_passes_arg:
            try: scene.cycles.denoising_input_passes = denoising_input_passes_arg
            except Exception: pass
        if denoising_prefilter_arg:
            try: scene.cycles.denoising_prefilter = denoising_prefilter_arg
            except Exception: pass
        if denoising_quality_arg:
            try: scene.cycles.denoising_quality = denoising_quality_arg
            except Exception: pass
        if denoising_use_gpu_arg:
            try: scene.cycles.denoising_use_gpu = (denoising_use_gpu_arg == "1")
            except AttributeError: pass
        if noise_threshold_arg:
            scene.cycles.use_adaptive_sampling = True
            scene.cycles.adaptive_threshold = float(noise_threshold_arg)
        if denoiser_arg:
            scene["_denoiser_override"] = denoiser_arg
    # scene.cycles.device is set by turntable_gpu_setup.py at render time
    # (GPU preferences are user-level and not stored in .blend)
    # We set the intended device here so gpu_setup can read it.
    scene["_cycles_device"] = cycles_device
    # Keep BVH, textures, and scene data resident on GPU between frames.
    # Critical for -a mode: prevents Cycles from re-uploading data each frame.
    scene.render.use_persistent_data = True
    # No motion blur needed for static mechanical parts — eliminates per-frame
    # CPU deformation calculations.
    scene.render.use_motion_blur = False
    print(f"[turntable_setup] cycles_device preference saved: {cycles_device}")
    print("[turntable_setup] use_persistent_data=True, use_motion_blur=False")
    # ── Render output settings ───────────────────────────────────────────────
    scene.render.resolution_x = width
    scene.render.resolution_y = height
    scene.render.resolution_percentage = 100
    scene.render.image_settings.file_format = 'PNG'
    # Blender -a appends 4-digit frame number: "frame_" → "frame_0001.png"
    scene.render.filepath = os.path.join(frames_dir, "frame_")
    # ── Transparent background ────────────────────────────────────────────────
    # bg_color compositing is done by FFmpeg in the compose-video task.
    # Blender renders transparent PNG frames (film_transparent=True) when
    # bg_color is set; FFmpeg then overlays them over a solid colour background.
    if bg_color or transparent_bg:
        scene.render.film_transparent = True
        if bg_color:
            print(f"[turntable_setup] film_transparent=True for FFmpeg bg_color compositing ({bg_color})")
        else:
            print("[turntable_setup] transparent_bg enabled (alpha PNG frames)")
    # ── Save scene ───────────────────────────────────────────────────────────
    # save_as_mainfile saves to an explicit new path (like File > Save As).
    # save_mainfile would save back to the originally-opened template path.
    print(f"[turntable_setup] Saving scene to {scene_path}")
    result = bpy.ops.wm.save_as_mainfile(filepath=scene_path)
    if 'FINISHED' not in result:
        print(f"[turntable_setup] ERROR: save_as_mainfile returned {result} — aborting")
        sys.exit(1)
    if not os.path.isfile(scene_path):
        print(f"[turntable_setup] ERROR: scene file not found after save: {scene_path}")
        sys.exit(1)
    size_mb = os.path.getsize(scene_path) / 1024 / 1024
    print(f"[turntable_setup] Scene saved → {scene_path} ({size_mb:.1f} MB)")
    print(f"[turntable_setup] Ready for: blender --background {scene_path} --python turntable_gpu_setup.py -a")
if __name__ == "__main__":
    try:
        main()
    except SystemExit:
        # Deliberate exits (sys.exit in main) keep their exit code.
        raise
    except Exception as _exc:
        # Any other failure: print the traceback so it lands in the Blender
        # job log, then exit non-zero so the task is marked failed.
        import traceback
        traceback.print_exc()
        print(f"[turntable_setup] FATAL: unhandled exception — {_exc}")
        sys.exit(1)
-2
View File
@@ -1,2 +0,0 @@
manager_url: http://flamenco-manager:8080/
task_types: [blender, ffmpeg, file-management, misc]
-1
View File
@@ -33,7 +33,6 @@ export interface ItemStatusBreakdown {
}
export interface RenderTimeBreakdown {
avg_stl_s: number | null
avg_render_s: number | null
avg_total_s: number | null
sample_count: number
-52
View File
@@ -1,52 +0,0 @@
FROM python:3.11-slim

# Unbuffered stdout/stderr so container logs stream in real time.
ENV PYTHONUNBUFFERED=1
# Fixed browser install location shared with `playwright install` below.
ENV PLAYWRIGHT_BROWSERS_PATH=/ms-playwright

# System dependencies for Playwright Chromium + cadquery (needs OpenGL/Mesa)
RUN apt-get update && apt-get install -y \
    wget \
    gnupg \
    libglib2.0-0 \
    libnss3 \
    libnspr4 \
    libatk1.0-0 \
    libatk-bridge2.0-0 \
    libcups2 \
    libdbus-1-3 \
    libdrm2 \
    libxcb1 \
    libxkbcommon0 \
    libx11-6 \
    libxcomposite1 \
    libxdamage1 \
    libxext6 \
    libxfixes3 \
    libxrandr2 \
    libgbm1 \
    libpango-1.0-0 \
    libcairo2 \
    libatspi2.0-0 \
    libgl1 \
    libosmesa6 \
    libgomp1 \
    libasound2 \
    && rm -rf /var/lib/apt/lists/*

# Use OSMesa for offscreen rendering (no display needed)
ENV PYOPENGL_PLATFORM=osmesa
ENV VTK_DEFAULT_EGL=0

WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Install Playwright Chromium browser
RUN playwright install chromium

COPY . .

EXPOSE 8101
# NOTE(review): --reload is a development convenience; consider removing it
# for production images.
CMD ["python", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8101", "--reload"]
-350
View File
@@ -1,350 +0,0 @@
"""
Three.js renderer service — FastAPI microservice.
Pipeline: STEP → STL (cadquery) → Three.js in headless Chromium → PNG screenshot.
Two render modes
────────────────
part_colors = None Single grey metallic mesh (original behaviour).
part_colors = dict Connected-component analysis in JavaScript:
The STL is loaded as one mesh; disconnected islands are
detected entirely in the browser and each gets a distinct
palette colour. No server-side OCC/per-part extraction —
just one STL conversion and client-side graph analysis.
"""
import asyncio
import base64
import json
import logging
from pathlib import Path
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
logger = logging.getLogger(__name__)

app = FastAPI(title="Three.js Renderer", version="1.0.0")

# 10-colour palette used for connected-component assignment
# (kept in sync with the palette used by the Blender turntable scripts).
PALETTE = [
    "#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
    "#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]
class RenderRequest(BaseModel):
    """Request body for POST /render."""
    # Path to the input STEP file (must already exist on disk).
    step_path: str
    # Path where the PNG screenshot will be written; parent dirs are created.
    output_path: str
    width: int = 512
    height: int = 512
    # None → single grey mesh
    # {} → auto-colour by connected-component index (palette)
    # {...} → same (named-part colour mapping is handled in JS if names match)
    part_colors: dict[str, str] | None = None
    # Model rotation in degrees, applied before the camera is framed.
    rotation_x: float = 0.0
    rotation_y: float = 0.0
    rotation_z: float = 0.0
@app.get("/health")
async def health():
    """Liveness probe — returns a static payload identifying this renderer."""
    return {"status": "ok", "renderer": "threejs"}
@app.post("/render")
async def render(req: RenderRequest):
    """Render a STEP file to a PNG via Three.js in headless Chromium.

    Pipeline: STEP → cached STL (cadquery) → Three.js screenshot.
    The STL is cached next to the STEP file as ``<stem>_low.stl`` and reused
    on subsequent requests.

    Raises:
        HTTPException 404: the STEP file does not exist.
        HTTPException 500: STEP conversion or the browser render failed, or
            no output file was produced.
    """
    step_path = Path(req.step_path)
    output_path = Path(req.output_path)
    if not step_path.exists():
        raise HTTPException(404, detail=f"STEP file not found: {step_path}")
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # Persistent STL cache — same convention as blender-renderer (quality always "low")
    stl_path = step_path.parent / f"{step_path.stem}_low.stl"
    if not stl_path.exists() or stl_path.stat().st_size == 0:
        try:
            # cadquery tessellation is CPU-bound and can take seconds; run it
            # in a worker thread so the event loop stays responsive (same
            # pattern as the render call below).
            await asyncio.to_thread(_convert_step_to_stl, step_path, stl_path)
        except Exception as e:
            logger.error(f"STEP→STL conversion failed: {e}")
            raise HTTPException(500, detail=f"STEP conversion failed: {e}")
        logger.info("STL cached: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
    else:
        logger.info("STL cache hit: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
    # None → plain grey mesh; any dict (even empty) → per-component colouring.
    use_colors = req.part_colors is not None
    try:
        # Playwright's sync API must not run on the event-loop thread.
        await asyncio.to_thread(
            _render_stl_threejs,
            stl_path, output_path, req.width, req.height, use_colors,
            req.rotation_x, req.rotation_y, req.rotation_z,
        )
    except Exception as e:
        logger.error(f"Three.js render failed: {e}")
        raise HTTPException(500, detail=f"Three.js render failed: {e}")
    if not output_path.exists():
        raise HTTPException(500, detail="Render produced no output file")
    return {
        "output_path": str(output_path),
        "status": "ok",
        "renderer": "threejs-colored" if use_colors else "threejs",
    }
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _convert_step_to_stl(step_path: Path, stl_path: Path) -> None:
    """Convert STEP to a single binary STL via cadquery.

    The whole assembly is exported as one mesh (no per-part extraction).
    Raises RuntimeError when cadquery writes an empty or missing file;
    cadquery's own import/export errors propagate to the caller.
    """
    # Imported lazily: cadquery is heavyweight and only needed on cache miss.
    import cadquery as cq
    shape = cq.importers.importStep(str(step_path))
    cq.exporters.export(shape, str(stl_path))
    if not stl_path.exists() or stl_path.stat().st_size == 0:
        raise RuntimeError("cadquery produced empty STL")
def _render_stl_threejs(
    stl_path: Path,
    output_path: Path,
    width: int,
    height: int,
    use_colors: bool,
    rotation_x: float = 0.0,
    rotation_y: float = 0.0,
    rotation_z: float = 0.0,
) -> None:
    """Render an STL to a PNG screenshot via Three.js in headless Chromium.

    Builds a self-contained HTML page embedding the STL as base64, loads it
    in a Playwright-driven Chromium, waits (best-effort) for the page's
    ``window.__renderDone`` flag, then screenshots the viewport to
    *output_path*. Blocking/synchronous — callers run it off the event loop.
    """
    from playwright.sync_api import sync_playwright
    stl_b64 = base64.b64encode(stl_path.read_bytes()).decode()
    filename = stl_path.stem
    palette_json = json.dumps(PALETTE)
    html = _build_html(stl_b64, filename, width, height, palette_json, use_colors,
                       rotation_x, rotation_y, rotation_z)
    with sync_playwright() as p:
        # --disable-gpu: rendering happens in software inside the container.
        browser = p.chromium.launch(
            args=["--no-sandbox", "--disable-setuid-sandbox", "--disable-gpu"]
        )
        page = browser.new_page(viewport={"width": width, "height": height})
        page.set_content(html, wait_until="domcontentloaded")
        try:
            # The page sets this flag after its first renderer.render() call.
            page.wait_for_function("window.__renderDone === true", timeout=12000)
        except Exception:
            pass  # take screenshot anyway
        page.screenshot(
            path=str(output_path),
            full_page=False,
            clip={"x": 0, "y": 0, "width": width, "height": height},
        )
        browser.close()
def _build_html(
    stl_b64: str,
    filename: str,
    width: int,
    height: int,
    palette_json: str,
    use_colors: bool,
    rotation_x: float = 0.0,
    rotation_y: float = 0.0,
    rotation_z: float = 0.0,
) -> str:
    """
    Build a self-contained HTML page that renders the STL with Three.js.

    When use_colors=True the JavaScript runs a Union-Find connected-component
    analysis on the vertex graph of the STL and paints each disconnected island
    with a distinct colour from the palette. This requires no server-side part
    extraction — it works directly on the flat triangle soup in the STL.

    NOTE(review): the *filename* parameter is not interpolated into the page
    in this version (the label shows a static placeholder) — confirm whether
    that is intentional. Three.js is loaded from a CDN import map, so the
    browser needs network access at render time.
    """
    # ---------- colour-assignment script (injected only when use_colors=True)
    color_script = ""
    if use_colors:
        # f-string: JS braces are doubled ({{ }}) so only {palette_json} is
        # substituted by Python.
        color_script = f"""
// ── Connected-component colouring ─────────────────────────────────────────
// Each STL face is a triplet of un-shared vertices. We weld coincident
// vertices by their rounded position string, then run Union-Find on the
// resulting graph to identify disconnected parts. Each part gets a colour
// from the palette.
function applyPartColors(geometry, palette) {{
const pos = geometry.attributes.position;
const n = pos.count;
// Round to 4 d.p. to merge floating-point near-duplicates
const key = i =>
Math.round(pos.getX(i)*1e4) + ',' +
Math.round(pos.getY(i)*1e4) + ',' +
Math.round(pos.getZ(i)*1e4);
// Map position string → canonical vertex index
const posMap = Object.create(null);
const canon = new Int32Array(n);
for (let i = 0; i < n; i++) {{
const k = key(i);
if (posMap[k] === undefined) posMap[k] = i;
canon[i] = posMap[k];
}}
// Union-Find with path compression + union by rank
const parent = new Int32Array(n);
const rank = new Uint8Array(n);
for (let i = 0; i < n; i++) parent[i] = i;
function find(x) {{
while (parent[x] !== x) {{ parent[x] = parent[parent[x]]; x = parent[x]; }}
return x;
}}
function unite(a, b) {{
a = find(a); b = find(b);
if (a === b) return;
if (rank[a] < rank[b]) {{ let t = a; a = b; b = t; }}
parent[b] = a;
if (rank[a] === rank[b]) rank[a]++;
}}
// Connect the three canonical vertices of every triangle
for (let i = 0; i < n; i += 3) {{
unite(canon[i], canon[i+1]);
unite(canon[i+1], canon[i+2]);
}}
// Assign a palette index to each component root
const compIdx = Object.create(null);
let nextIdx = 0;
const colors = new Float32Array(n * 3);
for (let i = 0; i < n; i++) {{
const root = find(canon[i]);
if (compIdx[root] === undefined) compIdx[root] = nextIdx++;
const hex = palette[compIdx[root] % palette.length];
colors[i*3] = parseInt(hex.slice(1,3), 16) / 255;
colors[i*3+1] = parseInt(hex.slice(3,5), 16) / 255;
colors[i*3+2] = parseInt(hex.slice(5,7), 16) / 255;
}}
geometry.setAttribute('color', new THREE.BufferAttribute(colors, 3));
return new THREE.MeshStandardMaterial({{
vertexColors: true,
metalness: 0.4,
roughness: 0.45,
}});
}}
const palette = {palette_json};
const material = applyPartColors(geometry, palette);
"""
    else:
        # Plain string (no f-prefix), so JS braces stay single.
        color_script = """
const material = new THREE.MeshStandardMaterial({
color: 0xc0cad8,
metalness: 0.8,
roughness: 0.3,
});
"""
    # Main page template: {width}/{height}/{stl_b64}/{rotation_*}/{color_script}
    # are Python substitutions; ${{...}} renders to JS template-literal ${...}.
    return f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
* {{ margin:0; padding:0; box-sizing:border-box; }}
body {{ background:#f5f6f8; overflow:hidden; }}
canvas {{ display:block; }}
</style>
</head>
<body>
<script type="importmap">
{{
"imports": {{
"three": "https://cdn.jsdelivr.net/npm/three@0.162.0/build/three.module.js",
"three/addons/": "https://cdn.jsdelivr.net/npm/three@0.162.0/examples/jsm/"
}}
}}
</script>
<script type="module">
import * as THREE from 'three';
import {{ STLLoader }} from 'three/addons/loaders/STLLoader.js';
const W = {width}, H = {height};
const renderer = new THREE.WebGLRenderer({{ antialias: true }});
renderer.setSize(W, H);
renderer.setPixelRatio(1);
renderer.outputColorSpace = THREE.SRGBColorSpace;
renderer.shadowMap.enabled = true;
renderer.shadowMap.type = THREE.PCFSoftShadowMap;
document.body.appendChild(renderer.domElement);
const scene = new THREE.Scene();
scene.background = new THREE.Color(0xf5f6f8);
const camera = new THREE.PerspectiveCamera(45, W / H, 0.001, 10000);
scene.add(camera);
const ambientLight = new THREE.AmbientLight(0xffffff, 0.6);
scene.add(ambientLight);
const dirLight = new THREE.DirectionalLight(0xffffff, 2.5);
dirLight.position.set(1, 2, 1.5);
dirLight.castShadow = true;
scene.add(dirLight);
const fillLight = new THREE.DirectionalLight(0xddeeff, 1.0);
fillLight.position.set(-1, -0.5, -1);
scene.add(fillLight);
// Decode base64 STL
const b64 = "{stl_b64}";
const binary = Uint8Array.from(atob(b64), c => c.charCodeAt(0));
const loader = new STLLoader();
const geometry = loader.parse(binary.buffer);
geometry.computeVertexNormals();
// Material (grey or per-part coloured)
{color_script}
const mesh = new THREE.Mesh(geometry, material);
mesh.castShadow = true;
mesh.receiveShadow = true;
scene.add(mesh);
// Centre and frame
geometry.computeBoundingBox();
const box = geometry.boundingBox;
const center = new THREE.Vector3();
box.getCenter(center);
mesh.position.sub(center);
mesh.rotation.set({rotation_x}*Math.PI/180, {rotation_y}*Math.PI/180, {rotation_z}*Math.PI/180);
const size = new THREE.Vector3();
box.getSize(size);
const maxDim = Math.max(size.x, size.y, size.z);
const fov = camera.fov * (Math.PI / 180);
let dist = maxDim / (2 * Math.tan(fov / 2)) * 1.15;
dist = Math.max(dist, 0.01);
camera.position.set(dist * 0.8, dist * 0.6, dist * 0.8);
camera.lookAt(0, 0, 0);
// Schaeffler green top bar
const topBar = document.createElement('div');
topBar.style.cssText =
`position:fixed;top:0;left:0;width:${{W}}px;height:10px;background:#00893d;z-index:10`;
document.body.appendChild(topBar);
// Model name label
const label = document.createElement('div');
label.textContent = "(unknown)";
label.style.cssText =
`position:fixed;bottom:0;left:0;width:${{W}}px;background:rgba(20,35,55,0.85);` +
`color:#fff;text-align:center;font-size:13px;font-family:monospace;` +
`padding:6px 4px;box-sizing:border-box;z-index:10`;
document.body.appendChild(label);
renderer.render(scene, camera);
window.__renderDone = true;
</script>
</body>
</html>"""
-5
View File
@@ -1,5 +0,0 @@
fastapi>=0.110.0
uvicorn[standard]>=0.27.0
playwright>=1.42.0
cadquery>=2.4.0
pillow>=10.2.0