feat: initial commit

This commit is contained in:
2026-03-05 22:12:38 +01:00
commit bce762a783
380 changed files with 51955 additions and 0 deletions
+47
View File
@@ -0,0 +1,47 @@
# Headless rendering microservice image: cadquery (STEP→STL) + Blender (STL→PNG).
FROM ubuntu:22.04
# Never prompt during apt installs; stream Python/uvicorn logs unbuffered.
ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
# OSMesa for headless cadquery/VTK (no display needed)
ENV PYOPENGL_PLATFORM=osmesa
ENV VTK_DEFAULT_EGL=0
# Runtime libraries for cadquery/VTK + Blender 5.x
RUN apt-get update && apt-get install -y \
python3-pip \
python3-dev \
libxrender1 \
libxi6 \
libxkbcommon-x11-0 \
libsm6 \
libglib2.0-0 \
libgl1-mesa-glx \
libosmesa6 \
libgomp1 \
libxfixes3 \
libxrandr2 \
libxcursor1 \
libxinerama1 \
libwayland-client0 \
libwayland-cursor0 \
libwayland-egl1 \
libvulkan1 \
mesa-vulkan-drivers \
libegl1 \
libegl-mesa0 \
libgbm1 \
&& rm -rf /var/lib/apt/lists/*
# Blender 5.0.1 is mounted from the host at /opt/blender (see docker-compose.yml)
ENV BLENDER_BIN=/opt/blender/blender
WORKDIR /app
# Install Python deps first so this layer is cached across source-only changes.
COPY requirements.txt .
RUN pip3 install --no-cache-dir -r requirements.txt
COPY . .
# FastAPI service port (matches the uvicorn --port in CMD below).
EXPOSE 8100
CMD ["python3", "-m", "uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8100"]
Binary file not shown.
+581
View File
@@ -0,0 +1,581 @@
"""
Blender renderer service — FastAPI microservice.
Accepts a STEP file path (on shared uploads volume) and renders a thumbnail PNG
using the pipeline: STEP → STL (via cadquery) → PNG (via Blender headless).
"""
import asyncio
import json as _json_mod
import logging
import os
import signal
import shutil
import subprocess
import tempfile
import threading
import time
from pathlib import Path
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
logger = logging.getLogger(__name__)
app = FastAPI(title="Blender Renderer", version="1.0.0")
# Active render subprocesses keyed by job_id for cancellation support
_active_procs: dict[str, subprocess.Popen] = {}
# Guards every read/write of _active_procs (render work runs on worker threads)
_procs_lock = threading.Lock()
# Limit concurrent Blender renders to avoid memory exhaustion from parallel threads
# (each thread loads cadquery/OCC, ~300-500 MB each).
# Resizable at runtime via POST /configure without restart.
_max_concurrent: int = 3
_render_semaphore = threading.Semaphore(_max_concurrent)
# Guards swaps of _render_semaphore/_max_concurrent by _set_max_concurrent()
_config_lock = threading.Lock()
def _set_max_concurrent(n: int) -> None:
    """Resize the render-concurrency limit by installing a fresh semaphore.

    Renders already in flight release whichever Semaphore instance they
    acquired; renders that start after this call pick up the replacement.
    """
    global _render_semaphore, _max_concurrent
    with _config_lock:
        _render_semaphore = threading.Semaphore(n)
        _max_concurrent = n
class RenderRequest(BaseModel):
    """Payload for POST /render.

    All filesystem paths refer to the shared uploads volume. The render
    settings are forwarded 1:1 as positional argv to blender_render.py.
    """
    step_path: str
    output_path: str
    width: int = 512
    height: int = 512
    engine: str = "cycles" # "cycles" or "eevee"
    samples: int = 256
    stl_quality: str = "low" # "low" or "high"
    smooth_angle: int = 30 # degrees; 0 = shade_flat, >0 = shade_smooth_by_angle
    cycles_device: str = "auto" # "auto", "gpu", or "cpu"
    transparent_bg: bool = False # render with transparent background (PNG only)
    part_colors: dict | None = None # optional {part_name: hex_color}
    template_path: str | None = None # Path to .blend template file
    target_collection: str = "Product" # Collection to import geometry into
    material_library_path: str | None = None # Path to material library .blend
    material_map: dict | None = None # {part_name: material_name} from Excel
    part_names_ordered: list | None = None # ordered STEP part names for index matching
    lighting_only: bool = False # use template World/HDRI only; force auto-camera
    shadow_catcher: bool = False # enable Shadowcatcher collection + position plane at bbox min Z
    rotation_x: float = 0.0 # Euler X rotation in degrees (applied to imported STL)
    rotation_y: float = 0.0 # Euler Y rotation in degrees
    rotation_z: float = 0.0 # Euler Z rotation in degrees
    job_id: str | None = None # Optional ID for cancellation tracking
    noise_threshold: str = "" # Adaptive sampling noise threshold (empty = Blender default)
    denoiser: str = "" # "OPTIX" | "OPENIMAGEDENOISE" (empty = auto)
    denoising_input_passes: str = "" # "RGB" | "RGB_ALBEDO" | "RGB_ALBEDO_NORMAL"
    denoising_prefilter: str = "" # "NONE" | "FAST" | "ACCURATE"
    denoising_quality: str = "" # "HIGH" | "BALANCED" | "FAST" (Blender 4.2+)
    denoising_use_gpu: str = "" # "1" = GPU, "0" = CPU, "" = auto
def _find_blender() -> str:
"""Locate the Blender binary: prefer $BLENDER_BIN, then PATH."""
import os, shutil
env_bin = os.environ.get("BLENDER_BIN", "")
if env_bin and Path(env_bin).exists():
return env_bin
return shutil.which("blender") or "blender"
@app.get("/health")
async def health():
    """Report liveness plus the resolved Blender binary path and version."""
    blender_bin = _find_blender()
    version = "unknown"
    try:
        probe = subprocess.run(
            [blender_bin, "--version"], capture_output=True, text=True, timeout=10
        )
        output_lines = (probe.stdout or probe.stderr or "").splitlines()
        if output_lines:
            version = output_lines[0].strip()
    except Exception:
        # Unreachable/broken binary → report "unknown" rather than fail health
        pass
    return {
        "status": "ok",
        "renderer": "blender",
        "blender_path": blender_bin,
        "blender_version": version,
    }
class ConvertStlRequest(BaseModel):
    """Payload for POST /convert-stl — STEP→STL conversion without a render."""
    step_path: str
    quality: str = "low" # "low" or "high"
@app.post("/convert-stl")
async def convert_stl(req: ConvertStlRequest):
    """Convert a STEP file to STL and cache it — no Blender render."""
    if req.quality not in ("low", "high"):
        raise HTTPException(400, detail="quality must be 'low' or 'high'")
    step_path = Path(req.step_path)
    if not step_path.exists():
        raise HTTPException(404, detail=f"STEP file not found: {step_path}")
    stl_path = step_path.parent / f"{step_path.stem}_{req.quality}.stl"
    parts_dir = step_path.parent / f"{step_path.stem}_{req.quality}_parts"
    started = time.monotonic()
    cache_valid = stl_path.exists() and stl_path.stat().st_size > 0
    try:
        if cache_valid:
            logger.info("STL cache hit: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
        else:
            # Conversion loads cadquery/OCC — run off the event loop.
            await asyncio.to_thread(_convert_step_to_stl, step_path, stl_path, req.quality)
            logger.info("STL generated: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
    except Exception as e:
        logger.error("STEP→STL conversion failed: %s", e)
        raise HTTPException(500, detail=f"STEP conversion failed: {e}")
    # Per-part STLs are a best-effort extra; failure is logged, not raised.
    try:
        if not (parts_dir / "manifest.json").exists():
            await asyncio.to_thread(_export_per_part_stls, step_path, parts_dir, req.quality)
    except Exception as e:
        logger.warning("per-part STL export failed (non-fatal): %s", e)
    return {
        "stl_path": str(stl_path),
        "size_bytes": stl_path.stat().st_size if stl_path.exists() else 0,
        "duration_s": round(time.monotonic() - started, 2),
    }
@app.post("/cancel/{job_id}")
async def cancel_render(job_id: str):
    """Best-effort cancellation: SIGTERM the job's whole Blender process group."""
    with _procs_lock:
        proc = _active_procs.pop(job_id, None)
    if proc is None:
        return {"status": "not_found", "job_id": job_id}
    try:
        group_id = os.getpgid(proc.pid)
        os.killpg(group_id, signal.SIGTERM)
        logger.info("Sent SIGTERM to process group %d for job %s", group_id, job_id)
    except (ProcessLookupError, OSError):
        # Process finished between pop and signal — nothing left to kill.
        pass
    return {"status": "cancelled", "job_id": job_id}
@app.get("/status")
async def status():
    """Report how many renders are active and the current concurrency limit."""
    with _procs_lock:
        active_count = len(_active_procs)
    with _config_lock:
        limit = _max_concurrent
    return {"active_jobs": active_count, "max_concurrent": limit}
@app.post("/configure")
async def configure(max_concurrent: int):
    """Dynamically update the maximum number of concurrent Blender renders.

    Raises:
        HTTPException(400): when max_concurrent is outside [1, 16].
    """
    if not (1 <= max_concurrent <= 16):
        # HTTPException is imported at module level — the local re-import was redundant.
        raise HTTPException(400, detail="max_concurrent must be between 1 and 16")
    _set_max_concurrent(max_concurrent)
    logger.info("max_concurrent_renders updated to %d", max_concurrent)
    return {"max_concurrent": max_concurrent}
@app.post("/render")
async def render(req: RenderRequest):
    """Render a STEP file to PNG: STEP → STL (cached) → Blender render.

    Concurrency is bounded by the module semaphore. The semaphore object is
    captured in a local BEFORE acquiring: /configure swaps the global for a
    new Semaphore, and releasing a different instance than the one acquired
    would silently corrupt the concurrency limit.

    Raises:
        HTTPException(404): STEP file missing.
        HTTPException(500): conversion or render failure, or no output file.
    """
    step_path = Path(req.step_path)
    output_path = Path(req.output_path)
    if not step_path.exists():
        raise HTTPException(404, detail=f"STEP file not found: {step_path}")
    output_path.parent.mkdir(parents=True, exist_ok=True)
    t_start = time.monotonic()
    # Capture the current semaphore so acquire/release pair on the SAME object
    # even if /configure replaces the global while this render is in flight.
    sem = _render_semaphore
    # Acquire render slot — blocks while the maximum number of renders is running.
    # asyncio.to_thread keeps the blocking acquire off the event loop.
    await asyncio.to_thread(sem.acquire)
    # 1. Get/create STL cache — persistent next to STEP file so re-renders skip conversion
    stl_path = step_path.parent / f"{step_path.stem}_{req.stl_quality}.stl"
    parts_dir = step_path.parent / f"{step_path.stem}_{req.stl_quality}_parts"
    stl_size_bytes = 0
    t_stl_start = time.monotonic()
    try:
        if not stl_path.exists() or stl_path.stat().st_size == 0:
            logger.info("STL cache miss — converting: %s", step_path.name)
            # cadquery/OCC conversion is CPU-heavy — run off the event loop.
            await asyncio.to_thread(_convert_step_to_stl, step_path, stl_path, req.stl_quality)
        else:
            logger.info("STL cache hit: %s (%d KB)", stl_path.name, stl_path.stat().st_size // 1024)
        stl_size_bytes = stl_path.stat().st_size if stl_path.exists() else 0
    except Exception as e:
        sem.release()
        logger.error("STEP→STL conversion failed: %s", e)
        raise HTTPException(500, detail=f"STEP conversion failed: {e}")
    # Per-part export (non-fatal — Blender falls back to combined STL)
    try:
        if not (parts_dir / "manifest.json").exists():
            await asyncio.to_thread(_export_per_part_stls, step_path, parts_dir, req.stl_quality)
    except Exception as e:
        logger.warning("per-part STL export failed (non-fatal): %s", e)
    stl_duration_s = round(time.monotonic() - t_stl_start, 2)
    # 2. Render STL → PNG via Blender (subprocess wait happens in a worker thread
    # so a multi-minute render doesn't stall other requests, e.g. /cancel).
    render_log_lines: list[str] = []
    parts_count = 0
    engine_used = req.engine
    t_render_start = time.monotonic()
    try:
        render_log_lines, parts_count, engine_used = await asyncio.to_thread(
            _render_stl_with_blender,
            stl_path, output_path, req.width, req.height,
            req.engine, req.samples, req.smooth_angle, req.cycles_device,
            req.transparent_bg,
            template_path=req.template_path,
            target_collection=req.target_collection,
            material_library_path=req.material_library_path,
            material_map=req.material_map,
            part_names_ordered=req.part_names_ordered,
            lighting_only=req.lighting_only,
            shadow_catcher=req.shadow_catcher,
            rotation_x=req.rotation_x,
            rotation_y=req.rotation_y,
            rotation_z=req.rotation_z,
            job_id=req.job_id,
            noise_threshold=req.noise_threshold,
            denoiser=req.denoiser,
            denoising_input_passes=req.denoising_input_passes,
            denoising_prefilter=req.denoising_prefilter,
            denoising_quality=req.denoising_quality,
            denoising_use_gpu=req.denoising_use_gpu,
        )
    except Exception as e:
        logger.error("Blender render failed: %s", e)
        raise HTTPException(500, detail=f"Blender render failed: {e}")
    finally:
        sem.release()
        # STL cache is persistent — do NOT delete stl_path or parts_dir
    render_duration_s = round(time.monotonic() - t_render_start, 2)
    if not output_path.exists():
        raise HTTPException(500, detail="Render produced no output file")
    total_duration_s = round(time.monotonic() - t_start, 2)
    output_size_bytes = output_path.stat().st_size
    return {
        "output_path": str(output_path),
        "status": "ok",
        "renderer": "blender",
        # Timing
        "total_duration_s": total_duration_s,
        "stl_duration_s": stl_duration_s,
        "render_duration_s": render_duration_s,
        # Mesh info
        "stl_size_bytes": stl_size_bytes,
        "output_size_bytes": output_size_bytes,
        "parts_count": parts_count,
        # Effective settings (engine may differ from requested if EEVEE fell back)
        "engine_used": engine_used,
        # Blender log lines (filtered to [blender_render] prefix lines)
        "log_lines": render_log_lines,
    }
def _convert_step_to_stl(step_path: Path, stl_path: Path, quality: str = "low") -> None:
    """Convert STEP file to STL using cadquery.

    quality="low"  → tolerance=0.3, angularTolerance=0.3 (fast, coarser mesh)
    quality="high" → tolerance=0.01, angularTolerance=0.02 (slower, finer mesh)

    Raises RuntimeError when the exporter produces a missing or empty file.
    """
    import cadquery as cq
    tol, ang_tol = (0.01, 0.02) if quality == "high" else (0.3, 0.3)
    shape = cq.importers.importStep(str(step_path))
    cq.exporters.export(shape, str(stl_path), tolerance=tol, angularTolerance=ang_tol)
    if not stl_path.exists() or stl_path.stat().st_size == 0:
        raise RuntimeError("cadquery produced empty STL")
def _export_per_part_stls(step_path: Path, parts_dir: Path, quality: str = "low") -> list:
    """Export one STL per named STEP leaf shape using OCP XCAF.

    Creates parts_dir with individual STL files and a manifest.json mapping
    index → STEP part name → filename. Returns the manifest list, or an
    empty list on any failure (callers treat this export as best-effort).
    """
    tol = 0.01 if quality == "high" else 0.3
    angular_tol = 0.05 if quality == "high" else 0.3
    try:
        from OCP.STEPCAFControl import STEPCAFControl_Reader
        from OCP.XCAFDoc import XCAFDoc_DocumentTool, XCAFDoc_ShapeTool
        from OCP.TDataStd import TDataStd_Name
        from OCP.TDF import TDF_Label as TDF_Label_cls, TDF_LabelSequence
        from OCP.XCAFApp import XCAFApp_Application
        from OCP.TDocStd import TDocStd_Document
        from OCP.TCollection import TCollection_ExtendedString
        from OCP.IFSelect import IFSelect_RetDone
        import cadquery as cq
    except ImportError as e:
        logger.warning("per-part export skipped (import error): %s", e)
        return []
    app = XCAFApp_Application.GetApplication_s()
    doc = TDocStd_Document(TCollection_ExtendedString("XmlOcaf"))
    app.InitDocument(doc)
    reader = STEPCAFControl_Reader()
    reader.SetNameMode(True)  # preserve STEP part names in the XCAF document
    status = reader.ReadFile(str(step_path))
    if status != IFSelect_RetDone:
        logger.warning("XCAF reader failed with status %s", status)
        return []
    if not reader.Transfer(doc):
        logger.warning("XCAF transfer failed")
        return []
    shape_tool = XCAFDoc_DocumentTool.ShapeTool_s(doc.Main())
    name_id = TDataStd_Name.GetID_s()
    leaves = []

    def _get_label_name(label):
        # Read the TDataStd_Name attribute from a label ("" when absent).
        name_attr = TDataStd_Name()
        if label.FindAttribute(name_id, name_attr):
            return name_attr.Get().ToExtString()
        return ""

    def _collect_leaves(label):
        # Depth-first walk: assemblies recurse, simple shapes become leaves.
        if XCAFDoc_ShapeTool.IsAssembly_s(label):
            components = TDF_LabelSequence()
            XCAFDoc_ShapeTool.GetComponents_s(label, components)
            for i in range(1, components.Length() + 1):
                comp_label = components.Value(i)
                if XCAFDoc_ShapeTool.IsReference_s(comp_label):
                    ref_label = TDF_Label_cls()
                    XCAFDoc_ShapeTool.GetReferredShape_s(comp_label, ref_label)
                    comp_name = _get_label_name(comp_label)
                    ref_name = _get_label_name(ref_label)
                    # Prefer referred shape name — matches material_map keys
                    name = ref_name or comp_name
                    if XCAFDoc_ShapeTool.IsAssembly_s(ref_label):
                        _collect_leaves(ref_label)
                    elif XCAFDoc_ShapeTool.IsSimpleShape_s(ref_label):
                        # Use comp_label shape — includes instance transform (position)
                        shape = XCAFDoc_ShapeTool.GetShape_s(comp_label)
                        leaves.append((name or f"unnamed_{len(leaves)}", shape))
                else:
                    _collect_leaves(comp_label)
        elif XCAFDoc_ShapeTool.IsSimpleShape_s(label):
            name = _get_label_name(label)
            shape = XCAFDoc_ShapeTool.GetShape_s(label)
            leaves.append((name or f"unnamed_{len(leaves)}", shape))

    top_labels = TDF_LabelSequence()
    shape_tool.GetFreeShapes(top_labels)
    for i in range(1, top_labels.Length() + 1):
        _collect_leaves(top_labels.Value(i))
    if not leaves:
        logger.warning("no leaf shapes found via XCAF")
        return []
    parts_dir.mkdir(parents=True, exist_ok=True)
    manifest = []
    for idx, (name, shape) in enumerate(leaves):
        # Sanitise names for use as filenames (path separators, spaces)
        safe_name = name.replace("/", "_").replace("\\", "_").replace(" ", "_")
        filename = f"{idx:02d}_{safe_name}.stl"
        filepath = str(parts_dir / filename)
        try:
            # cq was already imported above — the original re-imported it every iteration.
            cq_shape = cq.Shape(shape)
            cq_shape.exportStl(filepath, tolerance=tol, angularTolerance=angular_tol)
            manifest.append({"index": idx, "name": name, "file": filename})
        except Exception as e:
            logger.warning("failed to export part '%s': %s", name, e)
    manifest_path = parts_dir / "manifest.json"
    with open(manifest_path, "w") as f:
        _json_mod.dump({"parts": manifest}, f, indent=2)
    total_size = sum(
        os.path.getsize(str(parts_dir / p["file"]))
        for p in manifest
        if (parts_dir / p["file"]).exists()
    )
    logger.info("exported %d per-part STLs (%d KB) to %s", len(manifest), total_size // 1024, parts_dir)
    return manifest
def _parse_blender_log(stdout: str) -> tuple[list[str], int]:
"""Extract [blender_render] lines and parts count from Blender stdout."""
lines = []
parts_count = 0
for line in (stdout or "").splitlines():
stripped = line.strip()
if "[blender_render]" in stripped or "[blender_render" in stripped:
lines.append(stripped)
if "separated into" in stripped:
try:
parts_count = int(stripped.split("separated into")[1].split("part")[0].strip())
except Exception:
pass
elif "imported" in stripped and "named parts" in stripped:
try:
parts_count = int(stripped.split("imported")[1].split("named")[0].strip())
except Exception:
pass
elif stripped.startswith("Saved:") or stripped.startswith("Fra:"):
lines.append(stripped)
return lines, parts_count
def _render_stl_with_blender(
    stl_path: Path, output_path: Path, width: int, height: int,
    engine: str = "cycles", samples: int = 256, smooth_angle: int = 30,
    cycles_device: str = "auto", transparent_bg: bool = False,
    template_path: str | None = None, target_collection: str = "Product",
    material_library_path: str | None = None, material_map: dict | None = None,
    part_names_ordered: list | None = None, lighting_only: bool = False,
    shadow_catcher: bool = False,
    rotation_x: float = 0.0, rotation_y: float = 0.0, rotation_z: float = 0.0,
    job_id: str | None = None,
    noise_threshold: str = "",
    denoiser: str = "",
    denoising_input_passes: str = "",
    denoising_prefilter: str = "",
    denoising_quality: str = "",
    denoising_use_gpu: str = "",
) -> tuple[list[str], int, str]:
    """Render STL to PNG using Blender in background mode.
    Returns (log_lines, parts_count, engine_used).
    Blender is launched in its own process group (start_new_session=True) so
    that SIGTERM from a cancel request kills the entire Blender tree.
    If EEVEE exits with a positive (non-signal) code, the render is retried
    once with Cycles and engine_used reflects the fallback.
    """
    # NOTE(review): module top already imports json as _json_mod; this local
    # alias is redundant but harmless.
    import json as _json
    blender_bin = _find_blender()
    script_path = Path(__file__).parent / "blender_render.py"
    env = dict(os.environ)
    if engine == "eevee":
        # Software Vulkan (lavapipe) + software GL so EEVEE can run headless
        env.update({
            "VK_ICD_FILENAMES": "/usr/share/vulkan/icd.d/lvp_icd.x86_64.json",
            "LIBGL_ALWAYS_SOFTWARE": "1",
            "MESA_GL_VERSION_OVERRIDE": "4.5",
            "EGL_PLATFORM": "surfaceless",
        })
    else:
        env.update({
            "EGL_PLATFORM": "surfaceless",
        })
    def _build_cmd(eng: str) -> list:
        # Positional argv consumed by blender_render.py after "--";
        # order must match that script's parser exactly.
        return [
            blender_bin,
            "--background",
            "--python", str(script_path),
            "--",
            str(stl_path),
            str(output_path),
            str(width),
            str(height),
            eng,
            str(samples),
            str(smooth_angle),
            cycles_device,
            "1" if transparent_bg else "0",
            template_path or "",
            target_collection,
            material_library_path or "",
            _json.dumps(material_map) if material_map else "{}",
            _json.dumps(part_names_ordered) if part_names_ordered else "[]",
            "1" if lighting_only else "0",
            "1" if shadow_catcher else "0",
            str(rotation_x),
            str(rotation_y),
            str(rotation_z),
            noise_threshold or "",
            denoiser or "",
            denoising_input_passes or "",
            denoising_prefilter or "",
            denoising_quality or "",
            denoising_use_gpu or "",
        ]
    def _run_blender(eng: str) -> subprocess.CompletedProcess:
        """Launch Blender in an isolated process group and wait for completion."""
        cmd = _build_cmd(eng)
        proc = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            env=env,
            start_new_session=True,  # new process group → SIGTERM kills entire tree
        )
        # Register for /cancel lookup while the process runs
        if job_id:
            with _procs_lock:
                _active_procs[job_id] = proc
        try:
            stdout, stderr = proc.communicate(timeout=300)
        except subprocess.TimeoutExpired:
            # Hard 5-minute cap: kill the whole group, then collect output
            try:
                os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            except (ProcessLookupError, OSError):
                pass
            stdout, stderr = proc.communicate()
        finally:
            if job_id:
                with _procs_lock:
                    _active_procs.pop(job_id, None)
        return subprocess.CompletedProcess(cmd, proc.returncode, stdout, stderr)
    result = _run_blender(engine)
    engine_used = engine
    # Log to uvicorn output
    if result.stdout:
        for line in result.stdout.splitlines():
            logger.info("[blender] %s", line)
    if result.stderr:
        for line in result.stderr.splitlines():
            logger.warning("[blender stderr] %s", line)
    # If EEVEE fails with a non-signal error, automatically retry with Cycles.
    # A negative returncode means the process was killed by a signal (e.g. cancel)
    # — do NOT retry in that case.
    if result.returncode > 0 and engine == "eevee":
        logger.warning(
            "EEVEE render failed (exit %d) retrying with Cycles (CPU).",
            result.returncode,
        )
        result = _run_blender("cycles")
        engine_used = "cycles (eevee fallback)"
        if result.stdout:
            for line in result.stdout.splitlines():
                logger.info("[blender-cycles-fallback] %s", line)
        if result.stderr:
            for line in result.stderr.splitlines():
                logger.warning("[blender-cycles-fallback stderr] %s", line)
    if result.returncode != 0:
        # Surface the tail of both streams in the error for diagnosis
        stdout_tail = result.stdout[-2000:] if result.stdout else ""
        stderr_tail = result.stderr[-2000:] if result.stderr else ""
        raise RuntimeError(
            f"Blender exited {result.returncode}.\n"
            f"STDOUT: {stdout_tail}\nSTDERR: {stderr_tail}"
        )
    log_lines, parts_count = _parse_blender_log(result.stdout)
    return log_lines, parts_count, engine_used
+753
View File
@@ -0,0 +1,753 @@
"""
Blender Python script for rendering an STL file to PNG.
Targets Blender 5.0+ (EEVEE / Cycles).
Called by Blender:
blender --background --python blender_render.py -- \
<stl_path> <output_path> <width> <height> [engine] [samples]
engine: "cycles" (default) | "eevee"
Features:
- Disconnected mesh islands split into separate objects and painted with
palette colours (same 10-colour palette as the Three.js renderer).
- Bounding-box-aware camera: object fills ~85 % of the frame.
- Isometric-style angle (elevation 28°, azimuth 40°).
- Dynamic clip planes.
- Standard (non-Filmic) colour management → no grey tint.
- Schaeffler green top bar + model name label via Pillow post-processing.
"""
import sys
import os
import math
import bpy
from mathutils import Vector, Matrix
# ── Colour palette (matches Three.js renderer) ───────────────────────────────
PALETTE_HEX = [
    "#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8",
    "#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8",
]


def _srgb_to_linear(c: int) -> float:
    """Convert a 0-255 sRGB channel value to a linear-space float."""
    normalized = c / 255.0
    if normalized <= 0.04045:
        return normalized / 12.92
    return ((normalized + 0.055) / 1.055) ** 2.4


def _hex_to_linear(hex_color: str) -> tuple:
    """Return (r, g, b, 1.0) in Blender linear colour space for a hex string."""
    digits = hex_color.lstrip('#')
    rgb = tuple(_srgb_to_linear(int(digits[i:i + 2], 16)) for i in (0, 2, 4))
    return rgb + (1.0,)


PALETTE_LINEAR = [_hex_to_linear(h) for h in PALETTE_HEX]
# ── Parse arguments ───────────────────────────────────────────────────────────
# Blender forwards everything after "--" untouched; strip the wrapper argv.
argv = sys.argv
if "--" in argv:
    argv = argv[argv.index("--") + 1:]
else:
    argv = []
if len(argv) < 4:
    print("Usage: blender --background --python blender_render.py -- "
          "<stl_path> <output_path> <width> <height> [engine] [samples] [smooth_angle] [cycles_device] [transparent_bg]")
    sys.exit(1)
import json as _json
# Positional args — order must match _build_cmd in the renderer service.
stl_path = argv[0]
output_path = argv[1]
width = int(argv[2])
height = int(argv[3])
engine = argv[4].lower() if len(argv) > 4 else "cycles"
samples = int(argv[5]) if len(argv) > 5 else (64 if engine == "eevee" else 256)
smooth_angle = int(argv[6]) if len(argv) > 6 else 30 # degrees; 0 = flat shading
cycles_device = argv[7].lower() if len(argv) > 7 else "auto" # "auto", "gpu", "cpu"
transparent_bg = argv[8] == "1" if len(argv) > 8 else False
template_path = argv[9] if len(argv) > 9 and argv[9] else ""
target_collection = argv[10] if len(argv) > 10 else "Product"
material_library_path = argv[11] if len(argv) > 11 and argv[11] else ""
# material_map arrives as a JSON object string; fall back to {} on bad JSON.
material_map_raw = argv[12] if len(argv) > 12 else "{}"
try:
    material_map = _json.loads(material_map_raw) if material_map_raw else {}
except _json.JSONDecodeError:
    material_map = {}
# part_names_ordered arrives as a JSON array string; fall back to [] on bad JSON.
part_names_ordered_raw = argv[13] if len(argv) > 13 else "[]"
try:
    part_names_ordered = _json.loads(part_names_ordered_raw) if part_names_ordered_raw else []
except _json.JSONDecodeError:
    part_names_ordered = []
lighting_only = argv[14] == "1" if len(argv) > 14 else False
shadow_catcher = argv[15] == "1" if len(argv) > 15 else False
rotation_x = float(argv[16]) if len(argv) > 16 else 0.0
rotation_y = float(argv[17]) if len(argv) > 17 else 0.0
rotation_z = float(argv[18]) if len(argv) > 18 else 0.0
# Denoising settings are passed as raw strings; "" means "use Blender default".
noise_threshold_arg = argv[19] if len(argv) > 19 else ""
denoiser_arg = argv[20] if len(argv) > 20 else ""
denoising_input_passes_arg = argv[21] if len(argv) > 21 else ""
denoising_prefilter_arg = argv[22] if len(argv) > 22 else ""
denoising_quality_arg = argv[23] if len(argv) > 23 else ""
denoising_use_gpu_arg = argv[24] if len(argv) > 24 else ""
# Validate template path: if provided it MUST exist on disk.
# Fail loudly rather than silently rendering with factory settings.
if template_path and not os.path.isfile(template_path):
    print(f"[blender_render] ERROR: template_path was provided but file not found: {template_path}")
    print("[blender_render] Check that the blend-templates directory is on the shared volume.")
    sys.exit(1)
use_template = bool(template_path)
print(f"[blender_render] engine={engine}, samples={samples}, size={width}x{height}, smooth_angle={smooth_angle}°, device={cycles_device}, transparent={transparent_bg}")
print(f"[blender_render] part_names_ordered: {len(part_names_ordered)} entries")
if use_template:
    print(f"[blender_render] template={template_path}, collection={target_collection}, lighting_only={lighting_only}")
else:
    print("[blender_render] no template — using factory settings (Mode A)")
if material_library_path:
    print(f"[blender_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}")
# ── Helper: find or create collection by name ────────────────────────────────
def _ensure_collection(name: str):
"""Return a collection by name, creating it if needed."""
if name in bpy.data.collections:
return bpy.data.collections[name]
col = bpy.data.collections.new(name)
bpy.context.scene.collection.children.link(col)
return col
def _apply_smooth(part_obj, angle_deg):
    """Shade a mesh object smooth-by-angle (angle_deg > 0) or flat (angle_deg == 0)."""
    bpy.context.view_layer.objects.active = part_obj
    part_obj.select_set(True)
    if angle_deg <= 0:
        bpy.ops.object.shade_flat()
        return
    try:
        bpy.ops.object.shade_smooth_by_angle(angle=math.radians(angle_deg))
    except AttributeError:
        # Operator missing in this Blender build — fall back to classic smooth
        # shading plus the mesh auto-smooth angle.
        bpy.ops.object.shade_smooth()
        part_obj.data.use_auto_smooth = True
        part_obj.data.auto_smooth_angle = math.radians(angle_deg)
def _assign_palette_material(part_obj, index):
    """Create a palette-coloured Principled material and assign it to the part."""
    mat = bpy.data.materials.new(name=f"Part_{index}")
    mat.use_nodes = True
    shade_color = PALETTE_LINEAR[index % len(PALETTE_LINEAR)]
    bsdf = mat.node_tree.nodes.get("Principled BSDF")
    if bsdf is not None:
        bsdf.inputs["Base Color"].default_value = shade_color
        bsdf.inputs["Metallic"].default_value = 0.35
        bsdf.inputs["Roughness"].default_value = 0.40
        try:
            # Input name varies across Blender versions; ignore when absent
            bsdf.inputs["Specular IOR Level"].default_value = 0.5
        except KeyError:
            pass
    part_obj.data.materials.clear()
    part_obj.data.materials.append(mat)
import re as _re
def _scale_mm_to_m(parts):
    """Scale imported STL objects from mm to Blender metres (×0.001).

    STEP/STL coordinates are in mm while Blender's default unit is metres;
    without this a 50 mm part would tower 50 m over any metric template
    environment.
    """
    if not parts:
        return
    factor = 0.001
    bpy.ops.object.select_all(action='DESELECT')
    for obj in parts:
        obj.scale = (factor, factor, factor)
        obj.location *= factor
        obj.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    # Bake the scale into mesh data; locations were already multiplied above
    bpy.ops.object.transform_apply(scale=True, location=False, rotation=False)
    print(f"[blender_render] scaled {len(parts)} parts mm→m (×0.001)")
def _apply_rotation(parts, rx, ry, rz):
    """Apply an XYZ Euler rotation (degrees) to all parts around the world origin.

    After _import_stl + _scale_mm_to_m the combined bbox centre sits at the
    world origin, so rotating about the origin equals rotating about the
    assembly centre.
    """
    if not parts:
        return
    if rx == 0.0 and ry == 0.0 and rz == 0.0:
        return
    from mathutils import Euler
    angles = (math.radians(rx), math.radians(ry), math.radians(rz))
    rot_mat = Euler(angles, 'XYZ').to_matrix().to_4x4()
    for obj in parts:
        obj.matrix_world = rot_mat @ obj.matrix_world
    # Bake rotation into mesh data so camera bbox calculations see the rotated geometry
    bpy.ops.object.select_all(action='DESELECT')
    for obj in parts:
        obj.select_set(True)
    bpy.context.view_layer.objects.active = parts[0]
    bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
    print(f"[blender_render] applied rotation ({rx}°, {ry}°, {rz}°) to {len(parts)} parts")
def _import_stl(stl_file):
    """Import STL into Blender, using per-part STLs if available.
    Checks for {stl_stem}_parts/manifest.json next to the STL file.
    - Per-part mode: imports each part STL, names Blender object after STEP part name.
    - Fallback: imports combined STL and splits by loose geometry.
    Returns list of Blender mesh objects, centred at origin.
    """
    stl_dir = os.path.dirname(stl_file)
    stl_stem = os.path.splitext(os.path.basename(stl_file))[0]
    parts_dir = os.path.join(stl_dir, stl_stem + "_parts")
    manifest_path = os.path.join(parts_dir, "manifest.json")
    parts = []
    if os.path.isfile(manifest_path):
        # ── Per-part mode ────────────────────────────────────────────────
        try:
            with open(manifest_path, "r") as f:
                manifest = _json.loads(f.read())
            part_entries = manifest.get("parts", [])
        except Exception as e:
            print(f"[blender_render] WARNING: failed to read manifest: {e}")
            part_entries = []
        if part_entries:
            for entry in part_entries:
                part_file = os.path.join(parts_dir, entry["file"])
                part_name = entry["name"]
                if not os.path.isfile(part_file):
                    print(f"[blender_render] WARNING: part STL missing: {part_file}")
                    continue
                # Deselect first so selected_objects reflects only this import
                bpy.ops.object.select_all(action='DESELECT')
                bpy.ops.wm.stl_import(filepath=part_file)
                imported = bpy.context.selected_objects
                if imported:
                    obj = imported[0]
                    # Name object AND mesh after the STEP part for later matching
                    obj.name = part_name
                    if obj.data:
                        obj.data.name = part_name
                    parts.append(obj)
            if parts:
                print(f"[blender_render] imported {len(parts)} named parts from per-part STLs")
    # ── Fallback: combined STL + separate by loose ───────────────────────
    if not parts:
        bpy.ops.wm.stl_import(filepath=stl_file)
        obj = bpy.context.selected_objects[0] if bpy.context.selected_objects else None
        if obj is None:
            print(f"ERROR: No objects imported from {stl_file}")
            sys.exit(1)
        bpy.context.view_layer.objects.active = obj
        # Centre the combined mesh at the world origin before splitting
        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
        obj.location = (0.0, 0.0, 0.0)
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.separate(type='LOOSE')
        bpy.ops.object.mode_set(mode='OBJECT')
        parts = list(bpy.context.selected_objects)
        print(f"[blender_render] fallback: separated into {len(parts)} part(s)")
        return parts
    # ── Centre per-part imports at origin (combined bbox) ────────────────
    all_corners = []
    for p in parts:
        all_corners.extend(p.matrix_world @ Vector(c) for c in p.bound_box)
    if all_corners:
        mins = Vector((min(v.x for v in all_corners),
                       min(v.y for v in all_corners),
                       min(v.z for v in all_corners)))
        maxs = Vector((max(v.x for v in all_corners),
                       max(v.y for v in all_corners),
                       max(v.z for v in all_corners)))
        center = (mins + maxs) * 0.5
        # Shift every part by the same offset to preserve relative positions
        for p in parts:
            p.location -= center
    return parts
def _resolve_part_name(index, part_obj):
    """Get the STEP part name for a Blender part by index.
    With per-part import, part_obj.name IS the STEP name (possibly with
    Blender .NNN suffix for duplicates). Strip that suffix for lookup.
    Falls back to part_names_ordered index mapping for combined-STL mode.
    """
    # Strip Blender auto-suffix (.001, .002, etc.)
    base_name = _re.sub(r'\.\d{3}$', '', part_obj.name)
    # NOTE(review): the code prefers the index mapping whenever
    # part_names_ordered covers this index, and only falls back to the
    # object's own (suffix-stripped) name — the opposite preference of the
    # sibling _apply_material_library. Confirm this ordering is intentional.
    if part_names_ordered and index < len(part_names_ordered):
        return part_names_ordered[index]
    return base_name
def _apply_material_library(parts, mat_lib_path, mat_map):
    """Append materials from a library .blend and assign them to parts.

    Matching is primarily by object name: with per-part STL import, Blender
    objects carry the STEP part names (the ``.NNN`` duplicate suffix is
    stripped before lookup). A miss falls back to index-based matching via
    ``part_names_ordered`` (combined-STL mode).

    mat_map: {part_name_lower: material_name}. Parts without a match keep
    whatever material they already have.
    """
    if not mat_lib_path or not os.path.isfile(mat_lib_path):
        print(f"[blender_render] material library not found: {mat_lib_path}")
        return
    # Unique material names referenced by the mapping
    wanted = set(mat_map.values())
    if not wanted:
        return
    # Pull each required material out of the library file
    loaded = {}
    for name in wanted:
        try:
            bpy.ops.wm.append(
                filepath=f"{mat_lib_path}/Material/{name}",
                directory=f"{mat_lib_path}/Material/",
                filename=name,
                link=False,
            )
            mat = bpy.data.materials.get(name)
            if mat is not None:
                loaded[name] = mat
                print(f"[blender_render] appended material: {name}")
            else:
                print(f"[blender_render] WARNING: material '{name}' not found after append")
        except Exception as exc:
            print(f"[blender_render] WARNING: failed to append material '{name}': {exc}")
    if not loaded:
        return
    # Assign: name-based first (per-part STL mode), then index-based via
    # part_names_ordered (combined-STL fallback)
    matched = 0
    for idx, part in enumerate(parts):
        stripped = _re.sub(r'\.\d{3}$', '', part.name)
        target = mat_map.get(stripped.lower().strip())
        if not target and part_names_ordered and idx < len(part_names_ordered):
            target = mat_map.get(part_names_ordered[idx].lower().strip())
        if target and target in loaded:
            part.data.materials.clear()
            part.data.materials.append(loaded[target])
            matched += 1
            print(f"[blender_render] assigned '{target}' to part '{part.name}'")
    print(f"[blender_render] material assignment: {matched}/{len(parts)} parts matched")
# ── SCENE SETUP ──────────────────────────────────────────────────────────────
if use_template:
    # ── MODE B: Template-based render ────────────────────────────────────────
    # The template .blend supplies World/HDRI lighting, usually a camera, and
    # optionally a shadow-catcher setup; only the product geometry is injected.
    print(f"[blender_render] Opening template: {template_path}")
    bpy.ops.wm.open_mainfile(filepath=template_path)
    # Find or create target collection
    target_col = _ensure_collection(target_collection)
    # Import and split STL
    parts = _import_stl(stl_path)
    # Scale mm→m: STEP coords are mm, Blender default unit is metres
    _scale_mm_to_m(parts)
    # Apply render position rotation (before camera/bbox calculations)
    _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
    # Move imported parts into target collection
    for part in parts:
        # Remove from all existing collections
        for col in list(part.users_collection):
            col.objects.unlink(part)
        target_col.objects.link(part)
    # Apply smooth shading
    for part in parts:
        _apply_smooth(part, smooth_angle)
    # Material assignment: library materials if available, otherwise palette
    if material_library_path and material_map:
        # Build lowercased material_map for matching
        mat_map_lower = {k.lower(): v for k, v in material_map.items()}
        _apply_material_library(parts, material_library_path, mat_map_lower)
        # Parts not matched by library get palette fallback
        for i, part in enumerate(parts):
            if not part.data.materials or len(part.data.materials) == 0:
                _assign_palette_material(part, i)
    else:
        for i, part in enumerate(parts):
            _assign_palette_material(part, i)
    # ── Shadow catcher (Cycles only, template mode only) ─────────────────────
    if shadow_catcher:
        sc_col_name = "Shadowcatcher"
        sc_obj_name = "Shadowcatcher"
        # Enable the Shadowcatcher collection in all view layers
        for vl in bpy.context.scene.view_layers:
            # Recursively walk the view-layer collection tree and un-hide the
            # shadow-catcher collection wherever it is nested.
            def _enable_col_recursive(layer_col):
                if layer_col.collection.name == sc_col_name:
                    layer_col.exclude = False
                    layer_col.collection.hide_render = False
                    layer_col.collection.hide_viewport = False
                    return True
                for child in layer_col.children:
                    if _enable_col_recursive(child):
                        return True
                return False
            _enable_col_recursive(vl.layer_collection)
        sc_obj = bpy.data.objects.get(sc_obj_name)
        if sc_obj:
            # Calculate product bbox min Z (world space)
            all_world_corners = []
            for part in parts:
                for corner in part.bound_box:
                    all_world_corners.append((part.matrix_world @ Vector(corner)).z)
            if all_world_corners:
                # Drop the catcher plane to the lowest point of the product so
                # the shadow lands exactly at its base.
                sc_obj.location.z = min(all_world_corners)
            print(f"[blender_render] shadow catcher enabled, plane Z={sc_obj.location.z:.4f}")
        else:
            print(f"[blender_render] WARNING: shadow catcher object '{sc_obj_name}' not found in template")
    # lighting_only: use template World/HDRI but force auto-camera UNLESS the shadow
    # catcher is enabled — in that case the template camera is already positioned to
    # show both the product and its shadow on the ground plane.
    needs_auto_camera = (lighting_only and not shadow_catcher) or not bpy.context.scene.camera
    if lighting_only and not shadow_catcher:
        print("[blender_render] lighting_only mode: using template World/HDRI, forcing auto-camera")
    elif needs_auto_camera:
        print("[blender_render] WARNING: template has no camera — will create auto-camera")
    # Set very close near clip on template camera for mm-scale parts (now in metres)
    if not needs_auto_camera and bpy.context.scene.camera:
        bpy.context.scene.camera.data.clip_start = 0.001
    print(f"[blender_render] template mode: {len(parts)} parts imported into collection '{target_collection}'")
else:
    # ── MODE A: Factory settings (original behavior) ─────────────────────────
    # Start from a clean empty scene; lights, world and camera are created in
    # the auto-camera section further below.
    needs_auto_camera = True
    bpy.ops.wm.read_factory_settings(use_empty=True)
    parts = _import_stl(stl_path)
    # Scale mm→m: STEP coords are mm, Blender default unit is metres
    _scale_mm_to_m(parts)
    # Apply render position rotation (before camera/bbox calculations)
    _apply_rotation(parts, rotation_x, rotation_y, rotation_z)
    for i, part in enumerate(parts):
        _apply_smooth(part, smooth_angle)
        _assign_palette_material(part, i)
    # Apply material library on top of palette colours (same logic as Mode B).
    # material_library_path / material_map are parsed from argv even in Mode A
    # but were previously never used here — that was the bug.
    if material_library_path and material_map:
        mat_map_lower = {k.lower(): v for k, v in material_map.items()}
        _apply_material_library(parts, material_library_path, mat_map_lower)
    # Parts not matched by the library keep their palette material (already set above)
if needs_auto_camera:
    # ── Combined bounding box / bounding sphere ──────────────────────────────
    # World-space corners of every part; _import_stl exits when nothing was
    # imported, so parts is non-empty and the min()/max() below are safe.
    all_corners = []
    for part in parts:
        all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box)
    bbox_min = Vector((
        min(v.x for v in all_corners),
        min(v.y for v in all_corners),
        min(v.z for v in all_corners),
    ))
    bbox_max = Vector((
        max(v.x for v in all_corners),
        max(v.y for v in all_corners),
        max(v.z for v in all_corners),
    ))
    bbox_center = (bbox_min + bbox_max) * 0.5
    bbox_dims = bbox_max - bbox_min
    # Half the bbox diagonal ≈ bounding-sphere radius; the floor avoids a
    # zero radius for degenerate (flat) geometry.
    bsphere_radius = max(bbox_dims.length * 0.5, 0.001)
    print(f"[blender_render] bbox_dims={tuple(round(d,4) for d in bbox_dims)}, "
          f"bsphere_radius={bsphere_radius:.4f}, center={tuple(round(c,4) for c in bbox_center)}")
    # ── Lighting — only in Mode A (factory settings) ─────────────────────────
    # In template mode the .blend file provides its own World/HDRI lighting.
    # Adding auto-lights would overpower the template's intended look.
    if not use_template:
        light_dist = bsphere_radius * 6.0
        # Key light: sun above and to the side of the product
        bpy.ops.object.light_add(type='SUN', location=(
            bbox_center.x + light_dist * 0.5,
            bbox_center.y - light_dist * 0.35,
            bbox_center.z + light_dist,
        ))
        sun = bpy.context.active_object
        sun.data.energy = 4.0
        sun.rotation_euler = (math.radians(45), 0, math.radians(30))
        # Fill light: large soft area lamp opposite the sun; energy and size
        # scale with the bounding sphere so small and large parts expose alike
        bpy.ops.object.light_add(type='AREA', location=(
            bbox_center.x - light_dist * 0.4,
            bbox_center.y + light_dist * 0.4,
            bbox_center.z + light_dist * 0.7,
        ))
        fill = bpy.context.active_object
        fill.data.energy = max(800.0, bsphere_radius ** 2 * 2000.0)
        fill.data.size = max(4.0, bsphere_radius * 4.0)
    # ── Camera ───────────────────────────────────────────────────────────────
    ELEVATION_DEG = 28.0       # camera elevation above the horizon
    AZIMUTH_DEG = 40.0         # rotation around Z, measured from +X
    LENS_MM = 50.0
    SENSOR_WIDTH_MM = 36.0
    FILL_FACTOR = 0.85         # fraction of the frame the product should fill
    elevation_rad = math.radians(ELEVATION_DEG)
    azimuth_rad = math.radians(AZIMUTH_DEG)
    cam_dir = Vector((
        math.cos(elevation_rad) * math.cos(azimuth_rad),
        math.cos(elevation_rad) * math.sin(azimuth_rad),
        math.sin(elevation_rad),
    )).normalized()
    # Half-angle FOVs; assumes horizontal sensor fit (vertical FOV derived
    # from the aspect ratio) — TODO confirm against the camera's sensor_fit
    fov_h = math.atan(SENSOR_WIDTH_MM / (2.0 * LENS_MM))
    fov_v = math.atan(SENSOR_WIDTH_MM * (height / width) / (2.0 * LENS_MM))
    fov_used = min(fov_h, fov_v)
    # Distance so the bounding sphere fills FILL_FACTOR of the tighter axis,
    # never closer than 1.5 radii
    dist = (bsphere_radius / math.tan(fov_used)) / FILL_FACTOR
    dist = max(dist, bsphere_radius * 1.5)
    print(f"[blender_render] camera dist={dist:.4f}, fov={math.degrees(fov_used):.2f}°")
    cam_location = bbox_center + cam_dir * dist
    bpy.ops.object.camera_add(location=cam_location)
    cam_obj = bpy.context.active_object
    cam_obj.data.lens = LENS_MM
    bpy.context.scene.camera = cam_obj
    # Build a look-at rotation: camera -Z aims at the bbox centre, world +Z up
    look_dir = (bbox_center - cam_location).normalized()
    up_world = Vector((0.0, 0.0, 1.0))
    right = look_dir.cross(up_world)
    if right.length < 1e-6:
        # Looking straight up/down — pick an arbitrary right vector
        right = Vector((1.0, 0.0, 0.0))
    right.normalize()
    cam_up = right.cross(look_dir).normalized()
    rot_mat = Matrix((
        (    right.x,     right.y,     right.z),
        (   cam_up.x,    cam_up.y,    cam_up.z),
        (-look_dir.x, -look_dir.y, -look_dir.z),
    )).transposed()
    cam_obj.rotation_euler = rot_mat.to_euler('XYZ')
    # Clip planes scaled to the camera distance so mm-scale parts don't clip
    cam_obj.data.clip_start = max(dist * 0.001, 0.0001)
    cam_obj.data.clip_end = dist + bsphere_radius * 3.0
    # BUG FIX: the two clip values were printed with no separator between them
    print(f"[blender_render] clip {cam_obj.data.clip_start:.6f} → {cam_obj.data.clip_end:.4f}")
# ── World background — only in Mode A ────────────────────────────────────────
# Template .blend files own their World (HDRI, sky texture, studio lighting);
# replacing it would destroy the look the template was designed for
# (e.g. Alpha-HDR output types with Filmic tonemapping).
if not use_template:
    bg_world = bpy.data.worlds.new("World")
    bpy.context.scene.world = bg_world
    bg_world.use_nodes = True
    # Near-white backdrop at low strength: neutral, shadow-friendly background
    bg_node = bg_world.node_tree.nodes["Background"]
    bg_node.inputs["Color"].default_value = (0.96, 0.96, 0.97, 1.0)
    bg_node.inputs["Strength"].default_value = 0.15
# ── Render engine ─────────────────────────────────────────────────────────────
scene = bpy.context.scene
if engine == "eevee":
    # Blender 4.x used 'BLENDER_EEVEE_NEXT'; Blender 5.x reverted to 'BLENDER_EEVEE'.
    # Try both names so the script works across versions.
    set_ok = False
    for eevee_id in ('BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'):
        try:
            scene.render.engine = eevee_id
            set_ok = True
            print(f"[blender_render] EEVEE engine id: {eevee_id}")
            break
        except TypeError:
            # bpy raises TypeError for an enum id unknown to this version
            continue
    if not set_ok:
        # BUG FIX: message was missing its separator ("engine falling back")
        print("[blender_render] WARNING: could not set EEVEE engine — falling back to Cycles")
        engine = "cycles"
if engine == "eevee":
    # Sample attribute name changed across minor versions
    for attr in ('taa_render_samples', 'samples'):
        try:
            setattr(scene.eevee, attr, samples)
            print(f"[blender_render] EEVEE samples: scene.eevee.{attr}={samples}")
            break
        except AttributeError:
            continue
if engine != "eevee":  # covers both explicit Cycles and EEVEE-fallback
    scene.render.engine = 'CYCLES'
    scene.cycles.samples = samples
    scene.cycles.use_denoising = True
    scene.cycles.denoiser = denoiser_arg if denoiser_arg else 'OPENIMAGEDENOISE'
    # Optional denoiser tuning — attribute availability varies across Blender
    # versions, so each assignment is individually guarded.
    if denoising_input_passes_arg:
        try:
            scene.cycles.denoising_input_passes = denoising_input_passes_arg
        except Exception:
            pass
    if denoising_prefilter_arg:
        try:
            scene.cycles.denoising_prefilter = denoising_prefilter_arg
        except Exception:
            pass
    if denoising_quality_arg:
        try:
            scene.cycles.denoising_quality = denoising_quality_arg
        except Exception:
            pass
    if denoising_use_gpu_arg:
        try:
            scene.cycles.denoising_use_gpu = (denoising_use_gpu_arg == "1")
        except AttributeError:
            pass
    if noise_threshold_arg:
        scene.cycles.use_adaptive_sampling = True
        scene.cycles.adaptive_threshold = float(noise_threshold_arg)
    # ── Device selection: "cpu" forces CPU; "gpu"/"auto" probe for a GPU and
    #    fall back to CPU with a warning when none is usable.
    #    (Previous comment claimed "gpu" fails hard — the code never did.)
    gpu_type_found = None
    if cycles_device != "cpu":
        try:
            cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
            # Probe backends in preference order; first type exposing a
            # non-CPU device wins.
            for device_type in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI'):
                try:
                    cycles_prefs.compute_device_type = device_type
                    cycles_prefs.get_devices()
                    gpu_devs = [d for d in cycles_prefs.devices if d.type != 'CPU']
                    if gpu_devs:
                        for d in gpu_devs:
                            d.use = True
                        gpu_type_found = device_type
                        break
                except Exception as e:
                    print(f"[blender_render] {device_type} not available: {e}")
        except Exception as e:
            print(f"[blender_render] GPU probe failed: {e}")
    if gpu_type_found:
        scene.cycles.device = 'GPU'
        print(f"[blender_render] Cycles GPU ({gpu_type_found}), samples={samples}")
    else:
        scene.cycles.device = 'CPU'
        print(f"[blender_render] WARNING: GPU not found — falling back to CPU, samples={samples}")
# ── Colour management ─────────────────────────────────────────────────────────
# Template .blend files own their colour management (e.g. Filmic/AgX for HDR,
# custom exposure for Alpha-HDR output types) — leave it untouched in template
# mode. In factory-settings mode (Mode A) force the neutral 'Standard'
# transform so renders avoid Blender's default Filmic grey tint.
if not use_template:
    vs = scene.view_settings
    vs.view_transform = 'Standard'
    vs.exposure = 0.0
    vs.gamma = 1.0
    try:
        # 'look' may not accept 'None' on every Blender version
        vs.look = 'None'
    except Exception:
        pass
# ── Render settings ───────────────────────────────────────────────────────────
render_opts = scene.render
render_opts.resolution_x = width
render_opts.resolution_y = height
render_opts.resolution_percentage = 100
render_opts.image_settings.file_format = 'PNG'
render_opts.filepath = output_path
render_opts.film_transparent = transparent_bg
# ── Render ────────────────────────────────────────────────────────────────────
print(f"[blender_render] Rendering → {output_path} (Blender {bpy.app.version_string})")
bpy.ops.render.render(write_still=True)
print("[blender_render] render done.")
# ── Pillow post-processing: green bar + model name label ─────────────────────
# Skipped for transparent renders so the alpha channel stays clean.
if transparent_bg:
    print("[blender_render] Transparent mode — skipping Pillow overlay.")
else:
    try:
        from PIL import Image, ImageDraw, ImageFont

        img = Image.open(output_path).convert("RGBA")
        draw = ImageDraw.Draw(img)
        W, H = img.size
        # Schaeffler green top bar (scales with image height, min 8 px)
        bar_h = max(8, H // 32)
        draw.rectangle([0, 0, W - 1, bar_h - 1], fill=(0, 137, 61, 255))
        # Semi-transparent dark strip at the bottom carrying the model name
        model_name = os.path.splitext(os.path.basename(stl_path))[0]
        label_h = max(20, H // 20)
        img.alpha_composite(
            Image.new("RGBA", (W, label_h), (30, 30, 30, 180)),
            dest=(0, H - label_h),
        )
        # Prefer a bold system TTF; fall back to Pillow's built-in bitmap font
        font_size = max(10, label_h - 6)
        font = None
        for fp in [
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
            "/usr/share/fonts/truetype/liberation/LiberationSans-Bold.ttf",
            "/usr/share/fonts/truetype/freefont/FreeSansBold.ttf",
        ]:
            if os.path.exists(fp):
                try:
                    font = ImageFont.truetype(fp, font_size)
                    break
                except Exception:
                    pass
        if font is None:
            font = ImageFont.load_default()
        # Centre the label horizontally and vertically within the strip
        tb = draw.textbbox((0, 0), model_name, font=font)
        text_w = tb[2] - tb[0]
        draw.text(
            ((W - text_w) // 2, H - label_h + (label_h - (tb[3] - tb[1])) // 2),
            model_name, font=font, fill=(255, 255, 255, 255),
        )
        # Flatten to RGB: the overlay path always produces an opaque PNG
        img.convert("RGB").save(output_path, format="PNG")
        # FIX: was an f-string with no placeholders
        print("[blender_render] Pillow overlay applied.")
    except ImportError:
        # Pillow may be absent from Blender's bundled Python — non-fatal.
        # FIX: message was missing its separator ("Python skipping")
        print("[blender_render] Pillow not in Blender Python — skipping overlay.")
    except Exception as exc:
        print(f"[blender_render] Pillow overlay failed (non-fatal): {exc}")
print("[blender_render] Done.")
+4
View File
@@ -0,0 +1,4 @@
fastapi>=0.110.0
uvicorn[standard]>=0.27.0
cadquery>=2.4.0
pillow>=10.2.0