diff --git a/backend/app/domains/pipeline/tasks/render_order_line.py b/backend/app/domains/pipeline/tasks/render_order_line.py index e39b381..4b80b16 100644 --- a/backend/app/domains/pipeline/tasks/render_order_line.py +++ b/backend/app/domains/pipeline/tasks/render_order_line.py @@ -300,6 +300,13 @@ def render_order_line_task(self, order_line_id: str): # Determine if this is an animation output type is_animation = bool(line.output_type and getattr(line.output_type, 'is_animation', False)) + # Detect cinematic render type (render_settings.cinematic flag) + is_cinematic = bool( + line.output_type and + line.output_type.render_settings and + line.output_type.render_settings.get("cinematic") + ) + # Determine output format/extension out_ext = "jpg" if line.output_type and line.output_type.output_format: @@ -434,7 +441,75 @@ def render_order_line_task(self, order_line_id: str): tmpl_info = f" template={template.name}" if template else "" - if is_animation: + if is_cinematic: + # ── Cinematic highlight animation path ────────────────────── + _cine_fps = 24 + _cine_frames = 480 + emit(order_line_id, f"Starting cinematic render: {_cine_frames} frames @ {_cine_fps}fps, {render_width or 1920}x{render_height or 1080}{tmpl_info}") + pl.step_start("blender_cinematic", {"frame_count": _cine_frames, "fps": _cine_fps}) + from app.services.render_blender import is_blender_available, render_cinematic_to_file + if not is_blender_available(): + raise RuntimeError("Blender not available on this worker") + + from app.services.step_processor import _get_all_settings + _sys = _get_all_settings() + try: + service_data = render_cinematic_to_file( + step_path=_Path(cad_file.stored_path), + output_path=_Path(output_path), + width=render_width or 1920, + height=render_height or 1080, + engine=render_engine or _sys.get("blender_engine", "cycles"), + samples=render_samples or int(_sys.get(f"blender_{render_engine or _sys.get('blender_engine','cycles')}_samples", 128)), + 
smooth_angle=int(_sys.get("blender_smooth_angle", 30)), + cycles_device=cycles_device_val, + transparent_bg=transparent_bg, + part_colors=part_colors or None, + template_path=template.blend_file_path if template else None, + target_collection=template.target_collection if template else "Product", + material_library_path=material_library if use_materials else None, + material_map=material_map, + part_names_ordered=part_names_ordered, + lighting_only=bool(template.lighting_only) if template else False, + shadow_catcher=bool(template.shadow_catcher_enabled) if template else False, + rotation_x=rotation_x, + rotation_y=rotation_y, + rotation_z=rotation_z, + usd_path=usd_render_path, + focal_length_mm=focal_length_mm, + sensor_width_mm=sensor_width_mm, + material_override=override_mat, + ) + success = True + render_log = { + "renderer": "blender", + "type": "cinematic", + "format": "mp4", + "engine": render_engine or _sys.get("blender_engine", "cycles"), + "engine_used": service_data.get("engine_used", "cycles"), + "samples": render_samples, + "cycles_device": cycles_device_val, + "width": render_width or 1920, + "height": render_height or 1080, + "frame_count": service_data.get("frame_count", _cine_frames), + "fps": _cine_fps, + "total_duration_s": service_data.get("total_duration_s"), + "stl_duration_s": service_data.get("stl_duration_s"), + "render_duration_s": service_data.get("render_duration_s"), + "ffmpeg_duration_s": service_data.get("ffmpeg_duration_s"), + "stl_size_bytes": service_data.get("stl_size_bytes"), + "output_size_bytes": service_data.get("output_size_bytes"), + "log_lines": service_data.get("log_lines", []), + } + if template: + render_log["template"] = template.blend_file_path + pl.step_done("blender_cinematic") + except Exception as exc: + success = False + render_log = {"renderer": "blender", "type": "cinematic", "error": str(exc)[:500]} + pl.step_error("blender_cinematic", str(exc), exc) + logger.error("Cinematic render failed for %s: %s", 
def render_cinematic_to_file(
    step_path: Path,
    output_path: Path,
    width: int = 1920,
    height: int = 1080,
    engine: str = "cycles",
    samples: int = 128,
    smooth_angle: int = 30,
    cycles_device: str = "auto",
    transparent_bg: bool = False,
    part_colors: dict | None = None,
    template_path: str | None = None,
    target_collection: str = "Product",
    material_library_path: str | None = None,
    material_map: dict | None = None,
    part_names_ordered: list | None = None,
    lighting_only: bool = False,
    shadow_catcher: bool = False,
    rotation_x: float = 0.0,
    rotation_y: float = 0.0,
    rotation_z: float = 0.0,
    usd_path: "Path | None" = None,
    tessellation_engine: str = "occ",
    focal_length_mm: float | None = None,
    sensor_width_mm: float | None = None,
    material_override: str | None = None,
    log_callback: "Callable[[str], None] | None" = None,
) -> dict:
    """Render a cinematic highlight animation: STEP -> GLB/USD -> 480 frames @ 24fps (Blender) -> mp4 (ffmpeg).

    Fixed at 24fps, 480 frames (20 seconds). Uses cinematic_render.py which
    creates a procedural 4-segment camera animation with varying focal lengths,
    elevations, and bezier-eased transitions.

    When usd_path is provided and exists, the GLB conversion step is skipped
    and the script imports the USD directly.

    ``smooth_angle`` is accepted for signature parity with
    render_turntable_to_file() but is not forwarded — cinematic_render.py
    hard-codes its SMOOTH_ANGLE; TODO confirm whether per-job smoothing
    should be threaded through.

    Returns a dict with timing, frame count, engine_used, log_lines.
    (The ``stl_*`` key names are kept for schema compatibility with the
    turntable path even though the intermediate here is a GLB.)
    Raises RuntimeError on failure.
    """
    import shutil as _shutil
    import time

    # Cinematic parameters are fixed: 20 s = 480 frames @ 24 fps.
    frame_count = 480
    fps = 24

    blender_bin = find_blender()
    if not blender_bin:
        raise RuntimeError("Blender binary not found — check BLENDER_BIN env or PATH")

    script_path = Path(os.environ.get("RENDER_SCRIPTS_DIR", "/render-scripts")) / "cinematic_render.py"
    if not script_path.exists():
        # Dev fallback: repo-checkout layout (backend/app/services -> repo root).
        alt = Path(__file__).parent.parent.parent.parent / "render-worker" / "scripts" / "cinematic_render.py"
        if alt.exists():
            script_path = alt
        else:
            raise RuntimeError(f"cinematic_render.py not found at {script_path}")

    ffmpeg_bin = _shutil.which("ffmpeg")
    if not ffmpeg_bin:
        raise RuntimeError("ffmpeg not found — install ffmpeg in the render-worker container")

    t0 = time.monotonic()

    # 1. GLB conversion (OCC) — skipped when usd_path is provided.
    glb_path = step_path.parent / f"{step_path.stem}_thumbnail.glb"
    use_usd = bool(usd_path and usd_path.exists())

    t_glb = time.monotonic()
    if use_usd:
        logger.info("[render_blender] cinematic using USD path: %s", usd_path)
    else:
        if not glb_path.exists() or glb_path.stat().st_size == 0:
            _glb_from_step(step_path, glb_path, tessellation_engine)
        else:
            logger.info("GLB local hit: %s (%d KB)", glb_path.name, glb_path.stat().st_size // 1024)
    glb_duration_s = round(time.monotonic() - t_glb, 2)

    # 2. Render frames with Blender.
    frames_dir = output_path.parent / f"_frames_{output_path.stem}"
    frames_dir.mkdir(parents=True, exist_ok=True)
    output_path.parent.mkdir(parents=True, exist_ok=True)

    env = dict(os.environ)
    env["EGL_PLATFORM"] = "surfaceless"

    glb_arg = "" if use_usd else str(glb_path)
    # Positional layout MUST mirror cinematic_render.py main():
    #   glb, frames_dir, frame_count, degrees, width, height, engine, samples, ...
    # BUGFIX: frame_count/degrees were previously omitted, shifting every later
    # argument left by two — the script then parsed the engine name with int()
    # and crashed on startup.
    cmd = [
        blender_bin,
        "--background",
        "--python", str(script_path),
        "--",
        glb_arg,
        str(frames_dir),
        str(frame_count),                    # args[2]
        "360",                               # args[3] degrees — unused by cinematic, positional filler
        str(width), str(height),
        engine, str(samples),
        json.dumps(part_colors or {}),
        template_path or "",
        target_collection,
        material_library_path or "",
        json.dumps(material_map) if material_map else "{}",
        json.dumps(part_names_ordered) if part_names_ordered else "[]",
        "1" if lighting_only else "0",
        cycles_device,
        "1" if shadow_catcher else "0",
        str(rotation_x), str(rotation_y), str(rotation_z),
        "world_z",                           # args[20] turntable_axis — ignored by cinematic
        "",                                  # args[21] bg_color — empty = template/world default
        "1" if transparent_bg else "0",      # args[22] — BUGFIX: was accepted but never forwarded
    ]
    if use_usd:
        cmd += ["--usd-path", str(usd_path)]
    if focal_length_mm is not None:
        cmd += ["--focal-length", str(focal_length_mm)]
    if sensor_width_mm is not None:
        cmd += ["--sensor-width", str(sensor_width_mm)]
    if material_override:
        cmd += ["--material-override", material_override]

    log_lines: list[str] = []

    t_render = time.monotonic()
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        text=True, env=env, start_new_session=True,
    )
    try:
        stdout, stderr = proc.communicate(timeout=7200)  # 2hr max for cinematic (480 frames)
    except subprocess.TimeoutExpired:
        # Kill the whole process group so Blender's child processes die too.
        try:
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
        except (ProcessLookupError, OSError):
            pass
        stdout, stderr = proc.communicate()

    for line in (stdout or "").splitlines():
        logger.info("[cinematic] %s", line)
        if "[cinematic_render]" in line:
            log_lines.append(line)
            if log_callback:
                log_callback(line)
    for line in (stderr or "").splitlines():
        logger.warning("[cinematic stderr] %s", line)

    if proc.returncode != 0:
        raise RuntimeError(
            f"cinematic_render.py exited with code {proc.returncode}.\n"
            f"stdout: {(stdout or '')[-2000:]}\n"
            f"stderr: {(stderr or '')[-500:]}"
        )

    render_duration_s = round(time.monotonic() - t_render, 2)

    # Check that frames were actually written.
    frame_files = sorted(frames_dir.glob("frame_*.png"))
    if not frame_files:
        raise RuntimeError(f"No frames rendered in {frames_dir}")
    logger.info("Cinematic rendered %d frames in %.1fs", len(frame_files), render_duration_s)

    # 3. Compose frames -> mp4 with ffmpeg.
    t_ffmpeg = time.monotonic()

    ffmpeg_cmd = [
        ffmpeg_bin,
        "-y",
        "-framerate", str(fps),
        "-i", str(frames_dir / "frame_%04d.png"),
        "-vcodec", "libx264",
        "-pix_fmt", "yuv420p",       # broad player compatibility
        "-crf", "18",
        "-movflags", "+faststart",   # moov atom up front for streaming playback
        str(output_path),
    ]

    ffmpeg_proc = subprocess.run(
        ffmpeg_cmd, capture_output=True, text=True, timeout=300
    )
    ffmpeg_duration_s = round(time.monotonic() - t_ffmpeg, 2)

    for line in (ffmpeg_proc.stdout or "").splitlines():
        logger.info("[ffmpeg] %s", line)
    for line in (ffmpeg_proc.stderr or "").splitlines():
        logger.debug("[ffmpeg stderr] %s", line)

    if ffmpeg_proc.returncode != 0:
        raise RuntimeError(
            f"ffmpeg exited with code {ffmpeg_proc.returncode}.\n"
            f"stderr: {(ffmpeg_proc.stderr or '')[-1000:]}"
        )

    # Clean up frames directory (best-effort; leftovers are harmless).
    try:
        _shutil.rmtree(frames_dir)
    except Exception:
        pass

    return {
        "total_duration_s": round(time.monotonic() - t0, 2),
        "stl_duration_s": glb_duration_s,
        "render_duration_s": render_duration_s,
        "ffmpeg_duration_s": ffmpeg_duration_s,
        "stl_size_bytes": 0,
        "output_size_bytes": output_path.stat().st_size if output_path.exists() else 0,
        "frame_count": len(frame_files),
        "engine_used": engine,
        "log_lines": log_lines,
    }
and couldn't answer because dimensional data wasn't available in tool results. While `cad_files.mesh_attributes` already stores bounding box dimensions, much more metadata is extractable from STEP files via OCC that would make the AI agent and the product library significantly more useful. +Add a new render type: a 20-second cinematic product highlight video with procedural multi-shot camera animation. Unlike turntable (constant orbit), this has varying camera distances, focal lengths, elevations, and bezier-eased transitions between 4 segments. -**Currently extracted**: part names, bounding box (xyz), sharp edges, smooth angle -**Available but not extracted**: per-part volume, surface area, assembly hierarchy, instance counts, embedded colors, triangle counts, geometric complexity +## Tasks -**Goal**: Expand the STEP metadata extraction to compute richer product characteristics and store them in a structured `cad_metadata` JSONB field, accessible to the AI agent, product search, and frontend. +### [ ] Task 1: Cinematic Blender script -## Affected Files +- **File**: `render-worker/scripts/cinematic_render.py` (new) +- **What**: Blender Python script that: + 1. Imports product geometry (GLB or USD, same as turntable_render.py) + 2. Applies materials (same pipeline as turntable) + 3. Computes bounding sphere from imported meshes + 4. Creates a procedural 4-segment camera animation (480 frames @ 24fps = 20s): + - Segment 1 (0-120): Establishing — slow 45° orbit + push-in, 50mm lens + - Segment 2 (121-240): Detail sweep — low-angle close arc, 85mm lens, shallow DOF + - Segment 3 (241-360): Crane up — rising from 30° to 60° elevation, 35mm wide + - Segment 4 (361-480): Hero close — slow push-in, 65mm, ease-out to still + 5. Each segment: camera position from spherical coords (azimuth, elevation, distance), bezier interpolation + 6. Depth of field enabled (f-stop scales with product size) + 7. Renders all frames as PNG to temp directory + 8. 
FFmpeg assembles frames → MP4 (H.264, yuv420p, 24fps) -| File | Change | -|------|--------| -| `backend/app/services/step_processor.py` | Expand `extract_step_metadata()` with volume, surface area, hierarchy, complexity | -| `backend/app/domains/products/models.py` | Add `cad_metadata` JSONB column to Product | -| `backend/alembic/versions/XXX_add_cad_metadata.py` | Migration | -| `backend/app/domains/pipeline/tasks/extract_metadata.py` | Populate `cad_metadata` after STEP processing | -| `backend/app/domains/products/schemas.py` | Expose `cad_metadata` in ProductOut | -| `backend/app/services/chat_service.py` | Include metadata in search_products and system prompt | -| `frontend/src/pages/ProductDetail.tsx` | Display rich metadata (volume, part count, complexity) | - -## Tasks (in order) - -### [ ] Task 1: Expand STEP metadata extraction - -- **File**: `backend/app/services/step_processor.py` -- **What**: Expand `extract_step_metadata()` to compute additional properties after the existing bbox/edge extraction. 
Add a new function `extract_rich_metadata(doc, shape_tool)` that returns: - ```python - { - "part_count": 42, # Number of leaf parts - "assembly_depth": 3, # Max nesting depth - "total_volume_cm3": 1250.4, # Sum of all part volumes (cm³) - "total_surface_area_cm2": 3400.2, # Sum of all surface areas (cm²) - "total_triangle_count": 45000, # After tessellation - "total_vertex_count": 23000, # After tessellation - "largest_part": { # Part with largest volume - "name": "OuterRing", - "volume_cm3": 450.2, - }, - "smallest_dimension_mm": 0.5, # Smallest bbox dimension across all parts - "instance_count": 36, # Total instances (parts may repeat) - "unique_part_count": 12, # Distinct shapes - "complexity_score": "high", # low/medium/high based on triangle count - } + CLI args (same pattern as turntable_render.py): ``` - Use OCC: - - `GProp_GProps` + `BRepGProp.VolumeProperties()` for volume - - `BRepGProp.SurfaceProperties()` for surface area - - `Poly_Triangulation` for triangle/vertex counts (after tessellation) - - Assembly tree walk (already done in `_collect_part_key_map`) for hierarchy depth + instance count -- **Acceptance gate**: `extract_rich_metadata()` returns all fields for a test STEP file + blender --background --python cinematic_render.py -- \ + \ + \ + \ + \ + \ + [--usd-path ...] [--focal-length ...] [--material-override ...] + ``` +- **Acceptance gate**: Script renders a 20s MP4 from a STEP file - **Dependencies**: None -### [ ] Task 2: Add cad_metadata column to Product model +### [ ] Task 2: Render service function -- **File**: `backend/app/domains/products/models.py` -- **What**: Add `cad_metadata: Mapped[dict | None] = mapped_column(JSONB, nullable=True, default=None)` to the Product model. This stores the rich metadata at the product level (not cad_file) because products are the user-facing entity. 
-- **Migration**: `alembic revision --autogenerate -m "add cad_metadata to products"` -- **Also**: Add to ProductOut schema in `backend/app/domains/products/schemas.py` -- **Acceptance gate**: Column exists, schema includes it -- **Dependencies**: None +- **File**: `backend/app/services/render_blender.py` +- **What**: Add `render_cinematic_to_file()` function with same signature as `render_turntable_to_file()` but: + - Calls `cinematic_render.py` instead of `turntable_render.py` + - Fixed 24fps, 480 frames (20s) + - Output format: mp4 + - Passes all the same material/template/position args +- **Acceptance gate**: Function callable, builds correct subprocess command +- **Dependencies**: Task 1 -### [ ] Task 3: Populate cad_metadata during STEP processing +### [ ] Task 3: Pipeline integration -- **File**: `backend/app/domains/pipeline/tasks/extract_metadata.py` -- **What**: After `process_step_file` extracts objects and queues thumbnail, call `extract_rich_metadata()` and store the result on the Product's `cad_metadata` field. Also store it on `cad_files.mesh_attributes` (merge with existing data). -- **Also**: Add a "reextract metadata" admin action that re-runs this for all existing products -- **Acceptance gate**: After STEP processing, product.cad_metadata is populated with volume, part_count, etc. -- **Dependencies**: Tasks 1, 2 - -### [ ] Task 4: Expose metadata in AI agent tools - -- **File**: `backend/app/services/chat_service.py` -- **What**: - 1. Update `_tool_search_products()` to include `cad_metadata` fields (part_count, total_volume_cm3, complexity_score) in results - 2. Update `query_database` tool description to mention `products.cad_metadata` JSONB field - 3. Update system prompt to mention available metadata -- **Acceptance gate**: AI agent can answer "What is the biggest product?" 
using volume data -- **Dependencies**: Task 3 - -### [ ] Task 5: Display rich metadata on ProductDetail page - -- **File**: `frontend/src/pages/ProductDetail.tsx` -- **What**: Add a "CAD Metadata" section on the product detail page showing: - - Part count + unique parts + instances - - Total volume (cm³) + surface area (cm²) - - Largest part name + volume - - Complexity score badge (low/medium/high) - - Triangle/vertex count - - Assembly depth -- **Acceptance gate**: Metadata displayed on product page; empty gracefully when not available +- **File**: `backend/app/domains/pipeline/tasks/render_order_line.py` +- **What**: In the render task, detect when output type has a cinematic flag. Add a check: + - If `output_type.render_settings.get("cinematic")` is True → call `render_cinematic_to_file()` instead of `render_turntable_to_file()` + - OR: if output_type name contains "Cinematic" → route to cinematic +- **Acceptance gate**: Order line with cinematic output type renders via the new script - **Dependencies**: Task 2 -### [ ] Task 6: Batch re-extract metadata for existing products +### [ ] Task 4: Output type + test -- **File**: `backend/app/api/routers/admin.py` -- **What**: Add a "Re-extract Rich Metadata" button in System Tools that queues a Celery task to re-process all completed STEP files and populate `cad_metadata` for all products. -- **Acceptance gate**: Button triggers batch job; existing products get metadata populated -- **Dependencies**: Tasks 1, 3 +- **What**: Create the "Cinematic Highlight" output type via API: + ```json + { + "name": "Cinematic Highlight", + "renderer": "blender", + "output_format": "mp4", + "render_backend": "celery", + "is_animation": true, + "transparent_bg": false, + "cycles_device": "gpu", + "render_settings": { + "width": 1920, + "height": 1080, + "engine": "cycles", + "samples": 128, + "cinematic": true, + "fps": 24, + "frame_count": 480 + } + } + ``` + Link to BlenderStudio template. Test with a real product. 
+- **Acceptance gate**: A cinematic MP4 renders successfully for a TRB product +- **Dependencies**: Task 3 ## Migration Check - -**Yes** — one new JSONB column on `products` table. - -## Order Recommendation - -1. Task 1 (extraction logic) + Task 2 (model + migration) — parallel -2. Task 3 (wire up in pipeline) -3. Task 4 (AI agent) + Task 5 (frontend) — parallel -4. Task 6 (batch re-extract) - -## Risks / Open Questions - -1. **Volume calculation accuracy**: OCC `BRepGProp` computes exact B-rep volume, not mesh-based. This is accurate but can be slow for very complex shapes. Cap at 5s per file. - -2. **Performance**: Rich metadata extraction adds ~100-500ms per STEP file. This is acceptable since STEP processing already takes 1-5s. - -3. **Existing products**: ~45 products with STEP files need backfill. Task 6 handles this. - -4. **Triangle count varies**: Depends on tessellation settings (deflection angles). Store the count at the current tessellation quality for reference, with a note that it's approximate. +**No** — no DB changes needed. diff --git a/render-worker/scripts/cinematic_render.py b/render-worker/scripts/cinematic_render.py new file mode 100644 index 0000000..fc4d9f8 --- /dev/null +++ b/render-worker/scripts/cinematic_render.py @@ -0,0 +1,859 @@ +"""Blender Python script: cinematic product highlight render. 
+ +4-segment camera animation (480 frames @ 24fps = 20s): + Segment 1 (1-120): Establishing shot — slow orbit + push-in, 50mm + Segment 2 (121-240): Detail sweep — low arc, telephoto 85mm, shallow DOF + Segment 3 (241-360): Crane up — rising pull-back, wide 35mm + Segment 4 (361-480): Hero close — final push-in, 65mm, smooth deceleration + +Usage (from Blender): + blender --background --python cinematic_render.py -- \ + \ + \ + [template_path] [target_collection] [material_library_path] [material_map_json] \ + [part_names_ordered_json] [lighting_only] [cycles_device] [shadow_catcher] \ + [rotation_x] [rotation_y] [rotation_z] [turntable_axis] [bg_color] [transparent_bg] + +Named arguments (after --): + --mesh-attributes + --usd-path + --focal-length (ignored — cinematic uses per-segment lenses) + --sensor-width + --material-override + --camera-orbit (always true for cinematic — camera moves, not model) +""" +import bpy +import sys +import os +import json +import math +from mathutils import Vector, Matrix + +# ── Colour palette (matches turntable_render.py / blender_render.py) ───────── +PALETTE_HEX = [ + "#4C9BE8", "#E85B4C", "#4CBE72", "#E8A84C", "#A04CE8", + "#4CD4E8", "#E84CA8", "#7EC850", "#E86B30", "#5088C8", +] + +def _srgb_to_linear(c: int) -> float: + v = c / 255.0 + return v / 12.92 if v <= 0.04045 else ((v + 0.055) / 1.055) ** 2.4 + +def _hex_to_linear(hex_color: str) -> tuple: + h = hex_color.lstrip('#') + return ( + _srgb_to_linear(int(h[0:2], 16)), + _srgb_to_linear(int(h[2:4], 16)), + _srgb_to_linear(int(h[4:6], 16)), + 1.0, + ) + +PALETTE_LINEAR = [_hex_to_linear(h) for h in PALETTE_HEX] + +SMOOTH_ANGLE = 30 # degrees + + +# ── Helper functions (copied from turntable_render.py) ─────────────────────── + +def _ensure_collection(name: str): + """Return a collection by name, creating it if needed.""" + if name in bpy.data.collections: + return bpy.data.collections[name] + col = bpy.data.collections.new(name) + 
bpy.context.scene.collection.children.link(col) + return col + + +def _assign_palette_material(part_obj, index): + """Assign a palette colour material to a mesh part.""" + color = PALETTE_LINEAR[index % len(PALETTE_LINEAR)] + mat = bpy.data.materials.new(name=f"Part_{index}") + mat.use_nodes = True + bsdf = mat.node_tree.nodes.get("Principled BSDF") + if bsdf: + bsdf.inputs["Base Color"].default_value = color + bsdf.inputs["Metallic"].default_value = 0.35 + bsdf.inputs["Roughness"].default_value = 0.40 + try: + bsdf.inputs["Specular IOR Level"].default_value = 0.5 + except KeyError: + pass + part_obj.data.materials.clear() + part_obj.data.materials.append(mat) + + +def _apply_smooth(part_obj, angle_deg): + """Apply smooth or flat shading to a mesh object.""" + bpy.context.view_layer.objects.active = part_obj + part_obj.select_set(True) + if angle_deg > 0: + try: + bpy.ops.object.shade_smooth_by_angle(angle=math.radians(angle_deg)) + except AttributeError: + bpy.ops.object.shade_smooth() + part_obj.data.use_auto_smooth = True + part_obj.data.auto_smooth_angle = math.radians(angle_deg) + else: + bpy.ops.object.shade_flat() + + +import re as _re + + +def _apply_rotation(parts, rx, ry, rz): + """Apply Euler XYZ rotation (degrees) to all parts by modifying matrix_world.""" + if not parts or (rx == 0.0 and ry == 0.0 and rz == 0.0): + return + from mathutils import Euler + rot_mat = Euler((math.radians(rx), math.radians(ry), math.radians(rz)), 'XYZ').to_matrix().to_4x4() + for p in parts: + p.matrix_world = rot_mat @ p.matrix_world + bpy.ops.object.select_all(action='DESELECT') + for p in parts: + p.select_set(True) + bpy.context.view_layer.objects.active = parts[0] + bpy.ops.object.transform_apply(location=False, rotation=True, scale=False) + print(f"[cinematic_render] applied rotation ({rx}, {ry}, {rz}) to {len(parts)} parts") + + +def _apply_mesh_attributes(objects: list, mesh_attributes: dict) -> None: + """Apply topology-based shading settings from OCC analysis.""" + 
if not mesh_attributes or mesh_attributes.get("error"): + return + curved_ratio = mesh_attributes.get("curved_ratio", 0.0) + threshold_deg = mesh_attributes.get("sharp_angle_threshold_deg", 30.0) + threshold_rad = threshold_deg * math.pi / 180.0 + for obj in objects: + if obj.type != 'MESH': + continue + if curved_ratio > 0.3: + for poly in obj.data.polygons: + poly.use_smooth = True + obj.data.use_auto_smooth = True + obj.data.auto_smooth_angle = threshold_rad + + +def _import_glb(glb_file): + """Import OCC-generated GLB into Blender. + + Returns list of Blender mesh objects, centred at world origin. + """ + bpy.ops.object.select_all(action='DESELECT') + bpy.ops.import_scene.gltf(filepath=glb_file) + parts = [o for o in bpy.context.selected_objects if o.type == 'MESH'] + + if not parts: + print(f"ERROR: No mesh objects imported from {glb_file}") + sys.exit(1) + + print(f"[cinematic_render] imported {len(parts)} part(s) from GLB: " + f"{[p.name for p in parts[:5]]}") + + # Centre combined bbox at world origin + all_corners = [] + for p in parts: + all_corners.extend(p.matrix_world @ Vector(c) for c in p.bound_box) + + if all_corners: + mins = Vector((min(v.x for v in all_corners), + min(v.y for v in all_corners), + min(v.z for v in all_corners))) + maxs = Vector((max(v.x for v in all_corners), + max(v.y for v in all_corners), + max(v.z for v in all_corners))) + center = (mins + maxs) * 0.5 + all_imported = list(bpy.context.selected_objects) + root_objects = [o for o in all_imported if o.parent is None] + for obj in root_objects: + obj.location -= center + + return parts + + +def _resolve_part_name(index, part_obj, part_names_ordered): + """Get the STEP part name for a Blender part by index.""" + base_name = _re.sub(r'\.\d{3}$', '', part_obj.name) + if part_names_ordered and index < len(part_names_ordered): + return part_names_ordered[index] + return base_name + + +def _apply_material_library(parts, mat_lib_path, mat_map, part_names_ordered=None): + """Append 
materials from library .blend and assign to parts via material_map.""" + if not mat_lib_path or not os.path.isfile(mat_lib_path): + print(f"[cinematic_render] material library not found: {mat_lib_path}") + return + + needed = set(mat_map.values()) + if not needed: + return + + appended = {} + for mat_name in needed: + inner_path = f"{mat_lib_path}/Material/{mat_name}" + try: + bpy.ops.wm.append( + filepath=inner_path, + directory=f"{mat_lib_path}/Material/", + filename=mat_name, + link=False, + ) + if mat_name in bpy.data.materials: + appended[mat_name] = bpy.data.materials[mat_name] + print(f"[cinematic_render] appended material: {mat_name}") + else: + print(f"[cinematic_render] WARNING: material '{mat_name}' not found after append") + except Exception as exc: + print(f"[cinematic_render] WARNING: failed to append material '{mat_name}': {exc}") + + if not appended: + return + + assigned_count = 0 + for i, part in enumerate(parts): + base_name = _re.sub(r'\.\d{3}$', '', part.name) + _prev = None + while _prev != base_name: + _prev = base_name + base_name = _re.sub(r'_AF\d+$', '', base_name, flags=_re.IGNORECASE) + part_key = base_name.lower().strip() + mat_name = mat_map.get(part_key) + + if not mat_name: + for key, val in sorted(mat_map.items(), key=lambda x: len(x[0]), reverse=True): + if len(key) >= 5 and len(part_key) >= 5 and ( + part_key.startswith(key) or key.startswith(part_key) + ): + mat_name = val + break + + if not mat_name and part_names_ordered and i < len(part_names_ordered): + step_name = part_names_ordered[i] + step_key = step_name.lower().strip() + mat_name = mat_map.get(step_key) + if not mat_name: + _p2 = None + while _p2 != step_key: + _p2 = step_key + step_key = _re.sub(r'_af\d+$', '', step_key) + mat_name = mat_map.get(step_key) + + if mat_name and mat_name in appended: + part.data.materials.clear() + part.data.materials.append(appended[mat_name]) + assigned_count += 1 + print(f"[cinematic_render] assigned '{mat_name}' to part '{part.name}'") 
+ + print(f"[cinematic_render] material assignment: {assigned_count}/{len(parts)} parts matched") + + +# ── Cinematic camera animation ─────────────────────────────────────────────── + +TOTAL_FRAMES = 480 +SEGMENT_LENGTH = 120 # frames per segment + +# Segment definitions: (start_azimuth_offset, end_azimuth_offset, +# start_elevation, end_elevation, +# start_dist_factor, end_dist_factor, +# start_lens, end_lens, +# use_dof) +SEGMENTS = [ + # Segment 1: Establishing shot — orbit 45deg, push in, 50mm + { + "az_start": 0.0, "az_end": 45.0, + "el_start": 25.0, "el_end": 25.0, + "dist_start": 3.0, "dist_end": 2.5, + "lens_start": 50.0, "lens_end": 50.0, + "dof": False, + }, + # Segment 2: Detail sweep — arc 45deg low, telephoto, shallow DOF + { + "az_start": 45.0, "az_end": 90.0, + "el_start": 10.0, "el_end": 15.0, + "dist_start": 1.8, "dist_end": 1.5, + "lens_start": 85.0, "lens_end": 85.0, + "dof": True, + }, + # Segment 3: Crane up — rise + orbit 30deg, wide pull-back + { + "az_start": 90.0, "az_end": 120.0, + "el_start": 30.0, "el_end": 60.0, + "dist_start": 2.0, "dist_end": 3.5, + "lens_start": 35.0, "lens_end": 35.0, + "dof": False, + }, + # Segment 4: Hero close — push in, settle down + { + "az_start": 120.0, "az_end": 120.0, + "el_start": 35.0, "el_end": 25.0, + "dist_start": 3.0, "dist_end": 2.2, + "lens_start": 65.0, "lens_end": 65.0, + "dof": False, + }, +] + + +def _ease_in_out(t: float) -> float: + """Cubic ease in-out, t in [0, 1].""" + if t < 0.5: + return 4.0 * t * t * t + else: + return 1.0 - (-2.0 * t + 2.0) ** 3 / 2.0 + + +def _lerp(a: float, b: float, t: float) -> float: + """Linear interpolation.""" + return a + (b - a) * t + + +def _spherical_to_xyz(azimuth_deg: float, elevation_deg: float, + distance: float, center: Vector) -> Vector: + """Convert spherical coordinates to Cartesian position.""" + az = math.radians(azimuth_deg) + el = math.radians(elevation_deg) + x = center.x + distance * math.cos(el) * math.cos(az) + y = center.y + distance * 
math.cos(el) * math.sin(az) + z = center.z + distance * math.sin(el) + return Vector((x, y, z)) + + +def _get_segment_params(frame: int, bsphere_radius: float): + """Compute camera parameters for a given frame. + + Returns (azimuth_deg, elevation_deg, distance, lens_mm, use_dof). + """ + # Determine which segment (0-3) and local t (0-1) + seg_index = min((frame - 1) // SEGMENT_LENGTH, len(SEGMENTS) - 1) + local_frame = (frame - 1) - seg_index * SEGMENT_LENGTH + raw_t = local_frame / max(SEGMENT_LENGTH - 1, 1) + + # Apply easing: smooth start for segment 1, smooth stop for segment 4, + # ease-in-out for segments 2 and 3 + if seg_index == 0: + # Smooth start: ease-out (decelerate into motion) + t = _ease_in_out(raw_t) + elif seg_index == 3: + # Smooth stop: ease-in-out with emphasis on deceleration + t = _ease_in_out(raw_t) + else: + t = _ease_in_out(raw_t) + + seg = SEGMENTS[seg_index] + + azimuth = _lerp(seg["az_start"], seg["az_end"], t) + elevation = _lerp(seg["el_start"], seg["el_end"], t) + dist_factor = _lerp(seg["dist_start"], seg["dist_end"], t) + distance = dist_factor * bsphere_radius + lens = _lerp(seg["lens_start"], seg["lens_end"], t) + use_dof = seg["dof"] + + return azimuth, elevation, distance, lens, use_dof + + +def _setup_cinematic_camera(parts, bbox_center, bsphere_radius, total_frames): + """Create camera and keyframe the cinematic 4-segment animation. + + Returns the camera object. 
+ """ + # Starting azimuth: offset so segment 1 starts from a good angle (40deg) + base_azimuth = 40.0 + + # Create camera + start_az, start_el, start_dist, start_lens, _ = _get_segment_params(1, bsphere_radius) + start_pos = _spherical_to_xyz(base_azimuth + start_az, start_el, start_dist, bbox_center) + bpy.ops.object.camera_add(location=start_pos) + cam_obj = bpy.context.active_object + bpy.context.scene.camera = cam_obj + cam_obj.data.lens = start_lens + cam_obj.data.clip_start = max(bsphere_radius * 0.001, 0.0001) + cam_obj.data.clip_end = bsphere_radius * 20.0 + + # Set sensor width + cam_obj.data.sensor_width = 36.0 + + # DOF defaults (will be toggled per-segment) + cam_obj.data.dof.use_dof = False + cam_obj.data.dof.focus_distance = bsphere_radius * 2.0 + cam_obj.data.dof.aperture_fstop = bsphere_radius * 8.0 + + print(f"[cinematic_render] animating {total_frames} frames, bsphere_radius={bsphere_radius:.4f}") + + # Keyframe every frame for smooth cinematic motion + for frame in range(1, total_frames + 1): + bpy.context.scene.frame_set(frame) + + azimuth, elevation, distance, lens, use_dof = _get_segment_params(frame, bsphere_radius) + azimuth += base_azimuth # offset from base viewing angle + + # Camera position from spherical coordinates + position = _spherical_to_xyz(azimuth, elevation, distance, bbox_center) + cam_obj.location = position + cam_obj.keyframe_insert(data_path="location", frame=frame) + + # Point camera at center using track quaternion + direction = bbox_center - cam_obj.location + rot = direction.to_track_quat('-Z', 'Y') + cam_obj.rotation_euler = rot.to_euler() + cam_obj.keyframe_insert(data_path="rotation_euler", frame=frame) + + # Focal length animation + cam_obj.data.lens = lens + cam_obj.data.keyframe_insert(data_path="lens", frame=frame) + + # DOF animation + cam_obj.data.dof.use_dof = use_dof + cam_obj.data.dof.keyframe_insert(data_path="use_dof", frame=frame) + if use_dof: + cam_obj.data.dof.focus_distance = direction.length + 
cam_obj.data.dof.aperture_fstop = bsphere_radius * 8.0 + cam_obj.data.dof.keyframe_insert(data_path="focus_distance", frame=frame) + cam_obj.data.dof.keyframe_insert(data_path="aperture_fstop", frame=frame) + + print(f"[cinematic_render] camera keyframed: {total_frames} frames across 4 segments") + return cam_obj + + +# ── Main ───────────────────────────────────────────────────────────────────── + +def main(): + argv = sys.argv + # Everything after "--" is our args + args = argv[argv.index("--") + 1:] + + glb_path = args[0] + frames_dir = args[1] + frame_count = int(args[2]) + degrees = int(args[3]) # kept for arg compatibility, not used in cinematic + width = int(args[4]) + height = int(args[5]) + engine = args[6] + samples = int(args[7]) + part_colors_json = args[8] if len(args) > 8 else "{}" + + # Template + material library args (same positional layout as turntable_render.py) + template_path = args[9] if len(args) > 9 and args[9] else "" + target_collection = args[10] if len(args) > 10 else "Product" + material_library_path = args[11] if len(args) > 11 and args[11] else "" + material_map_raw = args[12] if len(args) > 12 else "{}" + part_names_ordered_raw = args[13] if len(args) > 13 else "[]" + lighting_only = args[14] == "1" if len(args) > 14 else False + cycles_device = args[15].lower() if len(args) > 15 else "auto" + shadow_catcher = args[16] == "1" if len(args) > 16 else False + rotation_x = float(args[17]) if len(args) > 17 else 0.0 + rotation_y = float(args[18]) if len(args) > 18 else 0.0 + rotation_z = float(args[19]) if len(args) > 19 else 0.0 + turntable_axis = args[20] if len(args) > 20 else "world_z" # unused in cinematic + bg_color = args[21] if len(args) > 21 else "" + transparent_bg = args[22] == "1" if len(args) > 22 else False + + # Named argument: --mesh-attributes + _mesh_attrs: dict = {} + if "--mesh-attributes" in argv: + _idx = argv.index("--mesh-attributes") + try: + _mesh_attrs = json.loads(argv[_idx + 1]) + except Exception: + pass + + 
# Named argument: --usd-path + usd_path = "" + if "--usd-path" in argv: + _usd_idx = argv.index("--usd-path") + usd_path = argv[_usd_idx + 1] if _usd_idx + 1 < len(argv) else "" + + # Named argument: --sensor-width + _sensor_width = None + if "--sensor-width" in argv: + _idx = argv.index("--sensor-width") + _sensor_width = float(argv[_idx + 1]) if _idx + 1 < len(argv) else None + + # Named argument: --material-override + _material_override = None + if "--material-override" in argv: + _idx = argv.index("--material-override") + _material_override = argv[_idx + 1] if _idx + 1 < len(argv) else None + + # Cinematic always uses camera orbit (camera moves, model stays) + camera_orbit = True + + # Override frame count to cinematic default if not explicitly set differently + if frame_count <= 0: + frame_count = TOTAL_FRAMES + + # Ensure scripts dir is on path for shared module imports + _scripts_dir = os.path.dirname(os.path.abspath(__file__)) + if _scripts_dir not in sys.path: + sys.path.insert(0, _scripts_dir) + + # Pre-load USD import helper + _import_usd_file = None + if usd_path: + from import_usd import import_usd_file as _import_usd_file # type: ignore[assignment] + + # Shared material helpers (handle USD stub collisions correctly) + from _blender_materials import ( + apply_material_library_direct as _apply_material_library_direct, + apply_material_library as _apply_material_library_shared, + build_mat_map_lower as _build_mat_map_lower, + assign_failed_material as _assign_failed_material, + ) + + os.makedirs(frames_dir, exist_ok=True) + + try: + part_colors = json.loads(part_colors_json) + except json.JSONDecodeError: + part_colors = {} + + try: + material_map = json.loads(material_map_raw) if material_map_raw else {} + except json.JSONDecodeError: + material_map = {} + + try: + part_names_ordered = json.loads(part_names_ordered_raw) if part_names_ordered_raw else [] + except json.JSONDecodeError: + part_names_ordered = [] + + # Validate template path + if 
template_path and not os.path.isfile(template_path): + print(f"[cinematic_render] ERROR: template_path was provided but file not found: {template_path}") + print("[cinematic_render] Ensure the blend-templates directory is accessible on this worker.") + sys.exit(1) + + use_template = bool(template_path) + + print(f"[cinematic_render] engine={engine}, samples={samples}, size={width}x{height}, " + f"frames={frame_count}") + print(f"[cinematic_render] part_names_ordered: {len(part_names_ordered)} entries") + if use_template: + print(f"[cinematic_render] template={template_path}, collection={target_collection}, lighting_only={lighting_only}") + else: + print("[cinematic_render] no template -- using factory settings (Mode A)") + if material_library_path: + print(f"[cinematic_render] material_library={material_library_path}, material_map keys={list(material_map.keys())}") + + # ── SCENE SETUP ────────────────────────────────────────────────────────── + _usd_mat_lookup: dict = {} + + if use_template: + # ── MODE B: Template-based render ──────────────────────────────────── + print(f"[cinematic_render] Opening template: {template_path}") + bpy.ops.wm.open_mainfile(filepath=template_path) + + target_col = _ensure_collection(target_collection) + + if usd_path and _import_usd_file: + parts, _usd_mat_lookup = _import_usd_file(usd_path) + else: + parts = _import_glb(glb_path) + _apply_rotation(parts, rotation_x, rotation_y, rotation_z) + _apply_mesh_attributes(parts, _mesh_attrs) + + # Move imported parts into target collection + for part in parts: + for col in list(part.users_collection): + col.objects.unlink(part) + target_col.objects.link(part) + + # Apply smooth shading + for part in parts: + _apply_smooth(part, SMOOTH_ANGLE) + + # Apply material override if set + if _material_override: + print(f"[cinematic_render] material_override active: all parts -> {_material_override}", flush=True) + if _usd_mat_lookup: + _usd_mat_lookup = {k: _material_override for k in 
_usd_mat_lookup} + if material_map: + material_map = {k: _material_override for k in material_map} + + # Material assignment: USD primvar path first, then name-matching fallback + if material_library_path and _usd_mat_lookup: + _apply_material_library_direct(parts, material_library_path, _usd_mat_lookup) + if material_map: + _unassigned = [p for p in parts if not p.data.materials or + (len(p.data.materials) == 1 and p.data.materials[0] and + p.data.materials[0].name == "SCHAEFFLER_059999_FailedMaterial")] + if _unassigned: + print(f"[cinematic_render] {len(_unassigned)} parts without USD primvar -- " + f"falling back to name-matching", flush=True) + _apply_material_library_shared( + _unassigned, material_library_path, + _build_mat_map_lower(material_map), part_names_ordered, + ) + elif material_library_path and material_map: + _apply_material_library_shared( + parts, material_library_path, + _build_mat_map_lower(material_map), part_names_ordered, + ) + # Palette fallback for any parts still without materials + for i, part in enumerate(parts): + if not part.data.materials or len(part.data.materials) == 0: + _assign_palette_material(part, i) + + # ── Shadow catcher (Cycles only, template mode only) ───────────────── + if shadow_catcher: + sc_col_name = "Shadowcatcher" + sc_obj_name = "Shadowcatcher" + for vl in bpy.context.scene.view_layers: + def _enable_col_recursive(layer_col): + if layer_col.collection.name == sc_col_name: + layer_col.exclude = False + layer_col.collection.hide_render = False + layer_col.collection.hide_viewport = False + return True + for child in layer_col.children: + if _enable_col_recursive(child): + return True + return False + _enable_col_recursive(vl.layer_collection) + + sc_obj = bpy.data.objects.get(sc_obj_name) + if sc_obj: + all_world_z = [] + for part in parts: + for corner in part.bound_box: + all_world_z.append((part.matrix_world @ Vector(corner)).z) + if all_world_z: + sc_obj.location.z = min(all_world_z) + 
print(f"[cinematic_render] shadow catcher enabled, plane Z={sc_obj.location.z:.4f}") + else: + print(f"[cinematic_render] WARNING: shadow catcher object '{sc_obj_name}' not found in template") + + print(f"[cinematic_render] template mode: {len(parts)} parts imported into collection '{target_collection}'") + + else: + # ── MODE A: Factory settings ───────────────────────────────────────── + bpy.ops.wm.read_factory_settings(use_empty=True) + + if usd_path and _import_usd_file: + parts, _usd_mat_lookup = _import_usd_file(usd_path) + else: + parts = _import_glb(glb_path) + _apply_rotation(parts, rotation_x, rotation_y, rotation_z) + _apply_mesh_attributes(parts, _mesh_attrs) + + for i, part in enumerate(parts): + _apply_smooth(part, SMOOTH_ANGLE) + + # Apply material override if set + if _material_override: + print(f"[cinematic_render] material_override active (Mode A): all parts -> {_material_override}", flush=True) + if _usd_mat_lookup: + _usd_mat_lookup = {k: _material_override for k in _usd_mat_lookup} + if material_map: + material_map = {k: _material_override for k in material_map} + + # Material assignment: USD primvar path first, then name-matching fallback + if material_library_path and _usd_mat_lookup: + _apply_material_library_direct(parts, material_library_path, _usd_mat_lookup) + if material_map: + _unassigned = [p for p in parts if not p.data.materials or + (len(p.data.materials) == 1 and p.data.materials[0] and + p.data.materials[0].name == "SCHAEFFLER_059999_FailedMaterial")] + if _unassigned: + _apply_material_library_shared( + _unassigned, material_library_path, + _build_mat_map_lower(material_map), part_names_ordered, + ) + elif material_library_path and material_map: + _apply_material_library_shared( + parts, material_library_path, + _build_mat_map_lower(material_map), part_names_ordered, + ) + else: + for i, part in enumerate(parts): + step_name = _resolve_part_name(i, part, part_names_ordered) + color_hex = part_colors.get(step_name) + if 
color_hex: + mat = bpy.data.materials.new(name=f"mat_{part.name}") + mat.use_nodes = True + bsdf = mat.node_tree.nodes.get("Principled BSDF") + if bsdf: + color = _hex_to_linear(color_hex) + bsdf.inputs["Base Color"].default_value = color + bsdf.inputs["Metallic"].default_value = 0.35 + bsdf.inputs["Roughness"].default_value = 0.40 + try: + bsdf.inputs["Specular IOR Level"].default_value = 0.5 + except KeyError: + pass + part.data.materials.clear() + part.data.materials.append(mat) + else: + _assign_palette_material(part, i) + # Palette fallback for any parts still without materials + for i, part in enumerate(parts): + if not part.data.materials or len(part.data.materials) == 0: + _assign_palette_material(part, i) + + # ── Combined bounding box / bounding sphere ────────────────────────────── + all_corners = [] + for part in parts: + all_corners.extend(part.matrix_world @ Vector(c) for c in part.bound_box) + + bbox_min = Vector(( + min(v.x for v in all_corners), + min(v.y for v in all_corners), + min(v.z for v in all_corners), + )) + bbox_max = Vector(( + max(v.x for v in all_corners), + max(v.y for v in all_corners), + max(v.z for v in all_corners), + )) + + bbox_center = (bbox_min + bbox_max) * 0.5 + bbox_dims = bbox_max - bbox_min + bsphere_radius = max(bbox_dims.length * 0.5, 0.001) + + print(f"[cinematic_render] bbox_dims={tuple(round(d, 4) for d in bbox_dims)}, " + f"bsphere_radius={bsphere_radius:.4f}") + + # ── Lighting -- only in Mode A (factory settings) ──────────────────────── + if not use_template: + light_dist = bsphere_radius * 6.0 + + bpy.ops.object.light_add(type='SUN', location=( + bbox_center.x + light_dist * 0.5, + bbox_center.y - light_dist * 0.35, + bbox_center.z + light_dist, + )) + sun = bpy.context.active_object + sun.data.energy = 4.0 + sun.rotation_euler = (math.radians(45), 0, math.radians(30)) + + bpy.ops.object.light_add(type='AREA', location=( + bbox_center.x - light_dist * 0.4, + bbox_center.y + light_dist * 0.4, + bbox_center.z + 
light_dist * 0.7, + )) + fill = bpy.context.active_object + fill.data.energy = max(800.0, bsphere_radius ** 2 * 2000.0) + fill.data.size = max(4.0, bsphere_radius * 4.0) + + # World background + world = bpy.data.worlds.new("World") + bpy.context.scene.world = world + world.use_nodes = True + bg = world.node_tree.nodes["Background"] + bg.inputs["Color"].default_value = (0.96, 0.96, 0.97, 1.0) + bg.inputs["Strength"].default_value = 0.15 + + # ── Cinematic camera animation ─────────────────────────────────────────── + # Remove any existing template camera — cinematic always creates its own + if bpy.context.scene.camera: + old_cam = bpy.context.scene.camera + bpy.data.objects.remove(old_cam, do_unlink=True) + + scene = bpy.context.scene + scene.frame_start = 1 + scene.frame_end = frame_count + + camera = _setup_cinematic_camera(parts, bbox_center, bsphere_radius, frame_count) + + # ── Colour management ──────────────────────────────────────────────────── + if not use_template: + scene.view_settings.view_transform = 'Standard' + scene.view_settings.exposure = 0.0 + scene.view_settings.gamma = 1.0 + try: + scene.view_settings.look = 'None' + except Exception: + pass + + # ── Render engine ──────────────────────────────────────────────────────── + if engine == "eevee": + eevee_ok = False + for eevee_id in ('BLENDER_EEVEE', 'BLENDER_EEVEE_NEXT'): + try: + scene.render.engine = eevee_id + eevee_ok = True + print(f"[cinematic_render] EEVEE engine id: {eevee_id}") + break + except TypeError: + continue + if eevee_ok: + for attr in ('taa_render_samples', 'samples'): + try: + setattr(scene.eevee, attr, samples) + break + except AttributeError: + continue + else: + print("[cinematic_render] WARNING: EEVEE not available, falling back to Cycles") + engine = "cycles" + + if engine != "eevee": + scene.render.engine = 'CYCLES' + scene.cycles.samples = samples + scene.cycles.use_denoising = True + scene.cycles.denoiser = 'OPENIMAGEDENOISE' + print(f"[cinematic_render] 
cycles_device={cycles_device}") + gpu_found = False + if cycles_device != "cpu": + try: + cycles_prefs = bpy.context.preferences.addons['cycles'].preferences + for device_type in ('OPTIX', 'CUDA', 'HIP', 'ONEAPI'): + try: + cycles_prefs.compute_device_type = device_type + cycles_prefs.get_devices() + gpu_devs = [d for d in cycles_prefs.devices if d.type != 'CPU'] + if gpu_devs: + for d in gpu_devs: + d.use = True + scene.cycles.device = 'GPU' + gpu_found = True + print(f"[cinematic_render] Cycles GPU ({device_type})") + break + except Exception: + continue + except Exception: + pass + if gpu_found: + print(f"RENDER_DEVICE_USED: engine=CYCLES device=GPU compute_type={device_type}", flush=True) + else: + scene.cycles.device = 'CPU' + print("[cinematic_render] WARNING: GPU not found -- falling back to CPU") + print("RENDER_DEVICE_USED: engine=CYCLES device=CPU compute_type=NONE (fallback)", flush=True) + import os as _os + if _os.environ.get("CYCLES_DEVICE", "auto").lower() == "gpu": + print("GPU_REQUIRED_BUT_CPU_USED: strict mode active (CYCLES_DEVICE=gpu)", flush=True) + sys.exit(2) + + # ── Render settings ────────────────────────────────────────────────────── + scene.render.resolution_x = width + scene.render.resolution_y = height + scene.render.resolution_percentage = 100 + scene.render.image_settings.file_format = 'PNG' + + # ── Transparent background ─────────────────────────────────────────────── + if bg_color or transparent_bg: + scene.render.film_transparent = True + if bg_color: + print(f"[cinematic_render] film_transparent=True for FFmpeg bg_color compositing ({bg_color})") + else: + print("[cinematic_render] transparent_bg enabled (alpha PNG frames)") + + # ── Persistent data (Cycles BVH caching between frames) ────────────────── + scene.render.use_persistent_data = True + print("[cinematic_render] persistent_data enabled -- BVH cached between frames", flush=True) + + # ── Render all frames ──────────────────────────────────────────────────── + import 
time as _time + _render_start = _time.time() + for frame in range(1, frame_count + 1): + scene.frame_set(frame) + scene.render.filepath = os.path.join(frames_dir, f"frame_{frame:04d}") + bpy.ops.render.render(write_still=True) + elapsed = _time.time() - _render_start + fps_so_far = frame / elapsed + print(f"[cinematic_render] Frame {frame}/{frame_count} -- {elapsed:.1f}s elapsed ({fps_so_far:.2f} fps)") + + total = _time.time() - _render_start + print(f"[cinematic_render] Cinematic render complete: {frame_count} frames in {total:.1f}s ({frame_count/total:.2f} fps avg)") + + +if __name__ == "__main__": + main()