chore: snapshot workflow migration progress
@@ -21,6 +21,7 @@ from __future__ import annotations
 
 import argparse
 import json
+import os
 import sys
 import traceback
 from pathlib import Path
@@ -436,31 +437,38 @@ def _tessellate_with_gmsh(shape, linear_deflection: float, angular_deflection: f
     )
 
 
-def _collect_part_key_map(shape_tool, free_labels) -> dict:
-    """Return {normalized_source_name: part_key_slug} for all leaf parts in the XCAF hierarchy.
-
-    The normalized source name (XCAF label name without _AF\\d+ suffix) is what
-    Three.js sees after normalizeMeshName() strips the OCC assembly suffix from the
-    GLB mesh node name. The slug algorithm matches part_key_service.generate_part_key().
-    """
-    import re as _re
+def _collect_part_key_mappings(shape_tool, free_labels) -> tuple[dict[str, str], dict[str, list[str]]]:
+    """Return canonical and occurrence-aware part-key mappings for leaf XCAF parts."""
+    import hashlib as _hashlib
+    import re as _re
     from OCP.TDF import TDF_LabelSequence
     from OCP.TDataStd import TDataStd_Name
     from OCP.XCAFDoc import XCAFDoc_ShapeTool
 
-    _af_re = _re.compile(r'_AF\d+$', _re.IGNORECASE)
+    _af_re = _re.compile(r"_AF\d+$", _re.IGNORECASE)
 
-    def _slug(source_name: str, xcaf_path: str = "") -> str:
-        base = _af_re.sub('', source_name) if source_name else ''
-        # camelCase split — same as part_key_service.generate_part_key
-        base = _re.sub(r'([a-z])([A-Z])', r'\1_\2', base)
-        slug = _re.sub(r'[^a-z0-9]+', '_', base.lower()).strip('_')
-        return slug[:50]
+    def _slug(source_name: str, xcaf_path: str = "", existing_keys: set[str] | None = None) -> str:
+        base = _af_re.sub("", source_name) if source_name else ""
+        base = _re.sub(r"([a-z])([A-Z])", r"\1_\2", base)
+        slug = _re.sub(r"[^a-z0-9]+", "_", base.lower()).strip("_")
+        if not slug:
+            slug = f"part_{_hashlib.sha256(xcaf_path.encode()).hexdigest()[:8]}"
+        slug = slug[:50]
 
-    part_key_map: dict = {}
+        if existing_keys is None:
+            return slug
+
+        key = slug
+        n = 2
+        while key in existing_keys:
+            key = f"{slug}_{n}"
+            n += 1
+        existing_keys.add(key)
+        return key
+
+    part_key_map: dict[str, str] = {}
+    part_key_occurrences: dict[str, list[str]] = {}
+    existing_keys: set[str] = set()
 
     def _collect(label, path: str = "") -> None:
         name_attr = TDataStd_Name()
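The dedup rules above are easiest to see in isolation. A minimal standalone sketch of the same slug logic (the sample names and paths are hypothetical; the real code runs inside _collect_part_key_mappings):

    import hashlib
    import re

    _AF_RE = re.compile(r"_AF\d+$", re.IGNORECASE)

    def slug(source_name: str, xcaf_path: str = "", existing_keys: set | None = None) -> str:
        # Strip OCC assembly suffix, split camelCase, slugify to [a-z0-9_], cap at 50.
        base = _AF_RE.sub("", source_name or "")
        base = re.sub(r"([a-z])([A-Z])", r"\1_\2", base)
        out = re.sub(r"[^a-z0-9]+", "_", base.lower()).strip("_")[:50]
        if not out:
            # Unnamed parts fall back to a stable hash of their XCAF path.
            out = f"part_{hashlib.sha256(xcaf_path.encode()).hexdigest()[:8]}"
        if existing_keys is None:
            return out
        key, n = out, 2
        while key in existing_keys:  # bolt, bolt_2, bolt_3, ...
            key, n = f"{out}_{n}", n + 1
        existing_keys.add(key)
        return key

    seen: set = set()
    print(slug("HexBolt_AF3", "/asm/b1", seen))  # hex_bolt
    print(slug("HexBolt_AF7", "/asm/b2", seen))  # hex_bolt_2
    print(slug("", "/asm/x", seen))              # part_<8 hex digits>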
@@ -468,9 +476,8 @@ def _collect_part_key_map(shape_tool, free_labels) -> dict:
         if label.FindAttribute(TDataStd_Name.GetID_s(), name_attr):
             name = name_attr.Get().ToExtString()
 
         # Dereference component references to their definition label
         # (the definition may itself be an assembly with sub-components)
         from OCP.TDF import TDF_Label as _TDF_Label
 
         actual_label = label
         if XCAFDoc_ShapeTool.IsReference_s(label):
             ref_label = _TDF_Label()
@@ -481,45 +488,206 @@ def _collect_part_key_map(shape_tool, free_labels) -> dict:
         XCAFDoc_ShapeTool.GetComponents_s(actual_label, components)
 
         xcaf_path = f"{path}/{name}" if name else f"{path}/unnamed"
 
         if components.Length() == 0:
             # Leaf node — normalized source name (without _AF suffix) as key
-            normalized = _af_re.sub('', name) if name else ''
+            normalized = _af_re.sub("", name) if name else ""
             if normalized:
-                part_key_map[normalized] = _slug(name, xcaf_path)
-        else:
-            for i in range(1, components.Length() + 1):
-                _collect(components.Value(i), xcaf_path)
+                part_key = _slug(name, xcaf_path, existing_keys)
+                part_key_map.setdefault(normalized, part_key)
+                part_key_occurrences.setdefault(normalized, []).append(part_key)
+            return
+
+        for i in range(1, components.Length() + 1):
+            _collect(components.Value(i), xcaf_path)
 
     for i in range(1, free_labels.Length() + 1):
         _collect(free_labels.Value(i))
 
-    return part_key_map
+    return part_key_map, part_key_occurrences
 
 
+def _collect_part_key_map(shape_tool, free_labels) -> dict[str, str]:
+    part_key_map, _ = _collect_part_key_mappings(shape_tool, free_labels)
+    return part_key_map
 
 
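The two mappings serve different consumers: part_key_map keeps one canonical key per normalized source name (first occurrence wins via setdefault), while part_key_occurrences records one key per leaf in traversal order. An illustrative shape of the return values, with hypothetical part names and no XCAF document involved:

    # Hypothetical result for an assembly holding "Bracket_AF1" and "Bracket_AF2"
    # leaves plus one "BasePlate" leaf; keys follow the _slug dedup rules above.
    part_key_map = {
        "Bracket": "bracket",        # first occurrence wins (setdefault)
        "BasePlate": "base_plate",
    }
    part_key_occurrences = {
        "Bracket": ["bracket", "bracket_2"],  # one key per leaf occurrence
        "BasePlate": ["base_plate"],
    }
    assert part_key_map["Bracket"] == part_key_occurrences["Bracket"][0]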
-def _inject_glb_extras(glb_path: Path, extras: dict, part_key_map: dict | None = None) -> None:
-    """Patch a GLB binary to add/update scenes[0].extras JSON field.
-
-    Also stamps per-node extras.partKey on each GLB node whose name maps to an
-    entry in part_key_map (the dict returned by _collect_part_key_map). Three.js
-    GLTFLoader propagates node extras → object.userData, so every THREE.Mesh will
-    carry userData.partKey after load — no runtime lookup needed in the viewer.
-
-    The GLB format stores a JSON chunk immediately after the 12-byte header.
-    We re-serialize it with the new extras and update chunk + total lengths.
-    No external dependencies — pure stdlib struct/json.
-    """
+def _normalize_occ_node_name(raw_name: str) -> str:
+    """Collapse exporter-only suffixes back to their semantic OCC source name."""
+    import re as _re
+
+    name = (raw_name or "").strip()
+    name = _re.sub(r"\.\d{3}$", "", name)
+
+    previous = None
+    while previous != name:
+        previous = name
+        name = _re.sub(r"_AF\d+(_ASM)?_?$", "", name, flags=_re.IGNORECASE)
+    return name
+
+
+def _slugify_occ_name(raw_name: str) -> str:
+    import re as _re
+
+    base = _re.sub(r"([a-z])([A-Z])", r"\1_\2", raw_name or "")
+    return _re.sub(r"[^a-z0-9]+", "_", base.lower()).strip("_")
+
+
+def _is_instance_variant_name(mesh_name: str, semantic_name: str) -> bool:
+    if not mesh_name or not semantic_name:
+        return False
+    if mesh_name == semantic_name:
+        return True
+    if mesh_name.startswith(f"{semantic_name}_"):
+        return True
+    return mesh_name.rstrip("0123456789_") == semantic_name
+
+
+def _read_vec(node: dict, key: str, default: tuple[float, ...]) -> tuple[float, ...]:
+    value = node.get(key)
+    if not isinstance(value, list) or len(value) != len(default):
+        return default
+    result: list[float] = []
+    for idx, fallback in enumerate(default):
+        item = value[idx]
+        result.append(float(item) if isinstance(item, (int, float)) else fallback)
+    return tuple(result)
+
+
+def _have_matching_local_transforms(left: dict, right: dict) -> bool:
+    position_left = _read_vec(left, "translation", (0.0, 0.0, 0.0))
+    position_right = _read_vec(right, "translation", (0.0, 0.0, 0.0))
+    scale_left = _read_vec(left, "scale", (1.0, 1.0, 1.0))
+    scale_right = _read_vec(right, "scale", (1.0, 1.0, 1.0))
+    rotation_left = _read_vec(left, "rotation", (0.0, 0.0, 0.0, 1.0))
+    rotation_right = _read_vec(right, "rotation", (0.0, 0.0, 0.0, 1.0))
+
+    epsilon = 1e-5
+    if any(abs(a - b) > epsilon for a, b in zip(position_left, position_right)):
+        return False
+    if any(abs(a - b) > epsilon for a, b in zip(scale_left, scale_right)):
+        return False
+
+    dot = sum(a * b for a, b in zip(rotation_left, rotation_right))
+    return abs(1 - abs(dot)) <= 1e-4
+
+
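The rotation comparison checks |q·q'| against 1 instead of comparing components, since unit quaternions q and -q describe the same rotation. A small sketch of that property (a simplified stand-in for the helper above):

    def same_rotation(q1, q2, eps=1e-4):
        # Unit quaternions q and -q represent the same rotation (double cover),
        # so only the magnitude of the dot product is meaningful.
        dot = sum(a * b for a, b in zip(q1, q2))
        return abs(1 - abs(dot)) <= eps

    q = (0.0, 0.7071068, 0.0, 0.7071068)           # 90 degrees about Y
    neg_q = tuple(-c for c in q)                   # same rotation, flipped sign
    print(same_rotation(q, neg_q))                 # True
    print(same_rotation(q, (0.0, 0.0, 0.0, 1.0)))  # False (identity)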
+def _part_key_priority(source_name: str, part_key: str) -> tuple[int, int, int]:
+    import re as _re
+
+    penalty = 0
+    if _re.search(r"_AF\d+(_ASM)?_?$", source_name, flags=_re.IGNORECASE):
+        penalty += 100
+    if _re.search(r"_af\d+$", part_key, flags=_re.IGNORECASE):
+        penalty += 100
+    if _re.search(r"_\d+_\d+$", source_name):
+        penalty += 25
+    return (penalty, len(source_name), len(part_key))
+
+
+def _build_exact_part_key_lookup(part_key_map: dict[str, str]) -> dict[str, str]:
+    return {
+        _slugify_occ_name(raw_name): part_key
+        for raw_name, part_key in part_key_map.items()
+        if raw_name and part_key
+    }
+
+
+def _build_canonical_part_key_lookup(part_key_map: dict[str, str]) -> dict[str, str]:
+    canonical: dict[str, tuple[tuple[int, int, int], str]] = {}
+    for raw_name, part_key in part_key_map.items():
+        normalized = _normalize_occ_node_name(raw_name)
+        if not normalized or not part_key:
+            continue
+        slug = _slugify_occ_name(normalized)
+        if not slug:
+            continue
+        candidate = (_part_key_priority(raw_name, part_key), part_key)
+        current = canonical.get(slug)
+        if current is None or candidate[0] < current[0]:
+            canonical[slug] = candidate
+    return {slug: part_key for slug, (_, part_key) in canonical.items()}
+
+
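When several raw names collapse to the same slug, the lowest priority tuple wins, so clean, short names beat suffixed instance names. A sketch of the tie-break with hypothetical candidates:

    import re

    def priority(source_name: str, part_key: str) -> tuple:
        # Lower sorts first: un-suffixed, shorter names make better canonical keys.
        penalty = 0
        if re.search(r"_AF\d+(_ASM)?_?$", source_name, flags=re.IGNORECASE):
            penalty += 100  # exporter instance suffix on the raw name
        if re.search(r"_af\d+$", part_key, flags=re.IGNORECASE):
            penalty += 100  # suffix leaked into the generated key
        if re.search(r"_\d+_\d+$", source_name):
            penalty += 25   # trailing occurrence counters
        return (penalty, len(source_name), len(part_key))

    # Both raw names normalize to the slug "bracket"; the clean one wins.
    candidates = [("Bracket_AF2", "bracket_af2"), ("Bracket", "bracket")]
    best = min(candidates, key=lambda c: priority(*c))
    print(best)  # ('Bracket', 'bracket')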
+def _score_semantic_sibling(
+    mesh_name: str,
+    semantic_name: str,
+    part_key: str,
+    *,
+    transforms_match: bool,
+) -> int:
+    import re as _re
+
+    transform_bonus = 10_000 if transforms_match else 0
+    canonical_bonus = 1_000 if not _re.search(r"_af\d+$", part_key, flags=_re.IGNORECASE) else 0
+    exact_bonus = 100 if mesh_name.rstrip("0123456789_") == semantic_name else 0
+    specificity_bonus = len(semantic_name)
+    return transform_bonus + canonical_bonus + exact_bonus + specificity_bonus
+
+
+def _find_semantic_sibling_part_key(
+    node_index: int,
+    nodes: list[dict],
+    parent_by_child: dict[int, int],
+    canonical_lookup: dict[str, str],
+) -> str | None:
+    node = nodes[node_index]
+    if node.get("mesh") is None:
+        return None
+
+    parent_index = parent_by_child.get(node_index)
+    if parent_index is None:
+        return None
+
+    mesh_name = _normalize_occ_node_name(node.get("name", ""))
+    if not mesh_name:
+        return None
+
+    best_match: tuple[int, str] | None = None
+    parent = nodes[parent_index]
+    for sibling_index in parent.get("children") or []:
+        if sibling_index == node_index:
+            continue
+        sibling = nodes[sibling_index]
+        if sibling.get("mesh") is not None:
+            continue
+
+        semantic_name = _normalize_occ_node_name(sibling.get("name", ""))
+        if not _is_instance_variant_name(mesh_name, semantic_name):
+            continue
+
+        sibling_part_key = canonical_lookup.get(_slugify_occ_name(semantic_name))
+        if not sibling_part_key:
+            continue
+
+        score = _score_semantic_sibling(
+            mesh_name,
+            semantic_name,
+            sibling_part_key,
+            transforms_match=_have_matching_local_transforms(node, sibling),
+        )
+        candidate = (score, sibling_part_key)
+        if best_match is None or candidate[0] > best_match[0]:
+            best_match = candidate
+
+    return best_match[1] if best_match else None
+
+
+def _inject_glb_extras(
+    glb_path: Path,
+    extras: dict,
+    part_key_map: dict | None = None,
+    part_key_occurrences: dict[str, list[str]] | None = None,
+) -> None:
+    """Patch a GLB binary to add/update scenes[0].extras JSON field."""
     import struct as _struct
 
     data = glb_path.read_bytes()
     # GLB header: magic(4) + version(4) + total_length(4) = 12 bytes
     # JSON chunk: chunk_length(4) + chunk_type(4) + chunk_data(chunk_length bytes)
     json_len = _struct.unpack_from("<I", data, 12)[0]
     json_type = _struct.unpack_from("<I", data, 16)[0]
-    if json_type != 0x4E4F534A:  # "JSON"
-        print("WARNING: _inject_glb_extras: unexpected chunk type, skipping extras injection",
-              file=sys.stderr)
+    if json_type != 0x4E4F534A:
+        print(
+            "WARNING: _inject_glb_extras: unexpected chunk type, skipping extras injection",
+            file=sys.stderr,
+        )
         return
 
     j = json.loads(data[20: 20 + json_len])
@@ -530,47 +698,94 @@ def _inject_glb_extras(glb_path: Path, extras: dict, part_key_map: dict | None =
     else:
         j.setdefault("extras", {}).update(extras)
 
-    # Stamp per-node extras.partKey so Three.js maps it to mesh.userData.partKey.
-    # part_key_map keys are raw OCC names with _AF\d+ stripped but not slugified
-    # (e.g. "GE360-HF_000_P_ASM_1" → "ge360_hf_000_p_asm_1").
-    # GLB node names are raw OCC names (may or may not have _AF\d+ suffix).
-    # Normalize both sides to slugified form for the lookup.
     if part_key_map:
-        _norm_re = _re.compile(r'_AF\d+$', _re.IGNORECASE)
-
-        def _slugify(s: str) -> str:
-            return _re.sub(r'[^a-z0-9]+', '_', _norm_re.sub('', s).lower()).strip('_')
-
-        # Build a slug→partKey lookup from the part_key_map
-        # part_key_map: {raw_name_no_af_suffix: part_key_slug}
-        slug_to_part_key: dict = {}
-        for raw_key, part_key in part_key_map.items():
-            slug_to_part_key[_slugify(raw_key)] = part_key
+        exact_lookup = _build_exact_part_key_lookup(part_key_map)
+        canonical_lookup = _build_canonical_part_key_lookup(part_key_map)
+        occurrence_lookup: dict[str, list[str]] = {}
+        if part_key_occurrences:
+            for raw_name, part_keys in part_key_occurrences.items():
+                normalized = _normalize_occ_node_name(raw_name)
+                if not normalized:
+                    continue
+                cleaned_keys = [part_key for part_key in part_keys if part_key]
+                if cleaned_keys:
+                    occurrence_lookup[normalized] = cleaned_keys
+
+        occurrence_indices: dict[str, int] = {}
+        parent_by_child: dict[int, int] = {}
+        nodes = j.get("nodes", [])
+        for parent_index, parent in enumerate(nodes):
+            for child_index in parent.get("children") or []:
+                if isinstance(child_index, int):
+                    parent_by_child[child_index] = parent_index
 
         n_stamped = 0
-        for node in j.get("nodes", []):
-            raw = node.get("name", "")
-            if not raw:
+        for node_index, node in enumerate(nodes):
+            raw_name = node.get("name", "")
+            if not raw_name:
                 continue
-            slug = _slugify(raw)
-            part_key = slug_to_part_key.get(slug)
+
+            normalized_raw = _normalize_occ_node_name(raw_name)
+            canonical_part_key = canonical_lookup.get(_slugify_occ_name(normalized_raw))
+            exact_part_key = exact_lookup.get(_slugify_occ_name(raw_name))
+
+            if node.get("mesh") is not None:
+                occurrence_part_key = None
+                occurrence_keys = occurrence_lookup.get(normalized_raw)
+                if occurrence_keys:
+                    occurrence_index = occurrence_indices.get(normalized_raw, 0)
+                    if occurrence_index < len(occurrence_keys):
+                        occurrence_part_key = occurrence_keys[occurrence_index]
+                        occurrence_indices[normalized_raw] = occurrence_index + 1
+
+                semantic_sibling_part_key = _find_semantic_sibling_part_key(
+                    node_index,
+                    nodes,
+                    parent_by_child,
+                    canonical_lookup,
+                )
+                part_key = (
+                    occurrence_part_key
+                    or exact_part_key
+                    or canonical_part_key
+                    or semantic_sibling_part_key
+                )
+            else:
+                part_key = canonical_part_key or exact_part_key
 
             if part_key:
                 node.setdefault("extras", {})["partKey"] = part_key
                 n_stamped += 1
 
         print(f"Stamped partKey extras on {n_stamped} GLB nodes")
 
     new_json = json.dumps(j, separators=(",", ":"))
     # Pad to 4-byte boundary with spaces (required by GLB spec)
     pad = (4 - len(new_json) % 4) % 4
     new_json_bytes = new_json.encode() + b" " * pad
 
-    rest = data[20 + json_len:]  # BIN chunk and anything after
+    rest = data[20 + json_len:]
     new_chunk = _struct.pack("<II", len(new_json_bytes), 0x4E4F534A) + new_json_bytes
     new_total = 12 + len(new_chunk) + len(rest)
     new_header = _struct.pack("<III", 0x46546C67, 2, new_total)
     glb_path.write_bytes(new_header + new_chunk + rest)
 
 
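Since only the JSON chunk and the header's total length are rewritten, the patched file can be verified with the same stdlib tools. A minimal read-back sketch (the path is a placeholder):

    import json
    import struct
    from pathlib import Path

    def read_glb_json(path: Path) -> dict:
        data = path.read_bytes()
        magic, version, total = struct.unpack_from("<III", data, 0)
        assert magic == 0x46546C67 and total == len(data)  # b"glTF", declared size
        json_len, json_type = struct.unpack_from("<II", data, 12)
        assert json_type == 0x4E4F534A  # b"JSON"
        return json.loads(data[20:20 + json_len])

    doc = read_glb_json(Path("model.glb"))  # placeholder path
    print(doc.get("scenes", [{}])[0].get("extras", {}).keys())
    print([n.get("extras", {}).get("partKey") for n in doc.get("nodes", [])][:5])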
+def _prepare_atomic_export_path(output_path: Path) -> Path:
+    """Return a non-existent temp file path adjacent to the final GLB target."""
+    output_path.parent.mkdir(parents=True, exist_ok=True)
+    temp_path = output_path.with_name(
+        f".{output_path.stem}.{os.getpid()}.tmp{output_path.suffix}"
+    )
+    temp_path.unlink(missing_ok=True)
+    return temp_path
+
+
+def _finalize_atomic_export(temp_path: Path, output_path: Path) -> None:
+    """Publish the validated temp GLB atomically to the canonical output path."""
+    output_path.parent.mkdir(parents=True, exist_ok=True)
+    temp_path.replace(output_path)
+
+
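Together these helpers implement the usual write-temp-then-rename publish: Path.replace is an atomic rename on POSIX when both paths are on the same filesystem, so readers see either the old or the new GLB, never a partial file. A condensed sketch of the pattern (paths are placeholders):

    import os
    from pathlib import Path

    out = Path("exports/model.glb")      # placeholder target
    out.parent.mkdir(parents=True, exist_ok=True)
    tmp = out.with_name(f".{out.stem}.{os.getpid()}.tmp{out.suffix}")

    tmp.write_bytes(b"...glb bytes...")  # stand-in for the writer + extras pass
    if tmp.stat().st_size == 0:          # validate before publishing
        tmp.unlink(missing_ok=True)
    else:
        tmp.replace(out)                 # atomic rename: old or new, never partial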
 def main() -> None:
     args = parse_args()
     color_map: dict = json.loads(args.color_map)
@@ -613,7 +828,7 @@ def main() -> None:
     shape_tool.GetFreeShapes(free_labels)
 
     # Collect partKeyMap before tessellation (XCAF names are stable at this point)
-    part_key_map = _collect_part_key_map(shape_tool, free_labels)
+    part_key_map, part_key_occurrences = _collect_part_key_mappings(shape_tool, free_labels)
     print(f"partKeyMap: {len(part_key_map)} unique part names collected")
 
     print(f"Found {free_labels.Length()} root shape(s), tessellating "
@@ -724,7 +939,10 @@ def main() -> None:
     # --- Export GLB via RWGltf_CafWriter (in mm, Z-up → Y-up handled by writer) ---
     from OCP.RWGltf import RWGltf_CafWriter
 
-    writer = RWGltf_CafWriter(TCollection_AsciiString(args.output_path), True)  # True = binary GLB
+    out = Path(args.output_path)
+    temp_out = _prepare_atomic_export_path(out)
+
+    writer = RWGltf_CafWriter(TCollection_AsciiString(str(temp_out)), True)  # True = binary GLB
     # MergeFaces=True merges per-face triangulations into a single buffer per shape.
     # Without this, RWGltf_CafWriter fails to find per-face Poly_Triangulation data
     # from the XCAF component hierarchy and falls back to degenerate meshes (~2 verts/face).
@@ -739,13 +957,13 @@ def main() -> None:
         # Older API without metadata dict
         ok = writer.Perform(doc, Message_ProgressRange())
 
-    out = Path(args.output_path)
-    if not ok or not out.exists() or out.stat().st_size == 0:
-        print(f"ERROR: RWGltf_CafWriter.Perform returned ok={ok}, file exists={out.exists()}",
+    if not ok or not temp_out.exists() or temp_out.stat().st_size == 0:
+        temp_out.unlink(missing_ok=True)
+        print(f"ERROR: RWGltf_CafWriter.Perform returned ok={ok}, file exists={temp_out.exists()}",
               file=sys.stderr)
         sys.exit(1)
 
-    print(f"GLB exported: {out.name} ({out.stat().st_size // 1024} KB)")
+    print(f"GLB exported: {temp_out.name} ({temp_out.stat().st_size // 1024} KB)")
 
     # --- Inject sharp edge pairs and partKeyMap into GLB extras ---
     # Blender 5.0 reads scenes[0].extras as scene custom properties on import,
@@ -759,7 +977,12 @@ def main() -> None:
         if part_key_map:
             extras_payload["partKeyMap"] = part_key_map
         if extras_payload:
-            _inject_glb_extras(out, extras_payload, part_key_map=part_key_map if part_key_map else None)
+            _inject_glb_extras(
+                temp_out,
+                extras_payload,
+                part_key_map=part_key_map if part_key_map else None,
+                part_key_occurrences=part_key_occurrences if part_key_occurrences else None,
+            )
             if sharp_pairs:
                 print(f"Injected {len(sharp_pairs)} sharp edge segment pairs into GLB extras")
             if part_key_map:
@@ -767,16 +990,20 @@ def main() -> None:
     except Exception as _exc:
         print(f"WARNING: GLB extras injection failed (non-fatal): {_exc}", file=sys.stderr)
 
+    _finalize_atomic_export(temp_out, out)
+    print(f"GLB finalized: {out.name} ({out.stat().st_size // 1024} KB)")
+
     # NOTE: RWGltf_CafWriter reads unit metadata from the XDE document (set by
     # STEPCAFControl_Reader from the STEP file's SI_UNIT declarations) and converts
     # mm → m automatically. It also handles Z-up → Y-up coordinate transform.
     # No additional scaling or BRepBuilderAPI_Transform is needed.
 
 
-try:
-    main()
-except SystemExit:
-    raise
-except Exception:
-    traceback.print_exc()
-    sys.exit(1)
+if __name__ == "__main__":
+    try:
+        main()
+    except SystemExit:
+        raise
+    except Exception:
+        traceback.print_exc()
+        sys.exit(1)