feat(azure-ai+gpu-ui): per-tenant Azure AI config + GPU health panel

- Per-tenant Azure AI config stored in tenants.tenant_config JSONB
- GET/PUT /api/tenants/{id}/ai-config + POST .../test connection
- api_key never returned to frontend (has_api_key: bool pattern)
- azure_ai.py resolves creds from tenant config when ai_enabled=True
- ai_tasks.py loads tenant config and passes it to validate_thumbnail
- Admin GPU Status section: probe button + status badge + last-checked time
- Notifications: _BELL_CHANNELS filter (notification+alert only in bell)
- Tenants.tsx: per-row Azure AI Config modal with URL auto-parse helper
- Remove duplicate in-memory /gpu-probe endpoints (kept DB-backed /probe/gpu)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Commit: 22c29d5655 (parent: 34f89cc225)
Date: 2026-03-08 21:04:09 +01:00
11 changed files with 792 additions and 24 deletions
+42 -12
View File
@@ -24,10 +24,13 @@ Respond in JSON with exactly these fields:
}"""
def validate_thumbnail(order_item_id: str) -> dict:
def validate_thumbnail(order_item_id: str, tenant_config: dict | None = None) -> dict:
"""
Validate thumbnail orientation using Azure GPT-4o Vision.
Updates the order_item AI validation fields in DB.
If tenant_config is provided and tenant_config["ai_enabled"] is True,
the tenant's own Azure credentials are used instead of global settings.
"""
from app.config import settings
from sqlalchemy import create_engine
@@ -45,7 +48,7 @@ def validate_thumbnail(order_item_id: str) -> dict:
session.commit()
try:
result = _call_azure_vision(item.thumbnail_path, settings)
result = _call_azure_vision(item.thumbnail_path, settings, tenant_config)
item.ai_validation_status = AIValidationStatus.completed
item.ai_validation_result = result
except Exception as exc:
@@ -58,11 +61,38 @@ def validate_thumbnail(order_item_id: str) -> dict:
return result
def _call_azure_vision(thumbnail_path: str | None, settings) -> dict:
"""Call Azure OpenAI GPT-4o with a base64-encoded thumbnail."""
def _call_azure_vision(
thumbnail_path: str | None,
settings,
tenant_config: dict | None = None,
) -> dict:
"""Call Azure OpenAI GPT-4o with a base64-encoded thumbnail.
Credential resolution order:
1. tenant_config (if provided and ai_enabled=True)
2. Global settings (azure_openai_* env vars)
"""
import json
if not settings.azure_openai_api_key or not settings.azure_openai_endpoint:
# Resolve credentials from tenant config or global settings
if tenant_config and tenant_config.get("ai_enabled"):
api_key = tenant_config.get("ai_api_key") or settings.azure_openai_api_key
endpoint = tenant_config.get("ai_endpoint") or settings.azure_openai_endpoint
deployment = tenant_config.get("ai_deployment") or settings.azure_openai_deployment
api_version = tenant_config.get("ai_api_version") or settings.azure_openai_api_version
max_tokens = int(tenant_config.get("ai_max_tokens", 500))
temperature = float(tenant_config.get("ai_temperature", 0.1))
prompt = tenant_config.get("ai_validation_prompt") or VALIDATION_PROMPT
else:
api_key = settings.azure_openai_api_key
endpoint = settings.azure_openai_endpoint
deployment = settings.azure_openai_deployment
api_version = settings.azure_openai_api_version
max_tokens = 500
temperature = 0.1
prompt = VALIDATION_PROMPT
if not api_key or not endpoint:
raise ValueError("Azure OpenAI credentials not configured")
if not thumbnail_path or not Path(thumbnail_path).exists():
@@ -72,21 +102,21 @@ def _call_azure_vision(thumbnail_path: str | None, settings) -> dict:
from openai import AzureOpenAI
client = AzureOpenAI(
api_key=settings.azure_openai_api_key,
azure_endpoint=settings.azure_openai_endpoint,
api_version=settings.azure_openai_api_version,
api_key=api_key,
azure_endpoint=endpoint,
api_version=api_version,
)
with open(thumbnail_path, "rb") as f:
image_b64 = base64.b64encode(f.read()).decode("utf-8")
response = client.chat.completions.create(
model=settings.azure_openai_deployment,
model=deployment,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": VALIDATION_PROMPT},
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/png;base64,{image_b64}"},
@@ -94,8 +124,8 @@ def _call_azure_vision(thumbnail_path: str | None, settings) -> dict:
],
}
],
max_tokens=500,
temperature=0.1,
max_tokens=max_tokens,
temperature=temperature,
)
content = response.choices[0].message.content or ""