refactor(runtime): prefer env-backed secrets at runtime

This commit is contained in:
2026-03-30 19:17:32 +02:00
parent 4f5d410b94
commit fed7aa5b61
13 changed files with 532 additions and 71 deletions
+39 -20
View File
@@ -1,5 +1,6 @@
import OpenAI, { AzureOpenAI } from "openai";
import { logger } from "./lib/logger.js";
import { resolveSystemSettingsRuntime } from "./lib/system-settings-runtime.js";
type AiSettings = {
aiProvider?: string | null;
@@ -14,51 +15,70 @@ type AiSettings = {
azureDalleApiKey?: string | null;
};
/**
 * Scrubs secrets and endpoints out of diagnostic text before it is logged
 * or surfaced to users: URLs, OpenAI `sk-` keys, Google `AIza` keys,
 * `api_key=`/`api-key:` style assignments, Bearer tokens, and key-bearing
 * query-string parameters.
 */
function redactDiagnosticText(value: string): string {
  // Applied in order; each pair is (pattern to find, replacement to substitute).
  const redactions: ReadonlyArray<[RegExp, string]> = [
    [/https?:\/\/[^\s)\]}]+/gi, "<redacted-url>"],
    [/\bsk-[A-Za-z0-9_-]+\b/g, "<redacted-secret>"],
    [/\bAIza[0-9A-Za-z_-]+\b/g, "<redacted-secret>"],
    [/(api[-_ ]?key\s*[=:]\s*)([^,\s]+)/gi, "$1<redacted-secret>"],
    [/(Bearer\s+)([^\s]+)/gi, "$1<redacted-secret>"],
    [/([?&](?:api-key|key)=)([^&\s]+)/gi, "$1<redacted-secret>"],
  ];
  return redactions.reduce(
    (text, [pattern, replacement]) => text.replace(pattern, replacement),
    value,
  );
}
/**
 * Produces a log-safe error message: extracts the message from an Error (or
 * stringifies anything else), drops a leading "Error:" prefix, redacts
 * secrets/URLs, and caps the result at 300 characters.
 */
export function sanitizeDiagnosticError(err: unknown): string {
  const message = err instanceof Error ? err.message : String(err);
  const withoutPrefix = message.replace(/^Error:\s*/, "");
  return redactDiagnosticText(withoutPrefix).slice(0, 300);
}
/**
 * Returns true if the settings have enough information to make an API call.
 *
 * Secrets are resolved through {@link resolveSystemSettingsRuntime} first so
 * env-backed values take precedence over stored settings. A chat API key and
 * deployment are always required; the Azure provider additionally requires an
 * endpoint.
 */
export function isAiConfigured(settings: AiSettings | null | undefined): boolean {
  // Guard before resolving: the signature admits null/undefined settings.
  if (!settings) return false;
  const runtimeSettings = resolveSystemSettingsRuntime(settings);
  if (!runtimeSettings.azureOpenAiApiKey || !runtimeSettings.azureOpenAiDeployment) return false;
  if (runtimeSettings.aiProvider === "azure" && !runtimeSettings.azureOpenAiEndpoint) return false;
  return true;
}
/** Instantiates the right OpenAI client based on the stored provider setting. */
export function createAiClient(settings: AiSettings): OpenAI {
if (settings.aiProvider === "azure") {
const runtimeSettings = resolveSystemSettingsRuntime(settings);
if (runtimeSettings.aiProvider === "azure") {
return new AzureOpenAI({
endpoint: settings.azureOpenAiEndpoint!,
apiKey: settings.azureOpenAiApiKey!,
apiVersion: settings.azureApiVersion ?? "2025-01-01-preview",
deployment: settings.azureOpenAiDeployment!,
endpoint: runtimeSettings.azureOpenAiEndpoint!,
apiKey: runtimeSettings.azureOpenAiApiKey!,
apiVersion: runtimeSettings.azureApiVersion ?? "2025-01-01-preview",
deployment: runtimeSettings.azureOpenAiDeployment!,
});
}
// Default: regular OpenAI (sk-... key)
return new OpenAI({ apiKey: settings.azureOpenAiApiKey! });
return new OpenAI({ apiKey: runtimeSettings.azureOpenAiApiKey! });
}
/** Returns true if DALL-E image generation is configured. */
export function isDalleConfigured(settings: AiSettings | null | undefined): boolean {
if (!settings) return false;
const runtimeSettings = resolveSystemSettingsRuntime(settings);
// DALL-E needs its own deployment (or a non-Azure key with model name)
if (settings.aiProvider === "azure") {
return !!(settings.azureDalleDeployment && (settings.azureDalleEndpoint || settings.azureOpenAiEndpoint) && (settings.azureDalleApiKey || settings.azureOpenAiApiKey));
if (runtimeSettings.aiProvider === "azure") {
return !!(runtimeSettings.azureDalleDeployment && (runtimeSettings.azureDalleEndpoint || runtimeSettings.azureOpenAiEndpoint) && (runtimeSettings.azureDalleApiKey || runtimeSettings.azureOpenAiApiKey));
}
// For direct OpenAI, the chat API key works for DALL-E too
return !!settings.azureOpenAiApiKey;
return !!runtimeSettings.azureOpenAiApiKey;
}
/** Creates an OpenAI client configured for DALL-E image generation. */
export function createDalleClient(settings: AiSettings): OpenAI {
if (settings.aiProvider === "azure") {
const endpoint = settings.azureDalleEndpoint || settings.azureOpenAiEndpoint!;
const apiKey = settings.azureDalleApiKey || settings.azureOpenAiApiKey!;
const runtimeSettings = resolveSystemSettingsRuntime(settings);
if (runtimeSettings.aiProvider === "azure") {
const endpoint = runtimeSettings.azureDalleEndpoint || runtimeSettings.azureOpenAiEndpoint!;
const apiKey = runtimeSettings.azureDalleApiKey || runtimeSettings.azureOpenAiApiKey!;
return new AzureOpenAI({
endpoint,
apiKey,
apiVersion: settings.azureApiVersion ?? "2025-01-01-preview",
deployment: settings.azureDalleDeployment!,
apiVersion: runtimeSettings.azureApiVersion ?? "2025-01-01-preview",
deployment: runtimeSettings.azureDalleDeployment!,
});
}
return new OpenAI({ apiKey: settings.azureOpenAiApiKey! });
return new OpenAI({ apiKey: runtimeSettings.azureOpenAiApiKey! });
}
/**
@@ -79,7 +99,7 @@ export async function loggedAiCall<T>(
return result;
} catch (err) {
const responseTimeMs = Math.round(performance.now() - start);
const errorMessage = err instanceof Error ? err.message : String(err);
const errorMessage = sanitizeDiagnosticError(err);
logger.warn({ provider, model, promptLength, responseTimeMs, errorMessage }, "External API call failed");
throw err;
}
@@ -114,6 +134,5 @@ export function parseAiError(err: unknown): string {
if (lower.includes("context_length_exceeded") || lower.includes("maximum context")) {
return "Request too large — the prompt exceeded the model's context limit.";
}
// Fall back to the raw message but strip noise
return msg.replace(/^Error: /, "").slice(0, 300);
return sanitizeDiagnosticError(msg);
}