310 lines
8.8 KiB
TypeScript
310 lines
8.8 KiB
TypeScript
import { TRPCError } from "@trpc/server";
|
|
import { loggedAiCall, parseAiError } from "../ai-client.js";
|
|
import { createAuditEntry } from "../lib/audit.js";
|
|
import { checkAiOutput } from "../lib/content-filter.js";
|
|
import { logger } from "../lib/logger.js";
|
|
import {
|
|
AssistantApprovalStorageUnavailableError,
|
|
createPendingAssistantApproval,
|
|
toApprovalPayload,
|
|
} from "./assistant-approvals.js";
|
|
import {
|
|
ASSISTANT_CONFIRMATION_PREFIX,
|
|
parseToolArguments,
|
|
} from "./assistant-confirmation.js";
|
|
import {
|
|
buildAssistantChatResponse,
|
|
mergeInsights,
|
|
type AssistantChatResponse,
|
|
} from "./assistant-chat-response.js";
|
|
import { buildAssistantInsight, type AssistantInsight } from "./assistant-insights.js";
|
|
import {
|
|
executeTool,
|
|
MUTATION_TOOLS,
|
|
type ToolAction,
|
|
type ToolContext,
|
|
} from "./assistant-tools.js";
|
|
|
|
/** One tool invocation requested by the model (OpenAI-style `tool_calls` entry). */
type AssistantToolCall = {
  // Provider-assigned call id; echoed back as `tool_call_id` on the tool reply.
  id: string;
  function: {
    name: string;
    // Raw JSON-encoded argument string exactly as emitted by the model.
    arguments: string;
  };
};
|
|
|
|
/** Assistant message inside a completion choice: plain text and/or tool calls. */
type AssistantChoiceMessage = {
  content?: string | null;
  tool_calls?: AssistantToolCall[];
};
|
|
|
|
/** Minimal slice of a chat-completion response; only the first choice's message is read. */
type AssistantCompletionResponse = {
  choices?: Array<{
    message?: AssistantChoiceMessage;
  }>;
};
|
|
|
|
type AssistantChatClient = {
|
|
chat: {
|
|
completions: {
|
|
create(...args: any[]): Promise<unknown>;
|
|
};
|
|
};
|
|
};
|
|
|
|
export async function runAssistantToolLoop(input: {
|
|
db: ToolContext["db"];
|
|
dbUserId?: string | undefined;
|
|
client: AssistantChatClient;
|
|
provider: string;
|
|
model: string;
|
|
maxTokens: number;
|
|
temperature: number;
|
|
openaiMessages: Array<{ content?: unknown } & Record<string, unknown>>;
|
|
availableTools: unknown[];
|
|
toolCtx: ToolContext;
|
|
userId: string;
|
|
conversationId: string;
|
|
collectedActions: ToolAction[];
|
|
collectedInsights: AssistantInsight[];
|
|
maxToolIterations: number;
|
|
}): Promise<AssistantChatResponse> {
|
|
let collectedActions = input.collectedActions;
|
|
let collectedInsights = input.collectedInsights;
|
|
|
|
for (let i = 0; i < input.maxToolIterations; i++) {
|
|
const response = await requestAssistantCompletion(input);
|
|
const choice = response.choices?.[0];
|
|
const msg = choice?.message;
|
|
if (!msg) {
|
|
throw new TRPCError({ code: "INTERNAL_SERVER_ERROR", message: "No response from AI" });
|
|
}
|
|
|
|
if (msg.tool_calls && msg.tool_calls.length > 0) {
|
|
input.openaiMessages.push(msg);
|
|
|
|
const toolResult = await handleAssistantToolCalls({
|
|
db: input.db,
|
|
dbUserId: input.dbUserId,
|
|
openaiMessages: input.openaiMessages,
|
|
toolCalls: msg.tool_calls,
|
|
toolCtx: input.toolCtx,
|
|
userId: input.userId,
|
|
conversationId: input.conversationId,
|
|
collectedActions,
|
|
collectedInsights,
|
|
});
|
|
|
|
collectedActions = toolResult.collectedActions;
|
|
collectedInsights = toolResult.collectedInsights;
|
|
|
|
if (toolResult.response) {
|
|
return toolResult.response;
|
|
}
|
|
|
|
continue;
|
|
}
|
|
|
|
return buildFinalAssistantResponse({
|
|
db: input.db,
|
|
dbUserId: input.dbUserId,
|
|
content: msg.content,
|
|
collectedActions,
|
|
collectedInsights,
|
|
});
|
|
}
|
|
|
|
return buildAssistantChatResponse({
|
|
content: "I had to stop after too many tool calls. Please try a simpler question.",
|
|
insights: collectedInsights,
|
|
actions: collectedActions,
|
|
});
|
|
}
|
|
|
|
async function requestAssistantCompletion(input: {
|
|
client: AssistantChatClient;
|
|
provider: string;
|
|
model: string;
|
|
maxTokens: number;
|
|
temperature: number;
|
|
openaiMessages: Array<{ content?: unknown } & Record<string, unknown>>;
|
|
availableTools: unknown[];
|
|
}): Promise<AssistantCompletionResponse> {
|
|
const msgLen = input.openaiMessages.reduce(
|
|
(total, message) => total + (typeof message.content === "string" ? message.content.length : 0),
|
|
0,
|
|
);
|
|
|
|
try {
|
|
const response = await loggedAiCall(input.provider, input.model, msgLen, () =>
|
|
input.client.chat.completions.create({
|
|
model: input.model,
|
|
messages: input.openaiMessages,
|
|
tools: input.availableTools,
|
|
max_completion_tokens: input.maxTokens,
|
|
temperature: input.temperature,
|
|
}),
|
|
);
|
|
return response as AssistantCompletionResponse;
|
|
} catch (error) {
|
|
throw new TRPCError({
|
|
code: "INTERNAL_SERVER_ERROR",
|
|
message: `AI error: ${parseAiError(error)}`,
|
|
});
|
|
}
|
|
}
|
|
|
|
async function handleAssistantToolCalls(input: {
|
|
db: ToolContext["db"];
|
|
dbUserId?: string | undefined;
|
|
openaiMessages: Array<{ content?: unknown } & Record<string, unknown>>;
|
|
toolCalls: AssistantToolCall[];
|
|
toolCtx: ToolContext;
|
|
userId: string;
|
|
conversationId: string;
|
|
collectedActions: ToolAction[];
|
|
collectedInsights: AssistantInsight[];
|
|
}): Promise<{
|
|
response: AssistantChatResponse | null;
|
|
collectedActions: ToolAction[];
|
|
collectedInsights: AssistantInsight[];
|
|
}> {
|
|
let collectedActions = input.collectedActions;
|
|
let collectedInsights = input.collectedInsights;
|
|
|
|
for (const toolCall of input.toolCalls) {
|
|
if (MUTATION_TOOLS.has(toolCall.function.name)) {
|
|
try {
|
|
const approval = await createPendingAssistantApproval(
|
|
input.db,
|
|
input.userId,
|
|
input.conversationId,
|
|
toolCall.function.name,
|
|
toolCall.function.arguments,
|
|
);
|
|
|
|
void createAuditEntry({
|
|
db: input.db,
|
|
entityType: "AiToolExecution",
|
|
entityId: toolCall.id,
|
|
entityName: toolCall.function.name,
|
|
action: "CREATE",
|
|
source: "ai",
|
|
summary: `AI tool blocked pending confirmation: ${toolCall.function.name}`,
|
|
after: {
|
|
approvalId: approval.id,
|
|
params: parseToolArguments(toolCall.function.arguments),
|
|
executed: false,
|
|
},
|
|
...(input.dbUserId !== undefined ? { userId: input.dbUserId } : {}),
|
|
});
|
|
|
|
return {
|
|
response: buildAssistantChatResponse({
|
|
content: `${ASSISTANT_CONFIRMATION_PREFIX} ${approval.summary}. Bitte bestätigen.`,
|
|
approval: toApprovalPayload(approval, "pending"),
|
|
insights: collectedInsights,
|
|
actions: collectedActions,
|
|
}),
|
|
collectedActions,
|
|
collectedInsights,
|
|
};
|
|
} catch (error) {
|
|
if (!(error instanceof AssistantApprovalStorageUnavailableError)) {
|
|
throw error;
|
|
}
|
|
|
|
return {
|
|
response: buildAssistantChatResponse({
|
|
content: "Schreibende Assistant-Aktionen sind gerade nicht verfuegbar, weil der Bestaetigungsspeicher in der Datenbank fehlt. Bitte die CapaKraken-DB-Migration anwenden und dann erneut versuchen.",
|
|
insights: collectedInsights,
|
|
actions: collectedActions,
|
|
}),
|
|
collectedActions,
|
|
collectedInsights,
|
|
};
|
|
}
|
|
}
|
|
|
|
const result = await executeTool(
|
|
toolCall.function.name,
|
|
toolCall.function.arguments,
|
|
input.toolCtx,
|
|
);
|
|
|
|
const insight = buildAssistantInsight(toolCall.function.name, result.data);
|
|
if (insight) {
|
|
collectedInsights = mergeInsights(collectedInsights, insight);
|
|
}
|
|
|
|
if (result.action) {
|
|
collectedActions = [...collectedActions, result.action];
|
|
}
|
|
|
|
input.openaiMessages.push({
|
|
role: "tool",
|
|
tool_call_id: toolCall.id,
|
|
content: result.content,
|
|
});
|
|
|
|
let parsedArgs: Record<string, unknown> = {};
|
|
try {
|
|
parsedArgs = JSON.parse(toolCall.function.arguments) as Record<string, unknown>;
|
|
} catch {
|
|
parsedArgs = {};
|
|
}
|
|
|
|
void createAuditEntry({
|
|
db: input.db,
|
|
entityType: "AiToolExecution",
|
|
entityId: toolCall.id,
|
|
entityName: toolCall.function.name,
|
|
action: "CREATE",
|
|
source: "ai",
|
|
summary: `AI executed tool: ${toolCall.function.name}`,
|
|
after: { params: parsedArgs, executed: true },
|
|
...(input.dbUserId !== undefined ? { userId: input.dbUserId } : {}),
|
|
});
|
|
}
|
|
|
|
return {
|
|
response: null,
|
|
collectedActions,
|
|
collectedInsights,
|
|
};
|
|
}
|
|
|
|
function buildFinalAssistantResponse(input: {
|
|
db: ToolContext["db"];
|
|
dbUserId?: string | undefined;
|
|
content?: string | null | undefined;
|
|
collectedActions: ToolAction[];
|
|
collectedInsights: AssistantInsight[];
|
|
}): AssistantChatResponse {
|
|
let finalContent = input.content ?? "I couldn't generate a response.";
|
|
const contentCheck = checkAiOutput(finalContent);
|
|
if (!contentCheck.clean) {
|
|
logger.warn(
|
|
{ userId: input.dbUserId },
|
|
"AI output contained sensitive content — redacted before delivery",
|
|
);
|
|
finalContent = contentCheck.redacted;
|
|
void createAuditEntry({
|
|
db: input.db,
|
|
entityType: "SecurityAlert",
|
|
entityId: crypto.randomUUID(),
|
|
entityName: "AiOutputRedacted",
|
|
action: "CREATE",
|
|
source: "ai",
|
|
summary: "AI output contained potentially sensitive content and was redacted",
|
|
...(input.dbUserId !== undefined ? { userId: input.dbUserId } : {}),
|
|
});
|
|
}
|
|
|
|
return buildAssistantChatResponse({
|
|
content: finalContent,
|
|
insights: input.collectedInsights,
|
|
actions: input.collectedActions,
|
|
});
|
|
}
|