refactor(insights): share workbook export and ai defaults

2026-03-31 22:53:53 +02:00
parent 05eeaab3f7
commit 160ba99b5c
8 changed files with 272 additions and 61 deletions
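The diff below replaces the hard-coded "gpt-4o-mini" model literal, in both the insights procedure and its tests, with DEFAULT_OPENAI_MODEL imported from @capakraken/shared, so the packages fall back to one shared default. The constant's definition is not part of this excerpt; as a minimal, hypothetical sketch of what the updated tests assume (the real export may differ):

```ts
// Hypothetical sketch only — the actual definition lives in @capakraken/shared
// and is not shown in this diff. The updated test expectations suggest it
// resolves to the same "gpt-4o-mini" string that was previously hard-coded.
export const DEFAULT_OPENAI_MODEL = "gpt-4o-mini";
```

With a shared default in place, generateProjectNarrative can fall back via configuredSettings.azureOpenAiDeployment ?? DEFAULT_OPENAI_MODEL instead of passing a possibly-null deployment name to the OpenAI client.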
@@ -1,3 +1,4 @@
import { DEFAULT_OPENAI_MODEL } from "@capakraken/shared";
import { TRPCError } from "@trpc/server";
import { beforeEach, describe, expect, it, vi } from "vitest";
@@ -14,7 +15,7 @@ vi.mock("../ai-client.js", async (importOriginal) => {
},
},
})),
-isAiConfigured: vi.fn().mockReturnValue(true),
+isAiConfigured: vi.fn((settings: unknown) => settings != null),
loggedAiCall: vi.fn(async (_provider, _model, _promptLength, fn) => fn()),
parseAiError: vi.fn((error: unknown) => error instanceof Error ? error.message : String(error)),
};
@@ -99,7 +100,7 @@ describe("insights procedure support", () => {
findUnique: vi.fn().mockResolvedValue({
id: "singleton",
aiProvider: "openai",
-azureOpenAiDeployment: "gpt-4o-mini",
+azureOpenAiDeployment: DEFAULT_OPENAI_MODEL,
aiMaxCompletionTokens: 220,
aiTemperature: 0.4,
}),
@@ -109,15 +110,15 @@ describe("insights procedure support", () => {
);
expect(isAiConfigured).toHaveBeenCalledWith(
-expect.objectContaining({ azureOpenAiDeployment: "gpt-4o-mini" }),
+expect.objectContaining({ azureOpenAiDeployment: DEFAULT_OPENAI_MODEL }),
);
expect(createAiClient).toHaveBeenCalledWith(
-expect.objectContaining({ azureOpenAiDeployment: "gpt-4o-mini" }),
+expect.objectContaining({ azureOpenAiDeployment: DEFAULT_OPENAI_MODEL }),
);
expect(loggedAiCall).toHaveBeenCalledOnce();
expect(aiCompletionCreate).toHaveBeenCalledWith(
expect.objectContaining({
-model: "gpt-4o-mini",
+model: DEFAULT_OPENAI_MODEL,
max_completion_tokens: 220,
temperature: 0.4,
messages: expect.arrayContaining([
@@ -148,6 +149,83 @@ describe("insights procedure support", () => {
}
});
it("fails when AI settings are missing", async () => {
await expect(
generateProjectNarrative(
createContext({
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Apollo",
shortCode: "APO",
status: "ACTIVE",
startDate: new Date("2026-03-03T00:00:00.000Z"),
endDate: new Date("2026-04-30T00:00:00.000Z"),
budgetCents: 200_000_00,
dynamicFields: null,
demandRequirements: [],
assignments: [],
}),
update: vi.fn(),
},
systemSettings: {
findUnique: vi.fn().mockResolvedValue(null),
},
}),
{ projectId: "project_1" },
),
).rejects.toEqual(
expect.objectContaining<Partial<TRPCError>>({
code: "PRECONDITION_FAILED",
message: "AI is not configured. Please set credentials in Admin → Settings.",
}),
);
expect(createAiClient).not.toHaveBeenCalled();
expect(loggedAiCall).not.toHaveBeenCalled();
});
it("fails when AI configuration is incomplete", async () => {
vi.mocked(isAiConfigured).mockReturnValue(false);
await expect(
generateProjectNarrative(
createContext({
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Apollo",
shortCode: "APO",
status: "ACTIVE",
startDate: new Date("2026-03-03T00:00:00.000Z"),
endDate: new Date("2026-04-30T00:00:00.000Z"),
budgetCents: 200_000_00,
dynamicFields: null,
demandRequirements: [],
assignments: [],
}),
update: vi.fn(),
},
systemSettings: {
findUnique: vi.fn().mockResolvedValue({
id: "singleton",
aiProvider: "openai",
azureOpenAiDeployment: null,
}),
},
}),
{ projectId: "project_1" },
),
).rejects.toEqual(
expect.objectContaining<Partial<TRPCError>>({
code: "PRECONDITION_FAILED",
message: "AI is not configured. Please set credentials in Admin → Settings.",
}),
);
expect(createAiClient).not.toHaveBeenCalled();
expect(aiCompletionCreate).not.toHaveBeenCalled();
});
it("returns the cached narrative for a project", async () => {
const result = await getCachedNarrative(
createContext({
@@ -1,4 +1,9 @@
import { DEFAULT_OPENAI_MODEL } from "@capakraken/shared";
import { TRPCError } from "@trpc/server";
import type {
ChatCompletion,
ChatCompletionCreateParamsNonStreaming,
} from "openai/resources/chat/completions/completions";
import { z } from "zod";
import { createAiClient, isAiConfigured, loggedAiCall, parseAiError } from "../ai-client.js";
import type { TRPCContext } from "../trpc.js";
@@ -73,13 +78,15 @@ export async function generateProjectNarrative(
throw new TRPCError({ code: "NOT_FOUND", message: "Project not found" });
}
-if (!isAiConfigured(settings)) {
+if (!settings || !isAiConfigured(settings)) {
throw new TRPCError({
code: "PRECONDITION_FAILED",
message: "AI is not configured. Please set credentials in Admin → Settings.",
});
}
const configuredSettings = settings;
const now = new Date();
const totalDays = countBusinessDays(project.startDate, project.endDate);
const elapsedDays = countBusinessDays(
@@ -125,27 +132,30 @@ export async function generateProjectNarrative(
${dataContext}`;
-const client = createAiClient(settings);
-const model = settings.azureOpenAiDeployment;
-const maxTokens = settings.aiMaxCompletionTokens ?? 300;
-const temperature = settings.aiTemperature ?? 1;
-const provider = settings.aiProvider ?? "openai";
+const client = createAiClient(configuredSettings);
+const model = configuredSettings.azureOpenAiDeployment ?? DEFAULT_OPENAI_MODEL;
+const maxTokens = configuredSettings.aiMaxCompletionTokens ?? 300;
+const temperature = configuredSettings.aiTemperature ?? 1;
+const provider = configuredSettings.aiProvider ?? "openai";
let narrative = "";
try {
-const completion = await loggedAiCall(provider, model, prompt.length, () =>
-client.chat.completions.create({
-messages: [
-{
-role: "system",
-content: "You are a project management analyst providing brief executive summaries. Be factual and action-oriented.",
-},
-{ role: "user", content: prompt },
-],
-max_completion_tokens: maxTokens,
-model,
-...(temperature !== 1 ? { temperature } : {}),
-}),
+const completionRequest: ChatCompletionCreateParamsNonStreaming = {
+messages: [
+{
+role: "system",
+content: "You are a project management analyst providing brief executive summaries. Be factual and action-oriented.",
+},
+{ role: "user", content: prompt },
+],
+max_completion_tokens: maxTokens,
+model,
+stream: false,
+...(temperature !== 1 ? { temperature } : {}),
+};
+const completion = await loggedAiCall<ChatCompletion>(provider, model, prompt.length, () =>
+client.chat.completions.create(completionRequest),
);
narrative = completion.choices[0]?.message?.content?.trim() ?? "";
} catch (error) {