refactor(insights): share workbook export and ai defaults

This commit is contained in:
2026-03-31 22:53:53 +02:00
parent 05eeaab3f7
commit 160ba99b5c
8 changed files with 272 additions and 61 deletions
@@ -1,3 +1,4 @@
import { DEFAULT_OPENAI_MODEL } from "@capakraken/shared";
import { TRPCError } from "@trpc/server";
import { beforeEach, describe, expect, it, vi } from "vitest";
@@ -14,7 +15,7 @@ vi.mock("../ai-client.js", async (importOriginal) => {
},
},
})),
isAiConfigured: vi.fn().mockReturnValue(true),
isAiConfigured: vi.fn((settings: unknown) => settings != null),
loggedAiCall: vi.fn(async (_provider, _model, _promptLength, fn) => fn()),
parseAiError: vi.fn((error: unknown) => error instanceof Error ? error.message : String(error)),
};
@@ -99,7 +100,7 @@ describe("insights procedure support", () => {
findUnique: vi.fn().mockResolvedValue({
id: "singleton",
aiProvider: "openai",
azureOpenAiDeployment: "gpt-4o-mini",
azureOpenAiDeployment: DEFAULT_OPENAI_MODEL,
aiMaxCompletionTokens: 220,
aiTemperature: 0.4,
}),
@@ -109,15 +110,15 @@ describe("insights procedure support", () => {
);
expect(isAiConfigured).toHaveBeenCalledWith(
expect.objectContaining({ azureOpenAiDeployment: "gpt-4o-mini" }),
expect.objectContaining({ azureOpenAiDeployment: DEFAULT_OPENAI_MODEL }),
);
expect(createAiClient).toHaveBeenCalledWith(
expect.objectContaining({ azureOpenAiDeployment: "gpt-4o-mini" }),
expect.objectContaining({ azureOpenAiDeployment: DEFAULT_OPENAI_MODEL }),
);
expect(loggedAiCall).toHaveBeenCalledOnce();
expect(aiCompletionCreate).toHaveBeenCalledWith(
expect.objectContaining({
model: "gpt-4o-mini",
model: DEFAULT_OPENAI_MODEL,
max_completion_tokens: 220,
temperature: 0.4,
messages: expect.arrayContaining([
@@ -148,6 +149,83 @@ describe("insights procedure support", () => {
}
});
// Verifies that generateProjectNarrative rejects with a PRECONDITION_FAILED
// TRPCError when the systemSettings singleton row does not exist at all
// (findUnique resolves to null), and that the failure short-circuits before
// any AI client is constructed or any AI call is logged.
it("fails when AI settings are missing", async () => {
await expect(
generateProjectNarrative(
// Minimal project fixture: enough fields for the narrative prompt builder,
// with no demand requirements or assignments.
createContext({
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Apollo",
shortCode: "APO",
status: "ACTIVE",
startDate: new Date("2026-03-03T00:00:00.000Z"),
endDate: new Date("2026-04-30T00:00:00.000Z"),
budgetCents: 200_000_00,
dynamicFields: null,
demandRequirements: [],
assignments: [],
}),
update: vi.fn(),
},
systemSettings: {
// No settings row in the database — the precondition under test.
findUnique: vi.fn().mockResolvedValue(null),
},
}),
{ projectId: "project_1" },
),
).rejects.toEqual(
expect.objectContaining<Partial<TRPCError>>({
code: "PRECONDITION_FAILED",
message: "AI is not configured. Please set credentials in Admin → Settings.",
}),
);
// The guard must fire before any AI machinery is touched.
expect(createAiClient).not.toHaveBeenCalled();
expect(loggedAiCall).not.toHaveBeenCalled();
});
// Verifies that generateProjectNarrative rejects with PRECONDITION_FAILED when
// a systemSettings row exists but isAiConfigured reports it incomplete (here:
// azureOpenAiDeployment is null), and that no client is created and no
// completion request is issued.
it("fails when AI configuration is incomplete", async () => {
// Force the configuration check to fail regardless of the settings shape.
// NOTE(review): mockReturnValue overrides the default implementation for the
// rest of the test run unless mocks are reset in beforeEach — TODO confirm
// the suite restores mocks between tests.
vi.mocked(isAiConfigured).mockReturnValue(false);
await expect(
generateProjectNarrative(
createContext({
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Apollo",
shortCode: "APO",
status: "ACTIVE",
startDate: new Date("2026-03-03T00:00:00.000Z"),
endDate: new Date("2026-04-30T00:00:00.000Z"),
budgetCents: 200_000_00,
dynamicFields: null,
demandRequirements: [],
assignments: [],
}),
update: vi.fn(),
},
systemSettings: {
// Settings row exists but the deployment is missing — the
// "incomplete configuration" case under test.
findUnique: vi.fn().mockResolvedValue({
id: "singleton",
aiProvider: "openai",
azureOpenAiDeployment: null,
}),
},
}),
{ projectId: "project_1" },
),
).rejects.toEqual(
expect.objectContaining<Partial<TRPCError>>({
code: "PRECONDITION_FAILED",
message: "AI is not configured. Please set credentials in Admin → Settings.",
}),
);
// The guard must fire before any AI machinery is touched.
expect(createAiClient).not.toHaveBeenCalled();
expect(aiCompletionCreate).not.toHaveBeenCalled();
});
it("returns the cached narrative for a project", async () => {
const result = await getCachedNarrative(
createContext({