chore(repo): checkpoint current capakraken implementation state

This commit is contained in:
2026-03-29 12:47:12 +02:00
parent beae1a5d6e
commit 47e4d701ff
94 changed files with 4283 additions and 1710 deletions
-298
View File
@@ -1,298 +0,0 @@
# =============================================================================
# Claude Flow V3 - Codex Configuration
# =============================================================================
# Generated by: @claude-flow/codex
# Documentation: https://github.com/ruvnet/claude-flow
#
# This file configures the Codex CLI for Claude Flow integration.
# Place in .agents/config.toml (project) or .codex/config.toml (user).
# =============================================================================
# =============================================================================
# Core Settings
# =============================================================================
# Model selection - the AI model to use for code generation
# Options: gpt-5.3-codex, gpt-4o, claude-sonnet, claude-opus
model = "gpt-5.3-codex"
# Approval policy determines when human approval is required
# - untrusted: Always require approval
# - on-failure: Require approval only after failures
# - on-request: Require approval for significant changes
# - never: Auto-approve all actions (use with caution)
approval_policy = "on-request"
# Sandbox mode controls file system access
# - read-only: Can only read files, no modifications
# - workspace-write: Can write within workspace directory
# - danger-full-access: Full file system access (dangerous)
sandbox_mode = "workspace-write"
# Web search enables internet access for research
# - disabled: No web access
# - cached: Use cached results when available
# - live: Always fetch fresh results
web_search = "cached"
# =============================================================================
# Project Documentation
# =============================================================================
# Maximum bytes to read from AGENTS.md files
project_doc_max_bytes = 65536
# Fallback filenames if AGENTS.md not found
project_doc_fallback_filenames = [
"AGENTS.md",
"TEAM_GUIDE.md",
".agents.md"
]
# =============================================================================
# Features
# =============================================================================
[features]
# Enable child AGENTS.md guidance
child_agents_md = true
# Cache shell environment for faster repeated commands
shell_snapshot = true
# Smart approvals based on request context
request_rule = true
# Enable remote compaction for large histories
remote_compaction = true
# =============================================================================
# MCP Servers
# =============================================================================
[mcp_servers.claude-flow]
command = "npx"
args = ["-y", "@claude-flow/cli@latest"]
enabled = true
tool_timeout_sec = 120
# =============================================================================
# Skills Configuration
# =============================================================================
[[skills.config]]
path = ".agents/skills/swarm-orchestration"
enabled = true
[[skills.config]]
path = ".agents/skills/memory-management"
enabled = true
[[skills.config]]
path = ".agents/skills/sparc-methodology"
enabled = true
[[skills.config]]
path = ".agents/skills/security-audit"
enabled = true
# =============================================================================
# Profiles
# =============================================================================
# Development profile - more permissive for local work
[profiles.dev]
approval_policy = "never"
sandbox_mode = "danger-full-access"
web_search = "live"
# Safe profile - maximum restrictions
[profiles.safe]
approval_policy = "untrusted"
sandbox_mode = "read-only"
web_search = "disabled"
# CI profile - for automated pipelines
[profiles.ci]
approval_policy = "never"
sandbox_mode = "workspace-write"
web_search = "cached"
# =============================================================================
# History
# =============================================================================
[history]
# Save all session transcripts
persistence = "save-all"
# =============================================================================
# Shell Environment
# =============================================================================
[shell_environment_policy]
# Inherit environment variables
inherit = "core"
# Exclude sensitive variables
exclude = ["*_KEY", "*_SECRET", "*_TOKEN", "*_PASSWORD"]
# =============================================================================
# Sandbox Workspace Write Settings
# =============================================================================
[sandbox_workspace_write]
# Additional writable paths beyond workspace
writable_roots = []
# Allow network access
network_access = true
# Exclude temp directories
exclude_slash_tmp = false
# =============================================================================
# Security Settings
# =============================================================================
[security]
# Enable input validation for all user inputs
input_validation = true
# Prevent directory traversal attacks
path_traversal_prevention = true
# Scan for hardcoded secrets
secret_scanning = true
# Scan dependencies for known CVEs
cve_scanning = true
# Maximum file size for operations (bytes)
max_file_size = 10485760
# Allowed file extensions (empty = allow all)
allowed_extensions = []
# Blocked file patterns (regex)
blocked_patterns = ["\\.env$", "credentials\\.json$", "\\.pem$", "\\.key$"]
# =============================================================================
# Performance Settings
# =============================================================================
[performance]
# Maximum concurrent agents
max_agents = 8
# Task timeout in seconds
task_timeout = 300
# Memory limit per agent
memory_limit = "512MB"
# Enable response caching
cache_enabled = true
# Cache TTL in seconds
cache_ttl = 3600
# Enable parallel task execution
parallel_execution = true
# =============================================================================
# Logging Settings
# =============================================================================
[logging]
# Log level: debug, info, warn, error
level = "info"
# Log format: json, text, pretty
format = "pretty"
# Log destination: stdout, file, both
destination = "stdout"
# =============================================================================
# Neural Intelligence Settings
# =============================================================================
[neural]
# Enable SONA (Self-Optimizing Neural Architecture)
sona_enabled = true
# Enable HNSW vector search
hnsw_enabled = true
# HNSW index parameters
hnsw_m = 16
hnsw_ef_construction = 200
hnsw_ef_search = 100
# Enable pattern learning
pattern_learning = true
# Learning rate for neural adaptation
learning_rate = 0.01
# =============================================================================
# Swarm Orchestration Settings
# =============================================================================
[swarm]
# Default topology: hierarchical, mesh, ring, star
default_topology = "hierarchical"
# Default strategy: balanced, specialized, adaptive
default_strategy = "specialized"
# Consensus algorithm: raft, byzantine, gossip
consensus = "raft"
# Enable anti-drift measures
anti_drift = true
# Checkpoint interval (tasks)
checkpoint_interval = 10
# =============================================================================
# Hooks Configuration
# =============================================================================
[hooks]
# Enable lifecycle hooks
enabled = true
# Pre-task hook
pre_task = true
# Post-task hook (for learning)
post_task = true
# Enable neural training on post-edit
train_on_edit = true
# =============================================================================
# Background Workers
# =============================================================================
[workers]
# Enable background workers
enabled = true
# Worker configuration
[workers.audit]
enabled = true
priority = "critical"
interval = 300
[workers.optimize]
enabled = true
priority = "high"
interval = 600
[workers.consolidate]
enabled = true
priority = "low"
interval = 1800
@@ -35,7 +35,7 @@ Define the contracts that remove field and widget drift before implementation st
## Agent Prompt
```text
You are A1, the architect for the Planarchy widget + field refactor sprint.
You are A1, the architect for the CapaKraken widget + field refactor sprint.
Your job is to define stable contracts before implementation starts. Focus on the field-definition model and the widget-config/layout versioning model.
@@ -33,7 +33,7 @@ Centralize dynamic-field validation and filter construction so project and resou
## Agent Prompt
```text
You are C1, the field domain coder for the Planarchy refactor sprint.
You are C1, the field domain coder for the CapaKraken refactor sprint.
Implement the shared dynamic-field validation and filter-building path defined by A1. Your target is parity between project and resource handling with duplicated logic removed from routers.
@@ -33,7 +33,7 @@ Align blueprint and dynamic-field UI to the canonical field contract so UI behav
## Agent Prompt
```text
You are C2, the blueprint UI coder for the Planarchy refactor sprint.
You are C2, the blueprint UI coder for the CapaKraken refactor sprint.
Update the blueprint field editor and dynamic-field UI to use the canonical field-definition contract. Remove UI-only assumptions and align rendering and filtering behavior with the shared schema.
@@ -34,7 +34,7 @@ Make dashboard widget persistence typed and versioned, then move rendering towar
## Agent Prompt
```text
You are C3, the widget platform coder for the Planarchy refactor sprint.
You are C3, the widget platform coder for the CapaKraken refactor sprint.
Implement the widget-config and layout-versioning contract from A1, then move dashboard rendering toward a registry-driven model.
@@ -30,7 +30,7 @@ Thin the dashboard router and prepare the path to SQL-first performance work.
## Agent Prompt
```text
You are C4, the dashboard data coder for the Planarchy refactor sprint.
You are C4, the dashboard data coder for the CapaKraken refactor sprint.
Extract dashboard data assembly out of the router, reduce JS-side aggregation complexity where feasible, and document the next sprint's SQL-first rewrite path.
@@ -24,7 +24,7 @@ Claude Flow does not expose an `orchestrator` agent type in the CLI. The sprint
- `O1` orchestrator role -> CLI `coordinator` type
The role behavior still comes from [O1-orchestrator.md](/home/hartmut/Documents/Copilot/planarchy/.agents/sprints/widget-field-refactor/O1-orchestrator.md), which explicitly forbids implementation work.
The role behavior still comes from [O1-orchestrator.md](/home/hartmut/Documents/Copilot/capakraken/.agents/sprints/widget-field-refactor/O1-orchestrator.md), which explicitly forbids implementation work.
## Azure Requirement
@@ -35,7 +35,7 @@ This launcher is intended to run against Azure OpenAI-backed Codex. It will fail
- the config references `AZURE_OPENAI_API_KEY`
- `AZURE_OPENAI_API_KEY` is present in the shell environment
See [docs/azure_codex_setup.md](/home/hartmut/Documents/Copilot/planarchy/docs/azure_codex_setup.md).
See [docs/azure_codex_setup.md](/home/hartmut/Documents/Copilot/capakraken/docs/azure_codex_setup.md).
## Start
@@ -111,7 +111,7 @@ Do not accept a story unless all are true:
## Orchestrator Prompt
```text
You are O1, the sprint orchestrator for the Planarchy widget + field refactor.
You are O1, the sprint orchestrator for the CapaKraken widget + field refactor.
You never implement. You do not patch files, write tests, fix lint errors, or edit migrations. Your job is to sequence work, issue precise tickets, review handoffs against acceptance criteria, control merge order, and escalate blockers.
@@ -33,7 +33,7 @@ Review every merge candidate for regressions, contract drift, and unsafe persist
## Agent Prompt
```text
You are R1, the reviewer for the Planarchy refactor sprint.
You are R1, the reviewer for the CapaKraken refactor sprint.
Review every merge candidate with a regression-first mindset. Prioritize correctness, contract discipline, persistence safety, and behavior parity across project/resource flows.
@@ -35,7 +35,7 @@ Add the regression and integration tests that make this refactor safe to land.
## Agent Prompt
```text
You are T1, the test agent for the Planarchy refactor sprint.
You are T1, the test agent for the CapaKraken refactor sprint.
Your role is to add the minimum set of high-value tests that make the refactor safe. Prioritize contracts, parity, persistence, and migration behavior over broad test volume.
+3 -3
View File
@@ -1,6 +1,6 @@
---
name: gitlooper
description: Gitea ticket processing agent — fetches, triages, analyses, implements, and submits Planarchy issues for review
description: Gitea ticket processing agent — fetches, triages, analyses, implements, and submits CapaKraken issues for review
allowed-tools: Bash, Read, Write, Edit, Glob, Grep, Agent, WebFetch
---
@@ -427,9 +427,9 @@ AGENT_DRY_RUN="false"
---
## 9. Planarchy-Specific Context
## 9. CapaKraken-Specific Context
The agent operates within the Planarchy monorepo and must adhere to all engineering rules defined in `CLAUDE.md`:
The agent operates within the CapaKraken monorepo and must adhere to all engineering rules defined in `CLAUDE.md`:
- **Money:** Always integer cents, never floats
- **Prisma:** After schema changes, run `pnpm db:push`, clear `.next/` cache, restart dev server
+5 -5
View File
@@ -1,13 +1,13 @@
Du bist der **Implementer** für das Planarchy-Projekt.
Du bist der **Implementer** fuer das CapaKraken-Projekt.
## Deine Aufgabe
Lies `plan.md` und implementiere die Tasks Schritt für Schritt. Führe nach jedem Task die Quality Gates aus.
## Planarchy-Kontext
## CapaKraken-Kontext
- Monorepo: pnpm workspaces + Turborepo
- Stack: Next.js 15 App Router + tRPC v11 + Prisma + PostgreSQL
- Dev-Server: `pnpm dev` auf Port 3100
- DB: PostgreSQL auf Port 5433 (`postgresql://planarchy:capakraken_dev@localhost:5433/planarchy`)
- DB: PostgreSQL auf Port 5433 (`postgresql://capakraken:capakraken_dev@localhost:5433/capakraken`)
## Implementierungs-Reihenfolge (immer einhalten)
1. **Prisma Schema** (`packages/db/prisma/schema.prisma`) → `pnpm db:push`
@@ -18,8 +18,8 @@ Lies `plan.md` und implementiere die Tasks Schritt für Schritt. Führe nach jed
## Nach jeder Schema-Änderung (Pflicht!)
```bash
DATABASE_URL="postgresql://planarchy:capakraken_dev@localhost:5433/planarchy" \
pnpm --filter @capakraken/db exec prisma generate
pnpm db:generate
pnpm db:validate
rm -rf apps/web/.next
```
+4 -4
View File
@@ -1,12 +1,12 @@
# PerformanceAgent — Web App & Data Optimization Specialist
Du bist der **PerformanceAgent** für das Planarchy-Projekt. Du bist Spezialist für Performance-Optimierung von datenintensiven Web-Applikationen mit großen PostgreSQL-Datenbanken, komplexen Berechnungen und visuell anspruchsvollen Interfaces.
Du bist der **PerformanceAgent** für das CapaKraken-Projekt. Du bist Spezialist für Performance-Optimierung von datenintensiven Web-Applikationen mit großen PostgreSQL-Datenbanken, komplexen Berechnungen und visuell anspruchsvollen Interfaces.
## Deine Aufgabe
Profil erstellen → Bottlenecks identifizieren → Fixes nach Impact ranken → Implementierungsplan ausgeben.
Implementiere NICHTS selbst — du lieferst einen priorisierten Befundbericht, den der Implementer umsetzt.
## Planarchy-Stack (immer im Blick)
## CapaKraken-Stack (immer im Blick)
- **Frontend:** Next.js 15 App Router, React 19, tRPC v11, Tailwind CSS v4
- **Backend:** tRPC Procedures, Prisma ORM, PostgreSQL 16
- **Auth:** Auth.js v5, dbUser-Caching per Request in TRPCContext
@@ -61,7 +61,7 @@ DB Seq Scan auf großer Tabelle → Index anlegen
Erstelle `research/perf-audit-[datum].md`:
```markdown
# Performance Audit — Planarchy
# Performance Audit — CapaKraken
**Datum:** YYYY-MM-DD
**Analysiert:** [welche Bereiche]
@@ -98,7 +98,7 @@ Erstelle `research/perf-audit-[datum].md`:
- ...
```
## Typische Planarchy-Bottlenecks (bekannte Kandidaten)
## Typische CapaKraken-Bottlenecks (bekannte Kandidaten)
- **Timeline:** Viele Allocations auf einmal rendern (SVG-Elemente, keine Virtualisierung)
- **Dashboard:** Widget-Queries laufen parallel, könnten gebündelt werden
+2 -2
View File
@@ -1,9 +1,9 @@
Du bist der **Planner** für das Planarchy-Projekt.
Du bist der **Planner** für das CapaKraken-Projekt.
## Deine Aufgabe
Analysiere die gegebene Anforderung und erstelle einen konkreten Umsetzungsplan. Implementiere NICHTS selbst.
## Planarchy-Kontext
## CapaKraken-Kontext
- Monorepo: `apps/web` (Next.js 15) + `packages/` (shared, db, engine, staffing, api, ui)
- Paketabhängigkeiten: `web → api → engine/staffing/db → shared` (keine Zyklen!)
- Prisma-Schema-Änderungen erfordern immer `prisma generate` + `.next/` Cache löschen
+5 -5
View File
@@ -1,11 +1,11 @@
# Research-Agent für Planarchy
# Research-Agent für CapaKraken
Du bist der **Research-Agent** für das Planarchy-Projekt. Deine Aufgabe ist es, komplexe technische oder fachliche Fragen zu analysieren, Optionen zu bewerten und strukturierte Entscheidungsgrundlagen für den Planner- und Implementer-Agenten bereitzustellen.
Du bist der **Research-Agent** für das CapaKraken-Projekt. Deine Aufgabe ist es, komplexe technische oder fachliche Fragen zu analysieren, Optionen zu bewerten und strukturierte Entscheidungsgrundlagen für den Planner- und Implementer-Agenten bereitzustellen.
## Deine Aufgabe
Führe tiefgehende Recherche durch. Implementiere NICHTS. Schreibe Code nur als Beispiele/Prototypen zur Veranschaulichung.
## Planarchy-Kontext (immer im Blick behalten)
## CapaKraken-Kontext (immer im Blick behalten)
- **Stack:** Next.js 15 App Router + tRPC v11 + Prisma + PostgreSQL + pnpm Monorepo
- **Ziel:** Ressourcenplanung für 3D-Produktionsstudio (Producer & Chapter Leads)
- **Kritische Constraints:**
@@ -36,7 +36,7 @@ Erstelle `research/[thema]-[datum].md` im Projekt-Root:
- ...
**Cons:**
- ...
**Kompatibilität mit Planarchy-Stack:** ✅/⚠️/❌
**Kompatibilität mit CapaKraken-Stack:** ✅/⚠️/❌
**Aufwand:** Klein / Mittel / Groß
### Option B: [Name]
@@ -55,7 +55,7 @@ Erstelle `research/[thema]-[datum].md` im Projekt-Root:
- [ ] ...
```
## Typische Research-Themen für Planarchy
## Typische Research-Themen für CapaKraken
- **Skalierung:** SSE Event-Bus → Redis Pub/Sub Migration
- **Performance:** Timeline-Rendering-Optimierung (1000+ Allocations)
- **Auth:** Produktions-taugliche Auth-Strategie (aktuell nur SHA-256 dev-only)
+2 -2
View File
@@ -1,9 +1,9 @@
Du bist der **Reviewer** für das Planarchy-Projekt.
Du bist der **Reviewer** für das CapaKraken-Projekt.
## Deine Aufgabe
Prüfe den aktuellen Code gegen alle Quality Gates, Coding-Standards und Architektur-Prinzipien. Erstelle einen Review-Report.
## Planarchy-Kontext
## CapaKraken-Kontext
- Monorepo: pnpm workspaces + Turborepo
- Stack: Next.js 15 App Router + tRPC v11 + Prisma + PostgreSQL
- TypeScript: `strict: true`, `exactOptionalPropertyTypes: true`
+4 -4
View File
@@ -1,8 +1,8 @@
You are an expert UX auditor, QA engineer, and frontend performance specialist working on the **Planarchy** project — an internal resource planning and project staffing tool for a 3D production studio.
You are an expert UX auditor, QA engineer, and frontend performance specialist working on the **CapaKraken** project — an internal resource planning and project staffing tool for a 3D production studio.
Your task is to perform a thorough audit of the running web service at **http://localhost:3100** and produce a structured report with prioritized, actionable feedback for the plan agent.
## Planarchy Context
## CapaKraken Context
- **Stack:** Next.js 15 (App Router) + tRPC v11 + Tailwind CSS v4 + Auth.js v5
- **Target users:** Producers, Chapter Leads, resource managers (internal tool, desktop-first)
- **Design system:** Custom Tailwind-based with `brand-*` color tokens, dark mode support
@@ -111,7 +111,7 @@ For each suggestion include:
Structure your final report as follows — this will be used directly as feedback for the plan agent:
# Planarchy — UX & Quality Audit Report
# CapaKraken — UX & Quality Audit Report
**Date**: [date]
**Audited URL**: http://localhost:3100
**Overall Score**: [X/10]
@@ -158,7 +158,7 @@ Structure your final report as follows — this will be used directly as feedbac
- Be constructive: every criticism should come with a concrete suggestion
- Prioritize ruthlessly: not everything is equally important
- Think like a first-time user AND a power user — they have different needs
- Planarchy is a desktop-first internal tool — mobile is secondary but should not be broken
- CapaKraken is a desktop-first internal tool — mobile is secondary but should not be broken
- The goal is: less lag, better functionality, more clarity, easy-to-use options,
consistent UI, and perfectly working themes
- After the report, suggest 35 concrete tasks the plan agent should pick up next
+1
View File
@@ -7,6 +7,7 @@ node_modules/
# Build outputs
.next/
.next-e2e/
**/.next.root-owned.*
dist/
build/
.turbo/
+1 -1
View File
@@ -1,4 +1,4 @@
# planarchy
# CapaKraken
> Multi-agent orchestration framework for agentic coding
+6 -6
View File
@@ -19,7 +19,7 @@ CapaKraken ist ein Ressourcenplanungs- und Projektbesetzungs-Tool fuer eine 3D-P
## Monorepo-Struktur
```text
planarchy/
capakraken/
├── apps/web
├── packages/shared
├── packages/db
@@ -46,10 +46,10 @@ planarchy/
## Dokumente
- Einstiegspunkt: [docs/README.md](/home/hartmut/Documents/Copilot/planarchy/docs/README.md)
- Aktiver Backlog: [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md)
- Estimating-Design: [docs/estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md)
- Historische Entscheidungen: [LEARNINGS.md](/home/hartmut/Documents/Copilot/planarchy/LEARNINGS.md)
- Einstiegspunkt: [docs/README.md](docs/README.md)
- Aktiver Backlog: [docs/product-roadmap.md](docs/product-roadmap.md)
- Estimating-Design: [docs/estimating-extension-design.md](docs/estimating-extension-design.md)
- Historische Entscheidungen: [LEARNINGS.md](LEARNINGS.md)
## Routing-Regeln Fuer Agenten
@@ -72,4 +72,4 @@ planarchy/
## Learnings
Wichtige Entscheidungen und Problemloesungen werden zentral in [LEARNINGS.md](/home/hartmut/Documents/Copilot/planarchy/LEARNINGS.md) festgehalten.
Wichtige Entscheidungen und Problemloesungen werden zentral in [LEARNINGS.md](LEARNINGS.md) festgehalten.
+1 -1
View File
@@ -1,5 +1,5 @@
# Compatibility Note
The canonical project guidance now lives in [CLAUDE.md](/home/hartmut/Documents/Copilot/planarchy/CLAUDE.md).
The canonical project guidance now lives in [CLAUDE.md](CLAUDE.md).
This file is kept only as a lightweight compatibility entry point.
+4 -4
View File
@@ -1,4 +1,4 @@
# Planarchy Projekt-Learnings
# CapaKraken Projekt-Learnings
## Format
**Datum | Kategorie | Problem → Lösung**
@@ -122,7 +122,7 @@ For modal focus trapping: create a `panelRef = useRef<HTMLDivElement>(null)`, ca
### 2026-03-05 | Build | MCP-Server im falschen Projektpfad registriert
**Problem:** `claude mcp add` wurde aus einem Unterverzeichnis (`packages/db`) heraus ausgeführt. Die Server wurden unter dem Unterverzeichnis-Pfad registriert, nicht unter dem Projekt-Root.
**Lösung:** MCP-Server-Einträge manuell in `~/.claude.json` in den richtigen Projekt-Pfad (`/home/hartmut/Documents/Copilot/planarchy`) verschieben.
**Lösung:** MCP-Server-Einträge manuell in `~/.claude.json` in den richtigen Projekt-Pfad (`/home/hartmut/Documents/Copilot/capakraken`) verschieben.
**Für künftige Projekte:** `claude mcp add` immer vom Projekt-Root aus ausführen.
---
@@ -181,7 +181,7 @@ For modal focus trapping: create a `panelRef = useRef<HTMLDivElement>(null)`, ca
### 2026-03-06 | Architektur | Redis Pub/Sub für SSE
**Problem:** SSE Event-Bus war ein In-Memory-Singleton, funktioniert nicht bei mehreren Server-Instanzen.
**Lösung:** `ioredis` in `@capakraken/api` hinzugefügt. Publisher schreibt Events in Redis-Channel `planarchy:sse`, Subscriber auf jeder Instanz empfängt und liefert lokal aus. Graceful Degradation: bei Redis-Ausfall weiterhin lokale Delivery.
**Lösung:** `ioredis` in `@capakraken/api` hinzugefügt. Publisher schreibt Events in Redis-Channel `capakraken:sse`, Subscriber auf jeder Instanz empfängt und liefert lokal aus. Graceful Degradation: bei Redis-Ausfall weiterhin lokale Delivery.
**Import-Pattern:** `import { Redis } from "ioredis"` (named export, nicht default) notwendig mit `moduleResolution: NodeNext` + ioredis v5.
**Offene Frage:** In Dev-Umgebung reicht lokale Delivery; Redis läuft auf Port 6380 via Docker Compose.
@@ -313,7 +313,7 @@ prisma.user.upsert({ where: ..., update: { passwordHash: adminHash }, create: {
### 2026-03-06 | Architektur | Granulares RBAC-System: Permission-Override-Muster
**Kontext:** Planarchy hatte 3 hartkodierte Procedure-Levels (protectedProcedure → managerProcedure → adminProcedure) ohne Granularität. Ziel: neue Rolle CONTROLLER + individuelle Permission-Overrides pro User.
**Kontext:** CapaKraken hatte 3 hartkodierte Procedure-Levels (protectedProcedure → managerProcedure → adminProcedure) ohne Granularität. Ziel: neue Rolle CONTROLLER + individuelle Permission-Overrides pro User.
**Lösung:** Zweigeteiltes System:
1. **`ROLE_DEFAULT_PERMISSIONS`** — statische Lookup-Tabelle: jede SystemRole hat eine Default-Menge an PermissionKeys.
+1 -1
View File
@@ -1,6 +1,6 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
/// <reference path="./.next-e2e/types/routes.d.ts" />
/// <reference path="./.next/types/routes.d.ts" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
+3 -3
View File
@@ -1,6 +1,6 @@
/// <reference lib="webworker" />
const CACHE_NAME = "planarchy-v1";
const CACHE_NAME = "capakraken-v2";
const STATIC_EXTENSIONS = /\.(js|css|png|jpg|jpeg|svg|gif|ico|woff2?|ttf|eot)$/;
// Offline fallback page (simple inline HTML)
@@ -9,7 +9,7 @@ const OFFLINE_HTML = `<!DOCTYPE html>
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Planarchy — Offline</title>
<title>CapaKraken - Offline</title>
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
@@ -31,7 +31,7 @@ const OFFLINE_HTML = `<!DOCTYPE html>
<body>
<div class="container">
<h1>You are offline</h1>
<p>Planarchy requires an internet connection. Please check your network and try again.</p>
<p>CapaKraken requires an internet connection. Please check your network and try again.</p>
<button onclick="location.reload()">Retry</button>
</div>
</body>
+2 -2
View File
@@ -5,10 +5,10 @@ import { auth } from "~/server/auth.js";
export default async function AppLayout({ children }: { children: React.ReactNode }) {
const session = await auth();
if (!session) {
if (!session?.user) {
redirect("/auth/signin");
}
const userRole = (session.user as { role?: string }).role ?? "USER";
const userRole = (session.user as { role?: string } | undefined)?.role ?? "USER";
return <AppShell userRole={userRole}>{children}</AppShell>;
}
+1 -1
View File
@@ -12,7 +12,7 @@ export const runtime = "nodejs";
export async function GET() {
const session = await auth();
if (!session) {
if (!session?.user) {
return new Response("Unauthorized", { status: 401 });
}
+1 -1
View File
@@ -4,7 +4,7 @@ import { auth } from "~/server/auth.js";
export default async function HomePage() {
const session = await auth();
if (!session) {
if (!session?.user) {
redirect("/auth/signin");
}
@@ -248,6 +248,9 @@ export function ChatPanel({ onClose }: { onClose: () => void }) {
}
if (scope === "resource") void utils.resource.invalidate();
if (scope === "project") void utils.project.invalidate();
if (scope === "country") void utils.country.invalidate();
if (scope === "holidayCalendar") void utils.holidayCalendar.invalidate();
if (scope === "vacation") void utils.vacation.invalidate();
}
}
}
@@ -2,7 +2,7 @@
import type { DashboardWidgetConfig, DashboardWidgetType } from "@capakraken/shared/types";
import dynamic from "next/dynamic";
import { Suspense, useState, useRef, useEffect } from "react";
import { Suspense, useState, useRef, useEffect, useMemo } from "react";
import { useDashboardLayout } from "~/hooks/useDashboardLayout.js";
import { WidgetContainer } from "./WidgetContainer.js";
import { AddWidgetModal } from "./AddWidgetModal.js";
@@ -44,6 +44,94 @@ function renderWidget(
);
}
function DeferredWidgetFallback() {
return (
<div className="flex h-full min-h-32 flex-col gap-3 p-1">
<div className="h-3 w-32 shimmer-skeleton rounded" />
<div className="h-20 w-full shimmer-skeleton rounded-2xl" />
<div className="h-3 w-4/5 shimmer-skeleton rounded" />
<div className="h-3 w-3/5 shimmer-skeleton rounded" />
</div>
);
}
function DeferredWidgetBody({
type,
config,
activationRank,
isPriority,
onConfigChange,
}: {
type: DashboardWidgetType;
config: DashboardWidgetConfig;
activationRank: number;
isPriority: boolean;
onConfigChange: (u: Record<string, unknown>) => void;
}) {
const containerRef = useRef<HTMLDivElement>(null);
const [isActive, setIsActive] = useState(isPriority);
useEffect(() => {
if (isPriority) {
setIsActive(true);
}
}, [isPriority]);
useEffect(() => {
if (isActive) return;
const element = containerRef.current;
if (!element) return;
const observer = new IntersectionObserver(
([entry]) => {
if (!entry?.isIntersecting) return;
setIsActive(true);
observer.disconnect();
},
{ rootMargin: "320px 0px", threshold: 0.05 },
);
observer.observe(element);
return () => observer.disconnect();
}, [isActive]);
useEffect(() => {
if (isActive || isPriority || typeof window === "undefined") return;
const activationDelayMs = 900 + Math.min(activationRank, 6) * 180;
let timeoutId: number | null = null;
let idleId: number | null = null;
const browserWindow = window as Window &
typeof globalThis & {
requestIdleCallback?: (
callback: IdleRequestCallback,
options?: IdleRequestOptions,
) => number;
cancelIdleCallback?: (handle: number) => void;
};
const activate = () => setIsActive(true);
if (typeof browserWindow.requestIdleCallback === "function") {
idleId = browserWindow.requestIdleCallback(activate, { timeout: activationDelayMs });
} else {
timeoutId = browserWindow.setTimeout(activate, activationDelayMs);
}
return () => {
if (idleId !== null && typeof browserWindow.cancelIdleCallback === "function") {
browserWindow.cancelIdleCallback(idleId);
}
if (timeoutId !== null) {
browserWindow.clearTimeout(timeoutId);
}
};
}, [activationRank, isActive, isPriority]);
return <div ref={containerRef} className="h-full">{isActive ? renderWidget(type, config, onConfigChange) : <DeferredWidgetFallback />}</div>;
}
export function DashboardClient() {
const [addModalOpen, setAddModalOpen] = useState(false);
const { config, addWidget, removeWidget, updateWidgetConfig, onLayoutChange, resetLayout } =
@@ -52,29 +140,80 @@ export function DashboardClient() {
// Measure grid container width so Responsive knows the column size.
// We can't use WidthProvider (uses findDOMNode, deprecated in React 18).
const containerRef = useRef<HTMLDivElement>(null);
const resizeFrameRef = useRef<number | null>(null);
const lastMeasuredWidthRef = useRef(0);
const [gridWidth, setGridWidth] = useState(1200);
useEffect(() => {
const el = containerRef.current;
if (!el) return;
const updateWidth = (width: number) => {
const roundedWidth = Math.max(0, Math.round(width));
if (roundedWidth === lastMeasuredWidthRef.current) return;
lastMeasuredWidthRef.current = roundedWidth;
setGridWidth((currentWidth) => (currentWidth === roundedWidth ? currentWidth : roundedWidth));
};
const ro = new ResizeObserver(([entry]) => {
if (entry) setGridWidth(entry.contentRect.width);
if (!entry) return;
if (resizeFrameRef.current !== null) cancelAnimationFrame(resizeFrameRef.current);
resizeFrameRef.current = requestAnimationFrame(() => {
resizeFrameRef.current = null;
updateWidth(entry.contentRect.width);
});
});
ro.observe(el);
setGridWidth(el.getBoundingClientRect().width);
return () => ro.disconnect();
updateWidth(el.getBoundingClientRect().width);
return () => {
ro.disconnect();
if (resizeFrameRef.current !== null) cancelAnimationFrame(resizeFrameRef.current);
};
}, []);
const layouts = {
lg: config.widgets.map((w) => ({
i: w.id,
x: w.x,
y: w.y,
w: w.w,
h: w.h,
minW: w.minW ?? 2,
minH: w.minH ?? 2,
})),
};
const layouts = useMemo(
() => ({
lg: config.widgets.map((w) => ({
i: w.id,
x: w.x,
y: w.y,
w: w.w,
h: w.h,
minW: w.minW ?? 2,
minH: w.minH ?? 2,
})),
}),
[config.widgets],
);
const renderedWidgets = useMemo(
() =>
config.widgets.map((widget) => {
const widgetDefinition = getWidget(widget.type);
const isPriorityWidget = widget.y < 3;
return (
<div key={widget.id}>
<WidgetContainer
title={widget.title ?? widgetDefinition.label}
description={widgetDefinition.description}
showDetails={widget.config.showDetails === true}
onToggleDetails={() =>
updateWidgetConfig(widget.id, {
showDetails: widget.config.showDetails !== true,
})
}
onRemove={() => removeWidget(widget.id)}
>
<DeferredWidgetBody
type={widget.type}
config={widget.config}
activationRank={widget.y * 12 + widget.x}
isPriority={isPriorityWidget}
onConfigChange={(update) => updateWidgetConfig(widget.id, update)}
/>
</WidgetContainer>
</div>
);
}),
[config.widgets, removeWidget, updateWidgetConfig],
);
return (
<div className="app-page space-y-6">
@@ -153,25 +292,7 @@ export function DashboardClient() {
draggableHandle=".widget-drag-handle"
margin={[12, 12]}
>
{config.widgets.map((widget) => (
<div key={widget.id}>
<WidgetContainer
title={widget.title ?? getWidget(widget.type).label}
description={getWidget(widget.type).description}
showDetails={widget.config.showDetails === true}
onToggleDetails={() =>
updateWidgetConfig(widget.id, {
showDetails: widget.config.showDetails !== true,
})
}
onRemove={() => removeWidget(widget.id)}
>
{renderWidget(widget.type, widget.config, (update) =>
updateWidgetConfig(widget.id, update),
)}
</WidgetContainer>
</div>
))}
{renderedWidgets}
</AnyGridLayout>
);
})()}
@@ -80,7 +80,7 @@ function SummaryCard({
export function BudgetForecastWidget({ config, onConfigChange }: WidgetProps) {
const showDetails = config.showDetails === true;
const { clients } = useWidgetFilterOptions();
const { clients } = useWidgetFilterOptions({ clients: true });
const filters = useMemo<WidgetFilter[]>(
() => [
@@ -7,6 +7,7 @@ import { InfoTooltip } from "~/components/ui/InfoTooltip.js";
import { AnimatedNumber } from "~/components/ui/AnimatedNumber.js";
import { WidgetFilterBar, type WidgetFilter } from "~/components/dashboard/WidgetFilterBar.js";
import { useWidgetFilterOptions } from "~/hooks/useWidgetFilterOptions.js";
import { useReferenceData } from "~/hooks/useReferenceData.js";
function UtilizationBar({ percent }: { percent: number }) {
const barColor =
@@ -24,11 +25,6 @@ function UtilizationBar({ percent }: { percent: number }) {
type TopSortKey = "name" | "actual" | "expected";
type WatchSortKey = "name" | "actual" | "target";
type CountryOption = {
id: string;
name: string;
};
type ChargeabilityRow = {
id: string;
displayName: string;
@@ -164,7 +160,8 @@ export function ChargeabilityWidget({ config: _config, onConfigChange }: WidgetP
includeProposed?: boolean;
showDetails?: boolean;
};
const { chapters } = useWidgetFilterOptions();
const { chapters } = useWidgetFilterOptions({ chapters: true });
const { countries } = useReferenceData({ countries: true });
const widgetFilters = useMemo<WidgetFilter[]>(
() => [
@@ -187,15 +184,6 @@ export function ChargeabilityWidget({ config: _config, onConfigChange }: WidgetP
const [topVisibleCount, setTopVisibleCount] = useState(batchSize);
const [watchVisibleCount, setWatchVisibleCount] = useState(batchSize);
const { data: countriesData } = trpc.country.list.useQuery(undefined, { staleTime: 60_000 });
const countries = useMemo(
() =>
((countriesData ?? []) as Array<{ id: string; name: string }>).map((country) => ({
id: country.id,
name: country.name,
})),
[countriesData],
) as CountryOption[];
const selectedCountryLabel = useMemo(() => {
if (selectedCountryIds.length === 0) return "Countries: All";
if (selectedCountryIds.length === 1) {
@@ -71,7 +71,7 @@ function formatLocation(location: {
export function ProjectHealthWidget({ config, onConfigChange }: WidgetProps) {
const showDetails = config.showDetails === true;
const { clients } = useWidgetFilterOptions();
const { clients } = useWidgetFilterOptions({ clients: true });
const filters = useMemo<WidgetFilter[]>(
() => [
@@ -1,9 +1,10 @@
"use client";
import { useState } from "react";
import { useMemo, useState } from "react";
import { trpc } from "~/lib/trpc/client.js";
import type { WidgetProps } from "~/components/dashboard/widget-registry.js";
import { InfoTooltip } from "~/components/ui/InfoTooltip.js";
import { useWidgetFilterOptions } from "~/hooks/useWidgetFilterOptions.js";
interface ResourceRow {
id: string;
@@ -29,8 +30,7 @@ export function ResourceTableWidget({ config, onConfigChange }: WidgetProps) {
{ staleTime: 60_000 },
);
const { data: chapterData } = trpc.resource.chapters.useQuery(undefined, { staleTime: 120_000 });
const chapters = chapterData ?? [];
const { chapters } = useWidgetFilterOptions({ chapters: true });
type SortKey = "eid" | "name" | "chapter" | "bookings" | "utilization" | "target";
const [sortKey, setSortKey] = useState<SortKey>("name");
@@ -44,6 +44,32 @@ export function ResourceTableWidget({ config, onConfigChange }: WidgetProps) {
}
}
const list = useMemo(() => (resources ?? []) as unknown as ResourceRow[], [resources]);
const sorted = useMemo(() => {
const next = [...list];
next.sort((a, b) => {
const mult = sortDir === "asc" ? 1 : -1;
switch (sortKey) {
case "eid":
return mult * a.eid.localeCompare(b.eid);
case "name":
return mult * a.displayName.localeCompare(b.displayName);
case "chapter":
return mult * (a.chapter ?? "").localeCompare(b.chapter ?? "");
case "bookings":
return mult * (a.bookingCount - b.bookingCount);
case "utilization":
return mult * (a.utilizationPercent - b.utilizationPercent);
case "target":
return mult * (a.chargeabilityTarget - b.chargeabilityTarget);
default:
return 0;
}
});
return next;
}, [list, sortDir, sortKey]);
if (isLoading) {
return (
<div className="flex flex-col gap-2 pt-1">
@@ -74,28 +100,6 @@ export function ResourceTableWidget({ config, onConfigChange }: WidgetProps) {
);
}
const list = (resources ?? []) as unknown as ResourceRow[];
const sorted = [...list].sort((a, b) => {
const mult = sortDir === "asc" ? 1 : -1;
switch (sortKey) {
case "eid":
return mult * a.eid.localeCompare(b.eid);
case "name":
return mult * a.displayName.localeCompare(b.displayName);
case "chapter":
return mult * (a.chapter ?? "").localeCompare(b.chapter ?? "");
case "bookings":
return mult * (a.bookingCount - b.bookingCount);
case "utilization":
return mult * (a.utilizationPercent - b.utilizationPercent);
case "target":
return mult * (a.chargeabilityTarget - b.chargeabilityTarget);
default:
return 0;
}
});
return (
<div className="flex flex-col h-full gap-3">
{/* Filter */}
@@ -107,9 +111,9 @@ export function ResourceTableWidget({ config, onConfigChange }: WidgetProps) {
className="app-select w-44 text-xs"
>
<option value="">All Chapters</option>
{chapters.map((c) => (
<option key={c} value={c}>
{c}
{chapters.map((chapterOption) => (
<option key={chapterOption.value} value={chapterOption.value}>
{chapterOption.label}
</option>
))}
</select>
@@ -11,7 +11,7 @@ type SortKey = "eid" | "name" | "chapter" | "score" | "lcr";
export function TopValueWidget({ config, onConfigChange }: WidgetProps) {
const limit = (config.limit as number) || 10;
const { chapters } = useWidgetFilterOptions();
const { chapters } = useWidgetFilterOptions({ chapters: true });
const filters = useMemo<WidgetFilter[]>(
() => [
@@ -424,6 +424,13 @@ export function TimelineProvider({
const { resourceMap, allocsByResource, resources } = useMemo(() => {
const resourceMap = new Map<string, ResourceBrief>();
const allocsByResource = new Map<string, TimelineAssignmentEntry[]>();
const firstAssignmentByResource = new Map<string, TimelineAssignmentEntry>();
const projectIdsByResource = new Map<string, Set<string>>();
const clientIdsByResource = new Map<string, Set<string>>();
const chapterFilter = new Set(filters.chapters);
const eidFilter = new Set(filters.eids);
const projectFilter = new Set(filters.projectIds);
const clientFilter = new Set(filters.clientIds);
if (eidFilterData?.resources) {
for (const r of eidFilterData.resources as {
@@ -445,6 +452,7 @@ export function TimelineProvider({
for (const entry of visibleAssignments) {
if (!entry.resourceId) continue;
firstAssignmentByResource.set(entry.resourceId, entry);
if (!resourceMap.has(entry.resourceId)) {
resourceMap.set(entry.resourceId, {
id: entry.resource!.id,
@@ -456,13 +464,23 @@ export function TimelineProvider({
const arr = allocsByResource.get(entry.resourceId) ?? [];
arr.push(entry);
allocsByResource.set(entry.resourceId, arr);
const projectIds = projectIdsByResource.get(entry.resourceId) ?? new Set<string>();
projectIds.add(entry.projectId);
projectIdsByResource.set(entry.resourceId, projectIds);
if (typeof entry.project.clientId === "string") {
const clientIds = clientIdsByResource.get(entry.resourceId) ?? new Set<string>();
clientIds.add(entry.project.clientId);
clientIdsByResource.set(entry.resourceId, clientIds);
}
}
// Merge cross-project context allocations so they appear during drag
if (isDragging && contextAllocations.length > 0) {
for (const ca of contextAllocations) {
if (!ca.resourceId) continue;
const existing = visibleAssignments.find((entry) => entry.resourceId === ca.resourceId);
const existing = firstAssignmentByResource.get(ca.resourceId);
if (existing && !resourceMap.has(ca.resourceId)) {
resourceMap.set(ca.resourceId, {
id: existing.resource!.id,
@@ -477,32 +495,35 @@ export function TimelineProvider({
let resources = [...resourceMap.values()].sort((a, b) =>
a.displayName.localeCompare(b.displayName),
);
if (filters.chapters.length > 0) {
resources = resources.filter((r) => r.chapter && filters.chapters.includes(r.chapter));
if (chapterFilter.size > 0) {
resources = resources.filter((r) => r.chapter && chapterFilter.has(r.chapter));
}
if (filters.eids.length > 0) {
resources = resources.filter((r) => filters.eids.includes(r.eid));
if (eidFilter.size > 0) {
resources = resources.filter((r) => eidFilter.has(r.eid));
}
if (filters.projectIds.length > 0) {
resources = resources.filter((r) =>
visibleAssignments.some(
(e) => e.resourceId === r.id && filters.projectIds.includes(e.projectId),
),
);
if (projectFilter.size > 0) {
resources = resources.filter((r) => {
const projectIds = projectIdsByResource.get(r.id);
if (!projectIds) return false;
for (const projectId of projectIds) {
if (projectFilter.has(projectId)) {
return true;
}
}
return false;
});
}
if (filters.clientIds.length > 0) {
resources = resources.filter((r) =>
visibleAssignments.some(
(entry) => {
const clientId = entry.project.clientId;
return (
entry.resourceId === r.id &&
typeof clientId === "string" &&
filters.clientIds.includes(clientId)
);
},
),
);
if (clientFilter.size > 0) {
resources = resources.filter((r) => {
const clientIds = clientIdsByResource.get(r.id);
if (!clientIds) return false;
for (const clientId of clientIds) {
if (clientFilter.has(clientId)) {
return true;
}
}
return false;
});
}
return { resourceMap, allocsByResource, resources };
@@ -520,6 +541,14 @@ export function TimelineProvider({
// ─── Project groups (for project view) ────────────────────────────────────
const projectGroups = useMemo(() => {
const projectGroupMap = new Map<string, ProjectGroup>();
const resourceRowMapByProject = new Map<
string,
Map<string, ProjectGroup["resourceRows"][number]>
>();
const chapterFilter = new Set(filters.chapters);
const eidFilter = new Set(filters.eids);
const clientFilter = new Set(filters.clientIds);
const projectFilter = new Set(filters.projectIds);
const allGroupEntries: TimelineProjectEntry[] = [...visibleAssignments, ...visibleDemands];
for (const entry of allGroupEntries) {
let group = projectGroupMap.get(entry.projectId);
@@ -537,43 +566,37 @@ export function TimelineProvider({
resourceRows: [],
};
projectGroupMap.set(entry.projectId, group);
resourceRowMapByProject.set(entry.projectId, new Map());
}
const currentGroup = group;
if (!currentGroup) continue;
if (entry.kind === "assignment" && entry.resourceId) {
const existingRow = currentGroup.resourceRows.find(
(r) => r.resource.id === entry.resourceId,
);
const rowMap = resourceRowMapByProject.get(entry.projectId);
const existingRow = rowMap?.get(entry.resourceId);
if (existingRow) {
existingRow.allocs.push(entry);
} else {
const res = resourceMap.get(entry.resourceId);
if (res) {
currentGroup.resourceRows.push({ resource: res, allocs: [entry] });
const row = { resource: res, allocs: [entry] };
currentGroup.resourceRows.push(row);
rowMap?.set(entry.resourceId, row);
}
}
}
}
for (const group of projectGroupMap.values()) {
group.resourceRows = group.resourceRows.filter(({ resource, allocs }) => {
if (filters.chapters.length > 0) {
if (!resource.chapter || !filters.chapters.includes(resource.chapter)) {
group.resourceRows = group.resourceRows.filter(({ resource }) => {
if (chapterFilter.size > 0) {
if (!resource.chapter || !chapterFilter.has(resource.chapter)) {
return false;
}
}
if (filters.eids.length > 0 && !filters.eids.includes(resource.eid)) {
if (eidFilter.size > 0 && !eidFilter.has(resource.eid)) {
return false;
}
if (filters.clientIds.length > 0) {
const matchesClient = allocs.some(
(alloc) => {
const clientId = alloc.project.clientId;
return typeof clientId === "string" && filters.clientIds.includes(clientId);
},
);
if (!matchesClient) {
return false;
}
if (clientFilter.size > 0 && (!group.clientId || !clientFilter.has(group.clientId))) {
return false;
}
return true;
});
@@ -584,18 +607,18 @@ export function TimelineProvider({
return [...projectGroupMap.values()]
.sort((a, b) => a.startDate.getTime() - b.startDate.getTime())
.filter((pg) => {
if (filters.projectIds.length > 0 && !filters.projectIds.includes(pg.id)) return false;
if (projectFilter.size > 0 && !projectFilter.has(pg.id)) return false;
if (
filters.clientIds.length > 0 &&
(!pg.clientId || !filters.clientIds.includes(pg.clientId))
clientFilter.size > 0 &&
(!pg.clientId || !clientFilter.has(pg.clientId))
)
return false;
if (
filters.chapters.length > 0 &&
chapterFilter.size > 0 &&
pg.resourceRows.length === 0
)
return false;
if (filters.eids.length > 0 && pg.resourceRows.length === 0)
if (eidFilter.size > 0 && pg.resourceRows.length === 0)
return false;
return true;
});
@@ -4,6 +4,7 @@ import { createPortal } from "react-dom";
import { useMemo, useState, type ReactNode } from "react";
import { InfoTooltip } from "~/components/ui/InfoTooltip.js";
import { useAnchoredOverlay } from "~/hooks/useAnchoredOverlay.js";
import { useReferenceData } from "~/hooks/useReferenceData.js";
import { trpc } from "~/lib/trpc/client.js";
import type { TimelineFilters } from "./TimelineFilter.js";
@@ -105,6 +106,7 @@ interface TimelineQuickFiltersProps {
export function TimelineQuickFilters({ filters, onChange }: TimelineQuickFiltersProps) {
const [eidSearch, setEidSearch] = useState("");
const { clients, countries } = useReferenceData({ clients: true, countries: true });
const { data: resourceData } = trpc.resource.list.useQuery(
{ isActive: true, limit: 500 },
{ staleTime: 60_000 },
@@ -113,15 +115,6 @@ export function TimelineQuickFilters({ filters, onChange }: TimelineQuickFilters
{ isActive: true, search: eidSearch, limit: 100 },
{ staleTime: 15_000 },
);
const { data: clientsData } = trpc.clientEntity.list.useQuery(
{ isActive: true },
{ staleTime: 60_000 },
);
const { data: countriesData } = trpc.country.list.useQuery(
{ isActive: true },
{ staleTime: 60_000 },
);
const resources = ((resourceData?.resources as ResourceOption[] | undefined) ?? []).slice();
const eidSuggestions = (
(eidSearchData?.resources as ResourceOption[] | undefined) ??
@@ -140,22 +133,6 @@ export function TimelineQuickFilters({ filters, onChange }: TimelineQuickFilters
[resources],
);
const clients = useMemo(
() =>
((clientsData ?? []) as ClientOption[])
.filter((client) => client.isActive !== false)
.map((client) => ({ id: client.id, name: client.name, code: client.code })),
[clientsData],
);
const countries = useMemo(
() =>
((countriesData ?? []) as Array<{ id: string; code: string; name: string }>)
.map((c) => ({ id: c.id, code: c.code, name: c.name }))
.sort((a, b) => a.name.localeCompare(b.name)),
[countriesData],
);
const resourceMap = useMemo(
() => new Map(resources.map((resource) => [resource.eid, resource])),
[resources],
+4 -2
View File
@@ -108,8 +108,10 @@ export function useDashboardLayout() {
const onLayoutChange = useCallback(
(layout: { i: string; x: number; y: number; w: number; h: number }[]) => {
setConfig((prev) => {
const layoutMap = new Map(layout.map((item) => [item.i, item]));
const previousWidgetMap = new Map(prev.widgets.map((widget) => [widget.id, widget]));
const updatedWidgets = prev.widgets.map((w) => {
const item = layout.find((l) => l.i === w.id);
const item = layoutMap.get(w.id);
if (!item) return w;
return { ...w, x: item.x, y: item.y, w: item.w, h: item.h };
});
@@ -118,7 +120,7 @@ export function useDashboardLayout() {
// react-grid-layout fires onLayoutChange on mount too — we skip that
// to avoid overwriting saved positions with compacted coordinates.
const changed = updatedWidgets.some((w) => {
const orig = prev.widgets.find((o) => o.id === w.id);
const orig = previousWidgetMap.get(w.id);
return orig && (w.x !== orig.x || w.y !== orig.y || w.w !== orig.w || w.h !== orig.h);
});
+99
View File
@@ -0,0 +1,99 @@
"use client";
import { useMemo } from "react";
import { trpc } from "~/lib/trpc/client.js";
/**
 * Client lookup row as exposed by `useReferenceData`.
 * Rows with `isActive === false` are filtered out before being returned.
 */
export interface ClientReference {
  id: string;
  name: string;
  // May be null for clients without a short code — TODO confirm against the API schema.
  code: string | null;
  isActive?: boolean;
}
/**
 * Country lookup row as exposed by `useReferenceData`.
 * Rows with `isActive === false` are filtered out before being returned.
 */
export interface CountryReference {
  id: string;
  name: string;
  code: string;
  isActive?: boolean;
}
/**
 * Role lookup row as exposed by `useReferenceData`.
 * Rows with `isActive === false` are filtered out before being returned.
 */
export interface RoleReference {
  id: string;
  name: string;
  isActive?: boolean;
}
/**
 * Selects which lookup sets `useReferenceData` should fetch.
 * Omitted (or non-`true`) keys disable the corresponding query entirely,
 * and that set resolves to an empty array.
 */
export interface ReferenceDataSelection {
  clients?: boolean;
  countries?: boolean;
  roles?: boolean;
  chapters?: boolean;
}
// 5 minutes — reference/lookup data changes rarely, so cached results stay fresh for a long window.
const LOOKUP_STALE_TIME_MS = 300_000;
/**
 * Loads shared lookup data (clients, countries, roles, chapters) on demand.
 *
 * Only the sets flagged `true` in `selection` trigger a network query; every
 * other set resolves to an empty array without fetching. Returned rows are
 * filtered to active entries (where the API exposes `isActive`) and sorted
 * alphabetically by their display name.
 */
export function useReferenceData(selection: ReferenceDataSelection = {}) {
  const wantClients = selection.clients === true;
  const wantCountries = selection.countries === true;
  const wantRoles = selection.roles === true;
  const wantChapters = selection.chapters === true;
  // All queries are declared unconditionally (rules of hooks) and gated via `enabled`.
  const { data: rawClients } = trpc.clientEntity.list.useQuery(
    { isActive: true },
    { staleTime: LOOKUP_STALE_TIME_MS, enabled: wantClients },
  );
  const { data: rawCountries } = trpc.country.list.useQuery(
    { isActive: true },
    { staleTime: LOOKUP_STALE_TIME_MS, enabled: wantCountries },
  );
  const { data: rawRoles } = trpc.role.list.useQuery(
    { isActive: true },
    { staleTime: LOOKUP_STALE_TIME_MS, enabled: wantRoles },
  );
  const { data: rawChapters } = trpc.resource.chapters.useQuery(undefined, {
    staleTime: LOOKUP_STALE_TIME_MS,
    enabled: wantChapters,
  });
  const clients = useMemo<ClientReference[]>(() => {
    if (!wantClients) return [];
    // The client endpoint may return either a bare array or `{ clients: [...] }`.
    const rows = Array.isArray(rawClients)
      ? (rawClients as ClientReference[])
      : ((rawClients as { clients?: ClientReference[] } | undefined)?.clients ?? []);
    return rows
      .filter((row) => row.isActive !== false)
      .sort((left, right) => left.name.localeCompare(right.name));
  }, [rawClients, wantClients]);
  const countries = useMemo<CountryReference[]>(() => {
    if (!wantCountries) return [];
    const rows = (Array.isArray(rawCountries) ? rawCountries : []) as CountryReference[];
    return rows
      .filter((row) => row.isActive !== false)
      .sort((left, right) => left.name.localeCompare(right.name));
  }, [rawCountries, wantCountries]);
  const roles = useMemo<RoleReference[]>(() => {
    if (!wantRoles) return [];
    const rows = (Array.isArray(rawRoles) ? rawRoles : []) as RoleReference[];
    return rows
      .filter((row) => row.isActive !== false)
      .sort((left, right) => left.name.localeCompare(right.name));
  }, [rawRoles, wantRoles]);
  const chapters = useMemo<string[]>(() => {
    if (!wantChapters) return [];
    const rows = (Array.isArray(rawChapters) ? rawChapters : []) as string[];
    // Copy before sorting — `sort` mutates in place and the raw query data is shared.
    return rows.slice().sort((left, right) => left.localeCompare(right));
  }, [rawChapters, wantChapters]);
  return {
    clients,
    countries,
    roles,
    chapters,
  };
}
+13 -36
View File
@@ -1,59 +1,36 @@
/**
* Shared hook for loading filter options used across dashboard widgets.
* Loads clients, countries, roles, and chapters once with long cache TTL.
* Loads only the requested lookup sets and exposes them as filter options.
*/
"use client";
import { useMemo } from "react";
import { trpc } from "~/lib/trpc/client.js";
import { useReferenceData, type ReferenceDataSelection } from "~/hooks/useReferenceData.js";
export interface FilterOption {
value: string;
label: string;
}
export function useWidgetFilterOptions() {
const { data: clientsRaw } = trpc.clientEntity.list.useQuery(
{ isActive: true },
{ staleTime: 300_000 },
);
const { data: countriesRaw } = trpc.country.list.useQuery(
{ isActive: true },
{ staleTime: 300_000 },
);
const { data: rolesRaw } = trpc.role.list.useQuery(
{ isActive: true },
{ staleTime: 300_000 },
);
export function useWidgetFilterOptions(selection: ReferenceDataSelection = {}) {
const { clients: clientRows, countries: countryRows, roles: roleRows, chapters: chapterRows } =
useReferenceData(selection);
const clients = useMemo<FilterOption[]>(() => {
const list = (Array.isArray(clientsRaw) ? clientsRaw : (clientsRaw as any)?.clients ?? []) as Array<{ id: string; name: string }>;
return list.map((c) => ({ value: c.id, label: c.name }));
}, [clientsRaw]);
return clientRows.map((client) => ({ value: client.id, label: client.name }));
}, [clientRows]);
const countries = useMemo<FilterOption[]>(() => {
const list = (Array.isArray(countriesRaw) ? countriesRaw : []) as Array<{ id: string; name: string }>;
return list.map((c) => ({ value: c.id, label: c.name }));
}, [countriesRaw]);
return countryRows.map((country) => ({ value: country.id, label: country.name }));
}, [countryRows]);
const roles = useMemo<FilterOption[]>(() => {
const list = (Array.isArray(rolesRaw) ? rolesRaw : []) as Array<{ id: string; name: string }>;
return list.map((r) => ({ value: r.id, label: r.name }));
}, [rolesRaw]);
return roleRows.map((role) => ({ value: role.id, label: role.name }));
}, [roleRows]);
// Chapters are derived from roles or can be hardcoded common ones
const chapters = useMemo<FilterOption[]>(() => {
const common = [
"Digital Content Production",
"Project Management",
"Art Direction",
"CGI-Dev",
"Product Data Management",
];
return common.map((c) => ({ value: c, label: c }));
}, []);
return chapterRows.map((chapter) => ({ value: chapter, label: chapter }));
}, [chapterRows]);
return { clients, countries, roles, chapters };
}
+1
View File
@@ -14,6 +14,7 @@ const LoginSchema = z.object({
});
const authConfig = {
trustHost: true,
providers: [
Credentials({
name: "credentials",
+18 -16
View File
@@ -1,20 +1,20 @@
# Documentation Index
**Date:** 2026-03-12
**Purpose:** Single entry point for active Planarchy product and technical documentation.
**Purpose:** Single entry point for active CapaKraken product and technical documentation.
## Canonical Documents
| Topic | File | Use |
|---|---|---|
| Active roadmap and open gaps | [product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md) | Primary backlog and current delivery order |
| Estimating system design | [estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md) | Workbook analysis, field mapping, and implementation plan |
| Dispo import implementation | [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/planarchy/docs/dispo-import-implementation.md) | Clean-slate Dispo v2 import design, mapping rules, staging flow, and commit policy |
| Dispo import ticket pack | [dispo-import-implementation-tickets.md](/home/hartmut/Documents/Copilot/planarchy/docs/dispo-import-implementation-tickets.md) | Worker-ready delivery slices, dependencies, and acceptance criteria for the Dispo import |
| Demand/assignment cutover guide | [demand-assignment-migration-cutover.md](/home/hartmut/Documents/Copilot/planarchy/docs/demand-assignment-migration-cutover.md) | Go/no-go criteria, staged cutover, and readiness artifact policy |
| Strategic architecture direction | [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/planarchy/research/v2-architecture-proposal-2026-03-11.md) | Longer-horizon architecture target |
| Implementation history | [LEARNINGS.md](/home/hartmut/Documents/Copilot/planarchy/LEARNINGS.md) | Append-only decisions and lessons |
| Agent/project guidance | [CLAUDE.md](/home/hartmut/Documents/Copilot/planarchy/CLAUDE.md) | Working conventions and quality gates |
| Active roadmap and open gaps | [product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md) | Primary backlog and current delivery order |
| Estimating system design | [estimating-extension-design.md](/home/hartmut/Documents/Copilot/capakraken/docs/estimating-extension-design.md) | Workbook analysis, field mapping, and implementation plan |
| Dispo import implementation | [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/capakraken/docs/dispo-import-implementation.md) | Clean-slate Dispo v2 import design, mapping rules, staging flow, and commit policy |
| Dispo import ticket pack | [dispo-import-implementation-tickets.md](/home/hartmut/Documents/Copilot/capakraken/docs/dispo-import-implementation-tickets.md) | Worker-ready delivery slices, dependencies, and acceptance criteria for the Dispo import |
| Demand/assignment cutover guide | [demand-assignment-migration-cutover.md](/home/hartmut/Documents/Copilot/capakraken/docs/demand-assignment-migration-cutover.md) | Go/no-go criteria, staged cutover, and readiness artifact policy |
| Strategic architecture direction | [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/capakraken/research/v2-architecture-proposal-2026-03-11.md) | Longer-horizon architecture target |
| Implementation history | [LEARNINGS.md](/home/hartmut/Documents/Copilot/capakraken/LEARNINGS.md) | Append-only decisions and lessons |
| Agent/project guidance | [CLAUDE.md](/home/hartmut/Documents/Copilot/capakraken/CLAUDE.md) | Working conventions and quality gates |
## Archive Policy
@@ -30,10 +30,12 @@ Archive-note files should point back to the relevant canonical document instead
All archived markdown plan and proposal files now live under `docs/old-markdowns/`.
- [plan.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/plan.md)
- [PLAN_SKILLMATRIX.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/PLAN_SKILLMATRIX.md)
- [refactor-sprint-plan.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/refactor-sprint-plan.md)
- [estimating-field-mapping.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/estimating-field-mapping.md)
- [cgi-breakdown-implementation-proposal.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/cgi-breakdown-implementation-proposal.md)
- [architecture-evaluation-2026-03-06.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/architecture-evaluation-2026-03-06.md)
- [perf-audit-2026-03-09.md](/home/hartmut/Documents/Copilot/planarchy/docs/old-markdowns/perf-audit-2026-03-09.md)
- [plan.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/plan.md)
- [rename-planarchy-to-capakraken-plan.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/rename-planarchy-to-capakraken-plan.md)
- [PLAN_SKILLMATRIX.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/PLAN_SKILLMATRIX.md)
- [refactor-sprint-plan.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/refactor-sprint-plan.md)
- [estimating-field-mapping.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/estimating-field-mapping.md)
- [cgi-breakdown-implementation-proposal.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/cgi-breakdown-implementation-proposal.md)
- [architecture-evaluation-2026-03-06.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/architecture-evaluation-2026-03-06.md)
- [perf-audit-2026-03-09.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/perf-audit-2026-03-09.md)
- [review-report-2026-03-15-computation-graph.md](/home/hartmut/Documents/Copilot/capakraken/docs/old-markdowns/review-report-2026-03-15-computation-graph.md)
+45 -29
View File
@@ -26,6 +26,7 @@ Trotzdem ist die Paritaet zur eigentlichen App/API noch nicht erreicht. Die groe
- `assistant.chat` baut den System Prompt, filtert die verfuegbaren Tools und laesst das Modell Tools aufrufen.
- Der eigentliche Datenzugriff liegt fast komplett in `executeTool(...)` und den `executors` in `packages/api/src/router/assistant-tools.ts`.
- Fuer Chargeability Report und Computation Graph nutzt der Assistant jetzt dieselben tRPC-Readmodels wie die eigentlichen Fachrouter, statt eine zweite Query-Logik zu pflegen.
### Permission-Gating
@@ -71,6 +72,11 @@ Es gibt aktuell vier Permission-/Scope-Ebenen:
- basiert bereits auf denselben Timeline-Readmodels/Shift-Preview-Helfern wie die UI
- Estimates: nur Suche, Detail und Anlegen, aber kein voller Lifecycle
- Reports: `run_report` ist flexibel, deckt aber nicht die spezialisierten Report-/Analyse-Readmodels ab
- Chargeability / Transparenz:
- `get_chargeability_report`
- `get_resource_computation_graph`
- `get_project_computation_graph`
- damit sind die wichtigsten tiefen Herleitungen fuer Chargeability, SAH, Feiertagsabzuege und Projektkalkulation jetzt auch im Assistant verfuegbar
- Audit/History: nur einfache History-Abfragen, keine volle Audit-API
- Notification/Tasking: Kernfaelle vorhanden, aber keine volle Reminder-/Task-/Notification-Paritaet
- Country-/Location-Stammdaten: nur lesend und auch dort nur flach
@@ -78,22 +84,18 @@ Es gibt aktuell vier Permission-/Scope-Ebenen:
### Vollstaendig fehlend oder fachlich nicht ausreichend
- Holiday-Calendar-Admin und Editor-Funktionen
- Computation Graph fuer vollstaendige Herleitungen
- Chargeability Report Readmodel
- Webhook-Administration
- System Settings / AI / SMTP / Image-Provider Administration
- System Role Config Administration
- Import/Export-Flows
- User Self-Service und Preferences
- Country- und Metro-City-Administration
- Timeline-Mutationen und Dispo-spezifische Write-Flows
- Voller Estimate-Lifecycle
- Dispo-/Import-spezifische Flows
## Kritische Inkonsistenzen und Risiken
Stand 2026-03-28: Die frueheren P0s bei Notification-Scoping, `list_users`, Mutation-Audit und reinen Permission-Texten sind behoben. Die folgenden Punkte bleiben relevant.
Stand 2026-03-29: Die frueheren P0s bei Notification-Scoping, `list_users`, Mutation-Audit und reinen Permission-Texten sind behoben. Holiday-Calendar-Lesezugriffe sowie Admin-Mutationen fuer Kalender und Entries sind jetzt im Assistant vorhanden. Die folgenden Punkte bleiben relevant.
### P0: Human-in-the-Loop ist serverseitig persistiert, aber noch nicht als vollwertiger Approval-Workspace ausgebaut
@@ -127,7 +129,7 @@ Der Assistant kann viele Kernfaelle, aber noch nicht denselben Arbeitsmodus wie
Konsequenz:
- Timeline-Readmodel-Paritaet ist jetzt fuer die wichtigsten read-only Faelle vorhanden, aber komplexe Write-, Audit-, Admin- und Estimate-Workflows bleiben teilweise unvollstaendig
- Timeline-Readmodel- und die wichtigsten Timeline-Write-Paritaetsfaelle sind jetzt ueber dieselben Router-/Readmodel-Pfade verfuegbar, aber Audit-, Admin-, Import- und Estimate-Workflows bleiben teilweise unvollstaendig
- tiefe Erklaerungen fuer Herleitungen und Governance sind noch nicht auf UI-Niveau
## Was der Assistant heute noch nicht "weiss"
@@ -147,10 +149,21 @@ Die folgende Liste meint: Informationen, die in App/API bereits existieren oder
Aktuell im Assistant vorhanden:
- aufgeloeste Feiertage nach Region oder Ressource
- Holiday-Calendar-Stammdaten:
- `list_holiday_calendars`
- `get_holiday_calendar`
- `preview_resolved_holiday_calendar`
- Holiday-Calendar-Admin:
- `create_holiday_calendar`
- `update_holiday_calendar`
- `delete_holiday_calendar`
- `create_holiday_calendar_entry`
- `update_holiday_calendar_entry`
- `delete_holiday_calendar_entry`
Fehlend:
Restluecke:
- die eigentlichen Kalenderobjekte und deren Pflegekontext
- Country-/Metro-City-Stammdaten und tiefere Standortregeln sind weiterhin nicht in derselben Pflegebreite wie die eigentliche Admin-Oberflaeche abgedeckt
### Timeline und Disposition
@@ -160,25 +173,28 @@ Bereits vorhanden:
- `get_timeline_holiday_overlays`
- `get_project_timeline_context`
- `preview_project_shift`
- `update_timeline_allocation_inline`
- `quick_assign_timeline_resource`
- `batch_quick_assign_timeline_resources`
- `batch_shift_timeline_allocations`
- `apply_timeline_project_shift`
- Reuse derselben Timeline-Readmodels und Shift-Preview-Helfer wie in `timelineRouter`
- Reuse derselben Timeline-Mutationen via `createCallerFactory(timelineRouter)` statt Assistant-Sonderlogik
- identische Manager-/Admin- und `manageAllocations`-Guards wie im normalen API-Pfad
Noch fehlend:
- vollstaendige Write-Paritaet fuer Timeline-/Dispo-Workflows
- Inline-/Batch-Operationen der Timeline:
- `updateAllocationInline`
- `quickAssign`
- `batchQuickAssign`
- `batchShiftAllocations`
- `applyShift`
- Dispo-spezifische Import-/Workbook-Flows
Konsequenz:
- Der Assistant kann die wichtigsten Timeline-/Disposition-Readfaelle jetzt fachlich deutlich naeher an der UI abbilden, aber noch nicht denselben operativen Arbeitsmodus fuer Schreibaktionen und Imports.
- Der Assistant kann die wichtigsten Timeline-/Disposition-Read- und Writefaelle jetzt fachlich und technisch auf derselben Basis wie die UI abbilden.
- Offen bleiben vor allem Import-/Workbook-Flows und weitere Dispo-Spezialworkflows ausserhalb der Kernmutationen.
### Transparenz, Herleitungen und Berechnungsgraphen
Bereits vorhanden:
- Vollstaendige Computation-Graph-Daten fuer Resource- und Project-Views:
- Herleitungsfaktoren
- Formeln
@@ -191,7 +207,8 @@ Konsequenz:
Konsequenz:
- Der Assistant kann zwar Teilantworten zu Chargeability/Budget geben, aber noch nicht dieselbe Erklaerungstiefe wie die spezialisierten Analyseansichten.
- Der Assistant kann die wichtigsten Herleitungen jetzt auf derselben fachlichen Basis wie die spezialisierten Analyseansichten liefern.
- Offen bleibt vor allem, diese Tiefe konsequent in weiteren Admin-, Audit- und Workflow-spezifischen Assistentenfaellen auszubauen.
### Audit, Verlauf und Governance
@@ -247,17 +264,19 @@ Konsequenz:
### Stammdaten fuer Laender und Orte
- Country-Details inklusive `scheduleRules`
- Metro-City-Verwaltung
- Country-/City-CRUD
Aktuell im Assistant vorhanden:
- `list_countries` mit relativ flachem Output
- `list_countries` mit `scheduleRules`, Aktiv-Status und Metro-Cities
- `get_country`
- `create_country`
- `update_country`
- `create_metro_city`
- `update_metro_city`
- `delete_metro_city`
Fehlend:
Restluecke:
- volle fachliche Pflege und die tieferen Standortregeln, die fuer Feiertage, SAH und Forecasts relevant sind
- weitere standortbezogene Admin-Bereiche ausserhalb von Country/Metro-City
### Estimate-Lifecycle und Fachobjekte unterhalb des Estimates
@@ -292,7 +311,6 @@ Fehlend:
### Komplett fehlende Router-Paritaet
- `holidayCalendar`
- `importExport`
- `chargeabilityReport`
- `computationGraph`
@@ -330,7 +348,6 @@ Der Prompt suggeriert an mehreren Stellen mehr Paritaet, als technisch heute vor
### Problematische Aussagen
- "Urlaub, Feiertage" ist fuer Leseabfragen ok, aber nicht fuer Holiday-Calendar-Administration.
- "Notifications anzeigen" ist fuer die Basisfaelle inzwischen sauberer gescoped, deckt aber weiterhin nicht die volle Notification-/Reminder-Paritaet der App ab.
- "Dashboard-Details abrufen" stimmt nur fuer einen Teil der Dashboard-/Analysewelt.
- "Den User zu relevanten Seiten navigieren" stimmt, ersetzt aber keine echte Daten-/Aktionsparitaet in Timeline, Holiday Editor oder Admin-Bereichen.
@@ -405,9 +422,8 @@ Die Human-in-the-Loop-Regel ist inzwischen serverseitig erzwungen. Der Prompt so
- update
3. Country-/City-Tools
- Country-Detail
- Country-Create/Update
- City-Create/Update/Delete
- Status: umgesetzt fuer Country-Detail, Country-Create/Update und City-Create/Update/Delete
- offen bleiben nur weitergehende standortbezogene Admin-Readmodels ausserhalb dieses Stammdatenkerns
4. Webhook-Tools
- list/get/create/update/delete/test
+1 -1
View File
@@ -1,6 +1,6 @@
# Calculation Reference
How every number in Planarchy is derived. All monetary values are integer cents. All percentages are 0-100 integers unless noted.
How every number in CapaKraken is derived. All monetary values are integer cents. All percentages are 0-100 integers unless noted.
---
+15 -13
View File
@@ -1,8 +1,8 @@
# Planarchy CI/CD Manual
# CapaKraken CI/CD Manual
## Overview
Planarchy uses GitHub Actions for continuous integration and Docker for deployment. This document covers the full pipeline from code push to production.
CapaKraken uses GitHub Actions for continuous integration and Docker for deployment. This document covers the full pipeline from code push to production.
---
@@ -120,7 +120,7 @@ Checks PostgreSQL and Redis connectivity. Returns 200 if all services are reacha
```bash
# Build the image
docker build -f Dockerfile.prod -t planarchy:latest .
docker build -f Dockerfile.prod -t capakraken:latest .
# Test it locally
docker compose -f docker-compose.prod.yml up -d
@@ -142,9 +142,9 @@ The production image requires these environment variables:
```env
# Required
DATABASE_URL=postgresql://user:pass@host:5432/planarchy
DATABASE_URL=postgresql://user:pass@host:5432/capakraken
REDIS_URL=redis://host:6379
NEXTAUTH_URL=https://planarchy.your-domain.com
NEXTAUTH_URL=https://capakraken.your-domain.com
NEXTAUTH_SECRET=<random-32-char-string>
# Optional
@@ -153,7 +153,7 @@ SMTP_HOST=smtp.example.com
SMTP_PORT=587
SMTP_USER=notifications@example.com
SMTP_PASSWORD=<password>
SMTP_FROM=Planarchy <notifications@example.com>
SMTP_FROM=CapaKraken <notifications@example.com>
```
Generate a secure `NEXTAUTH_SECRET`:
@@ -175,11 +175,11 @@ docker compose -f docker-compose.prod.yml up -d --build
# Run database migrations
docker compose -f docker-compose.prod.yml exec app \
npx prisma db push --skip-generate
pnpm db:push
# Seed initial data (first deployment only)
docker compose -f docker-compose.prod.yml exec app \
npx prisma db seed
pnpm db:seed
```
### Manual deployment (current setup)
@@ -188,10 +188,11 @@ Since `capakraken.hartmut-noerenberg.com` runs behind nginx:
```bash
# On the server
cd /home/hartmut/Documents/Copilot/planarchy
cd /home/hartmut/Documents/Copilot/capakraken
git pull origin main
pnpm install
pnpm --filter @capakraken/db exec prisma generate
pnpm db:generate
pnpm db:validate
pnpm --filter @capakraken/web exec next build
rm -rf apps/web/.next/cache # clear stale cache
@@ -283,7 +284,7 @@ Playwright test failure. Check the HTML report artifact in the GitHub Actions ru
The Next.js process isn't running. Check:
```bash
ss -tlnp | grep 3100 # Is anything listening?
tail -50 /tmp/planarchy-dev.log # Check app logs
tail -50 /tmp/capakraken-dev.log # Check app logs
```
Restart:
@@ -296,7 +297,8 @@ pnpm dev & # or pnpm start for production mode
Usually a stale Prisma client after schema changes:
```bash
pnpm --filter @capakraken/db exec prisma generate
pnpm db:generate
pnpm db:validate
rm -rf apps/web/.next
pnpm --filter @capakraken/web exec next build
# Restart the server
@@ -312,5 +314,5 @@ curl -s https://capakraken.hartmut-noerenberg.com/api/ready | jq .
If `postgres: "error"`, verify:
```bash
docker ps | grep postgres # Is container running?
psql -h localhost -p 5433 -U planarchy -d planarchy # Can you connect?
psql -h localhost -p 5433 -U capakraken -d capakraken # Can you connect?
```
+3 -3
View File
@@ -1,7 +1,7 @@
# Dispo Import Implementation Tickets
**Date:** 2026-03-14
**Purpose:** Worker-ready implementation tickets for the clean-slate Dispo v2 import defined in [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/planarchy/docs/dispo-import-implementation.md).
**Purpose:** Worker-ready implementation tickets for the clean-slate Dispo v2 import defined in [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/capakraken/docs/dispo-import-implementation.md).
## How To Use This Ticket Pack
@@ -68,7 +68,7 @@ Freeze the implementation assumptions so multiple workers do not diverge.
**Deliverables**
- decision log appended to [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/planarchy/docs/dispo-import-implementation.md)
- decision log appended to [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/capakraken/docs/dispo-import-implementation.md)
- explicit list of values to seed for roles and internal project buckets
**Acceptance Criteria**
@@ -436,7 +436,7 @@ Apply part-time logic to resource availability without creating fake bookings.
**Goal**
Commit approved staged data into final Planarchy entities.
Commit approved staged data into final CapaKraken entities.
**Scope**
+15 -15
View File
@@ -1,11 +1,11 @@
# Dispo Import Implementation
**Date:** 2026-03-14
**Purpose:** Canonical implementation document for replacing the current Planarchy planning dataset with a clean-slate import from the Dispo v2 Excel workbooks.
**Purpose:** Canonical implementation document for replacing the current CapaKraken planning dataset with a clean-slate import from the Dispo v2 Excel workbooks.
## Scope
This document defines how Planarchy should ingest and normalize the following source workbooks:
This document defines how CapaKraken should ingest and normalize the following source workbooks:
- `/samples/Dispov2/MandatoryDispoCategories_V3.xlsx`
- `/samples/Dispov2/DISPO_2026.xlsx`
@@ -13,7 +13,7 @@ This document defines how Planarchy should ingest and normalize the following so
- `/samples/Dispov2/MV_DispoRoster.xlsx`
- `/samples/Dispov2/Resource Roster_MASTER_FY26_CJ_20251201.xlsx`
The goal is not a raw workbook archive. The goal is a normalized Planarchy dataset that:
The goal is not a raw workbook archive. The goal is a normalized CapaKraken dataset that:
- wipes existing database data and starts from a clean baseline
- imports canonical reference data first
@@ -79,7 +79,7 @@ Use as the source of:
- resource enrichment when missing elsewhere
- aggregate validation after commit
Do not treat PTD/MTD/YTD outputs as canonical source-of-truth records when Planarchy can derive them from normalized data.
Do not treat PTD/MTD/YTD outputs as canonical source-of-truth records when CapaKraken can derive them from normalized data.
### 4. `MV_DispoRoster.xlsx`
@@ -151,20 +151,20 @@ The import commits into the existing planning model:
Relevant current schema anchors:
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L178)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L235)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L334)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L372)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L460)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L754)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L780)
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L815)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L178)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L235)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L334)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L372)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L460)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L754)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L780)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L815)
## Required Implementation Changes
### 1. Canonical Person Identity
Planarchy currently stores both `eid` and `enterpriseId` on `Resource`. The import should operate on a single canonical identity.
CapaKraken currently stores both `eid` and `enterpriseId` on `Resource`. The import should operate on a single canonical identity.
Recommendation:
@@ -392,7 +392,7 @@ Assignments should be written only when a project or internal bucket is resolved
| `[_NA] Public Holiday ... {NA}` | `Vacation(type=PUBLIC_HOLIDAY)` | preferred source of truth is geography-driven generation |
| `[_NA] Weekend {NA}` | no vacation row | derive from calendar |
Public holiday implementation should integrate with the existing vacation planner and batch holiday support in [vacation.ts](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/vacation.ts#L425).
Public holiday implementation should integrate with the existing vacation planner and batch holiday support in [vacation.ts](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/vacation.ts#L425).
### Availability and Part-Time Mapping
@@ -506,7 +506,7 @@ Recommended approach:
Known implementation gap:
- the chargeability forecast currently passes an empty `publicHolidays` list into SAH calculation in [chargeability-report.ts](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/chargeability-report.ts#L167)
- the chargeability forecast currently passes an empty `publicHolidays` list into SAH calculation in [chargeability-report.ts](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/chargeability-report.ts#L167)
Required follow-up:
+7 -7
View File
@@ -2,7 +2,7 @@
**Date:** 2026-03-13
**Related workbook:** `samples/CGIBreakdown_Template/Template_CGI-Breakdown+Calc_25Dez_V0.976_251212_beta_LCR-Update.xlsx`
**Purpose:** Canonical design, field mapping, and implementation plan for a browser-based estimating system in Planarchy.
**Purpose:** Canonical design, field mapping, and implementation plan for a browser-based estimating system in CapaKraken.
## Executive Summary
@@ -15,12 +15,12 @@ The workbook is not a simple calculator. It is a full estimating and pricing sys
- management summaries
- downstream export sheets
Planarchy can support this, but not by copying Excel cell logic into the browser. The right implementation is a dedicated estimating bounded context with:
CapaKraken can support this, but not by copying Excel cell logic into the browser. The right implementation is a dedicated estimating bounded context with:
- a wizard for first-pass estimate creation
- a workspace for iterative revisions
- a typed calculation pipeline
- live linkage to Planarchy resources and roles
- live linkage to CapaKraken resources and roles
- immutable snapshots for auditability
## Design Principles
@@ -39,7 +39,7 @@ The replacement should be:
### 2. Reuse the current platform where it already fits
Useful existing Planarchy primitives:
Useful existing CapaKraken primitives:
- `Resource` for roster, rates, skills, availability, and dynamic metadata
- `Project` for schedule, budget, and project linkage
@@ -134,7 +134,7 @@ That mix is exactly why the app needs separated models for assumptions, scope, d
### Mapping legend
- `Direct`: already maps to an existing first-class Planarchy field
- `Direct`: already maps to an existing first-class CapaKraken field
- `Bridge`: can be bridged short-term, but should move to estimating models
- `Derived`: calculate it, do not persist it as manual source data
- `New Model`: requires estimating schema
@@ -236,7 +236,7 @@ That prevents old approved estimates from changing when roster rates or metadata
### Phase 4. Resource linkage and planning handoff
- connect demand lines to resources, roles, and availability
- add staffing suggestions from current Planarchy data
- add staffing suggestions from current CapaKraken data
- support conversion from approved estimate demand into downstream planning entities
### Phase 5. Exports and approvals
@@ -261,7 +261,7 @@ Implemented baseline in the current codebase:
- version submit, approve, and locked revision-cloning actions
- export artifact scaffolding with stored serializer metadata records
- format-specific export generation with stored payloads for JSON, CSV, XLSX, SAP, and MMP
- live resource-linked staffing rows that can sync current Planarchy rates and persist estimate-version snapshots
- live resource-linked staffing rows that can sync current CapaKraken rates and persist estimate-version snapshots
- explicit live-vs-manual rate mode metadata on demand lines, with server-side recalculation before metrics are persisted
- read-only and draft workspace visibility for manual overrides versus live resource snapshots
- project snapshot persistence on estimate versions
+1 -1
View File
@@ -5,7 +5,7 @@
## Overview
GitLooper is a Claude Code slash command (`/gitlooper:gitlooper`) that connects to Planarchy's Gitea instance, reads open issues, triages them, and autonomously implements fixes/features using spawned sub-agents.
GitLooper is a Claude Code slash command (`/gitlooper:gitlooper`) that connects to CapaKraken's Gitea instance, reads open issues, triages them, and autonomously implements fixes/features using spawned sub-agents.
## Architecture
+1 -1
View File
@@ -2,7 +2,7 @@
## Ziel
Planarchy soll standortabhaengige Feiertage fachlich korrekt berechnen koennen, sodass zwei Personen im selben Land, aber in unterschiedlichen Regionen oder Staedten, unterschiedliche `SAH` und damit unterschiedliche Chargeability erhalten koennen.
CapaKraken soll standortabhaengige Feiertage fachlich korrekt berechnen koennen, sodass zwei Personen im selben Land, aber in unterschiedlichen Regionen oder Staedten, unterschiedliche `SAH` und damit unterschiedliche Chargeability erhalten koennen.
Die Feiertagsaufloesung soll kuenftig diese Prioritaet haben:
+4 -4
View File
@@ -3,18 +3,18 @@
Date: 2026-03-14
Source workbook:
- `/home/hartmut/Documents/Copilot/planarchy/samples/Dispov2/MV_DispoRoster.xlsx`
- `/home/hartmut/Documents/Copilot/capakraken/samples/Dispov2/MV_DispoRoster.xlsx`
- Sheet: `DispoRoster`
- Column K: `MV Ressource Type`
Applied rule:
- If column K equals `Departed`, set `resource.departed = true` for the matching Planarchy resource identified by `EID`.
- If column K equals `Departed`, set `resource.departed = true` for the matching CapaKraken resource identified by `EID`.
Result:
- Workbook rows marked `Departed`: `166`
- Matching resources found in Planarchy: `141`
- Matching resources found in CapaKraken: `141`
- Resources updated to `departed = true`: `141`
- Workbook EIDs not found in Planarchy: `25`
- Workbook EIDs not found in CapaKraken: `25`
Missing EIDs:
- `antonia.melzer`
+1 -1
View File
@@ -8,4 +8,4 @@ Most of this plan has already been implemented:
- admin system settings for AI configuration
- AI summary generation and resource detail UI
Do not use this file as an active backlog. Remaining product work belongs in [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md), and completed implementation detail is reflected in the codebase and [LEARNINGS.md](/home/hartmut/Documents/Copilot/planarchy/LEARNINGS.md).
Do not use this file as an active backlog. Remaining product work belongs in [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md), and completed implementation detail is reflected in the codebase and [LEARNINGS.md](/home/hartmut/Documents/Copilot/capakraken/LEARNINGS.md).
@@ -12,4 +12,4 @@ Examples that are no longer current:
- Redis-backed SSE work has already landed
- Playwright E2E coverage is no longer empty
Use [product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md) for the active backlog and [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/planarchy/research/v2-architecture-proposal-2026-03-11.md) for the still-relevant strategic direction.
Use [product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md) for the active backlog and [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/capakraken/research/v2-architecture-proposal-2026-03-11.md) for the still-relevant strategic direction.
@@ -1,3 +1,3 @@
# Archived Proposal Note
The CGI workbook analysis and implementation proposal were merged into [estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md) so the estimating work now has one canonical document.
The CGI workbook analysis and implementation proposal were merged into [estimating-extension-design.md](/home/hartmut/Documents/Copilot/capakraken/docs/estimating-extension-design.md) so the estimating work now has one canonical document.
@@ -1,3 +1,3 @@
# Archived Mapping Note
The field mapping table was merged into [estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md) so the estimating design, workbook analysis, and implementation plan live in one canonical file.
The field mapping table was merged into [estimating-extension-design.md](/home/hartmut/Documents/Copilot/capakraken/docs/estimating-extension-design.md) so the estimating design, workbook analysis, and implementation plan live in one canonical file.
+1 -1
View File
@@ -16,4 +16,4 @@ Still conceptually relevant, but no longer the canonical backlog:
- staffing suggestion scalability
- index strategy for larger datasets
Use [product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md) for active prioritization and keep this file only as archive context.
Use [product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md) for active prioritization and keep this file only as archive context.
+1 -1
View File
@@ -8,4 +8,4 @@ That work is now only partially relevant as an archive:
- several sorting/view-state pieces were implemented
- Blueprints parity still appears open
The active backlog now lives in [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md).
The active backlog now lives in [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md).
+1 -1
View File
@@ -2,7 +2,7 @@
This sprint plan mixed active refactor work with implementation mechanics that are now stale.
The still-relevant backlog from this document is tracked centrally in [product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md):
The still-relevant backlog from this document is tracked centrally in [product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md):
- widget config typing and layout versioning
- registry-driven dashboard rendering
@@ -1,4 +1,4 @@
# Technischer Rename: planarchy → capakraken — Migrationsplan
# Technischer Rename: planarchy → capakraken — Migrationsplan
## Uebersicht
@@ -36,7 +36,7 @@
# Globaler Find+Replace
find . -type f \( -name "*.ts" -o -name "*.tsx" \) \
-not -path "*/node_modules/*" -not -path "*/.next/*" \
-exec sed -i 's/@planarchy\//@capakraken\//g' {} +
  -exec sed -i 's/@planarchy\//@capakraken\//g' {} +
```
### 1.3 tsconfig.json Path-Mappings (8 Dateien)
@@ -57,19 +57,19 @@ find . -type f \( -name "*.ts" -o -name "*.tsx" \) \
| Alt | Neu |
|-----|-----|
| `POSTGRES_DB: planarchy` | `POSTGRES_DB: capakraken` |
| `POSTGRES_USER: planarchy` | `POSTGRES_USER: capakraken` |
| `POSTGRES_DB: planarchy` | `POSTGRES_DB: capakraken` |
| `POSTGRES_USER: planarchy` | `POSTGRES_USER: capakraken` |
| `POSTGRES_PASSWORD: capakraken_dev` | `POSTGRES_PASSWORD: capakraken_dev` |
| `planarchy_pgdata` (Volume) | `capakraken_pgdata` |
| `planarchy_prod_pgdata` | `capakraken_prod_pgdata` |
| `planarchy_prod_redis` | `capakraken_prod_redis` |
| `planarchy_pgdata` (Volume) | `capakraken_pgdata` |
| `planarchy_prod_pgdata` | `capakraken_prod_pgdata` |
| `planarchy_prod_redis` | `capakraken_prod_redis` |
| `admin@capakraken.dev` (pgAdmin) | `admin@capakraken.dev` |
### 2.2 Datenbank migrieren
```bash
# 1. Backup erstellen
docker exec capakraken-postgres-1 pg_dump -U planarchy planarchy > backup.sql
docker exec capakraken-postgres-1 pg_dump -U planarchy planarchy > backup.sql
# 2. Neue DB + User erstellen
docker exec capakraken-postgres-1 psql -U postgres -c "
@@ -87,7 +87,7 @@ DATABASE_URL=postgresql://capakraken:capakraken_dev@localhost:5433/capakraken
### 2.3 Environment-Dateien (3 Dateien)
```
DATABASE_URL=postgresql://planarchy:capakraken_dev@localhost:5433/planarchy
DATABASE_URL=postgresql://planarchy:capakraken_dev@localhost:5433/planarchy
→ DATABASE_URL=postgresql://capakraken:capakraken_dev@localhost:5433/capakraken
```
@@ -97,25 +97,25 @@ DATABASE_URL=postgresql://planarchy:capakraken_dev@localhost:5433/planarchy
### 3.1 GitHub Actions (.github/workflows/ci.yml, 25 Vorkommen)
```bash
sed -i 's/@planarchy\//@capakraken\//g' .github/workflows/ci.yml
sed -i 's/@planarchy\//@capakraken\//g' .github/workflows/ci.yml
sed -i 's/capakraken_test/capakraken_test/g' .github/workflows/ci.yml
sed -i 's/POSTGRES_USER: planarchy/POSTGRES_USER: capakraken/g' .github/workflows/ci.yml
sed -i 's/pg_isready -U planarchy/pg_isready -U capakraken/g' .github/workflows/ci.yml
sed -i 's/POSTGRES_USER: planarchy/POSTGRES_USER: capakraken/g' .github/workflows/ci.yml
sed -i 's/pg_isready -U planarchy/pg_isready -U capakraken/g' .github/workflows/ci.yml
```
### 3.2 Root package.json Scripts (9 Vorkommen)
```bash
sed -i 's/@planarchy\//@capakraken\//g' package.json
sed -i 's/@planarchy\//@capakraken\//g' package.json
```
### 3.3 Start/Stop/Restart Scripts
```bash
sed -i 's/planarchy/capakraken/g' scripts/start.sh scripts/stop.sh scripts/restart.sh
sed -i 's/planarchy/capakraken/g' scripts/start.sh scripts/stop.sh scripts/restart.sh
```
### 3.4 Dependabot
```bash
sed -i 's/planarchy/capakraken/g' .github/dependabot.yml
sed -i 's/planarchy/capakraken/g' .github/dependabot.yml
```
---
@@ -132,17 +132,17 @@ viewer@capakraken.dev → viewer@capakraken.dev
### 4.2 E2E-Tests (11 Spec-Dateien)
```bash
find apps/web/e2e -name "*.spec.ts" \
-exec sed -i 's/@planarchy\.dev/@capakraken.dev/g' {} +
  -exec sed -i 's/@planarchy\.dev/@capakraken.dev/g' {} +
```
### 4.3 LocalStorage-Keys
```
planarchy_theme → capakraken_theme
planarchy_sidebar_collapsed → capakraken_sidebar_collapsed
planarchy_prefs → capakraken_prefs
planarchy_dashboard_v1 → capakraken_dashboard_v1
planarchy_pwa_dismiss → capakraken_pwa_dismiss
planarchy-chat-messages → capakraken-chat-messages
planarchy_theme               → capakraken_theme
planarchy_sidebar_collapsed   → capakraken_sidebar_collapsed
planarchy_prefs               → capakraken_prefs
planarchy_dashboard_v1        → capakraken_dashboard_v1
planarchy_pwa_dismiss         → capakraken_pwa_dismiss
planarchy-chat-messages       → capakraken-chat-messages
```
### 4.4 Email-Defaults (3 Dateien)
@@ -158,7 +158,7 @@ noreply@capakraken.app → noreply@capakraken.app
```bash
# Globaler Replace in allen .md Dateien
find . -name "*.md" -not -path "*/node_modules/*" \
-exec sed -i 's/planarchy/capakraken/g; s/Planarchy/CapaKraken/g; s/plANARCHY/CapaKraken/g' {} +
  -exec sed -i 's/planarchy/capakraken/g; s/Planarchy/CapaKraken/g; s/plANARCHY/CapaKraken/g' {} +
```
### 5.2 CLAUDE.md aktualisieren
@@ -166,7 +166,7 @@ find . -name "*.md" -not -path "*/node_modules/*" \
### 5.3 Code-Kommentare
```bash
grep -rn "planarchy" --include="*.ts" --include="*.tsx" . \
grep -rn "planarchy" --include="*.ts" --include="*.tsx" . \
| grep -v node_modules | grep -v .next | grep "\/\/"
# Manuell pruefen und aendern
```
@@ -192,7 +192,7 @@ rm -rf node_modules apps/web/node_modules packages/*/node_modules
pnpm install
# 2. Prisma regenerieren
pnpm --filter @capakraken/db exec prisma generate
pnpm db:generate
# 3. TypeScript pruefen
pnpm --filter @capakraken/web exec tsc --noEmit
@@ -215,8 +215,8 @@ pnpm test:e2e
| Risiko | Mitigation |
|--------|-----------|
| **pnpm Workspace-Aufloesung bricht** | Nach Rename sofort `pnpm install` ausfuehren |
| **Import-Pfade nicht komplett ersetzt** | `grep -rn "@planarchy" --include="*.ts"` als Kontrolle |
| **Docker Volumes mit alten Namen** | Alte Volumes manuell loeschen: `docker volume rm planarchy_pgdata` |
| **Import-Pfade nicht komplett ersetzt** | `grep -rn "@planarchy" --include="*.ts"` als Kontrolle |
| **Docker Volumes mit alten Namen** | Alte Volumes manuell loeschen: `docker volume rm planarchy_pgdata` |
| **Bestehende User-Sessions invalide** | Alle User muessen sich neu einloggen (NEXTAUTH_SECRET bleibt gleich) |
| **LocalStorage-Keys veraltet** | Alte Keys werden ignoriert, neue Defaults greifen |
| **Git-History referenziert alten Namen** | Kein Problem — History bleibt unveraendert |
@@ -234,4 +234,4 @@ pnpm test:e2e
7. `pnpm test:unit` → alle Tests gruen?
8. **Phase 5** (Dokumentation)
9. **Phase 7** (Vollstaendige Verifikation)
10. **Commit + PR** als einzelner "chore: rename planarchy → capakraken" Commit
10. **Commit + PR** als einzelner "chore: rename planarchy → capakraken" Commit
@@ -6,7 +6,7 @@ Scope: analysis only. No runtime behavior was changed in this pass.
## Executive Summary
The biggest performance costs in Planarchy currently come from three patterns:
The biggest performance costs in CapaKraken currently come from three patterns:
1. Broad data fetches followed by repeated in-memory filtering/grouping.
2. Expensive client-side derivations on screens with large datasets, especially Timeline.
@@ -20,11 +20,11 @@ The highest-value optimization target is the Timeline stack. After that, the bes
Relevant files:
- [TimelineContext.tsx](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/timeline/TimelineContext.tsx)
- [TimelineView.tsx](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/timeline/TimelineView.tsx)
- [TimelineResourcePanel.tsx](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/timeline/TimelineResourcePanel.tsx)
- [TimelineProjectPanel.tsx](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/timeline/TimelineProjectPanel.tsx)
- [timeline.ts](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/timeline.ts)
- [TimelineContext.tsx](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/timeline/TimelineContext.tsx)
- [TimelineView.tsx](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/timeline/TimelineView.tsx)
- [TimelineResourcePanel.tsx](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/timeline/TimelineResourcePanel.tsx)
- [TimelineProjectPanel.tsx](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/timeline/TimelineProjectPanel.tsx)
- [timeline.ts](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/timeline.ts)
Observed issues:
@@ -43,8 +43,8 @@ Impact:
Relevant files:
- [ChargeabilityReportClient.tsx](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/reports/ChargeabilityReportClient.tsx)
- [chargeability-report.ts](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/chargeability-report.ts)
- [ChargeabilityReportClient.tsx](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/reports/ChargeabilityReportClient.tsx)
- [chargeability-report.ts](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/chargeability-report.ts)
Observed issues:
@@ -62,9 +62,9 @@ Impact:
Relevant files:
- [dashboard.ts](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/dashboard.ts)
- [get-chargeability-overview.ts](/home/hartmut/Documents/Copilot/planarchy/packages/application/src/use-cases/dashboard/get-chargeability-overview.ts)
- [get-overview.ts](/home/hartmut/Documents/Copilot/planarchy/packages/application/src/use-cases/dashboard/get-overview.ts)
- [dashboard.ts](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/dashboard.ts)
- [get-chargeability-overview.ts](/home/hartmut/Documents/Copilot/capakraken/packages/application/src/use-cases/dashboard/get-chargeability-overview.ts)
- [get-overview.ts](/home/hartmut/Documents/Copilot/capakraken/packages/application/src/use-cases/dashboard/get-overview.ts)
Observed issues:
@@ -80,8 +80,8 @@ Impact:
Relevant files:
- [ResourcesClient.tsx](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/app/(app)/resources/ResourcesClient.tsx)
- [resource.ts](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/resource.ts)
- [ResourcesClient.tsx](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/app/(app)/resources/ResourcesClient.tsx)
- [resource.ts](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/resource.ts)
Observed issues:
@@ -97,7 +97,7 @@ Impact:
Relevant files:
- [list-assignment-bookings.ts](/home/hartmut/Documents/Copilot/planarchy/packages/application/src/use-cases/allocation/list-assignment-bookings.ts)
- [list-assignment-bookings.ts](/home/hartmut/Documents/Copilot/capakraken/packages/application/src/use-cases/allocation/list-assignment-bookings.ts)
Observed issues:
@@ -112,7 +112,7 @@ Impact:
Relevant schema:
- [schema.prisma](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma)
- [schema.prisma](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma)
Good coverage already exists for:
+11 -11
View File
@@ -6,12 +6,12 @@
## Canonical Documents
- Active product and refactor backlog: this file
- Estimating system design and workbook mapping: [estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md)
- Dispo clean-slate import design and field mapping: [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/planarchy/docs/dispo-import-implementation.md)
- Dispo worker ticket pack and dependency breakdown: [dispo-import-implementation-tickets.md](/home/hartmut/Documents/Copilot/planarchy/docs/dispo-import-implementation-tickets.md)
- Demand/assignment migration cutover and readiness policy: [demand-assignment-migration-cutover.md](/home/hartmut/Documents/Copilot/planarchy/docs/demand-assignment-migration-cutover.md)
- Strategic longer-horizon architecture direction: [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/planarchy/research/v2-architecture-proposal-2026-03-11.md)
- Implementation history and decisions: [LEARNINGS.md](/home/hartmut/Documents/Copilot/planarchy/LEARNINGS.md)
- Estimating system design and workbook mapping: [estimating-extension-design.md](/home/hartmut/Documents/Copilot/capakraken/docs/estimating-extension-design.md)
- Dispo clean-slate import design and field mapping: [dispo-import-implementation.md](/home/hartmut/Documents/Copilot/capakraken/docs/dispo-import-implementation.md)
- Dispo worker ticket pack and dependency breakdown: [dispo-import-implementation-tickets.md](/home/hartmut/Documents/Copilot/capakraken/docs/dispo-import-implementation-tickets.md)
- Demand/assignment migration cutover and readiness policy: [demand-assignment-migration-cutover.md](/home/hartmut/Documents/Copilot/capakraken/docs/demand-assignment-migration-cutover.md)
- Strategic longer-horizon architecture direction: [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/capakraken/research/v2-architecture-proposal-2026-03-11.md)
- Implementation history and decisions: [LEARNINGS.md](/home/hartmut/Documents/Copilot/capakraken/LEARNINGS.md)
Older plans and reviews were left in place only as archive notes so active guidance is no longer split across stale task lists.
@@ -409,7 +409,7 @@ Use a single orchestrator and split roadmap execution into package-owned workstr
| Agent | Scope | Primary Files / Packages | Deliverables | Notes |
|---|---|---|---|---|
| `A1-architect` | keep roadmap, contracts, and merge boundaries coherent | [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md), [docs/estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md), shared contract entry points | acceptance criteria, sequencing, shared-file coordination | should not implement feature code unless integration is blocked |
| `A1-architect` | keep roadmap, contracts, and merge boundaries coherent | [docs/product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md), [docs/estimating-extension-design.md](/home/hartmut/Documents/Copilot/capakraken/docs/estimating-extension-design.md), shared contract entry points | acceptance criteria, sequencing, shared-file coordination | should not implement feature code unless integration is blocked |
| `C1-estimate-backend` | estimate domain, router, persistence, exports | `packages/api`, `packages/application`, `packages/engine`, `packages/db`, `packages/shared` | workspace read/write procedures, export serializers, version actions, metrics persistence | owns server-side behavior and cross-package type safety |
| `C2-estimate-frontend` | estimates pages, wizard follow-up, workspace tabs | `apps/web/src/app/(app)/estimates`, `apps/web/src/components/estimates`, shared UI components | detail workspace, overview/assumptions/scope/rates tabs, iteration UX | should avoid editing backend contracts without handoff |
| `T1-regression` | tests and runtime verification | `packages/*/test*`, `apps/web` verification paths, Docker runtime checks | regression tests, package typechecks, app smoke validation | runs after each integration checkpoint |
@@ -445,7 +445,7 @@ Current target: execute the demand/assignment persistence split without blocking
| Topic | Canonical File | Notes |
|---|---|---|
| Active backlog | [product-roadmap.md](/home/hartmut/Documents/Copilot/planarchy/docs/product-roadmap.md) | Update this instead of reopening old plan files. |
| Estimating design and field mapping | [estimating-extension-design.md](/home/hartmut/Documents/Copilot/planarchy/docs/estimating-extension-design.md) | Holds workbook analysis, mapping, and implementation plan. |
| Strategic architecture direction | [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/planarchy/research/v2-architecture-proposal-2026-03-11.md) | Keep as strategy, not sprint backlog. |
| Historical decisions | [LEARNINGS.md](/home/hartmut/Documents/Copilot/planarchy/LEARNINGS.md) | Append-only log. |
| Active backlog | [product-roadmap.md](/home/hartmut/Documents/Copilot/capakraken/docs/product-roadmap.md) | Update this instead of reopening old plan files. |
| Estimating design and field mapping | [estimating-extension-design.md](/home/hartmut/Documents/Copilot/capakraken/docs/estimating-extension-design.md) | Holds workbook analysis, mapping, and implementation plan. |
| Strategic architecture direction | [v2-architecture-proposal-2026-03-11.md](/home/hartmut/Documents/Copilot/capakraken/research/v2-architecture-proposal-2026-03-11.md) | Keep as strategy, not sprint backlog. |
| Historical decisions | [LEARNINGS.md](/home/hartmut/Documents/Copilot/capakraken/LEARNINGS.md) | Append-only log. |
+1 -1
View File
@@ -1,4 +1,4 @@
# Planarchy v2 Refactoring Plan
# CapaKraken v2 Refactoring Plan
**Date:** 2026-03-14
**Status:** Proposed
+3 -3
View File
@@ -2,7 +2,7 @@
## Scope
Static security review of the current Planarchy codebase, focused on:
Static security review of the current CapaKraken codebase, focused on:
- authentication and authorization boundaries
- sensitive read/write API routes
@@ -15,7 +15,7 @@ This review was done by parallel audit slices across API routes, auth/session co
## Executive Summary
The main security problem is not one isolated bug. It is that Planarchy currently treats "authenticated" as broadly equivalent to "allowed to see most planning data". That shows up in four places:
The main security problem is not one isolated bug. It is that CapaKraken currently treats "authenticated" as broadly equivalent to "allowed to see most planning data". That shows up in four places:
1. any signed-in user can currently create a vacation request for any `resourceId`
2. many sensitive read routes are only protected by `protectedProcedure`
@@ -119,7 +119,7 @@ Any signed-in user connected to the timeline SSE endpoint can receive metadata a
**Impact**
Planarchy parses spreadsheet data from files, including browser-side and import-related flows, with a library version that has known high-severity issues when reading crafted workbooks. Export-only flows are lower risk; read/parse flows are the real problem.
CapaKraken parses spreadsheet data from files, including browser-side and import-related flows, with a library version that has known high-severity issues when reading crafted workbooks. Export-only flows are lower risk; read/parse flows are the real problem.
**Recommended fix**
+3
View File
@@ -10,8 +10,11 @@
"test:unit": "turbo test:unit",
"test:e2e": "turbo test:e2e",
"db:doctor": "node ./scripts/db-doctor.mjs capakraken",
"db:prisma": "node ./scripts/prisma-with-env.mjs",
"db:push": "node ./scripts/with-env.mjs pnpm --filter @capakraken/db db:push",
"db:migrate": "node ./scripts/with-env.mjs pnpm --filter @capakraken/db db:migrate",
"db:generate": "node ./scripts/with-env.mjs pnpm --filter @capakraken/db db:generate",
"db:validate": "node ./scripts/with-env.mjs pnpm --filter @capakraken/db db:validate",
"db:seed": "node ./scripts/with-env.mjs pnpm --filter @capakraken/db db:seed",
"db:studio": "node ./scripts/with-env.mjs pnpm --filter @capakraken/db db:studio",
"db:reset:dispo": "pnpm --filter @capakraken/db db:reset:dispo",
@@ -1,5 +1,5 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import { PermissionKey, type PermissionKey as PermissionKeyValue } from "@capakraken/shared";
import { PermissionKey, SystemRole, type PermissionKey as PermissionKeyValue } from "@capakraken/shared";
import {
ASSISTANT_CONFIRMATION_PREFIX,
canExecuteMutationTool,
@@ -12,8 +12,11 @@ import {
} from "../router/assistant.js";
import { TOOL_DEFINITIONS } from "../router/assistant-tools.js";
function getToolNames(permissions: PermissionKeyValue[]) {
return getAvailableAssistantTools(new Set(permissions)).map((tool) => tool.function.name);
function getToolNames(
permissions: PermissionKeyValue[],
userRole: SystemRole = SystemRole.ADMIN,
) {
return getAvailableAssistantTools(new Set(permissions), userRole).map((tool) => tool.function.name);
}
const TEST_USER_ID = "assistant-test-user";
@@ -187,6 +190,9 @@ describe("assistant router tool gating", () => {
expect(withoutAdvanced).not.toContain("find_best_project_resource");
expect(withAdvanced).toContain("find_best_project_resource");
expect(withAdvanced).toContain("get_chargeability_report");
expect(withAdvanced).toContain("get_resource_computation_graph");
expect(withAdvanced).toContain("get_project_computation_graph");
});
it("keeps user administration tools behind manageUsers", () => {
@@ -201,6 +207,93 @@ describe("assistant router tool gating", () => {
const names = getToolNames([PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS]);
expect(names).not.toContain("find_best_project_resource");
expect(names).not.toContain("get_chargeability_report");
expect(names).not.toContain("get_resource_computation_graph");
expect(names).not.toContain("get_project_computation_graph");
});
it("keeps controller-grade readmodels hidden from plain users while allowing controller roles", () => {
const controllerNames = getToolNames([
PermissionKey.VIEW_COSTS,
PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS,
], SystemRole.CONTROLLER);
const userNames = getToolNames([
PermissionKey.VIEW_COSTS,
PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS,
], SystemRole.USER);
expect(controllerNames).toContain("get_chargeability_report");
expect(controllerNames).toContain("get_resource_computation_graph");
expect(controllerNames).toContain("get_project_computation_graph");
expect(userNames).not.toContain("get_chargeability_report");
expect(userNames).not.toContain("get_resource_computation_graph");
expect(userNames).not.toContain("get_project_computation_graph");
});
it("keeps timeline write parity tools behind manager/admin role, manageAllocations, and advanced assistant access", () => {
const managerNames = getToolNames([
PermissionKey.MANAGE_ALLOCATIONS,
PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS,
], SystemRole.MANAGER);
const userNames = getToolNames([
PermissionKey.MANAGE_ALLOCATIONS,
PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS,
], SystemRole.USER);
const missingAdvancedNames = getToolNames([
PermissionKey.MANAGE_ALLOCATIONS,
], SystemRole.MANAGER);
expect(managerNames).toContain("update_timeline_allocation_inline");
expect(managerNames).toContain("apply_timeline_project_shift");
expect(managerNames).toContain("quick_assign_timeline_resource");
expect(managerNames).toContain("batch_quick_assign_timeline_resources");
expect(managerNames).toContain("batch_shift_timeline_allocations");
expect(userNames).not.toContain("update_timeline_allocation_inline");
expect(userNames).not.toContain("apply_timeline_project_shift");
expect(userNames).not.toContain("quick_assign_timeline_resource");
expect(userNames).not.toContain("batch_quick_assign_timeline_resources");
expect(userNames).not.toContain("batch_shift_timeline_allocations");
expect(missingAdvancedNames).not.toContain("update_timeline_allocation_inline");
expect(missingAdvancedNames).not.toContain("quick_assign_timeline_resource");
});
it("keeps holiday calendar mutation tools admin-only while leaving read tools available", () => {
const adminNames = getToolNames([], SystemRole.ADMIN);
const managerNames = getToolNames([], SystemRole.MANAGER);
expect(adminNames).toContain("list_holiday_calendars");
expect(adminNames).toContain("get_holiday_calendar");
expect(adminNames).toContain("preview_resolved_holiday_calendar");
expect(adminNames).toContain("create_holiday_calendar");
expect(managerNames).toContain("list_holiday_calendars");
expect(managerNames).toContain("get_holiday_calendar");
expect(managerNames).toContain("preview_resolved_holiday_calendar");
expect(managerNames).not.toContain("create_holiday_calendar");
expect(managerNames).not.toContain("update_holiday_calendar");
expect(managerNames).not.toContain("delete_holiday_calendar");
expect(managerNames).not.toContain("create_holiday_calendar_entry");
expect(managerNames).not.toContain("update_holiday_calendar_entry");
expect(managerNames).not.toContain("delete_holiday_calendar_entry");
});
it("keeps country and metro-city mutation tools admin-only while leaving read tools available", () => {
const adminNames = getToolNames([], SystemRole.ADMIN);
const managerNames = getToolNames([], SystemRole.MANAGER);
expect(adminNames).toContain("list_countries");
expect(adminNames).toContain("get_country");
expect(adminNames).toContain("create_country");
expect(adminNames).toContain("update_country");
expect(adminNames).toContain("create_metro_city");
expect(adminNames).toContain("update_metro_city");
expect(adminNames).toContain("delete_metro_city");
expect(managerNames).toContain("list_countries");
expect(managerNames).toContain("get_country");
expect(managerNames).not.toContain("create_country");
expect(managerNames).not.toContain("update_country");
expect(managerNames).not.toContain("create_metro_city");
expect(managerNames).not.toContain("update_metro_city");
expect(managerNames).not.toContain("delete_metro_city");
});
it("blocks mutation tools until the user confirms a prior assistant summary", () => {
@@ -397,5 +490,16 @@ describe("assistant router tool gating", () => {
expect(toolDescriptions.get("list_users")).toContain("manageUsers");
expect(toolDescriptions.get("create_task_for_user")).toContain("manageProjects");
expect(toolDescriptions.get("send_broadcast")).toContain("manageProjects");
expect(toolDescriptions.get("create_holiday_calendar")).toContain("Admin role");
expect(toolDescriptions.get("create_holiday_calendar_entry")).toContain("Admin role");
expect(toolDescriptions.get("get_chargeability_report")).toContain("controller/manager/admin");
expect(toolDescriptions.get("get_chargeability_report")).toContain("viewCosts");
expect(toolDescriptions.get("get_resource_computation_graph")).toContain("useAssistantAdvancedTools");
expect(toolDescriptions.get("get_project_computation_graph")).toContain("controller/manager/admin");
expect(toolDescriptions.get("update_timeline_allocation_inline")).toContain("manager/admin");
expect(toolDescriptions.get("apply_timeline_project_shift")).toContain("manageAllocations");
expect(toolDescriptions.get("quick_assign_timeline_resource")).toContain("useAssistantAdvancedTools");
expect(toolDescriptions.get("batch_quick_assign_timeline_resources")).toContain("manageAllocations");
expect(toolDescriptions.get("batch_shift_timeline_allocations")).toContain("manager/admin");
});
});
@@ -1,5 +1,5 @@
import { describe, expect, it, vi } from "vitest";
import { PermissionKey } from "@capakraken/shared";
import { AllocationStatus, PermissionKey, SystemRole } from "@capakraken/shared";
vi.mock("@capakraken/application", async (importOriginal) => {
const actual = await importOriginal<typeof import("@capakraken/application")>();
@@ -11,17 +11,43 @@ vi.mock("@capakraken/application", async (importOriginal) => {
};
});
vi.mock("../sse/event-bus.js", () => ({
emitAllocationCreated: vi.fn(),
emitAllocationDeleted: vi.fn(),
emitAllocationUpdated: vi.fn(),
emitProjectShifted: vi.fn(),
}));
vi.mock("../lib/budget-alerts.js", () => ({
checkBudgetThresholds: vi.fn(),
}));
vi.mock("../lib/cache.js", () => ({
invalidateDashboardCache: vi.fn(),
}));
import { executeTool, type ToolContext } from "../router/assistant-tools.js";
function createToolContext(
db: Record<string, unknown>,
permissions: PermissionKey[] = [],
userRole: SystemRole = SystemRole.ADMIN,
): ToolContext {
return {
db: db as ToolContext["db"],
userId: "user_1",
userRole: "ADMIN",
userRole,
permissions: new Set(permissions),
session: {
user: { email: "assistant@example.com", name: "Assistant User", image: null },
expires: "2026-03-29T00:00:00.000Z",
},
dbUser: {
id: "user_1",
systemRole: userRole,
permissionOverrides: null,
},
roleDefaults: null,
};
}
@@ -542,6 +568,686 @@ describe("assistant advanced tools and scoping", () => {
]);
});
it("updates timeline allocations inline through the real timeline router mutation", async () => {
const existingAssignment = {
id: "assignment_1",
demandRequirementId: null,
resourceId: "resource_1",
projectId: "project_1",
startDate: new Date("2026-03-16"),
endDate: new Date("2026-03-20"),
hoursPerDay: 4,
percentage: 50,
role: "Compositor",
roleId: "role_comp",
dailyCostCents: 20000,
status: AllocationStatus.PROPOSED,
metadata: {},
createdAt: new Date("2026-03-13"),
updatedAt: new Date("2026-03-13"),
resource: {
id: "resource_1",
displayName: "Alice",
eid: "E-001",
lcrCents: 5000,
availability: {
monday: 8,
tuesday: 8,
wednesday: 8,
thursday: 8,
friday: 8,
saturday: 0,
sunday: 0,
},
},
project: { id: "project_1", name: "Project One", shortCode: "PRJ" },
roleEntity: { id: "role_comp", name: "Compositor", color: "#111111" },
demandRequirement: null,
};
const updatedAssignment = {
...existingAssignment,
hoursPerDay: 6,
endDate: new Date("2026-03-21"),
percentage: 75,
dailyCostCents: 30000,
metadata: { includeSaturday: true },
updatedAt: new Date("2026-03-14"),
};
const db = {
allocation: {
findUnique: vi.fn().mockResolvedValue(null),
},
demandRequirement: {
findUnique: vi.fn().mockResolvedValue(null),
},
assignment: {
findUnique: vi.fn().mockResolvedValue(existingAssignment),
update: vi.fn().mockResolvedValue(updatedAssignment),
},
resource: {
findUnique: vi.fn().mockResolvedValue({
id: "resource_1",
eid: "E-001",
displayName: "Alice",
lcrCents: 5000,
availability: {
monday: 8,
tuesday: 8,
wednesday: 8,
thursday: 8,
friday: 8,
saturday: 0,
sunday: 0,
},
}),
},
vacation: {
findMany: vi.fn().mockResolvedValue([]),
},
auditLog: {
create: vi.fn().mockResolvedValue({}),
},
$transaction: vi.fn(async (callback: (tx: unknown) => unknown) => callback(db)),
};
const ctx = createToolContext(
db,
[PermissionKey.MANAGE_ALLOCATIONS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
SystemRole.MANAGER,
);
const result = await executeTool(
"update_timeline_allocation_inline",
JSON.stringify({
allocationId: "assignment_1",
hoursPerDay: 6,
endDate: "2026-03-21",
includeSaturday: true,
}),
ctx,
);
expect(result.action).toEqual({ type: "invalidate", scope: ["allocation", "timeline", "project"] });
expect(JSON.parse(result.content)).toEqual(
expect.objectContaining({
success: true,
allocation: expect.objectContaining({
id: "assignment_1",
hoursPerDay: 6,
endDate: "2026-03-21",
}),
}),
);
expect(db.assignment.update).toHaveBeenCalledWith(
expect.objectContaining({
where: { id: "assignment_1" },
}),
);
});
it("quick-assigns a timeline resource through the real timeline router mutation", async () => {
const createdAssignment = {
id: "assignment_quick_1",
demandRequirementId: null,
resourceId: "resource_1",
projectId: "project_1",
startDate: new Date("2026-03-16"),
endDate: new Date("2026-03-20"),
hoursPerDay: 8,
percentage: 100,
role: "Team Member",
roleId: null,
dailyCostCents: 40000,
status: AllocationStatus.PROPOSED,
metadata: { source: "quickAssign" },
createdAt: new Date("2026-03-13"),
updatedAt: new Date("2026-03-13"),
resource: {
id: "resource_1",
displayName: "Alice",
eid: "E-001",
lcrCents: 5000,
},
project: { id: "project_1", name: "Gelddruckmaschine", shortCode: "GDM" },
roleEntity: null,
demandRequirement: null,
};
const db = {
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Gelddruckmaschine",
shortCode: "GDM",
status: "ACTIVE",
responsiblePerson: null,
}),
},
resource: {
findUnique: vi.fn().mockResolvedValue({
id: "resource_1",
eid: "E-001",
displayName: "Alice",
lcrCents: 5000,
availability: {
monday: 8,
tuesday: 8,
wednesday: 8,
thursday: 8,
friday: 8,
saturday: 0,
sunday: 0,
},
}),
},
allocation: {
findMany: vi.fn().mockResolvedValue([]),
create: vi.fn(),
},
assignment: {
findMany: vi.fn().mockResolvedValue([]),
create: vi.fn().mockResolvedValue(createdAssignment),
},
vacation: {
findMany: vi.fn().mockResolvedValue([]),
},
auditLog: {
create: vi.fn().mockResolvedValue({}),
},
$transaction: vi.fn(async (callback: (tx: unknown) => unknown) => callback(db)),
};
const ctx = createToolContext(
db,
[PermissionKey.MANAGE_ALLOCATIONS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
SystemRole.MANAGER,
);
const result = await executeTool(
"quick_assign_timeline_resource",
JSON.stringify({
resourceIdentifier: "resource_1",
projectIdentifier: "project_1",
startDate: "2026-03-16",
endDate: "2026-03-20",
hoursPerDay: 8,
}),
ctx,
);
expect(result.action).toEqual({ type: "invalidate", scope: ["allocation", "timeline", "project"] });
expect(JSON.parse(result.content)).toEqual(
expect.objectContaining({
success: true,
allocation: expect.objectContaining({
id: "assignment_quick_1",
projectId: "project_1",
resourceId: "resource_1",
hoursPerDay: 8,
}),
}),
);
expect(db.assignment.create).toHaveBeenCalled();
});
it("batch quick-assigns timeline resources through the real timeline router mutation", async () => {
const db = {
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Gelddruckmaschine",
shortCode: "GDM",
status: "ACTIVE",
responsiblePerson: null,
}),
},
resource: {
findUnique: vi.fn().mockImplementation(async ({ where }: { where: { id: string } }) => ({
id: where.id,
eid: `E-${where.id}`,
displayName: `Resource ${where.id}`,
lcrCents: 5000,
availability: {
monday: 8,
tuesday: 8,
wednesday: 8,
thursday: 8,
friday: 8,
saturday: 0,
sunday: 0,
},
})),
},
assignment: {
findMany: vi.fn().mockResolvedValue([]),
create: vi
.fn()
.mockResolvedValueOnce({ id: "assignment_batch_1" })
.mockResolvedValueOnce({ id: "assignment_batch_2" }),
},
auditLog: {
create: vi.fn().mockResolvedValue({}),
},
vacation: {
findMany: vi.fn().mockResolvedValue([]),
},
$transaction: vi.fn(async (callback: (tx: unknown) => unknown) => callback(db)),
};
const ctx = createToolContext(
db,
[PermissionKey.MANAGE_ALLOCATIONS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
SystemRole.MANAGER,
);
const result = await executeTool(
"batch_quick_assign_timeline_resources",
JSON.stringify({
assignments: [
{
resourceIdentifier: "resource_1",
projectIdentifier: "project_1",
startDate: "2026-03-16",
endDate: "2026-03-20",
hoursPerDay: 8,
},
{
resourceIdentifier: "resource_2",
projectIdentifier: "project_1",
startDate: "2026-03-23",
endDate: "2026-03-27",
hoursPerDay: 6,
},
],
}),
ctx,
);
expect(result.action).toEqual({ type: "invalidate", scope: ["allocation", "timeline", "project"] });
expect(JSON.parse(result.content)).toEqual(
expect.objectContaining({
success: true,
count: 2,
}),
);
expect(db.assignment.create).toHaveBeenCalledTimes(2);
});
it("applies timeline project shifts through the real timeline router mutation", async () => {
const { listAssignmentBookings } = await import("@capakraken/application");
vi.mocked(listAssignmentBookings).mockResolvedValueOnce([]);
const db = {
project: {
findUnique: vi.fn().mockResolvedValue({
id: "project_1",
name: "Gelddruckmaschine",
shortCode: "GDM",
status: "ACTIVE",
responsiblePerson: null,
budgetCents: 100000,
winProbability: 100,
startDate: new Date("2026-03-16"),
endDate: new Date("2026-03-20"),
}),
update: vi.fn().mockResolvedValue({
id: "project_1",
startDate: new Date("2026-03-23"),
endDate: new Date("2026-03-27"),
}),
},
demandRequirement: {
findMany: vi.fn().mockResolvedValue([]),
},
assignment: {
findMany: vi.fn().mockResolvedValue([]),
},
auditLog: {
create: vi.fn().mockResolvedValue({}),
},
$transaction: vi.fn(async (callback: (tx: unknown) => unknown) => callback(db)),
};
const ctx = createToolContext(
db,
[PermissionKey.MANAGE_ALLOCATIONS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
SystemRole.MANAGER,
);
const result = await executeTool(
"apply_timeline_project_shift",
JSON.stringify({
projectIdentifier: "project_1",
newStartDate: "2026-03-23",
newEndDate: "2026-03-27",
}),
ctx,
);
expect(result.action).toEqual({ type: "invalidate", scope: ["allocation", "timeline", "project"] });
expect(JSON.parse(result.content)).toEqual(
expect.objectContaining({
success: true,
project: expect.objectContaining({
id: "project_1",
startDate: "2026-03-23",
endDate: "2026-03-27",
}),
validation: expect.objectContaining({
valid: true,
}),
}),
);
expect(db.project.update).toHaveBeenCalledWith(
expect.objectContaining({
where: { id: "project_1" },
}),
);
});
it("batch-shifts timeline allocations through the real timeline router mutation", async () => {
const existingAssignment = {
id: "assignment_1",
demandRequirementId: null,
resourceId: "resource_1",
projectId: "project_1",
startDate: new Date("2026-03-16"),
endDate: new Date("2026-03-20"),
hoursPerDay: 4,
percentage: 50,
role: "Compositor",
roleId: "role_comp",
dailyCostCents: 20000,
status: AllocationStatus.PROPOSED,
metadata: {},
createdAt: new Date("2026-03-13"),
updatedAt: new Date("2026-03-13"),
resource: {
id: "resource_1",
displayName: "Alice",
eid: "E-001",
lcrCents: 5000,
availability: {
monday: 8,
tuesday: 8,
wednesday: 8,
thursday: 8,
friday: 8,
saturday: 0,
sunday: 0,
},
},
project: { id: "project_1", name: "Project One", shortCode: "PRJ" },
roleEntity: { id: "role_comp", name: "Compositor", color: "#111111" },
demandRequirement: null,
};
const db = {
allocation: {
findUnique: vi.fn().mockResolvedValue(null),
},
demandRequirement: {
findUnique: vi.fn().mockResolvedValue(null),
},
assignment: {
findUnique: vi.fn().mockResolvedValue(existingAssignment),
update: vi.fn().mockResolvedValue({
...existingAssignment,
startDate: new Date("2026-03-18"),
endDate: new Date("2026-03-22"),
}),
},
auditLog: {
create: vi.fn().mockResolvedValue({}),
},
$transaction: vi.fn(async (callback: (tx: unknown) => unknown) => callback(db)),
};
const ctx = createToolContext(
db,
[PermissionKey.MANAGE_ALLOCATIONS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
SystemRole.MANAGER,
);
const result = await executeTool(
"batch_shift_timeline_allocations",
JSON.stringify({
allocationIds: ["assignment_1"],
daysDelta: 2,
mode: "move",
}),
ctx,
);
expect(result.action).toEqual({ type: "invalidate", scope: ["allocation", "timeline", "project"] });
expect(JSON.parse(result.content)).toEqual(
expect.objectContaining({
success: true,
count: 1,
}),
);
expect(db.assignment.update).toHaveBeenCalledWith(
expect.objectContaining({
where: { id: "assignment_1" },
}),
);
});
it("returns the chargeability report readmodel through the assistant", async () => {
const { listAssignmentBookings } = await import("@capakraken/application");
vi.mocked(listAssignmentBookings).mockResolvedValue([
{
id: "assignment_confirmed",
projectId: "project_confirmed",
resourceId: "resource_1",
startDate: new Date("2026-03-02T00:00:00.000Z"),
endDate: new Date("2026-03-06T00:00:00.000Z"),
hoursPerDay: 4,
dailyCostCents: 0,
status: "CONFIRMED",
project: {
id: "project_confirmed",
name: "Confirmed Project",
shortCode: "CP",
status: "ACTIVE",
orderType: "CLIENT",
dynamicFields: null,
},
resource: { id: "resource_1", displayName: "Alice", chapter: "CGI" },
},
]);
const ctx = createToolContext(
{
resource: {
findMany: vi.fn().mockResolvedValue([
{
id: "resource_1",
eid: "E-001",
displayName: "Alice",
fte: 1,
availability: { monday: 8, tuesday: 8, wednesday: 8, thursday: 8, friday: 8 },
countryId: "country_es",
federalState: null,
metroCityId: "city_1",
chargeabilityTarget: 80,
country: {
id: "country_es",
code: "ES",
dailyWorkingHours: 8,
scheduleRules: null,
},
orgUnit: { id: "org_1", name: "CGI" },
managementLevelGroup: { id: "mgmt_1", name: "Senior", targetPercentage: 0.8 },
managementLevel: { id: "level_1", name: "L7" },
metroCity: { id: "city_1", name: "Barcelona" },
},
]),
},
project: {
findMany: vi.fn().mockResolvedValue([
{ id: "project_confirmed", utilizationCategory: { code: "Chg" } },
]),
},
vacation: {
findMany: vi.fn().mockResolvedValue([]),
},
holidayCalendar: {
findMany: vi.fn().mockResolvedValue([]),
},
},
[PermissionKey.VIEW_COSTS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
);
const result = await executeTool(
"get_chargeability_report",
JSON.stringify({
startMonth: "2026-03",
endMonth: "2026-03",
resourceLimit: 10,
}),
ctx,
);
const parsed = JSON.parse(result.content) as {
monthKeys: string[];
groupTotals: Array<{ monthKey: string; chargeabilityPct: number; targetPct: number }>;
resourceCount: number;
returnedResourceCount: number;
truncated: boolean;
resources: Array<{
displayName: string;
targetPct: number;
months: Array<{ monthKey: string; sah: number; chargeabilityPct: number }>;
}>;
};
expect(parsed.monthKeys).toEqual(["2026-03"]);
expect(parsed.groupTotals).toEqual([
expect.objectContaining({
monthKey: "2026-03",
chargeabilityPct: expect.any(Number),
targetPct: 80,
}),
]);
expect(parsed.resourceCount).toBe(1);
expect(parsed.returnedResourceCount).toBe(1);
expect(parsed.truncated).toBe(false);
expect(parsed.resources).toEqual([
expect.objectContaining({
displayName: "Alice",
targetPct: 80,
months: [
expect.objectContaining({
monthKey: "2026-03",
sah: expect.any(Number),
chargeabilityPct: expect.any(Number),
}),
],
}),
]);
});
it("returns a filtered resource computation graph through the assistant", async () => {
const resourceRecord = {
id: "resource_augsburg",
displayName: "Bruce Banner",
eid: "bruce.banner",
fte: 1,
lcrCents: 5_000,
chargeabilityTarget: 80,
countryId: "country_de",
federalState: "BY",
metroCityId: "city_augsburg",
availability: {
monday: 8,
tuesday: 8,
wednesday: 8,
thursday: 8,
friday: 8,
saturday: 0,
sunday: 0,
},
country: {
id: "country_de",
code: "DE",
name: "Deutschland",
dailyWorkingHours: 8,
scheduleRules: null,
},
metroCity: { id: "city_augsburg", name: "Augsburg" },
managementLevelGroup: {
id: "mlg_1",
name: "Senior",
targetPercentage: 0.8,
},
};
const ctx = createToolContext(
{
resource: {
findUnique: vi.fn().mockResolvedValue(resourceRecord),
findFirst: vi.fn(),
findUniqueOrThrow: vi.fn().mockResolvedValue(resourceRecord),
},
assignment: {
findMany: vi.fn().mockResolvedValue([]),
},
vacation: {
findMany: vi.fn().mockResolvedValue([]),
},
holidayCalendar: {
findMany: vi.fn().mockResolvedValue([]),
},
calculationRule: {
findMany: vi.fn().mockResolvedValue([]),
},
},
[PermissionKey.VIEW_COSTS, PermissionKey.USE_ASSISTANT_ADVANCED_TOOLS],
);
const result = await executeTool(
"get_resource_computation_graph",
JSON.stringify({
resourceId: "resource_augsburg",
month: "2026-08",
domain: "SAH",
}),
ctx,
);
const parsed = JSON.parse(result.content) as {
resource: { id: string; displayName: string };
requestedDomain: string;
totalNodeCount: number;
selectedNodeCount: number;
nodes: Array<{ id: string; domain: string }>;
meta: {
countryCode: string | null;
federalState: string | null;
metroCityName: string | null;
resolvedHolidays: Array<{ name: string; scope: string }>;
};
};
expect(parsed.resource).toEqual({
id: "resource_augsburg",
eid: "bruce.banner",
displayName: "Bruce Banner",
});
expect(parsed.requestedDomain).toBe("SAH");
expect(parsed.totalNodeCount).toBeGreaterThan(parsed.selectedNodeCount);
expect(parsed.selectedNodeCount).toBeGreaterThan(0);
expect(parsed.nodes.every((node) => node.domain === "SAH")).toBe(true);
expect(parsed.meta).toMatchObject({
countryCode: "DE",
federalState: "BY",
metroCityName: "Augsburg",
});
expect(parsed.meta.resolvedHolidays).toEqual(expect.arrayContaining([
expect.objectContaining({
name: "Augsburger Friedensfest",
scope: "CITY",
}),
]));
});
it("scopes assistant notification listing to the current user", async () => {
const findMany = vi.fn().mockResolvedValue([]);
const ctx = createToolContext({
@@ -0,0 +1,181 @@
import { describe, expect, it, vi } from "vitest";
import { SystemRole } from "@capakraken/shared";
import { executeTool, type ToolContext } from "../router/assistant-tools.js";
/**
 * Builds a minimal ToolContext for assistant-tool tests.
 *
 * @param db - Stubbed Prisma delegates keyed by model name; cast to the real client type.
 * @param permissions - Permission keys granted to the simulated user (default: none).
 * @param userRole - System role of the simulated user (default: ADMIN).
 * @returns A ToolContext for the fixed test user "user_1".
 */
function createToolContext(
  db: Record<string, unknown>,
  permissions: string[] = [],
  userRole: SystemRole = SystemRole.ADMIN,
): ToolContext {
  const grantedPermissions = new Set(permissions) as ToolContext["permissions"];
  const context: ToolContext = {
    db: db as ToolContext["db"],
    userId: "user_1",
    userRole,
    permissions: grantedPermissions,
  };
  return context;
}
describe("assistant country tools", () => {
it("lists countries with schedule rules, active state, and metro cities", async () => {
const ctx = createToolContext({
country: {
findMany: vi.fn().mockResolvedValue([
{
id: "country_de",
code: "DE",
name: "Deutschland",
dailyWorkingHours: 8,
scheduleRules: null,
isActive: true,
metroCities: [{ id: "city_muc", name: "Munich" }],
},
]),
},
});
const result = await executeTool(
"list_countries",
JSON.stringify({ includeInactive: true }),
ctx,
);
const parsed = JSON.parse(result.content) as {
count: number;
countries: Array<{
code: string;
isActive: boolean;
metroCities: Array<{ id: string; name: string }>;
cities: string[];
}>;
};
expect(parsed.count).toBe(1);
expect(parsed.countries[0]).toMatchObject({
code: "DE",
isActive: true,
cities: ["Munich"],
metroCities: [{ id: "city_muc", name: "Munich" }],
});
});
it("gets a country by code and exposes schedule details and resource count", async () => {
const ctx = createToolContext({
country: {
findUnique: vi.fn().mockResolvedValue(null),
findFirst: vi
.fn()
.mockResolvedValueOnce({
id: "country_es",
code: "ES",
name: "Spain",
dailyWorkingHours: 8,
scheduleRules: {
type: "spain",
fridayHours: 6.5,
summerPeriod: { from: "07-01", to: "09-15" },
summerHours: 6.5,
regularHours: 9,
},
isActive: true,
metroCities: [{ id: "city_mad", name: "Madrid" }],
_count: { resources: 4 },
}),
},
});
const result = await executeTool(
"get_country",
JSON.stringify({ identifier: "ES" }),
ctx,
);
const parsed = JSON.parse(result.content) as {
code: string;
resourceCount: number | null;
scheduleRules: { type: string };
metroCities: Array<{ name: string }>;
};
expect(parsed).toMatchObject({
code: "ES",
resourceCount: 4,
scheduleRules: { type: "spain" },
metroCities: [{ name: "Madrid" }],
});
});
it("creates a country for admin users and returns an invalidation action", async () => {
const ctx = createToolContext({
country: {
findUnique: vi.fn().mockResolvedValue(null),
create: vi.fn().mockResolvedValue({
id: "country_es",
code: "ES",
name: "Spain",
dailyWorkingHours: 8,
scheduleRules: null,
isActive: true,
metroCities: [],
_count: { resources: 0 },
}),
},
});
const result = await executeTool(
"create_country",
JSON.stringify({ code: "ES", name: "Spain", dailyWorkingHours: 8 }),
ctx,
);
expect(result.action).toEqual({
type: "invalidate",
scope: ["country", "resource", "holidayCalendar", "vacation"],
});
expect(result.data).toMatchObject({
success: true,
country: { code: "ES", name: "Spain" },
});
});
it("refuses country mutations for non-admin users", async () => {
const ctx = createToolContext({ country: {} }, [], SystemRole.MANAGER);
const result = await executeTool(
"create_country",
JSON.stringify({ code: "ES", name: "Spain" }),
ctx,
);
expect(JSON.parse(result.content)).toEqual({
error: "Admin role required to perform this action.",
});
});
it("deletes metro cities only when no resources are assigned", async () => {
const ctx = createToolContext({
metroCity: {
findUnique: vi.fn().mockResolvedValue({
id: "city_ham",
name: "Hamburg",
_count: { resources: 0 },
}),
delete: vi.fn().mockResolvedValue(undefined),
},
});
const result = await executeTool(
"delete_metro_city",
JSON.stringify({ id: "city_ham" }),
ctx,
);
expect(result.action).toEqual({
type: "invalidate",
scope: ["country", "resource", "holidayCalendar", "vacation"],
});
expect(result.data).toMatchObject({
success: true,
message: "Deleted metro city: Hamburg",
});
});
});
@@ -1,4 +1,5 @@
import { describe, expect, it, vi } from "vitest";
import { SystemRole } from "@capakraken/shared";
vi.mock("@capakraken/application", async (importOriginal) => {
const actual = await importOriginal<typeof import("@capakraken/application")>();
@@ -8,16 +9,21 @@ vi.mock("@capakraken/application", async (importOriginal) => {
};
});
vi.mock("../lib/audit.js", () => ({
createAuditEntry: vi.fn().mockResolvedValue(undefined),
}));
import { executeTool, type ToolContext } from "../router/assistant-tools.js";
function createToolContext(
db: Record<string, unknown>,
permissions: string[] = [],
userRole: SystemRole = SystemRole.ADMIN,
): ToolContext {
return {
db: db as ToolContext["db"],
userId: "user_1",
userRole: "ADMIN",
userRole,
permissions: new Set(permissions) as ToolContext["permissions"],
};
}
@@ -107,6 +113,193 @@ describe("assistant holiday tools", () => {
);
});
it("lists holiday calendars with scope metadata and entry counts", async () => {
const ctx = createToolContext({
holidayCalendar: {
findMany: vi.fn().mockResolvedValue([
{
id: "cal_by",
name: "Bayern Feiertage",
scopeType: "STATE",
stateCode: "BY",
isActive: true,
priority: 10,
country: { id: "country_de", code: "DE", name: "Deutschland" },
metroCity: null,
_count: { entries: 2 },
entries: [
{
id: "entry_1",
date: new Date("2026-01-06T00:00:00.000Z"),
name: "Heilige Drei Koenige",
isRecurringAnnual: true,
source: "state",
},
],
},
]),
},
});
const result = await executeTool(
"list_holiday_calendars",
JSON.stringify({ countryCode: "DE", scopeType: "STATE", includeInactive: true }),
ctx,
);
const parsed = JSON.parse(result.content) as {
count: number;
calendars: Array<{
name: string;
scopeType: string;
stateCode: string | null;
entryCount: number;
country: { code: string };
}>;
};
expect(parsed.count).toBe(1);
expect(parsed.calendars).toHaveLength(1);
expect(parsed.calendars[0]).toMatchObject({
name: "Bayern Feiertage",
scopeType: "STATE",
stateCode: "BY",
entryCount: 2,
country: { code: "DE" },
});
});
it("previews resolved holiday calendars for a scope and shows the source calendar", async () => {
const ctx = createToolContext({
country: {
findUnique: vi.fn().mockResolvedValue({ code: "DE" }),
},
metroCity: {
findUnique: vi.fn().mockResolvedValue({ name: "Augsburg" }),
},
holidayCalendar: {
findMany: vi.fn().mockResolvedValue([
{
id: "cal_city",
name: "Augsburg lokal",
scopeType: "CITY",
priority: 5,
createdAt: new Date("2026-01-01T00:00:00.000Z"),
entries: [
{
id: "entry_1",
date: new Date("2020-08-08T00:00:00.000Z"),
name: "Friedensfest lokal",
isRecurringAnnual: true,
source: "manual",
},
],
},
]),
},
});
const result = await executeTool(
"preview_resolved_holiday_calendar",
JSON.stringify({ countryId: "country_de", metroCityId: "city_augsburg", year: 2026 }),
ctx,
);
const parsed = JSON.parse(result.content) as {
count: number;
locationContext: { countryCode: string; metroCity: string | null; year: number };
holidays: Array<{ name: string; calendarName: string; scope: string; date: string }>;
};
expect(parsed.count).toBeGreaterThan(0);
expect(parsed.locationContext).toEqual(
expect.objectContaining({
countryCode: "DE",
metroCity: "Augsburg",
year: 2026,
}),
);
expect(parsed.holidays).toEqual(
expect.arrayContaining([
expect.objectContaining({
name: "Friedensfest lokal",
calendarName: "Augsburg lokal",
scope: "CITY",
date: "2026-08-08",
}),
]),
);
});
it("creates a holiday calendar through the assistant for admin users", async () => {
const ctx = createToolContext({
country: {
findUnique: vi.fn().mockResolvedValue({ id: "country_de", code: "DE", name: "Deutschland" }),
},
holidayCalendar: {
findFirst: vi.fn().mockResolvedValue(null),
create: vi.fn().mockResolvedValue({
id: "cal_by",
name: "Bayern Feiertage",
scopeType: "STATE",
stateCode: "BY",
isActive: true,
priority: 10,
country: { id: "country_de", code: "DE", name: "Deutschland" },
metroCity: null,
entries: [],
}),
},
});
const result = await executeTool(
"create_holiday_calendar",
JSON.stringify({
name: "Bayern Feiertage",
scopeType: "STATE",
countryId: "country_de",
stateCode: "BY",
priority: 10,
}),
ctx,
);
const parsed = JSON.parse(result.content) as {
success: boolean;
message: string;
calendar: { name: string; stateCode: string | null };
};
expect(parsed.success).toBe(true);
expect(parsed.message).toContain("Created holiday calendar");
expect(parsed.calendar).toEqual(
expect.objectContaining({
name: "Bayern Feiertage",
stateCode: "BY",
}),
);
});
it("rejects holiday calendar mutations for non-admin assistant users", async () => {
const ctx = createToolContext({}, [], SystemRole.MANAGER);
const result = await executeTool(
"create_holiday_calendar",
JSON.stringify({
name: "Hamburg Feiertage",
scopeType: "STATE",
countryId: "country_de",
stateCode: "HH",
}),
ctx,
);
expect(JSON.parse(result.content)).toEqual(
expect.objectContaining({
error: "Admin role required to perform this action.",
}),
);
});
it("calculates chargeability with regional holidays excluded from booked and available hours", async () => {
const db = {
resource: {
@@ -19,9 +19,13 @@ vi.mock("@capakraken/staffing", () => ({
),
}));
vi.mock("@capakraken/application", () => ({
listAssignmentBookings: vi.fn().mockResolvedValue([]),
}));
vi.mock("@capakraken/application", async (importOriginal) => {
const actual = await importOriginal<typeof import("@capakraken/application")>();
return {
...actual,
listAssignmentBookings: vi.fn().mockResolvedValue([]),
};
});
const createCaller = createCallerFactory(staffingRouter);
+12 -439
View File
@@ -1,439 +1,12 @@
import { VacationStatus } from "@capakraken/db";
import { getPublicHolidays, type WeekdayAvailability } from "@capakraken/shared";
type CalendarScope = "COUNTRY" | "STATE" | "CITY";
type HolidayCalendarEntryRecord = {
date: Date;
isRecurringAnnual: boolean;
};
type HolidayCalendarRecord = {
entries: HolidayCalendarEntryRecord[];
};
type VacationRecord = {
resourceId: string;
startDate: Date;
endDate: Date;
type: string;
isHalfDay: boolean;
};
export type ResourceCapacityProfile = {
id: string;
availability: WeekdayAvailability;
countryId: string | null | undefined;
countryCode: string | null | undefined;
federalState: string | null | undefined;
metroCityId: string | null | undefined;
metroCityName: string | null | undefined;
};
export type ResourceDailyAvailabilityContext = {
absenceFractionsByDate: Map<string, number>;
holidayDates: Set<string>;
vacationFractionsByDate: Map<string, number>;
};
type ResourceCapacityDbClient = {
holidayCalendar?: {
findMany: (args: {
where: Record<string, unknown>;
include: { entries: true };
orderBy: Array<Record<string, "asc" | "desc">>;
}) => Promise<unknown[]>;
};
vacation?: {
findMany: (args: {
where: Record<string, unknown>;
select: Record<string, boolean | Record<string, boolean>>;
}) => Promise<unknown[]>;
};
};
const DAY_KEYS: (keyof WeekdayAvailability)[] = [
"sunday",
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
];
const CITY_HOLIDAY_RULES: Array<{
countryCode: string;
cityName: string;
resolveDates: (year: number) => string[];
}> = [
{
countryCode: "DE",
cityName: "Augsburg",
resolveDates: (year) => [`${year}-08-08`],
},
];
/** Formats a Date as its UTC calendar day in ISO "YYYY-MM-DD" form. */
function toIsoDate(value: Date): string {
  // toISOString is always UTC and zero-padded; keep only the date part.
  return value.toISOString().substring(0, 10);
}
/** Trims and lower-cases a city name; returns null for missing or blank input. */
function normalizeCityName(cityName?: string | null): string | null {
  if (cityName == null) {
    return null;
  }
  const trimmed = cityName.trim().toLowerCase();
  return trimmed.length > 0 ? trimmed : null;
}
/** Trims and upper-cases a federal-state code; returns null for missing or blank input. */
function normalizeStateCode(stateCode?: string | null): string | null {
  if (stateCode == null) {
    return null;
  }
  const trimmed = stateCode.trim().toUpperCase();
  return trimmed.length > 0 ? trimmed : null;
}
/**
 * Returns the scheduled working hours for the UTC weekday of `date`,
 * or 0 when that weekday has no configured hours.
 */
export function getAvailabilityHoursForDate(
  availability: WeekdayAvailability,
  date: Date,
): number {
  // DAY_KEYS is indexed by Date#getUTCDay() (0 = Sunday).
  const weekdayKey = DAY_KEYS[date.getUTCDay()];
  if (!weekdayKey) {
    return 0;
  }
  return availability[weekdayKey] ?? 0;
}
function listBuiltinHolidayDates(input: {
periodStart: Date;
periodEnd: Date;
countryCode: string | null | undefined;
federalState: string | null | undefined;
metroCityName: string | null | undefined;
}): Set<string> {
const dates = new Set<string>();
const startIso = toIsoDate(input.periodStart);
const endIso = toIsoDate(input.periodEnd);
const startYear = input.periodStart.getUTCFullYear();
const endYear = input.periodEnd.getUTCFullYear();
if (input.countryCode === "DE") {
for (let year = startYear; year <= endYear; year += 1) {
for (const holiday of getPublicHolidays(year, input.federalState ?? undefined)) {
if (holiday.date >= startIso && holiday.date <= endIso) {
dates.add(holiday.date);
}
}
}
}
const normalizedCityName = normalizeCityName(input.metroCityName);
if (input.countryCode && normalizedCityName) {
for (const rule of CITY_HOLIDAY_RULES) {
if (
rule.countryCode === input.countryCode
&& normalizeCityName(rule.cityName) === normalizedCityName
) {
for (let year = startYear; year <= endYear; year += 1) {
for (const date of rule.resolveDates(year)) {
if (date >= startIso && date <= endIso) {
dates.add(date);
}
}
}
}
}
}
return dates;
}
/**
 * Expands holiday-calendar entries into the set of ISO dates inside the period.
 *
 * Non-recurring entries contribute their stored date when it falls in the
 * period. Entries marked isRecurringAnnual are re-anchored (same UTC
 * month/day) to every year the period spans.
 */
function resolveCalendarEntryDates(
  calendars: HolidayCalendarRecord[],
  periodStart: Date,
  periodEnd: Date,
): Set<string> {
  const startIso = toIsoDate(periodStart);
  const endIso = toIsoDate(periodEnd);
  const firstYear = periodStart.getUTCFullYear();
  const lastYear = periodEnd.getUTCFullYear();
  const resolved = new Set<string>();
  const addIfInPeriod = (isoDate: string): void => {
    if (isoDate >= startIso && isoDate <= endIso) {
      resolved.add(isoDate);
    }
  };
  for (const calendar of calendars) {
    for (const entry of calendar.entries) {
      const baseDate = new Date(entry.date);
      if (!entry.isRecurringAnnual) {
        // One-off entry: only its stored date matters.
        addIfInPeriod(toIsoDate(baseDate));
        continue;
      }
      for (let year = firstYear; year <= lastYear; year += 1) {
        const anchored = new Date(Date.UTC(year, baseDate.getUTCMonth(), baseDate.getUTCDate()));
        addIfInPeriod(toIsoDate(anchored));
      }
    }
  }
  return resolved;
}
/**
 * Loads active custom holiday calendars matching a resource's location and
 * resolves their entries to ISO dates within the period.
 *
 * Calendars are always matched at COUNTRY scope, plus STATE/CITY scope when
 * the resource has a federal state or metro city. Returns an empty set when
 * the resource has no country or the db client exposes no holidayCalendar
 * delegate.
 */
async function loadCustomHolidayDates(
  db: ResourceCapacityDbClient,
  input: {
    periodStart: Date;
    periodEnd: Date;
    countryId: string | null | undefined;
    federalState: string | null | undefined;
    metroCityId: string | null | undefined;
  },
): Promise<Set<string>> {
  if (!input.countryId || typeof db.holidayCalendar?.findMany !== "function") {
    return new Set();
  }
  // Build the scope filters: country-wide always, state/city when known.
  const scopeFilters: Record<string, unknown>[] = [{ scopeType: "COUNTRY" as CalendarScope }];
  const stateCode = normalizeStateCode(input.federalState);
  if (stateCode) {
    scopeFilters.push({ scopeType: "STATE" as CalendarScope, stateCode });
  }
  const metroCityId = input.metroCityId?.trim() || null;
  if (metroCityId) {
    scopeFilters.push({ scopeType: "CITY" as CalendarScope, metroCityId });
  }
  const calendars = await db.holidayCalendar.findMany({
    where: {
      isActive: true,
      countryId: input.countryId,
      OR: scopeFilters,
    },
    include: { entries: true },
    orderBy: [{ priority: "asc" }, { createdAt: "asc" }],
  });
  return resolveCalendarEntryDates(
    calendars as HolidayCalendarRecord[],
    input.periodStart,
    input.periodEnd,
  );
}
/**
 * Derives a cache key for a resource's holiday-relevant location profile.
 * Resources sharing the same key resolve an identical holiday set, so the
 * lookup can be shared; missing fields normalize to null for stable keys.
 */
function buildProfileKey(profile: ResourceCapacityProfile): string {
  const { countryId, countryCode, federalState, metroCityId, metroCityName } = profile;
  const normalizedProfile = {
    countryId: countryId ?? null,
    countryCode: countryCode ?? null,
    federalState: federalState ?? null,
    metroCityId: metroCityId ?? null,
    metroCityName: metroCityName ?? null,
  };
  return JSON.stringify(normalizedProfile);
}
/**
 * Builds a daily availability context for each resource over the given period.
 *
 * Per resource the context captures, keyed by ISO date:
 *  - holidayDates: resolved holidays (built-in country/state/city rules plus
 *    custom holiday calendars, plus PUBLIC_HOLIDAY vacation entries);
 *  - vacationFractionsByDate: approved non-holiday vacations (0.5 half-day, 1 full);
 *  - absenceFractionsByDate: combined absence fraction per day, where every
 *    holiday counts as a full-day absence.
 *
 * Holiday resolution is cached per location profile (see buildProfileKey) so
 * resources sharing a location trigger only one lookup; approved vacations are
 * fetched in one batched query for all resources.
 */
export async function loadResourceDailyAvailabilityContexts(
  db: ResourceCapacityDbClient,
  resources: ResourceCapacityProfile[],
  periodStart: Date,
  periodEnd: Date,
): Promise<Map<string, ResourceDailyAvailabilityContext>> {
  const profileHolidayCache = new Map<string, Promise<Set<string>>>();
  const resourceIds = resources.map((resource) => resource.id);
  // Single batched query: only APPROVED vacations overlapping the period.
  const vacations = resourceIds.length > 0 && typeof db.vacation?.findMany === "function"
    ? await db.vacation.findMany({
      where: {
        resourceId: { in: resourceIds },
        status: VacationStatus.APPROVED,
        startDate: { lte: periodEnd },
        endDate: { gte: periodStart },
      },
      select: {
        resourceId: true,
        startDate: true,
        endDate: true,
        type: true,
        isHalfDay: true,
      },
    })
    : [];
  // Group the batched vacations by resource for per-resource processing below.
  const vacationsByResourceId = new Map<string, VacationRecord[]>();
  for (const vacation of vacations as VacationRecord[]) {
    const items = vacationsByResourceId.get(vacation.resourceId) ?? [];
    items.push(vacation);
    vacationsByResourceId.set(vacation.resourceId, items);
  }
  const contexts = new Map<string, ResourceDailyAvailabilityContext>();
  for (const resource of resources) {
    const profileKey = buildProfileKey(resource);
    // Share the async holiday resolution between resources with the same
    // location profile; the promise (not the resolved set) is cached.
    const holidayPromise = profileHolidayCache.get(profileKey)
      ?? (async () => {
        const builtin = listBuiltinHolidayDates({
          periodStart,
          periodEnd,
          countryCode: resource.countryCode,
          federalState: resource.federalState,
          metroCityName: resource.metroCityName,
        });
        const custom = await loadCustomHolidayDates(db, {
          periodStart,
          periodEnd,
          countryId: resource.countryId,
          federalState: resource.federalState,
          metroCityId: resource.metroCityId,
        });
        return new Set([...builtin, ...custom]);
      })();
    if (!profileHolidayCache.has(profileKey)) {
      profileHolidayCache.set(profileKey, holidayPromise);
    }
    // Copy the cached set: PUBLIC_HOLIDAY vacations below mutate it per resource.
    const holidayDates = new Set(await holidayPromise);
    const absenceFractionsByDate = new Map<string, number>();
    const vacationFractionsByDate = new Map<string, number>();
    const resourceVacations = vacationsByResourceId.get(resource.id) ?? [];
    for (const vacation of resourceVacations) {
      // Clip the vacation to the reporting period; skip non-overlapping ones.
      const overlapStart = new Date(Math.max(vacation.startDate.getTime(), periodStart.getTime()));
      const overlapEnd = new Date(Math.min(vacation.endDate.getTime(), periodEnd.getTime()));
      if (overlapStart > overlapEnd) {
        continue;
      }
      // Normalize both bounds to UTC midnight and walk the range day by day.
      const cursor = new Date(overlapStart);
      cursor.setUTCHours(0, 0, 0, 0);
      const end = new Date(overlapEnd);
      end.setUTCHours(0, 0, 0, 0);
      while (cursor <= end) {
        const isoDate = toIsoDate(cursor);
        const fraction = vacation.isHalfDay ? 0.5 : 1;
        // PUBLIC_HOLIDAY vacation entries are treated as holidays, not vacations.
        if (vacation.type === "PUBLIC_HOLIDAY") {
          holidayDates.add(isoDate);
        }
        if (vacation.type !== "PUBLIC_HOLIDAY") {
          const existingVacation = vacationFractionsByDate.get(isoDate) ?? 0;
          vacationFractionsByDate.set(isoDate, Math.max(existingVacation, fraction));
        }
        const existing = absenceFractionsByDate.get(isoDate) ?? 0;
        // Regular vacation absence is only recorded on days not already known
        // as holidays (the holiday itself is marked a full-day absence below).
        // NOTE(review): this check reads holidayDates as mutated so far in
        // this loop, so results are order-dependent across vacations.
        if (vacation.type === "PUBLIC_HOLIDAY" || !holidayDates.has(isoDate)) {
          absenceFractionsByDate.set(isoDate, Math.max(existing, fraction));
        }
        cursor.setUTCDate(cursor.getUTCDate() + 1);
      }
    }
    // Every holiday blocks the whole day regardless of any vacation fractions.
    for (const isoDate of holidayDates) {
      const existing = absenceFractionsByDate.get(isoDate) ?? 0;
      absenceFractionsByDate.set(isoDate, Math.max(existing, 1));
    }
    contexts.set(resource.id, {
      absenceFractionsByDate,
      holidayDates,
      vacationFractionsByDate,
    });
  }
  return contexts;
}
/**
 * Fraction of the day (0..1) the resource remains available after subtracting
 * the recorded absence fraction for that ISO date (1 when no context exists).
 */
function calculateDayAvailabilityFraction(
  context: ResourceDailyAvailabilityContext | undefined,
  isoDate: string,
): number {
  if (!context) {
    return 1;
  }
  const absentFraction = context.absenceFractionsByDate.get(isoDate) ?? 0;
  const remaining = 1 - absentFraction;
  return remaining > 0 ? remaining : 0;
}
/**
 * Effective available hours for a single day: the weekday's scheduled hours
 * scaled by the day's availability fraction (absences, holidays). Returns 0 on
 * days with no scheduled hours.
 */
export function calculateEffectiveDayAvailability(input: {
  availability: WeekdayAvailability;
  date: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  const { availability, date, context } = input;
  const scheduledHours = getAvailabilityHoursForDate(availability, date);
  if (scheduledHours <= 0) {
    return 0;
  }
  const availableFraction = calculateDayAvailabilityFraction(context, toIsoDate(date));
  return scheduledHours * availableFraction;
}
/**
 * Sums effective available hours over every day of [periodStart, periodEnd]
 * (both bounds inclusive, normalized to UTC midnight).
 */
export function calculateEffectiveAvailableHours(input: {
  availability: WeekdayAvailability;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  const firstDay = new Date(input.periodStart);
  firstDay.setUTCHours(0, 0, 0, 0);
  const lastDay = new Date(input.periodEnd);
  lastDay.setUTCHours(0, 0, 0, 0);
  let totalHours = 0;
  for (let day = firstDay; day <= lastDay; day.setUTCDate(day.getUTCDate() + 1)) {
    totalHours += calculateEffectiveDayAvailability({
      availability: input.availability,
      date: day,
      context: input.context,
    });
  }
  return totalHours;
}
/**
 * Counts the days in [periodStart, periodEnd] (inclusive, UTC midnight) on
 * which the resource has any effective availability (> 0 hours).
 */
export function countEffectiveWorkingDays(input: {
  availability: WeekdayAvailability;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  const firstDay = new Date(input.periodStart);
  firstDay.setUTCHours(0, 0, 0, 0);
  const lastDay = new Date(input.periodEnd);
  lastDay.setUTCHours(0, 0, 0, 0);
  let workingDays = 0;
  for (let day = firstDay; day <= lastDay; day.setUTCDate(day.getUTCDate() + 1)) {
    const effectiveHours = calculateEffectiveDayAvailability({
      availability: input.availability,
      date: day,
      context: input.context,
    });
    if (effectiveHours > 0) {
      workingDays += 1;
    }
  }
  return workingDays;
}
/**
 * Sums the booked hours of an assignment that fall inside the reporting period.
 *
 * Only days with scheduled base availability (> 0 hours) count; each such day
 * books `hoursPerDay` scaled by the day's availability fraction (1 minus the
 * recorded absence fraction). The base availability only gates whether the day
 * counts — `hoursPerDay` itself is not clamped to it.
 *
 * @returns Total booked hours; 0 when the assignment does not overlap the period.
 */
export function calculateEffectiveBookedHours(input: {
  availability: WeekdayAvailability;
  startDate: Date;
  endDate: Date;
  hoursPerDay: number;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  // Clip the assignment window to the reporting period.
  const overlapStartMs = Math.max(input.startDate.getTime(), input.periodStart.getTime());
  const overlapEndMs = Math.min(input.endDate.getTime(), input.periodEnd.getTime());
  if (overlapStartMs > overlapEndMs) {
    return 0;
  }
  const firstDay = new Date(overlapStartMs);
  firstDay.setUTCHours(0, 0, 0, 0);
  const lastDay = new Date(overlapEndMs);
  lastDay.setUTCHours(0, 0, 0, 0);
  let bookedHours = 0;
  for (let day = firstDay; day <= lastDay; day.setUTCDate(day.getUTCDate() + 1)) {
    const scheduledHours = getAvailabilityHoursForDate(input.availability, day);
    if (scheduledHours > 0) {
      bookedHours += input.hoursPerDay * calculateDayAvailabilityFraction(input.context, toIsoDate(day));
    }
  }
  return bookedHours;
}
export {
calculateEffectiveBookedHours,
calculateEffectiveAvailableHours,
calculateEffectiveDayAvailability,
countEffectiveWorkingDays,
getAvailabilityHoursForDate,
loadResourceDailyAvailabilityContexts,
} from "@capakraken/application";
export type {
ResourceCapacityProfile,
ResourceDailyAvailabilityContext,
} from "@capakraken/application";
File diff suppressed because it is too large Load Diff
+65 -5
View File
@@ -1,12 +1,12 @@
/**
* AI Assistant router — provides a chat endpoint that uses OpenAI Function Calling
* to answer questions about plANARCHY data and modify resources/projects.
* to answer questions about CapaKraken data and modify resources/projects.
*/
import { z } from "zod";
import { TRPCError } from "@trpc/server";
import { AssistantApprovalStatus, type PrismaClient } from "@capakraken/db";
import { PermissionKey, resolvePermissions, type PermissionOverrides, type SystemRole } from "@capakraken/shared";
import { PermissionKey, resolvePermissions, type PermissionOverrides, SystemRole } from "@capakraken/shared";
import { createTRPCRouter, protectedProcedure } from "../trpc.js";
import { createAiClient, isAiConfigured, loggedAiCall, parseAiError } from "../ai-client.js";
import { ADVANCED_ASSISTANT_TOOLS, MUTATION_TOOLS, TOOL_DEFINITIONS, executeTool, type ToolContext, type ToolAction } from "./assistant-tools.js";
@@ -112,6 +112,11 @@ const TOOL_PERMISSION_MAP: Record<string, string> = {
create_allocation: "manageAllocations",
cancel_allocation: "manageAllocations",
update_allocation_status: "manageAllocations",
update_timeline_allocation_inline: "manageAllocations",
apply_timeline_project_shift: "manageAllocations",
quick_assign_timeline_resource: "manageAllocations",
batch_quick_assign_timeline_resources: "manageAllocations",
batch_shift_timeline_allocations: "manageAllocations",
create_demand: "manageAllocations",
fill_demand: "manageAllocations",
// Vacation management
@@ -127,16 +132,71 @@ const TOOL_PERMISSION_MAP: Record<string, string> = {
};
/** Tools that require cost visibility */
const COST_TOOLS = new Set(["get_budget_status", "get_chargeability", "resolve_rate", "list_rate_cards", "get_estimate_detail", "find_best_project_resource"]);
const COST_TOOLS = new Set([
"get_budget_status",
"get_chargeability",
"get_chargeability_report",
"get_resource_computation_graph",
"get_project_computation_graph",
"resolve_rate",
"list_rate_cards",
"get_estimate_detail",
"find_best_project_resource",
]);
export function getAvailableAssistantTools(permissions: Set<PermissionKey>) {
/** Tools that follow controllerProcedure access rules in the main API. */
const CONTROLLER_ONLY_TOOLS = new Set([
"get_chargeability_report",
"get_resource_computation_graph",
"get_project_computation_graph",
]);
/** Tools that follow managerProcedure access rules in the main API. */
const MANAGER_ONLY_TOOLS = new Set([
"update_timeline_allocation_inline",
"apply_timeline_project_shift",
"quick_assign_timeline_resource",
"batch_quick_assign_timeline_resources",
"batch_shift_timeline_allocations",
]);
/** Tools that are intentionally limited to ADMIN because the backing routers are admin-only today. */
const ADMIN_ONLY_TOOLS = new Set([
"create_country",
"update_country",
"create_metro_city",
"update_metro_city",
"delete_metro_city",
"create_holiday_calendar",
"update_holiday_calendar",
"delete_holiday_calendar",
"create_holiday_calendar_entry",
"update_holiday_calendar_entry",
"delete_holiday_calendar_entry",
]);
export function getAvailableAssistantTools(permissions: Set<PermissionKey>, userRole: string) {
return TOOL_DEFINITIONS.filter((tool) => {
const toolName = tool.function.name;
const requiredPerm = TOOL_PERMISSION_MAP[toolName];
const hasControllerAccess = userRole === SystemRole.ADMIN
|| userRole === SystemRole.MANAGER
|| userRole === SystemRole.CONTROLLER;
const hasManagerAccess = userRole === SystemRole.ADMIN
|| userRole === SystemRole.MANAGER;
if (requiredPerm && !permissions.has(requiredPerm as PermissionKey)) {
return false;
}
if (ADMIN_ONLY_TOOLS.has(toolName) && userRole !== "ADMIN") {
return false;
}
if (MANAGER_ONLY_TOOLS.has(toolName) && !hasManagerAccess) {
return false;
}
if (CONTROLLER_ONLY_TOOLS.has(toolName) && !hasControllerAccess) {
return false;
}
if (COST_TOOLS.has(toolName) && !permissions.has(PermissionKey.VIEW_COSTS)) {
return false;
}
@@ -597,7 +657,7 @@ export const assistantRouter = createTRPCRouter({
}
// 4. Filter tools based on granular permissions
const availableTools = getAvailableAssistantTools(permissions);
const availableTools = getAvailableAssistantTools(permissions, userRole);
// 5. Function calling loop
const toolCtx: ToolContext = {
+14
View File
@@ -117,6 +117,20 @@ export {
type RecomputeResourceValueScoresInput,
} from "./use-cases/resource/index.js";
export {
calculateEffectiveAllocationCostCents,
calculateEffectiveAllocationHours,
calculateEffectiveAvailableHours,
calculateEffectiveBookedHours,
calculateEffectiveDayAvailability,
countEffectiveWorkingDays,
enumerateIsoDates,
getAvailabilityHoursForDate,
loadResourceDailyAvailabilityContexts,
type ResourceCapacityProfile,
type ResourceDailyAvailabilityContext,
} from "./lib/resource-capacity.js";
export {
assessDispoImportReadiness,
parseMandatoryDispoReferenceWorkbook,
@@ -0,0 +1,508 @@
import { VacationStatus } from "@capakraken/db";
import { getPublicHolidays, type WeekdayAvailability } from "@capakraken/shared";
const MILLISECONDS_PER_DAY = 86_400_000;
type CalendarScope = "COUNTRY" | "STATE" | "CITY";
type HolidayCalendarEntryRecord = {
date: Date;
isRecurringAnnual: boolean;
};
type HolidayCalendarRecord = {
entries: HolidayCalendarEntryRecord[];
};
type VacationRecord = {
resourceId: string;
startDate: Date;
endDate: Date;
type: string;
isHalfDay: boolean;
};
export type ResourceCapacityProfile = {
id: string;
availability: WeekdayAvailability;
countryId: string | null | undefined;
countryCode: string | null | undefined;
federalState: string | null | undefined;
metroCityId: string | null | undefined;
metroCityName: string | null | undefined;
};
export type ResourceDailyAvailabilityContext = {
absenceFractionsByDate: Map<string, number>;
holidayDates: Set<string>;
vacationFractionsByDate: Map<string, number>;
};
type ResourceCapacityDbClient = {
holidayCalendar?: {
findMany: (args: {
where: Record<string, unknown>;
include: { entries: true };
orderBy: Array<Record<string, "asc" | "desc">>;
}) => Promise<unknown[]>;
};
vacation?: {
findMany: (args: {
where: Record<string, unknown>;
select: Record<string, boolean | Record<string, boolean>>;
}) => Promise<unknown[]>;
};
};
const DAY_KEYS: (keyof WeekdayAvailability)[] = [
"sunday",
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
];
const CITY_HOLIDAY_RULES: Array<{
countryCode: string;
cityName: string;
resolveDates: (year: number) => string[];
}> = [
{
countryCode: "DE",
cityName: "Augsburg",
resolveDates: (year) => [`${year}-08-08`],
},
];
// Formats a Date as its UTC calendar day ("YYYY-MM-DD").
function toIsoDate(value: Date): string {
  return value.toISOString().slice(0, 10);
}
// Trims and lower-cases a city name; null for missing or blank values.
function normalizeCityName(cityName?: string | null): string | null {
  const normalized = cityName?.trim().toLowerCase();
  return normalized && normalized.length > 0 ? normalized : null;
}
// Trims and upper-cases a federal-state code; null for missing or blank values.
function normalizeStateCode(stateCode?: string | null): string | null {
  const normalized = stateCode?.trim().toUpperCase();
  return normalized && normalized.length > 0 ? normalized : null;
}
/**
 * Returns the configured working hours for the UTC weekday of `date`
 * (0 when the weekday has no configured hours).
 */
export function getAvailabilityHoursForDate(
  availability: WeekdayAvailability,
  date: Date,
): number {
  // DAY_KEYS is indexed by Date#getUTCDay() (0 = Sunday).
  const key = DAY_KEYS[date.getUTCDay()];
  return key ? (availability[key] ?? 0) : 0;
}
/**
 * Collects built-in public-holiday dates (ISO strings) inside the period:
 * German national/state holidays via getPublicHolidays (countryCode "DE"
 * only), plus hard-coded city rules from CITY_HOLIDAY_RULES matched by
 * country code and case-insensitive city name.
 */
function listBuiltinHolidayDates(input: {
  periodStart: Date;
  periodEnd: Date;
  countryCode: string | null | undefined;
  federalState: string | null | undefined;
  metroCityName: string | null | undefined;
}): Set<string> {
  const dates = new Set<string>();
  const startIso = toIsoDate(input.periodStart);
  const endIso = toIsoDate(input.periodEnd);
  const startYear = input.periodStart.getUTCFullYear();
  const endYear = input.periodEnd.getUTCFullYear();
  // Built-in holiday data currently only covers Germany.
  if (input.countryCode === "DE") {
    for (let year = startYear; year <= endYear; year += 1) {
      for (const holiday of getPublicHolidays(year, input.federalState ?? undefined)) {
        if (holiday.date >= startIso && holiday.date <= endIso) {
          dates.add(holiday.date);
        }
      }
    }
  }
  const normalizedCityName = normalizeCityName(input.metroCityName);
  if (input.countryCode && normalizedCityName) {
    for (const rule of CITY_HOLIDAY_RULES) {
      if (
        rule.countryCode === input.countryCode
        && normalizeCityName(rule.cityName) === normalizedCityName
      ) {
        for (let year = startYear; year <= endYear; year += 1) {
          for (const date of rule.resolveDates(year)) {
            if (date >= startIso && date <= endIso) {
              dates.add(date);
            }
          }
        }
      }
    }
  }
  return dates;
}
/**
 * Expands holiday-calendar entries into the set of ISO dates (YYYY-MM-DD)
 * falling inside [periodStart, periodEnd].
 *
 * Recurring-annual entries are projected onto every year the period spans,
 * keeping the entry's month/day; one-off entries are evaluated once.
 */
function resolveCalendarEntryDates(
  calendars: HolidayCalendarRecord[],
  periodStart: Date,
  periodEnd: Date,
): Set<string> {
  const dates = new Set<string>();
  const startIso = toIsoDate(periodStart);
  const endIso = toIsoDate(periodEnd);
  const startYear = periodStart.getUTCFullYear();
  const endYear = periodEnd.getUTCFullYear();
  for (const calendar of calendars) {
    for (const entry of calendar.entries) {
      const baseDate = new Date(entry.date);
      for (let year = startYear; year <= endYear; year += 1) {
        // NOTE(review): for a recurring Feb 29 entry, Date.UTC rolls over to
        // Mar 1 in non-leap years — confirm this is the intended behavior.
        const effectiveDate = entry.isRecurringAnnual
          ? new Date(Date.UTC(year, baseDate.getUTCMonth(), baseDate.getUTCDate()))
          : baseDate;
        const isoDate = toIsoDate(effectiveDate);
        if (isoDate >= startIso && isoDate <= endIso) {
          dates.add(isoDate);
        }
        // One-off entries don't vary by year, so a single pass suffices.
        if (!entry.isRecurringAnnual) {
          break;
        }
      }
    }
  }
  return dates;
}
/**
 * Loads custom (database-managed) holiday dates for the given location,
 * restricted to [periodStart, periodEnd].
 *
 * Returns an empty set when no country is known or the db client exposes no
 * holidayCalendar delegate (e.g. lightweight test doubles).
 */
async function loadCustomHolidayDates(
  db: ResourceCapacityDbClient,
  input: {
    periodStart: Date;
    periodEnd: Date;
    countryId: string | null | undefined;
    federalState: string | null | undefined;
    metroCityId: string | null | undefined;
  },
): Promise<Set<string>> {
  if (!input.countryId || typeof db.holidayCalendar?.findMany !== "function") {
    return new Set();
  }
  const stateCode = normalizeStateCode(input.federalState);
  // Treat blank metro-city ids as absent.
  const metroCityId = input.metroCityId?.trim() || null;
  // Country-wide calendars always apply; state/city-scoped calendars only
  // when the location carries a matching state code / metro city id.
  const calendars = await db.holidayCalendar.findMany({
    where: {
      isActive: true,
      countryId: input.countryId,
      OR: [
        { scopeType: "COUNTRY" as CalendarScope },
        ...(stateCode ? [{ scopeType: "STATE" as CalendarScope, stateCode }] : []),
        ...(metroCityId ? [{ scopeType: "CITY" as CalendarScope, metroCityId }] : []),
      ],
    },
    include: { entries: true },
    orderBy: [{ priority: "asc" }, { createdAt: "asc" }],
  });
  return resolveCalendarEntryDates(
    calendars as HolidayCalendarRecord[],
    input.periodStart,
    input.periodEnd,
  );
}
/**
 * Stable cache key for a resource's holiday-relevant location profile.
 * Resources sharing country/state/city fields produce the same key and can
 * therefore share one resolved holiday set.
 */
function buildProfileKey(profile: ResourceCapacityProfile): string {
  // Destructuring defaults normalize `undefined` to null (null passes through),
  // matching `?? null` semantics; key order is fixed by the literal below.
  const {
    countryId = null,
    countryCode = null,
    federalState = null,
    metroCityId = null,
    metroCityName = null,
  } = profile;
  return JSON.stringify({ countryId, countryCode, federalState, metroCityId, metroCityName });
}
/**
 * Builds a per-resource daily availability context for [periodStart, periodEnd]:
 * the set of holiday dates plus per-day absence and vacation fractions derived
 * from approved vacations.
 *
 * Holiday resolution is cached per location profile (country/state/city), so
 * resources sharing a profile trigger only one built-in + custom lookup.
 */
export async function loadResourceDailyAvailabilityContexts(
  db: ResourceCapacityDbClient,
  resources: ResourceCapacityProfile[],
  periodStart: Date,
  periodEnd: Date,
): Promise<Map<string, ResourceDailyAvailabilityContext>> {
  // One shared promise per location profile; later resources with the same
  // profile await the same resolution instead of re-querying.
  const profileHolidayCache = new Map<string, Promise<Set<string>>>();
  const resourceIds = resources.map((resource) => resource.id);
  // Fetch all APPROVED vacations overlapping the period in one query
  // (skipped when the client exposes no vacation delegate).
  const vacations = resourceIds.length > 0 && typeof db.vacation?.findMany === "function"
    ? await db.vacation.findMany({
      where: {
        resourceId: { in: resourceIds },
        status: VacationStatus.APPROVED,
        startDate: { lte: periodEnd },
        endDate: { gte: periodStart },
      },
      select: {
        resourceId: true,
        startDate: true,
        endDate: true,
        type: true,
        isHalfDay: true,
      },
    })
    : [];
  // Group vacations by resource for O(1) lookup per resource below.
  const vacationsByResourceId = new Map<string, VacationRecord[]>();
  for (const vacation of vacations as VacationRecord[]) {
    const items = vacationsByResourceId.get(vacation.resourceId) ?? [];
    items.push(vacation);
    vacationsByResourceId.set(vacation.resourceId, items);
  }
  const contexts = new Map<string, ResourceDailyAvailabilityContext>();
  for (const resource of resources) {
    const profileKey = buildProfileKey(resource);
    const holidayPromise = profileHolidayCache.get(profileKey)
      ?? (async () => {
        // Built-in (hard-coded country/state/city) holidays...
        const builtin = listBuiltinHolidayDates({
          periodStart,
          periodEnd,
          countryCode: resource.countryCode,
          federalState: resource.federalState,
          metroCityName: resource.metroCityName,
        });
        // ...merged with tenant-managed calendars from the database.
        const custom = await loadCustomHolidayDates(db, {
          periodStart,
          periodEnd,
          countryId: resource.countryId,
          federalState: resource.federalState,
          metroCityId: resource.metroCityId,
        });
        return new Set([...builtin, ...custom]);
      })();
    if (!profileHolidayCache.has(profileKey)) {
      profileHolidayCache.set(profileKey, holidayPromise);
    }
    // Copy the cached set: PUBLIC_HOLIDAY vacations below mutate it per resource.
    const holidayDates = new Set(await holidayPromise);
    const absenceFractionsByDate = new Map<string, number>();
    const vacationFractionsByDate = new Map<string, number>();
    const resourceVacations = vacationsByResourceId.get(resource.id) ?? [];
    for (const vacation of resourceVacations) {
      // Clamp each vacation to the reporting period.
      const overlapStart = new Date(Math.max(vacation.startDate.getTime(), periodStart.getTime()));
      const overlapEnd = new Date(Math.min(vacation.endDate.getTime(), periodEnd.getTime()));
      if (overlapStart > overlapEnd) {
        continue;
      }
      const cursor = new Date(overlapStart);
      cursor.setUTCHours(0, 0, 0, 0);
      const end = new Date(overlapEnd);
      end.setUTCHours(0, 0, 0, 0);
      while (cursor <= end) {
        const isoDate = toIsoDate(cursor);
        const fraction = vacation.isHalfDay ? 0.5 : 1;
        // PUBLIC_HOLIDAY vacations mark the day as a holiday instead of
        // consuming the personal vacation fraction.
        if (vacation.type === "PUBLIC_HOLIDAY") {
          holidayDates.add(isoDate);
        }
        if (vacation.type !== "PUBLIC_HOLIDAY") {
          const existingVacation = vacationFractionsByDate.get(isoDate) ?? 0;
          vacationFractionsByDate.set(isoDate, Math.max(existingVacation, fraction));
        }
        const existing = absenceFractionsByDate.get(isoDate) ?? 0;
        // NOTE(review): whether a non-holiday vacation contributes here depends
        // on whether the date is already in holidayDates at this point, so the
        // intermediate result can vary with vacation processing order; the
        // holiday pass below forces 1 for holiday dates either way — confirm.
        if (vacation.type === "PUBLIC_HOLIDAY" || !holidayDates.has(isoDate)) {
          absenceFractionsByDate.set(isoDate, Math.max(existing, fraction));
        }
        cursor.setUTCDate(cursor.getUTCDate() + 1);
      }
    }
    // Every holiday blocks the full day regardless of vacation fractions.
    for (const isoDate of holidayDates) {
      const existing = absenceFractionsByDate.get(isoDate) ?? 0;
      absenceFractionsByDate.set(isoDate, Math.max(existing, 1));
    }
    contexts.set(resource.id, {
      absenceFractionsByDate,
      holidayDates,
      vacationFractionsByDate,
    });
  }
  return contexts;
}
/**
 * Remaining availability fraction (0..1) for a day after subtracting its
 * recorded absence fraction; 1 when no context or no absence is recorded.
 */
function calculateDayAvailabilityFraction(
  context: ResourceDailyAvailabilityContext | undefined,
  isoDate: string,
): number {
  const absence = context?.absenceFractionsByDate.get(isoDate) ?? 0;
  return Math.max(0, 1 - absence);
}
/**
 * Effective available hours for a single day: the weekday's planned hours
 * scaled by the day's remaining availability fraction (holidays/vacations).
 */
export function calculateEffectiveDayAvailability(input: {
  availability: WeekdayAvailability;
  date: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  const plannedHours = getAvailabilityHoursForDate(input.availability, input.date);
  if (plannedHours <= 0) {
    return 0;
  }
  const fraction = calculateDayAvailabilityFraction(input.context, toIsoDate(input.date));
  return plannedHours * fraction;
}
/**
 * Sums effective available hours over every UTC day in
 * [periodStart, periodEnd] (both endpoints inclusive, truncated to midnight).
 */
export function calculateEffectiveAvailableHours(input: {
  availability: WeekdayAvailability;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  const day = new Date(input.periodStart);
  day.setUTCHours(0, 0, 0, 0);
  const finalDay = new Date(input.periodEnd);
  finalDay.setUTCHours(0, 0, 0, 0);
  let totalHours = 0;
  for (; day <= finalDay; day.setUTCDate(day.getUTCDate() + 1)) {
    totalHours += calculateEffectiveDayAvailability({
      availability: input.availability,
      date: day,
      context: input.context,
    });
  }
  return totalHours;
}
/**
 * Counts the days in [periodStart, periodEnd] on which the resource has any
 * effective availability (> 0 hours after holidays/vacations).
 */
export function countEffectiveWorkingDays(input: {
  availability: WeekdayAvailability;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  const day = new Date(input.periodStart);
  day.setUTCHours(0, 0, 0, 0);
  const finalDay = new Date(input.periodEnd);
  finalDay.setUTCHours(0, 0, 0, 0);
  let workingDays = 0;
  for (; day <= finalDay; day.setUTCDate(day.getUTCDate() + 1)) {
    const effectiveHours = calculateEffectiveDayAvailability({
      availability: input.availability,
      date: day,
      context: input.context,
    });
    if (effectiveHours > 0) {
      workingDays += 1;
    }
  }
  return workingDays;
}
/**
 * Effective hours a booking contributes within [periodStart, periodEnd].
 *
 * For every day of the clamped booking range whose weekday has nonzero
 * planned availability, the full `hoursPerDay` is credited, scaled by the
 * day's remaining availability fraction (holidays/vacations).
 */
export function calculateEffectiveBookedHours(input: {
  availability: WeekdayAvailability;
  startDate: Date;
  endDate: Date;
  hoursPerDay: number;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  // Clamp the booking to the reporting period; no overlap means no hours.
  const firstTime = Math.max(input.startDate.getTime(), input.periodStart.getTime());
  const lastTime = Math.min(input.endDate.getTime(), input.periodEnd.getTime());
  if (firstTime > lastTime) {
    return 0;
  }
  const day = new Date(firstTime);
  day.setUTCHours(0, 0, 0, 0);
  const finalDay = new Date(lastTime);
  finalDay.setUTCHours(0, 0, 0, 0);
  let bookedHours = 0;
  for (; day <= finalDay; day.setUTCDate(day.getUTCDate() + 1)) {
    if (getAvailabilityHoursForDate(input.availability, day) > 0) {
      bookedHours += input.hoursPerDay * calculateDayAvailabilityFraction(input.context, toIsoDate(day));
    }
  }
  return bookedHours;
}
/**
 * Effective hours an allocation books within the reporting period.
 * Delegates to calculateEffectiveBookedHours — same computation exposed under
 * the allocation-centric name.
 */
export function calculateEffectiveAllocationHours(input: {
  availability: WeekdayAvailability;
  startDate: Date;
  endDate: Date;
  hoursPerDay: number;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  return calculateEffectiveBookedHours(input);
}
/**
 * Effective cost (integer cents) an allocation incurs within the period:
 * `dailyCostCents` for each day of the clamped range whose weekday has
 * planned availability, scaled by the day's remaining availability fraction,
 * rounded once at the end.
 */
export function calculateEffectiveAllocationCostCents(input: {
  availability: WeekdayAvailability;
  startDate: Date;
  endDate: Date;
  dailyCostCents: number;
  periodStart: Date;
  periodEnd: Date;
  context: ResourceDailyAvailabilityContext | undefined;
}): number {
  // Clamp the allocation to the reporting period; no overlap means no cost.
  const firstTime = Math.max(input.startDate.getTime(), input.periodStart.getTime());
  const lastTime = Math.min(input.endDate.getTime(), input.periodEnd.getTime());
  if (firstTime > lastTime) {
    return 0;
  }
  const day = new Date(firstTime);
  day.setUTCHours(0, 0, 0, 0);
  const finalDay = new Date(lastTime);
  finalDay.setUTCHours(0, 0, 0, 0);
  let accumulatedCents = 0;
  for (; day <= finalDay; day.setUTCDate(day.getUTCDate() + 1)) {
    if (getAvailabilityHoursForDate(input.availability, day) > 0) {
      accumulatedCents += input.dailyCostCents * calculateDayAvailabilityFraction(input.context, toIsoDate(day));
    }
  }
  // Half-day fractions produce non-integer cents; round once at the end.
  return Math.round(accumulatedCents);
}
export function enumerateIsoDates(
periodStart: Date,
periodEnd: Date,
): string[] {
const dates: string[] = [];
const cursor = new Date(periodStart);
cursor.setUTCHours(0, 0, 0, 0);
const end = new Date(periodEnd);
end.setUTCHours(0, 0, 0, 0);
while (cursor <= end) {
dates.push(toIsoDate(cursor));
cursor.setTime(cursor.getTime() + MILLISECONDS_PER_DAY);
}
return dates;
}
@@ -1,459 +1,11 @@
import { VacationStatus } from "@capakraken/db";
import { getPublicHolidays, type WeekdayAvailability } from "@capakraken/shared";
const MILLISECONDS_PER_DAY = 86_400_000;
type CalendarScope = "COUNTRY" | "STATE" | "CITY";
type HolidayCalendarEntryRecord = {
date: Date;
isRecurringAnnual: boolean;
};
type HolidayCalendarRecord = {
entries: HolidayCalendarEntryRecord[];
};
type VacationRecord = {
resourceId: string;
startDate: Date;
endDate: Date;
type: string;
isHalfDay: boolean;
};
type ResourceHolidayProfile = {
id: string;
availability: WeekdayAvailability;
countryId: string | null | undefined;
countryCode: string | null | undefined;
federalState: string | null | undefined;
metroCityId: string | null | undefined;
metroCityName: string | null | undefined;
};
type DashboardHolidayDbClient = {
holidayCalendar?: {
findMany: (args: {
where: Record<string, unknown>;
include: { entries: true };
orderBy: Array<Record<string, "asc" | "desc">>;
}) => Promise<unknown[]>;
};
vacation?: {
findMany: (args: {
where: Record<string, unknown>;
select: Record<string, boolean | Record<string, boolean>>;
}) => Promise<unknown[]>;
};
};
type DailyAvailabilityContext = {
holidayDates: Set<string>;
absenceFractionsByDate: Map<string, number>;
};
const DAY_KEYS: (keyof WeekdayAvailability)[] = [
"sunday",
"monday",
"tuesday",
"wednesday",
"thursday",
"friday",
"saturday",
];
const CITY_HOLIDAY_RULES: Array<{
countryCode: string;
cityName: string;
resolveDates: (year: number) => string[];
}> = [
{
countryCode: "DE",
cityName: "Augsburg",
resolveDates: (year) => [`${year}-08-08`],
},
];
function toIsoDate(value: Date): string {
return value.toISOString().slice(0, 10);
}
function normalizeCityName(cityName?: string | null): string | null {
const normalized = cityName?.trim().toLowerCase();
return normalized && normalized.length > 0 ? normalized : null;
}
function normalizeStateCode(stateCode?: string | null): string | null {
const normalized = stateCode?.trim().toUpperCase();
return normalized && normalized.length > 0 ? normalized : null;
}
function getDailyAvailabilityHours(
availability: WeekdayAvailability,
date: Date,
): number {
const key = DAY_KEYS[date.getUTCDay()];
return key ? (availability[key] ?? 0) : 0;
}
function listBuiltinHolidayDates(input: {
periodStart: Date;
periodEnd: Date;
countryCode: string | null | undefined;
federalState: string | null | undefined;
metroCityName: string | null | undefined;
}): Set<string> {
const dates = new Set<string>();
const startIso = toIsoDate(input.periodStart);
const endIso = toIsoDate(input.periodEnd);
const startYear = input.periodStart.getUTCFullYear();
const endYear = input.periodEnd.getUTCFullYear();
if (input.countryCode === "DE") {
for (let year = startYear; year <= endYear; year += 1) {
for (const holiday of getPublicHolidays(year, input.federalState ?? undefined)) {
if (holiday.date >= startIso && holiday.date <= endIso) {
dates.add(holiday.date);
}
}
}
}
const normalizedCityName = normalizeCityName(input.metroCityName);
if (input.countryCode && normalizedCityName) {
for (const rule of CITY_HOLIDAY_RULES) {
if (
rule.countryCode === input.countryCode
&& normalizeCityName(rule.cityName) === normalizedCityName
) {
for (let year = startYear; year <= endYear; year += 1) {
for (const date of rule.resolveDates(year)) {
if (date >= startIso && date <= endIso) {
dates.add(date);
}
}
}
}
}
}
return dates;
}
function resolveCalendarEntryDates(
calendars: HolidayCalendarRecord[],
periodStart: Date,
periodEnd: Date,
): Set<string> {
const dates = new Set<string>();
const startIso = toIsoDate(periodStart);
const endIso = toIsoDate(periodEnd);
const startYear = periodStart.getUTCFullYear();
const endYear = periodEnd.getUTCFullYear();
for (const calendar of calendars) {
for (const entry of calendar.entries) {
const baseDate = new Date(entry.date);
for (let year = startYear; year <= endYear; year += 1) {
const effectiveDate = entry.isRecurringAnnual
? new Date(Date.UTC(year, baseDate.getUTCMonth(), baseDate.getUTCDate()))
: baseDate;
const isoDate = toIsoDate(effectiveDate);
if (isoDate >= startIso && isoDate <= endIso) {
dates.add(isoDate);
}
if (!entry.isRecurringAnnual) {
break;
}
}
}
}
return dates;
}
async function loadCustomHolidayDates(
db: DashboardHolidayDbClient,
input: {
periodStart: Date;
periodEnd: Date;
countryId: string | null | undefined;
federalState: string | null | undefined;
metroCityId: string | null | undefined;
},
): Promise<Set<string>> {
if (!input.countryId || typeof db.holidayCalendar?.findMany !== "function") {
return new Set();
}
const stateCode = normalizeStateCode(input.federalState);
const metroCityId = input.metroCityId?.trim() || null;
const calendars = await db.holidayCalendar.findMany({
where: {
isActive: true,
countryId: input.countryId,
OR: [
{ scopeType: "COUNTRY" as CalendarScope },
...(stateCode ? [{ scopeType: "STATE" as CalendarScope, stateCode }] : []),
...(metroCityId ? [{ scopeType: "CITY" as CalendarScope, metroCityId }] : []),
],
},
include: { entries: true },
orderBy: [{ priority: "asc" }, { createdAt: "asc" }],
});
return resolveCalendarEntryDates(
calendars as HolidayCalendarRecord[],
input.periodStart,
input.periodEnd,
);
}
function buildHolidayProfileKey(profile: ResourceHolidayProfile): string {
return JSON.stringify({
countryId: profile.countryId ?? null,
countryCode: profile.countryCode ?? null,
federalState: profile.federalState ?? null,
metroCityId: profile.metroCityId ?? null,
metroCityName: profile.metroCityName ?? null,
});
}
export async function loadDailyAvailabilityContexts(
db: DashboardHolidayDbClient,
resources: ResourceHolidayProfile[],
periodStart: Date,
periodEnd: Date,
): Promise<Map<string, DailyAvailabilityContext>> {
const profileHolidayCache = new Map<string, Promise<Set<string>>>();
const resourceIds = resources.map((resource) => resource.id);
const vacations = resourceIds.length > 0 && typeof db.vacation?.findMany === "function"
? await db.vacation.findMany({
where: {
resourceId: { in: resourceIds },
status: VacationStatus.APPROVED,
startDate: { lte: periodEnd },
endDate: { gte: periodStart },
},
select: {
resourceId: true,
startDate: true,
endDate: true,
type: true,
isHalfDay: true,
},
})
: [];
const vacationsByResourceId = new Map<string, VacationRecord[]>();
for (const vacation of vacations as VacationRecord[]) {
const items = vacationsByResourceId.get(vacation.resourceId) ?? [];
items.push(vacation);
vacationsByResourceId.set(vacation.resourceId, items);
}
const contexts = new Map<string, DailyAvailabilityContext>();
for (const resource of resources) {
const profileKey = buildHolidayProfileKey(resource);
const holidayPromise = profileHolidayCache.get(profileKey)
?? (async () => {
const builtin = listBuiltinHolidayDates({
periodStart,
periodEnd,
countryCode: resource.countryCode,
federalState: resource.federalState,
metroCityName: resource.metroCityName,
});
const custom = await loadCustomHolidayDates(db, {
periodStart,
periodEnd,
countryId: resource.countryId,
federalState: resource.federalState,
metroCityId: resource.metroCityId,
});
return new Set([...builtin, ...custom]);
})();
if (!profileHolidayCache.has(profileKey)) {
profileHolidayCache.set(profileKey, holidayPromise);
}
const holidayDates = new Set(await holidayPromise);
const absenceFractionsByDate = new Map<string, number>();
const resourceVacations = vacationsByResourceId.get(resource.id) ?? [];
for (const vacation of resourceVacations) {
const overlapStart = new Date(
Math.max(vacation.startDate.getTime(), periodStart.getTime()),
);
const overlapEnd = new Date(
Math.min(vacation.endDate.getTime(), periodEnd.getTime()),
);
if (overlapStart > overlapEnd) {
continue;
}
const cursor = new Date(overlapStart);
cursor.setUTCHours(0, 0, 0, 0);
const end = new Date(overlapEnd);
end.setUTCHours(0, 0, 0, 0);
while (cursor <= end) {
const isoDate = toIsoDate(cursor);
const fraction = vacation.isHalfDay ? 0.5 : 1;
if (vacation.type === "PUBLIC_HOLIDAY") {
holidayDates.add(isoDate);
}
const existing = absenceFractionsByDate.get(isoDate) ?? 0;
if (vacation.type === "PUBLIC_HOLIDAY" || !holidayDates.has(isoDate)) {
absenceFractionsByDate.set(isoDate, Math.max(existing, fraction));
}
cursor.setUTCDate(cursor.getUTCDate() + 1);
}
}
for (const isoDate of holidayDates) {
const existing = absenceFractionsByDate.get(isoDate) ?? 0;
absenceFractionsByDate.set(isoDate, Math.max(existing, 1));
}
contexts.set(resource.id, {
holidayDates,
absenceFractionsByDate,
});
}
return contexts;
}
function calculateDayAvailabilityFraction(
context: DailyAvailabilityContext | undefined,
isoDate: string,
): number {
const fraction = context?.absenceFractionsByDate.get(isoDate) ?? 0;
return Math.max(0, 1 - fraction);
}
export function calculateEffectiveAvailableHours(input: {
availability: WeekdayAvailability;
periodStart: Date;
periodEnd: Date;
context: DailyAvailabilityContext | undefined;
}): number {
let hours = 0;
const cursor = new Date(input.periodStart);
cursor.setUTCHours(0, 0, 0, 0);
const end = new Date(input.periodEnd);
end.setUTCHours(0, 0, 0, 0);
while (cursor <= end) {
const baseHours = getDailyAvailabilityHours(input.availability, cursor);
if (baseHours > 0) {
hours += baseHours * calculateDayAvailabilityFraction(input.context, toIsoDate(cursor));
}
cursor.setUTCDate(cursor.getUTCDate() + 1);
}
return hours;
}
export function calculateEffectiveAllocationHours(input: {
availability: WeekdayAvailability;
startDate: Date;
endDate: Date;
hoursPerDay: number;
periodStart: Date;
periodEnd: Date;
context: DailyAvailabilityContext | undefined;
}): number {
let hours = 0;
const overlapStart = new Date(
Math.max(input.startDate.getTime(), input.periodStart.getTime()),
);
const overlapEnd = new Date(
Math.min(input.endDate.getTime(), input.periodEnd.getTime()),
);
if (overlapStart > overlapEnd) {
return 0;
}
const cursor = new Date(overlapStart);
cursor.setUTCHours(0, 0, 0, 0);
const end = new Date(overlapEnd);
end.setUTCHours(0, 0, 0, 0);
while (cursor <= end) {
const baseHours = getDailyAvailabilityHours(input.availability, cursor);
if (baseHours > 0) {
hours += input.hoursPerDay * calculateDayAvailabilityFraction(input.context, toIsoDate(cursor));
}
cursor.setUTCDate(cursor.getUTCDate() + 1);
}
return hours;
}
export function calculateEffectiveAllocationCostCents(input: {
availability: WeekdayAvailability;
startDate: Date;
endDate: Date;
dailyCostCents: number;
periodStart: Date;
periodEnd: Date;
context: DailyAvailabilityContext | undefined;
}): number {
let costCents = 0;
const overlapStart = new Date(
Math.max(input.startDate.getTime(), input.periodStart.getTime()),
);
const overlapEnd = new Date(
Math.min(input.endDate.getTime(), input.periodEnd.getTime()),
);
if (overlapStart > overlapEnd) {
return 0;
}
const cursor = new Date(overlapStart);
cursor.setUTCHours(0, 0, 0, 0);
const end = new Date(overlapEnd);
end.setUTCHours(0, 0, 0, 0);
while (cursor <= end) {
const baseHours = getDailyAvailabilityHours(input.availability, cursor);
if (baseHours > 0) {
costCents += input.dailyCostCents * calculateDayAvailabilityFraction(input.context, toIsoDate(cursor));
}
cursor.setUTCDate(cursor.getUTCDate() + 1);
}
return Math.round(costCents);
}
export function enumerateIsoDates(
periodStart: Date,
periodEnd: Date,
): string[] {
const dates: string[] = [];
const cursor = new Date(periodStart);
cursor.setUTCHours(0, 0, 0, 0);
const end = new Date(periodEnd);
end.setUTCHours(0, 0, 0, 0);
while (cursor <= end) {
dates.push(toIsoDate(cursor));
cursor.setTime(cursor.getTime() + MILLISECONDS_PER_DAY);
}
return dates;
}
export type { DailyAvailabilityContext, ResourceHolidayProfile };
export {
calculateEffectiveAllocationCostCents,
calculateEffectiveAvailableHours,
calculateEffectiveBookedHours as calculateEffectiveAllocationHours,
enumerateIsoDates,
loadResourceDailyAvailabilityContexts as loadDailyAvailabilityContexts,
} from "../../lib/resource-capacity.js";
export type {
ResourceCapacityProfile as ResourceHolidayProfile,
ResourceDailyAvailabilityContext as DailyAvailabilityContext,
} from "../../lib/resource-capacity.js";
+1
View File
@@ -12,6 +12,7 @@
"db:push": "node ../../scripts/with-env.mjs prisma db push --schema ./prisma/schema.prisma",
"db:migrate": "node ../../scripts/with-env.mjs prisma migrate dev --schema ./prisma/schema.prisma",
"db:migrate:deploy": "node ../../scripts/with-env.mjs prisma migrate deploy --schema ./prisma/schema.prisma",
"db:validate": "node ../../scripts/with-env.mjs prisma validate --schema ./prisma/schema.prisma",
"db:seed": "node ../../scripts/with-env.mjs tsx src/seed.ts",
"db:seed:holiday-demo-resources": "node ../../scripts/with-env.mjs tsx src/seed-holiday-demo-resources.ts",
"db:seed:holidays": "node ../../scripts/with-env.mjs tsx src/seed-holiday-calendars.ts",
+1 -1
View File
@@ -1,4 +1,4 @@
// Planarchy — Prisma Schema
// CapaKraken — Prisma Schema
// All monetary values stored as integer cents to avoid float precision issues.
generator client {
+3 -3
View File
@@ -86,11 +86,11 @@ test("assertDestructiveDbAllowed rejects missing destructive allow flag", () =>
);
});
test("assertSafeSeedTarget rejects legacy planarchy disposable databases", () => {
test("assertSafeSeedTarget rejects unexpected legacy disposable databases", () => {
setEnv({
DATABASE_URL: "postgresql://tester:secret@localhost:5432/planarchy_test",
DATABASE_URL: "postgresql://tester:secret@localhost:5432/legacy_test",
ALLOW_DESTRUCTIVE_DB_TOOLS: "true",
CONFIRM_DESTRUCTIVE_DB_NAME: "planarchy_test",
CONFIRM_DESTRUCTIVE_DB_NAME: "legacy_test",
});
assert.throws(
+1 -1
View File
@@ -6,7 +6,7 @@ interface DestructiveGuardOptions {
requireConfirmation?: boolean;
}
const PROTECTED_DATABASE_NAMES = new Set(["capakraken", "planarchy"]);
const PROTECTED_DATABASE_NAMES = new Set(["capakraken"]);
function parseDatabaseUrl(rawUrl: string) {
const parsed = new URL(rawUrl);
+4 -4
View File
@@ -1,5 +1,5 @@
/**
* Generate samples/PlanarchyExamples.xlsx from the live database.
* Generate samples/CapaKrakenExamples.xlsx from the live database.
*
* Run from repo root:
* DATABASE_URL=postgresql://capakraken:capakraken_dev@localhost:5433/capakraken \
@@ -334,7 +334,7 @@ async function buildSummarySheet(wb: ExcelJS.Workbook) {
] as ExcelJS.Column[];
const title = ws.getCell("A1");
title.value = "Planarchy — Seed Data Summary";
title.value = "CapaKraken — Seed Data Summary";
title.font = { bold: true, size: 14, color: { argb: COLORS.headerBg } };
ws.mergeCells("A1:B1");
ws.getRow(1).height = 28;
@@ -367,7 +367,7 @@ async function main() {
console.log("Connecting to database...");
const wb = new ExcelJS.Workbook();
wb.creator = "Planarchy";
wb.creator = "CapaKraken";
wb.created = new Date();
wb.modified = new Date();
@@ -377,7 +377,7 @@ async function main() {
await buildProjectsSheet(wb);
await buildAllocationsSheet(wb);
const outPath = path.resolve(__dirname, "../../../samples/PlanarchyExamples.xlsx");
const outPath = path.resolve(__dirname, "../../../samples/CapaKrakenExamples.xlsx");
await wb.xlsx.writeFile(outPath);
console.log(`Excel written to: ${outPath}`);
}
+1 -1
View File
@@ -420,7 +420,7 @@ export async function runImportDispoBatch(options: ImportDispoBatchOptions) {
ensureCommitAllowed(options, stageResult.readiness);
console.log("");
console.log("Committing staged rows into live Planarchy tables...");
console.log("Committing staged rows into live CapaKraken tables...");
const commitResult = await dispoImport.commitDispoImportBatch(prisma, {
allowTbdUnresolved: options.allowTbdUnresolved,
+1 -1
View File
@@ -28,7 +28,7 @@ function parseArgs(argv: string[]): ResetOptions {
backupDir: DEFAULT_BACKUP_DIR,
adminEmail: "admin@capakraken.dev",
adminPassword: "admin123",
adminName: "Planarchy Admin",
adminName: "CapaKraken Admin",
};
for (let index = 0; index < argv.length; index += 1) {
+36 -36
View File
@@ -1,7 +1,7 @@
/**
* Updates PlanarchyExamples.xlsx with missing columns and documentation.
* Adds: Display Name, Email, Skills, Planarchy Notes columns to EID sheet.
* Adds: Start Date, End Date, Status, Planarchy Notes columns to Projects sheet.
* Updates CapaKrakenExamples.xlsx with missing columns and documentation.
* Adds: Display Name, Email, Skills, CapaKraken Notes columns to EID sheet.
* Adds: Start Date, End Date, Status, CapaKraken Notes columns to Projects sheet.
*/
import ExcelJS from "exceljs";
@@ -11,7 +11,7 @@ import { dirname, join } from "path";
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const EXCEL_PATH = join(__dirname, "../../../samples/PlanarchyExamples.xlsx");
const EXCEL_PATH = join(__dirname, "../../../samples/CapaKrakenExamples.xlsx");
// ─── Helpers ─────────────────────────────────────────────────────────────────
@@ -23,7 +23,7 @@ function toDisplayName(eid) {
}
function toEmail(eid) {
return `${eid}@planarchy.example`;
return `${eid}@capakraken.example`;
}
function computeSkillLabel(chapter, typeOfWork) {
@@ -36,8 +36,8 @@ function computeSkillLabel(chapter, typeOfWork) {
return typeOfWork;
}
function computePlanarchyEid(eid) {
// In Planarchy the EID stays as firstname.lastname (unique key)
function computeCapaKrakenEid(eid) {
// In CapaKraken the EID stays as firstname.lastname (unique key)
return eid;
}
@@ -139,28 +139,28 @@ async function main() {
// Column N: Display Name
// Column O: Email
// Column P: Skills (derived)
// Column Q: Description / Notes for Planarchy
// Column Q: Description / Notes for CapaKraken
const newEidCols = [
{
col: 14, // N
header: "Display Name\n(auto-generated)",
doc: "Full display name derived from EID (firstname.lastname → Firstname Lastname). Used as the person's name in Planarchy.",
doc: "Full display name derived from EID (firstname.lastname → Firstname Lastname). Used as the person's name in CapaKraken.",
},
{
col: 15, // O
header: "Email\n(generated)",
doc: "Generated email: firstname.lastname@planarchy.example. Required unique field in Planarchy. Replace with real email in production.",
doc: "Generated email: firstname.lastname@capakraken.example. Required unique field in CapaKraken. Replace with real email in production.",
},
{
col: 16, // P
header: "Skills\n(derived from Chapter)",
doc: "Skill tags assigned based on Chapter + Type of Work. Format: 'SkillA | SkillB'. Stored as JSON array in Planarchy with proficiency 1-5. Senior (LCR 118) 5, Mid-Senior (LCR 95) 4, Mid 3.",
doc: "Skill tags assigned based on Chapter + Type of Work. Format: 'SkillA | SkillB'. Stored as JSON array in CapaKraken with proficiency 1-5. Senior (LCR >= 118) -> 5, Mid-Senior (LCR >= 95) -> 4, Mid -> 3.",
},
{
col: 17, // Q
header: "Planarchy Notes",
doc: "How data maps to Planarchy:\n EID = unique key (col A)\n Chapter = chapter field\n LCR / UCR multiply by 100 for integer cents (85.00 8500)\n Hours fraction × 8 = daily availability hours\n Chargeability multiply by 100 for % (0.75 75%)\n Employee type, City, Client Unit stored in dynamicFields JSONB",
header: "CapaKraken Notes",
doc: "How data maps to CapaKraken:\n- EID = unique key (col A)\n- Chapter = chapter field\n- LCR / UCR -> multiply by 100 for integer cents (EUR85.00 -> 8500)\n- Hours fraction x 8 = daily availability hours\n- Chargeability -> multiply by 100 for % (0.75 -> 75%)\n- Employee type, City, Client Unit -> stored in dynamicFields JSONB",
},
];
@@ -214,18 +214,18 @@ async function main() {
// Also add doc notes to existing header columns A-M in row 1
const eidExistingDocs = [
"Unique identifier. Used as EID in Planarchy (no EMP-XXX prefix needed). e.g. steve.rogers",
"Team / department. Maps to 'chapter' field in Planarchy.",
"Unique identifier. Used as EID in CapaKraken (no EMP-XXX prefix needed). e.g. steve.rogers",
"Team / department. Maps to 'chapter' field in CapaKraken.",
"Specialization within chapter. Stored in dynamicFields.workType.",
"Assigned client account. Stored in dynamicFields.clientUnit.",
"Unit-specific field. Currently unused — can be stored in dynamicFields.",
"Office city location. Stored in dynamicFields.city.",
"Employment type: Employee or Freelancer. Stored in dynamicFields.employeeType.",
"Loaded Cost Rate (LCR) in EUR/h. Multiply × 100 for Planarchy cents. e.g. 133.77 → 13377",
"Loaded Cost Rate (LCR) in EUR/h. Multiply × 100 for CapaKraken cents. e.g. 133.77 → 13377",
"Unloaded/Utilization Cost Rate (UCR) in EUR/h. Multiply × 100 for cents.",
"FTE fraction (1.0 = 40h/week, 0.8 = 4 days, 0.5 = 20h/week). Combined with col K for availability JSON.",
"Available weekdays. 'all' = Mon-Fri. Specific days listed = only those days active at 8h.",
"Chargeability target as decimal. Multiply × 100 for Planarchy % (0.75 → 75%).",
"Chargeability target as decimal. Multiply × 100 for CapaKraken % (0.75 → 75%).",
"(unused)",
];
for (let c = 1; c <= 13; c++) {
@@ -254,12 +254,12 @@ async function main() {
{
col: 19, // S
header: "Status\n(derived)",
doc: "Planarchy status derived from 'is ordered' + win probability + date:\n• COMPLETED: ordered + 100% + past dates\n• ACTIVE: ordered + 100% + current/future\n• ON_HOLD: ordered but paused\n• DRAFT: not ordered or low win probability",
doc: "CapaKraken status derived from 'is ordered' + win probability + date:\n• COMPLETED: ordered + 100% + past dates\n• ACTIVE: ordered + 100% + current/future\n• ON_HOLD: ordered but paused\n• DRAFT: not ordered or low win probability",
},
{
col: 20, // T
header: "Planarchy Notes",
doc: "How data maps to Planarchy:\n• Col C (short code) → shortCode (unique key)\n• Col B → name\n• BD/CH/UN → OrderType: BD / CHARGEABLE / INTERNAL\n• Internal/External → allocationType: INT / EXT\n• Resource Costs (col I) × 100 = budgetCents in Planarchy\n• Col H (chargability %) → stored in dynamicFields.chargeabilityPercent\n• Col J (person hours) → stored in dynamicFields.personHoursSold\n• Col O (classification) → stored in dynamicFields.classification",
header: "CapaKraken Notes",
doc: "How data maps to CapaKraken:\n• Col C (short code) → shortCode (unique key)\n• Col B → name\n• BD/CH/UN → OrderType: BD / CHARGEABLE / INTERNAL\n• Internal/External → allocationType: INT / EXT\n• Resource Costs (col I) × 100 = budgetCents in CapaKraken\n• Col H (chargability %) → stored in dynamicFields.chargeabilityPercent\n• Col J (person hours) → stored in dynamicFields.personHoursSold\n• Col O (classification) → stored in dynamicFields.classification",
},
];
@@ -286,21 +286,21 @@ async function main() {
const projExistingDocs = [
"Client Unit tag. e.g. [DAI]=Daimler, [PAG]=Porsche AG, [BMW], [JLR]=Jaguar Land Rover. Stored in dynamicFields.clientUnit.",
"Full project name → maps to Planarchy 'name' field.",
"Short internal project code (5-6 chars) → maps to Planarchy 'shortCode' (unique key). e.g. JLFJFL",
"Full project name → maps to CapaKraken 'name' field.",
"Short internal project code (5-6 chars) → maps to CapaKraken 'shortCode' (unique key). e.g. JLFJFL",
"'yes'/'no' — whether the project is formally ordered. Drives status: yes+100% → ACTIVE/COMPLETED.",
"Order type: BD=Business Development, CH=Chargeable, UN=Internal/Unordered. Maps to Planarchy OrderType.",
"Win probability 0-100. Used in Planarchy 'winProbability' field for pipeline forecasting.",
"Allocation type: Internal → INT, External → EXT. Maps to Planarchy 'allocationType' field.",
"Order type: BD=Business Development, CH=Chargeable, UN=Internal/Unordered. Maps to CapaKraken OrderType.",
"Win probability 0-100. Used in CapaKraken 'winProbability' field for pipeline forecasting.",
"Allocation type: Internal → INT, External → EXT. Maps to CapaKraken 'allocationType' field.",
"Chargeability % as decimal. Stored in dynamicFields.chargeabilityPercent.",
"Planned resource cost in EUR. Multiply × 100 for Planarchy budgetCents. e.g. 78799 → 7879900 cents.",
"Planned resource cost in EUR. Multiply × 100 for CapaKraken budgetCents. e.g. 78799 → 7879900 cents.",
"Person hours planned/sold. Stored in dynamicFields.personHoursSold for budget tracking.",
"Team staffing (empty in source). Would list assigned EIDs. Handled by Allocations in Planarchy.",
"Team staffing (empty in source). Would list assigned EIDs. Handled by Allocations in CapaKraken.",
"Day project sold (empty in source). Could be stored as dynamicFields.dateSold.",
"Project start date (empty in source → synthesized in col Q). Maps to Planarchy 'startDate'.",
"Project end date (empty in source → synthesized in col R). Maps to Planarchy 'endDate'.",
"Project start date (empty in source → synthesized in col Q). Maps to CapaKraken 'startDate'.",
"Project end date (empty in source → synthesized in col R). Maps to CapaKraken 'endDate'.",
"Confidentiality: Confidential / Not Confidential. Stored in dynamicFields.classification.",
"Responsible EID / Owner (empty in source). Would map to a PM allocation in Planarchy.",
"Responsible EID / Owner (empty in source). Would map to a PM allocation in CapaKraken.",
];
for (let c = 1; c <= 16; c++) {
const doc = projExistingDocs[c - 1];
@@ -329,15 +329,15 @@ async function main() {
projSheet.getColumn(19).width = 16;
projSheet.getColumn(20).width = 50;
// ─── Add a "Planarchy Data Model" sheet ──────────────────────────────────
// ─── Add a "CapaKraken Data Model" sheet ──────────────────────────────────
let modelSheet = workbook.getWorksheet("Planarchy Data Model");
let modelSheet = workbook.getWorksheet("CapaKraken Data Model");
if (!modelSheet) {
modelSheet = workbook.addWorksheet("Planarchy Data Model");
modelSheet = workbook.addWorksheet("CapaKraken Data Model");
}
const modelData = [
["Planarchy Data Model — Field Reference", "", "", "", ""],
["CapaKraken Data Model — Field Reference", "", "", "", ""],
["", "", "", "", ""],
["RESOURCE FIELDS", "", "", "", ""],
["Field", "Type", "Required", "Example", "Description"],
@@ -393,7 +393,7 @@ async function main() {
// Style title row
const titleCell = modelSheet.getCell(1, 1);
titleCell.font = { bold: true, size: 14, color: { argb: "FF4F46E5" } };
titleCell.value = "Planarchy Data Model — Field Reference";
titleCell.value = "CapaKraken Data Model — Field Reference";
// Style section headers and field headers
const sectionRows = [3, 17, 31];
@@ -421,7 +421,7 @@ async function main() {
console.log(`✅ Excel updated: ${EXCEL_PATH}`);
console.log(" - EID_Informationen: added Display Name, Email, Skills, Notes columns");
console.log(" - Projektinfomartionen: added Start Date, End Date, Status, Notes columns");
console.log(" - Added new sheet: 'Planarchy Data Model' (field reference)");
console.log(" - Added new sheet: 'CapaKraken Data Model' (field reference)");
}
main().catch((err) => {
+20 -20
View File
@@ -1,11 +1,11 @@
# Planarchy V2 Architecture Proposal
# CapaKraken V2 Architecture Proposal
**Date:** 2026-03-11
**Scope:** Codebase review, v2 direction, architecture rethink, parallel agent strategy
## Executive Summary
Planarchy already has a good base:
CapaKraken already has a good base:
- monorepo boundaries are mostly clean
- `engine` and `staffing` contain useful pure domain logic
- Next.js + tRPC + Prisma keeps product iteration fast
@@ -44,31 +44,31 @@ This gives you a v2 that is safer, easier to change, and still realistic for a s
## 1. Critical correctness and security issues exist today
### Auth hashing is inconsistent
- Login verifies Argon2 hashes in [`apps/web/src/server/auth.ts#L20`](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/server/auth.ts#L20).
- Admin-created users are still stored with SHA-256 in [`packages/api/src/router/user.ts#L41`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/user.ts#L41).
- Login verifies Argon2 hashes in [`apps/web/src/server/auth.ts#L20`](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/server/auth.ts#L20).
- Admin-created users are still stored with SHA-256 in [`packages/api/src/router/user.ts#L41`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/user.ts#L41).
- Impact: users created from the admin flow are likely unable to log in.
### Notification creation is open to any authenticated user
- `notification.create` is only `protectedProcedure` in [`packages/api/src/router/notification.ts#L66`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/notification.ts#L66).
- `notification.create` is only `protectedProcedure` in [`packages/api/src/router/notification.ts#L66`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/notification.ts#L66).
- Impact: any logged-in user can create notifications for arbitrary users.
### AI connection testing is Azure-shaped even when provider is OpenAI
- `testAiConnection` always constructs an Azure deployment URL in [`packages/api/src/router/settings.ts#L122`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/settings.ts#L122).
- `testAiConnection` always constructs an Azure deployment URL in [`packages/api/src/router/settings.ts#L122`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/settings.ts#L122).
- Impact: provider abstraction is not actually reliable.
### Repo health checks are currently failing
- `pnpm test:unit` fails because `@capakraken/shared` has a Vitest script but no tests in [`packages/shared/package.json`](/home/hartmut/Documents/Copilot/planarchy/packages/shared/package.json).
- `pnpm typecheck` fails because `crypto.randomUUID()` is used without a visible import/global typing in [`packages/shared/src/schemas/project.schema.ts#L5`](/home/hartmut/Documents/Copilot/planarchy/packages/shared/src/schemas/project.schema.ts#L5).
- `pnpm test:unit` fails because `@capakraken/shared` has a Vitest script but no tests in [`packages/shared/package.json`](/home/hartmut/Documents/Copilot/capakraken/packages/shared/package.json).
- `pnpm typecheck` fails because `crypto.randomUUID()` is used without a visible import/global typing in [`packages/shared/src/schemas/project.schema.ts#L5`](/home/hartmut/Documents/Copilot/capakraken/packages/shared/src/schemas/project.schema.ts#L5).
These are not “v2 someday” items. They should be fixed before deeper refactoring.
## 2. Large surfaces are carrying too much responsibility
The biggest modules are already a warning sign:
- [`apps/web/src/components/timeline/TimelineView.tsx`](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/timeline/TimelineView.tsx) is 1720 lines.
- [`apps/web/src/components/projects/ProjectWizard.tsx`](/home/hartmut/Documents/Copilot/planarchy/apps/web/src/components/projects/ProjectWizard.tsx) is 1171 lines.
- [`packages/api/src/router/resource.ts`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/resource.ts) is 908 lines.
- [`packages/api/src/router/timeline.ts`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/timeline.ts) is 631 lines.
- [`apps/web/src/components/timeline/TimelineView.tsx`](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/timeline/TimelineView.tsx) is 1720 lines.
- [`apps/web/src/components/projects/ProjectWizard.tsx`](/home/hartmut/Documents/Copilot/capakraken/apps/web/src/components/projects/ProjectWizard.tsx) is 1171 lines.
- [`packages/api/src/router/resource.ts`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/resource.ts) is 908 lines.
- [`packages/api/src/router/timeline.ts`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/timeline.ts) is 631 lines.
That usually means:
- transport, orchestration, validation, business rules, and data access are mixed
@@ -78,10 +78,10 @@ That usually means:
## 3. The core planning model is overloaded
The Prisma schema uses JSONB heavily in core workflows:
- blueprints and role presets in [`packages/db/prisma/schema.prisma#L147`](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L147)
- resource availability, skills, and dynamic fields in [`packages/db/prisma/schema.prisma#L208`](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L208)
- project staffing requirements and dynamic fields in [`packages/db/prisma/schema.prisma#L267`](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L267)
- allocation metadata in [`packages/db/prisma/schema.prisma#L301`](/home/hartmut/Documents/Copilot/planarchy/packages/db/prisma/schema.prisma#L301)
- blueprints and role presets in [`packages/db/prisma/schema.prisma#L147`](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L147)
- resource availability, skills, and dynamic fields in [`packages/db/prisma/schema.prisma#L208`](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L208)
- project staffing requirements and dynamic fields in [`packages/db/prisma/schema.prisma#L267`](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L267)
- allocation metadata in [`packages/db/prisma/schema.prisma#L301`](/home/hartmut/Documents/Copilot/capakraken/packages/db/prisma/schema.prisma#L301)
The bigger modeling problem is that **`Allocation` currently represents both demand and assignment**:
- placeholder demand is modeled with `resourceId = null`
@@ -95,7 +95,7 @@ This is the wrong aggregate for v2.
`staffing.getSuggestions` currently:
- loads all active resources with overlapping allocations
- computes utilization in the router
- uses only Monday availability as the denominator in [`packages/api/src/router/staffing.ts#L45`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/staffing.ts#L45)
- uses only Monday availability as the denominator in [`packages/api/src/router/staffing.ts#L45`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/staffing.ts#L45)
That means the suggestion layer is:
- hard to scale
@@ -105,8 +105,8 @@ That means the suggestion layer is:
## 5. Routers are doing application-service work
Representative examples:
- timeline queries and update workflows live directly in [`packages/api/src/router/timeline.ts#L12`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/timeline.ts#L12)
- allocation creation, placeholder fill, validation, vacation handling, cost calc, audit log, and event emission all live in [`packages/api/src/router/allocation.ts#L8`](/home/hartmut/Documents/Copilot/planarchy/packages/api/src/router/allocation.ts#L8)
- timeline queries and update workflows live directly in [`packages/api/src/router/timeline.ts#L12`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/timeline.ts#L12)
- allocation creation, placeholder fill, validation, vacation handling, cost calc, audit log, and event emission all live in [`packages/api/src/router/allocation.ts#L8`](/home/hartmut/Documents/Copilot/capakraken/packages/api/src/router/allocation.ts#L8)
The pure `engine` package exists, but the application layer that should orchestrate it does not.
@@ -530,4 +530,4 @@ It should be:
- read models for planning screens
- normalized planning entities with JSONB reserved for extension points
That will make Planarchy better at the thing it claims to be: a planning system, not just a CRUD app with a timeline.
That will make CapaKraken better at the thing it claims to be: a planning system, not just a CRUD app with a timeline.
+4 -4
View File
@@ -6,7 +6,7 @@
## Problem
The bi-weekly chargeability report is currently produced in Excel. Planarchy needs a **live reporting section** in the app that updates in real-time as assignments, resources, and SAH change. The report is not a static file — it is an interactive page that can be **exported** as Excel or PDF on demand.
The bi-weekly chargeability report is currently produced in Excel. CapaKraken needs a **live reporting section** in the app that updates in real-time as assignments, resources, and SAH change. The report is not a static file — it is an interactive page that can be **exported** as Excel or PDF on demand.
Core requirements:
@@ -98,7 +98,7 @@ SUM(resource.fte * resource.bdPercentage[month])
## Data Requirements
### What Planarchy needs to have (per resource, per month)
### What CapaKraken needs to have (per resource, per month)
| Data Point | Source | Notes |
|---|---|---|
@@ -114,7 +114,7 @@ SUM(resource.fte * resource.bdPercentage[month])
### Forecast Chargeability Derivation
This is the key insight: **predicted chargeability can be derived from what Planarchy already knows**:
This is the key insight: **predicted chargeability can be derived from what CapaKraken already knows**:
```
forecastChg(resource, month) =
@@ -133,7 +133,7 @@ This means the chargeability report is a **query over existing assignments + SAH
For historical data, two options:
1. **Import from SAP**: bulk import of period data (P-1, P-2, etc.) as snapshots
2. **Track in-app**: if Planarchy becomes the system of record for time tracking
2. **Track in-app**: if CapaKraken becomes the system of record for time tracking
Recommendation: Start with SAP import. Add a `ChargeabilitySnapshot` model for imported actuals.
+1 -1
View File
@@ -11,7 +11,7 @@ Projects need to be linked to clients for chargeability reporting, budget tracki
- **WBS Master Client** — the parent organization (e.g. "BMW", "VOLKSWAGEN")
- **WBS Client Name** — the legal entity (e.g. "BMW AG", "Dr. Ing. h.c. F. Porsche AG")
Currently Planarchy has no Client model. Projects exist independently without client attribution.
Currently CapaKraken has no Client model. Projects exist independently without client attribution.
## Data
+1 -1
View File
@@ -6,7 +6,7 @@
## Problem
Planarchy currently uses a flat `hoursPerDay` on allocations. The chargeability reporting model requires:
CapaKraken currently uses a flat `hoursPerDay` on allocations. The chargeability reporting model requires:
- Country-specific daily working hours (8h Germany, 9h India, variable Spain)
- Public holidays per country AND metro city
+1 -1
View File
@@ -16,7 +16,7 @@ Level 5: Content Production (department)
Every resource must be mapped to an Org Unit Level 7. Level 7 rolls up to Level 6, which rolls up to Level 5. The names of org units can change over time, so they must be editable.
Planarchy already has a `Role` model, but roles represent skills/functions (e.g. "3D Artist"), not organizational placement. A person's org unit and their role are different dimensions.
CapaKraken already has a `Role` model, but roles represent skills/functions (e.g. "3D Artist"), not organizational placement. A person's org unit and their role are different dimensions.
## Current Data
+3 -3
View File
@@ -6,7 +6,7 @@
## Goal
Extend Planarchy to support chargeability reporting with country-specific SAH (Standard Available Hours), FTE-based capacity, organizational hierarchy, utilization categories, client/WBS management, and a native chargeability report replacing the current Excel workflow.
Extend CapaKraken to support chargeability reporting with country-specific SAH (Standard Available Hours), FTE-based capacity, organizational hierarchy, utilization categories, client/WBS management, and a native chargeability report replacing the current Excel workflow.
## Plan Documents
@@ -39,7 +39,7 @@ Derivation rules are configurable in admin (which countries map to which reporti
Each project carries a utilization category (Chg, BD, MD&I, etc.). Hours assigned to a project inherit its category for reporting. Unassigned hours = SAH minus all categorized hours.
### Forecast chargeability = derived metric
`forecastChg = hours on Chg projects / SAH`. No manual chargeability entry — it comes from what Planarchy already knows about assignments.
`forecastChg = hours on Chg projects / SAH`. No manual chargeability entry — it comes from what CapaKraken already knows about assignments.
## Dependency Order
@@ -92,7 +92,7 @@ New fields on `Project`:
## Open Questions
1. **Resource Type derivation rules**: The country→reporting-type mapping should be admin-configurable. Exact admin UI TBD.
2. **Win Probability**: The Dispo file mentions it "should contain the value from MMS". Is this relevant for Planarchy? If so, it's a field on Project.
2. **Win Probability**: The Dispo file mentions it "should contain the value from MMS". Is this relevant for CapaKraken? If so, it's a field on Project.
3. **LCR/UCR**: Cost rate definitions are not yet available. Placeholder fields are included.
4. **SAP import format**: What format do SAP period exports come in? CSV? API? Needs clarification for the import mechanism.
5. **FTE history**: Currently single `fte` field. Monthly FTE tracking may be needed if contract changes happen mid-month.
+1 -1
View File
@@ -6,7 +6,7 @@
## Problem
The Dispo Categories file defines a rich set of EID (employee) attributes that Planarchy's Resource model currently does not cover. These attributes are needed for chargeability reporting, resource filtering, and organizational grouping.
The Dispo Categories file defines a rich set of EID (employee) attributes that CapaKraken's Resource model currently does not cover. These attributes are needed for chargeability reporting, resource filtering, and organizational grouping.
## Current Resource Model (relevant fields)
@@ -6,7 +6,7 @@
## Problem
The chargeability report categorizes all work into utilization buckets. Currently Planarchy projects have no utilization classification. Every project needs a utilization category so that hours booked against it flow into the correct reporting bucket.
The chargeability report categorizes all work into utilization buckets. Currently CapaKraken projects have no utilization classification. Every project needs a utilization category so that hours booked against it flow into the correct reporting bucket.
## Utilization Categories
+3 -3
View File
@@ -1,13 +1,13 @@
/**
* Generates skill matrix Excel files for all Planarchy resources.
* Generates skill matrix Excel files for all CapaKraken resources.
* Format matches skillmatrix_formular_example.xlsx exactly.
*/
import { createRequire } from "module";
import { writeFileSync, mkdirSync } from "fs";
const require = createRequire(import.meta.url);
const XLSX = require("/home/hartmut/Documents/Copilot/planarchy/node_modules/.pnpm/xlsx@0.18.5/node_modules/xlsx/xlsx.js");
const XLSX = require("/home/hartmut/Documents/Copilot/capakraken/node_modules/.pnpm/xlsx@0.18.5/node_modules/xlsx/xlsx.js");
const OUT_DIR = "/home/hartmut/Documents/Copilot/planarchy/samples/skillmatrix_dummydata";
const OUT_DIR = "/home/hartmut/Documents/Copilot/capakraken/samples/skillmatrix_dummydata";
mkdirSync(OUT_DIR, { recursive: true });
// ─── Skill Definitions ─────────────────────────────────────────────────────
-5
View File
@@ -33,9 +33,4 @@ if (databaseName !== expectedDatabase) {
process.exit(1);
}
if (databaseName === "planarchy") {
console.error(`Refusing to continue with deprecated database '${databaseName}'. Target=${target}`);
process.exit(1);
}
console.log(`DB target OK: ${target}`);
+31
View File
@@ -0,0 +1,31 @@
#!/usr/bin/env node
import { spawnSync } from "node:child_process";
import { resolve } from "node:path";
import { loadWorkspaceEnv } from "./load-env.mjs";
loadWorkspaceEnv();
const args = process.argv.slice(2);
if (args.length === 0) {
console.error("Usage: node scripts/prisma-with-env.mjs <prisma-args...>");
process.exit(1);
}
const hasSchemaArg = args.some((arg) => arg === "--schema" || arg.startsWith("--schema="));
const prismaArgs = hasSchemaArg
? args
: [...args, "--schema", resolve("packages/db/prisma/schema.prisma")];
const result = spawnSync("pnpm", ["--filter", "@capakraken/db", "exec", "prisma", ...prismaArgs], {
stdio: "inherit",
env: process.env,
});
if (result.error) {
console.error(result.error.message);
process.exit(1);
}
process.exit(result.status ?? 1);