chore(repo): initialize planarchy workspace

This commit is contained in:
2026-03-14 14:31:09 +01:00
commit dd55d0e78b
769 changed files with 166461 additions and 0 deletions
+38
View File
@@ -0,0 +1,38 @@
# .agents Directory
This directory contains agent configuration and skills for OpenAI Codex CLI.
## Structure
```
.agents/
  config.toml              # Main configuration file
  skills/                  # Skill definitions
    skill-name/
      SKILL.md             # Skill instructions
      scripts/             # Optional scripts
      docs/                # Optional documentation
  README.md                # This file
```
## Configuration
The `config.toml` file controls:
- Model selection
- Approval policies
- Sandbox modes
- MCP server connections
- Skills configuration
## Skills
Skills are invoked using `$skill-name` syntax. Each skill has:
- YAML frontmatter with metadata
- Trigger and skip conditions
- Commands and examples
## Documentation
- Main instructions: `AGENTS.md` (project root)
- Local overrides: `.codex/AGENTS.override.md` (gitignored)
- Claude Flow: https://github.com/ruvnet/claude-flow
+92
View File
@@ -0,0 +1,92 @@
# =============================================================================
# Claude Flow V3 - Codex Configuration (GPT-5.4 Variant)
# =============================================================================

# Model used for code generation.
model = "gpt-5.4"
# Require human approval for significant changes only.
approval_policy = "on-request"
# File writes are confined to the workspace directory.
sandbox_mode = "workspace-write"
# Use cached web results when available.
web_search = "cached"

# Maximum bytes to read from AGENTS.md files.
project_doc_max_bytes = 65536
# Fallback filenames if AGENTS.md is not found.
project_doc_fallback_filenames = [
"AGENTS.md",
"TEAM_GUIDE.md",
".agents.md"
]

[features]
# Enable child AGENTS.md guidance.
child_agents_md = true
# Cache shell environment for faster repeated commands.
shell_snapshot = true
# Smart approvals based on request context.
request_rule = true
# Enable remote compaction for large histories.
remote_compaction = true

# MCP server providing Claude Flow tools.
[mcp_servers.claude-flow]
command = "npx"
args = ["-y", "@claude-flow/cli@latest"]
enabled = true
# Per-tool invocation timeout (seconds).
tool_timeout_sec = 120

# Skills invoked via $skill-name syntax.
[[skills.config]]
path = ".agents/skills/swarm-orchestration"
enabled = true

[[skills.config]]
path = ".agents/skills/memory-management"
enabled = true

[[skills.config]]
path = ".agents/skills/sparc-methodology"
enabled = true

[[skills.config]]
path = ".agents/skills/security-audit"
enabled = true

# Development profile - more permissive for local work.
[profiles.dev]
approval_policy = "never"
sandbox_mode = "danger-full-access"
web_search = "live"

# Safe profile - maximum restrictions.
[profiles.safe]
approval_policy = "untrusted"
sandbox_mode = "read-only"
web_search = "disabled"

# CI profile - for automated pipelines.
[profiles.ci]
approval_policy = "never"
sandbox_mode = "workspace-write"
web_search = "cached"

[history]
# Save all session transcripts.
persistence = "save-all"

[shell_environment_policy]
# Inherit core environment variables.
inherit = "core"
# Exclude sensitive variables from the shell environment.
exclude = ["*_KEY", "*_SECRET", "*_TOKEN", "*_PASSWORD"]

[sandbox_workspace_write]
# Additional writable paths beyond the workspace.
writable_roots = []
# Allow network access from the sandbox.
network_access = true
# Keep temp directories writable.
exclude_slash_tmp = false

[security]
# Enable input validation for all user inputs.
input_validation = true
# Prevent directory traversal attacks.
path_traversal_prevention = true
# Scan for hardcoded secrets.
secret_scanning = true
# Scan dependencies for known CVEs.
cve_scanning = true
# Maximum file size for operations (bytes; 10 MiB).
max_file_size = 10485760
# Allowed file extensions (empty = allow all).
allowed_extensions = []
# Blocked file patterns (regex).
blocked_patterns = ["\\.env$", "credentials\\.json$", "\\.pem$", "\\.key$"]

[performance]
# Maximum concurrent agents.
max_agents = 8
# Task timeout (seconds).
task_timeout = 300
# Memory limit per agent.
memory_limit = "512MB"
# Enable response caching.
cache_enabled = true
# Cache TTL (seconds).
cache_ttl = 3600
# Enable parallel task execution.
parallel_execution = true

[logging]
# Log level: debug, info, warn, error.
level = "info"
# Log format: json, text, pretty.
format = "pretty"
# Log destination: stdout, file, both.
destination = "stdout"
+92
View File
@@ -0,0 +1,92 @@
# -----------------------------------------------------------------------------
# Claude Flow V3 - Codex Configuration (GPT-5.4 Variant)
# -----------------------------------------------------------------------------

# --- Core settings ---
model = "gpt-5.4"
approval_policy = "on-request"
sandbox_mode = "workspace-write"
web_search = "cached"
project_doc_max_bytes = 65536
project_doc_fallback_filenames = ["AGENTS.md", "TEAM_GUIDE.md", ".agents.md"]

# --- Feature toggles ---
[features]
child_agents_md = true
shell_snapshot = true
request_rule = true
remote_compaction = true

# --- MCP servers ---
[mcp_servers.claude-flow]
command = "npx"
args = ["-y", "@claude-flow/cli@latest"]
enabled = true
tool_timeout_sec = 120

# --- Skills ---
[[skills.config]]
path = ".agents/skills/swarm-orchestration"
enabled = true

[[skills.config]]
path = ".agents/skills/memory-management"
enabled = true

[[skills.config]]
path = ".agents/skills/sparc-methodology"
enabled = true

[[skills.config]]
path = ".agents/skills/security-audit"
enabled = true

# --- Profiles ---
[profiles.dev]
approval_policy = "never"
sandbox_mode = "danger-full-access"
web_search = "live"

[profiles.safe]
approval_policy = "untrusted"
sandbox_mode = "read-only"
web_search = "disabled"

[profiles.ci]
approval_policy = "never"
sandbox_mode = "workspace-write"
web_search = "cached"

# --- Session history ---
[history]
persistence = "save-all"

# --- Shell environment ---
[shell_environment_policy]
inherit = "core"
exclude = ["*_KEY", "*_SECRET", "*_TOKEN", "*_PASSWORD"]

# --- Workspace-write sandbox ---
[sandbox_workspace_write]
writable_roots = []
network_access = true
exclude_slash_tmp = false

# --- Security ---
[security]
input_validation = true
path_traversal_prevention = true
secret_scanning = true
cve_scanning = true
max_file_size = 10_485_760
allowed_extensions = []
blocked_patterns = ["\\.env$", "credentials\\.json$", "\\.pem$", "\\.key$"]

# --- Performance ---
[performance]
max_agents = 8
task_timeout = 300
memory_limit = "512MB"
cache_enabled = true
cache_ttl = 3600
parallel_execution = true

# --- Logging ---
[logging]
level = "info"
format = "pretty"
destination = "stdout"
+298
View File
@@ -0,0 +1,298 @@
# =============================================================================
# Claude Flow V3 - Codex Configuration
# =============================================================================
# Generated by: @claude-flow/codex
# Documentation: https://github.com/ruvnet/claude-flow
#
# This file configures the Codex CLI for Claude Flow integration.
# Place in .agents/config.toml (project) or .codex/config.toml (user).
# =============================================================================
# =============================================================================
# Core Settings
# =============================================================================
# Model selection - the AI model to use for code generation
# Options: gpt-5.3-codex, gpt-4o, claude-sonnet, claude-opus
model = "gpt-5.3-codex"
# Approval policy determines when human approval is required
# - untrusted: Always require approval
# - on-failure: Require approval only after failures
# - on-request: Require approval for significant changes
# - never: Auto-approve all actions (use with caution)
approval_policy = "on-request"
# Sandbox mode controls file system access
# - read-only: Can only read files, no modifications
# - workspace-write: Can write within workspace directory
# - danger-full-access: Full file system access (dangerous)
sandbox_mode = "workspace-write"
# Web search enables internet access for research
# - disabled: No web access
# - cached: Use cached results when available
# - live: Always fetch fresh results
web_search = "cached"
# =============================================================================
# Project Documentation
# =============================================================================
# Maximum bytes to read from AGENTS.md files
project_doc_max_bytes = 65536
# Fallback filenames if AGENTS.md not found
project_doc_fallback_filenames = [
"AGENTS.md",
"TEAM_GUIDE.md",
".agents.md"
]
# =============================================================================
# Features
# =============================================================================
[features]
# Enable child AGENTS.md guidance
child_agents_md = true
# Cache shell environment for faster repeated commands
shell_snapshot = true
# Smart approvals based on request context
request_rule = true
# Enable remote compaction for large histories
remote_compaction = true
# =============================================================================
# MCP Servers
# =============================================================================
[mcp_servers.claude-flow]
command = "npx"
args = ["-y", "@claude-flow/cli@latest"]
enabled = true
# Per-tool invocation timeout (seconds)
tool_timeout_sec = 120
# =============================================================================
# Skills Configuration
# =============================================================================
[[skills.config]]
path = ".agents/skills/swarm-orchestration"
enabled = true
[[skills.config]]
path = ".agents/skills/memory-management"
enabled = true
[[skills.config]]
path = ".agents/skills/sparc-methodology"
enabled = true
[[skills.config]]
path = ".agents/skills/security-audit"
enabled = true
# =============================================================================
# Profiles
# =============================================================================
# Development profile - more permissive for local work
[profiles.dev]
approval_policy = "never"
sandbox_mode = "danger-full-access"
web_search = "live"
# Safe profile - maximum restrictions
[profiles.safe]
approval_policy = "untrusted"
sandbox_mode = "read-only"
web_search = "disabled"
# CI profile - for automated pipelines
[profiles.ci]
approval_policy = "never"
sandbox_mode = "workspace-write"
web_search = "cached"
# =============================================================================
# History
# =============================================================================
[history]
# Save all session transcripts
persistence = "save-all"
# =============================================================================
# Shell Environment
# =============================================================================
[shell_environment_policy]
# Inherit environment variables
inherit = "core"
# Exclude sensitive variables
exclude = ["*_KEY", "*_SECRET", "*_TOKEN", "*_PASSWORD"]
# =============================================================================
# Sandbox Workspace Write Settings
# =============================================================================
[sandbox_workspace_write]
# Additional writable paths beyond workspace
writable_roots = []
# Allow network access
network_access = true
# Exclude temp directories
exclude_slash_tmp = false
# =============================================================================
# Security Settings
# =============================================================================
[security]
# Enable input validation for all user inputs
input_validation = true
# Prevent directory traversal attacks
path_traversal_prevention = true
# Scan for hardcoded secrets
secret_scanning = true
# Scan dependencies for known CVEs
cve_scanning = true
# Maximum file size for operations (bytes)
max_file_size = 10485760
# Allowed file extensions (empty = allow all)
allowed_extensions = []
# Blocked file patterns (regex)
blocked_patterns = ["\\.env$", "credentials\\.json$", "\\.pem$", "\\.key$"]
# =============================================================================
# Performance Settings
# =============================================================================
[performance]
# Maximum concurrent agents
max_agents = 8
# Task timeout in seconds
task_timeout = 300
# Memory limit per agent
memory_limit = "512MB"
# Enable response caching
cache_enabled = true
# Cache TTL in seconds
cache_ttl = 3600
# Enable parallel task execution
parallel_execution = true
# =============================================================================
# Logging Settings
# =============================================================================
[logging]
# Log level: debug, info, warn, error
level = "info"
# Log format: json, text, pretty
format = "pretty"
# Log destination: stdout, file, both
destination = "stdout"
# =============================================================================
# Neural Intelligence Settings
# =============================================================================
[neural]
# Enable SONA (Self-Optimizing Neural Architecture)
sona_enabled = true
# Enable HNSW vector search
hnsw_enabled = true
# HNSW index parameters
# M: graph connectivity per node — higher improves recall at the cost of memory
hnsw_m = 16
# ef_construction: candidate-list size at index build time (quality vs. build speed)
hnsw_ef_construction = 200
# ef_search: candidate-list size at query time (recall vs. latency)
hnsw_ef_search = 100
# Enable pattern learning
pattern_learning = true
# Learning rate for neural adaptation
learning_rate = 0.01
# =============================================================================
# Swarm Orchestration Settings
# =============================================================================
[swarm]
# Default topology: hierarchical, mesh, ring, star
default_topology = "hierarchical"
# Default strategy: balanced, specialized, adaptive
default_strategy = "specialized"
# Consensus algorithm: raft, byzantine, gossip
consensus = "raft"
# Enable anti-drift measures
anti_drift = true
# Checkpoint interval (tasks)
checkpoint_interval = 10
# =============================================================================
# Hooks Configuration
# =============================================================================
[hooks]
# Enable lifecycle hooks
enabled = true
# Pre-task hook
pre_task = true
# Post-task hook (for learning)
post_task = true
# Enable neural training on post-edit
train_on_edit = true
# =============================================================================
# Background Workers
# =============================================================================
[workers]
# Enable background workers
enabled = true
# Worker configuration
# NOTE(review): interval is presumably seconds (matching task_timeout above) —
# confirm against the @claude-flow/cli worker documentation.
[workers.audit]
enabled = true
priority = "critical"
interval = 300
[workers.optimize]
enabled = true
priority = "high"
interval = 600
[workers.consolidate]
enabled = true
priority = "low"
interval = 1800
+126
View File
@@ -0,0 +1,126 @@
---
name: memory-management
description: >
AgentDB memory system with HNSW vector search. Provides 150x-12,500x faster pattern retrieval, persistent storage, and semantic search capabilities for learning and knowledge management.
Use when: need to store successful patterns, searching for similar solutions, semantic lookup of past work, learning from previous tasks, sharing knowledge between agents, building knowledge base.
Skip when: no learning needed, ephemeral one-off tasks, external data sources available, read-only exploration.
---
# Memory Management Skill
## Purpose
AgentDB memory system with HNSW vector search. Provides 150x-12,500x faster pattern retrieval, persistent storage, and semantic search capabilities for learning and knowledge management.
## When to Trigger
- need to store successful patterns
- searching for similar solutions
- semantic lookup of past work
- learning from previous tasks
- sharing knowledge between agents
- building knowledge base
## When to Skip
- no learning needed
- ephemeral one-off tasks
- external data sources available
- read-only exploration
## Commands
### Store Pattern
Store a pattern or knowledge item in memory
```bash
npx @claude-flow/cli memory store --key "[key]" --value "[value]" --namespace patterns
```
**Example:**
```bash
npx @claude-flow/cli memory store --key "auth-jwt-pattern" --value "JWT validation with refresh tokens" --namespace patterns
```
### Semantic Search
Search memory using semantic similarity
```bash
npx @claude-flow/cli memory search --query "[search terms]" --limit 10
```
**Example:**
```bash
npx @claude-flow/cli memory search --query "authentication best practices" --limit 5
```
### Retrieve Entry
Retrieve a specific memory entry by key
```bash
npx @claude-flow/cli memory get --key "[key]" --namespace [namespace]
```
**Example:**
```bash
npx @claude-flow/cli memory get --key "auth-jwt-pattern" --namespace patterns
```
### List Entries
List all entries in a namespace
```bash
npx @claude-flow/cli memory list --namespace [namespace]
```
**Example:**
```bash
npx @claude-flow/cli memory list --namespace patterns --limit 20
```
### Delete Entry
Delete a memory entry
```bash
npx @claude-flow/cli memory delete --key "[key]" --namespace [namespace]
```
### Initialize HNSW Index
Initialize HNSW vector search index
```bash
npx @claude-flow/cli memory init --enable-hnsw
```
### Memory Stats
Show memory usage statistics
```bash
npx @claude-flow/cli memory stats
```
### Export Memory
Export memory to JSON
```bash
npx @claude-flow/cli memory export --output memory-backup.json
```
## Scripts
| Script | Path | Description |
|--------|------|-------------|
| `memory-backup` | `.agents/scripts/memory-backup.sh` | Backup memory to external storage |
| `memory-consolidate` | `.agents/scripts/memory-consolidate.sh` | Consolidate and optimize memory |
## References
| Document | Path | Description |
|----------|------|-------------|
| `HNSW Guide` | `docs/hnsw.md` | HNSW vector search configuration |
| `Memory Schema` | `docs/memory-schema.md` | Memory namespace and schema reference |
## Best Practices
1. Check memory for existing patterns before starting
2. Use hierarchical topology for coordination
3. Store successful patterns after completion
4. Document any new learnings
@@ -0,0 +1,16 @@
#!/bin/bash
# Memory Management - Backup Script
# Export Claude Flow memory to a timestamped JSON backup file.
#
# Environment:
#   BACKUP_DIR - destination directory (default: ./.backups)
#
# -u and pipefail added so references to unset variables and failures inside
# pipelines abort the script instead of silently producing a bad backup path.
set -euo pipefail

BACKUP_DIR="${BACKUP_DIR:-./.backups}"
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="${BACKUP_DIR}/memory_${TIMESTAMP}.json"

mkdir -p "$BACKUP_DIR"

echo "Backing up memory to $BACKUP_FILE..."
npx @claude-flow/cli memory export --output "$BACKUP_FILE"
echo "Backup complete: $BACKUP_FILE"
@@ -0,0 +1,11 @@
#!/bin/bash
# Memory Management - Consolidate Script
# Dispatch the consolidation worker, then report memory usage statistics.
set -e

# Small wrapper so the CLI invocation is written once.
cf() {
    npx @claude-flow/cli "$@"
}

echo "Running memory consolidation..."
cf hooks worker dispatch --trigger consolidate
echo "Memory consolidation complete"
cf memory stats
+135
View File
@@ -0,0 +1,135 @@
---
name: security-audit
description: >
Comprehensive security scanning and vulnerability detection. Includes input validation, path traversal prevention, CVE detection, and secure coding pattern enforcement.
Use when: authentication implementation, authorization logic, payment processing, user data handling, API endpoint creation, file upload handling, database queries, external API integration.
Skip when: read-only operations on public data, internal development tooling, static documentation, styling changes.
---
# Security Audit Skill
## Purpose
Comprehensive security scanning and vulnerability detection. Includes input validation, path traversal prevention, CVE detection, and secure coding pattern enforcement.
## When to Trigger
- authentication implementation
- authorization logic
- payment processing
- user data handling
- API endpoint creation
- file upload handling
- database queries
- external API integration
## When to Skip
- read-only operations on public data
- internal development tooling
- static documentation
- styling changes
## Commands
### Full Security Scan
Run comprehensive security analysis on the codebase
```bash
npx @claude-flow/cli security scan --depth full
```
**Example:**
```bash
npx @claude-flow/cli security scan --depth full --output security-report.json
```
### Input Validation Check
Check for input validation issues
```bash
npx @claude-flow/cli security scan --check input-validation
```
**Example:**
```bash
npx @claude-flow/cli security scan --check input-validation --path ./src/api
```
### Path Traversal Check
Check for path traversal vulnerabilities
```bash
npx @claude-flow/cli security scan --check path-traversal
```
### SQL Injection Check
Check for SQL injection vulnerabilities
```bash
npx @claude-flow/cli security scan --check sql-injection
```
### XSS Check
Check for cross-site scripting vulnerabilities
```bash
npx @claude-flow/cli security scan --check xss
```
### CVE Scan
Scan dependencies for known CVEs
```bash
npx @claude-flow/cli security cve --scan
```
**Example:**
```bash
npx @claude-flow/cli security cve --scan --severity high
```
### Security Audit Report
Generate full security audit report
```bash
npx @claude-flow/cli security audit --report
```
**Example:**
```bash
npx @claude-flow/cli security audit --report --format markdown --output SECURITY.md
```
### Threat Modeling
Run threat modeling analysis
```bash
npx @claude-flow/cli security threats --analyze
```
### Validate Secrets
Check for hardcoded secrets
```bash
npx @claude-flow/cli security validate --check secrets
```
## Scripts
| Script | Path | Description |
|--------|------|-------------|
| `security-scan` | `.agents/scripts/security-scan.sh` | Run full security scan pipeline |
| `cve-remediate` | `.agents/scripts/cve-remediate.sh` | Auto-remediate known CVEs |
## References
| Document | Path | Description |
|----------|------|-------------|
| `Security Checklist` | `docs/security-checklist.md` | Security review checklist |
| `OWASP Guide` | `docs/owasp-top10.md` | OWASP Top 10 mitigation guide |
## Best Practices
1. Check memory for existing patterns before starting
2. Use hierarchical topology for coordination
3. Store successful patterns after completion
4. Document any new learnings
@@ -0,0 +1,16 @@
#!/bin/bash
# Security Audit - CVE Remediation Script
# Scan for known CVEs, attempt auto-remediation, then verify with a re-scan.
set -euo pipefail

echo "Scanning for CVEs..."
npx @claude-flow/cli security cve --scan --severity high

echo "Attempting auto-remediation..."
# `npm audit fix` exits non-zero when some vulnerabilities cannot be fixed
# automatically; under `set -e` that previously aborted the script before the
# verification re-scan below could run. Tolerate that exit status so the
# re-scan always reports the post-remediation state.
npm audit fix || echo "npm audit fix could not remediate all issues; see re-scan below"

echo "Re-scanning after remediation..."
npx @claude-flow/cli security cve --scan
echo "CVE remediation complete"
@@ -0,0 +1,33 @@
#!/bin/bash
# Security Audit - Full Scan Script
# Run the comprehensive security scan pipeline: per-category scans, then
# secret detection, then a dependency CVE scan. Aborts on the first failing
# check (set -e), same as running each command individually.
set -e

# run_scan <label> <check-id>: announce and execute one scan category.
run_scan() {
    echo "Checking $1..."
    npx @claude-flow/cli security scan --check "$2"
}

echo "Running full security scan..."
run_scan "input validation" input-validation
run_scan "path traversal" path-traversal
run_scan "SQL injection" sql-injection
run_scan "XSS" xss

echo "Checking for hardcoded secrets..."
npx @claude-flow/cli security validate --check secrets

echo "Scanning dependencies for CVEs..."
npx @claude-flow/cli security cve --scan

echo "Security scan complete"
+118
View File
@@ -0,0 +1,118 @@
---
name: sparc-methodology
description: >
SPARC development workflow: Specification, Pseudocode, Architecture, Refinement, Completion. A structured approach for complex implementations that ensures thorough planning before coding.
Use when: new feature implementation, complex implementations, architectural changes, system redesign, integration work, unclear requirements.
Skip when: simple bug fixes, documentation updates, configuration changes, well-defined small tasks, routine maintenance.
---
# Sparc Methodology Skill
## Purpose
SPARC development workflow: Specification, Pseudocode, Architecture, Refinement, Completion. A structured approach for complex implementations that ensures thorough planning before coding.
## When to Trigger
- new feature implementation
- complex implementations
- architectural changes
- system redesign
- integration work
- unclear requirements
## When to Skip
- simple bug fixes
- documentation updates
- configuration changes
- well-defined small tasks
- routine maintenance
## Commands
### Specification Phase
Define requirements, acceptance criteria, and constraints
```bash
npx @claude-flow/cli hooks route --task "specification: [requirements]"
```
**Example:**
```bash
npx @claude-flow/cli hooks route --task "specification: user authentication with OAuth2, MFA, and session management"
```
### Pseudocode Phase
Write high-level pseudocode for the implementation
```bash
npx @claude-flow/cli hooks route --task "pseudocode: [feature]"
```
**Example:**
```bash
npx @claude-flow/cli hooks route --task "pseudocode: OAuth2 login flow with token refresh"
```
### Architecture Phase
Design system structure, interfaces, and dependencies
```bash
npx @claude-flow/cli hooks route --task "architecture: [design]"
```
**Example:**
```bash
npx @claude-flow/cli hooks route --task "architecture: auth module with service layer, repository, and API endpoints"
```
### Refinement Phase
Iterate on the design based on feedback
```bash
npx @claude-flow/cli hooks route --task "refinement: [feedback]"
```
**Example:**
```bash
npx @claude-flow/cli hooks route --task "refinement: add rate limiting and brute force protection"
```
### Completion Phase
Finalize implementation with tests and documentation
```bash
npx @claude-flow/cli hooks route --task "completion: [final checks]"
```
**Example:**
```bash
npx @claude-flow/cli hooks route --task "completion: verify all tests pass, update API docs, security review"
```
### SPARC Coordinator
Spawn SPARC coordinator agent
```bash
npx @claude-flow/cli agent spawn --type sparc-coord --name sparc-lead
```
## Scripts
| Script | Path | Description |
|--------|------|-------------|
| `sparc-init` | `.agents/scripts/sparc-init.sh` | Initialize SPARC workflow for a new feature |
| `sparc-review` | `.agents/scripts/sparc-review.sh` | Run SPARC phase review checklist |
## References
| Document | Path | Description |
|----------|------|-------------|
| `SPARC Overview` | `docs/sparc.md` | Complete SPARC methodology guide |
| `Phase Templates` | `docs/sparc-templates.md` | Templates for each SPARC phase |
## Best Practices
1. Check memory for existing patterns before starting
2. Use hierarchical topology for coordination
3. Store successful patterns after completion
4. Document any new learnings
@@ -0,0 +1,21 @@
#!/bin/bash
# SPARC Methodology - Init Script
# Initialize the SPARC workflow for a new feature: create the documentation
# directory and one markdown file per phase, numbered in execution order.
set -e

FEATURE_NAME="${1:-new-feature}"
SPARC_DIR="./docs/sparc/$FEATURE_NAME"

echo "Initializing SPARC workflow for: $FEATURE_NAME"

mkdir -p "$SPARC_DIR"
for phase_file in 1-specification 2-pseudocode 3-architecture 4-refinement 5-completion; do
    touch "$SPARC_DIR/${phase_file}.md"
done

echo "SPARC workflow initialized in $SPARC_DIR"
@@ -0,0 +1,18 @@
#!/bin/bash
# SPARC Methodology - Review Script
# Run the SPARC phase review checklist against a feature directory.
#
# Usage: sparc-review.sh [feature-dir]
#   feature-dir - directory holding the phase files (default: current dir),
#                 e.g. ./docs/sparc/<feature> as created by sparc-init.sh
set -e
FEATURE_DIR="${1:-.}"
echo "SPARC Phase Review Checklist"
echo "============================="
index=1
for phase in specification pseudocode architecture refinement completion; do
    # sparc-init.sh creates numbered phase files (1-specification.md, ...);
    # the previous check for "${phase}.md" never matched those, so every
    # init-created feature was reported as missing. Accept both forms.
    if [ -f "$FEATURE_DIR/${index}-${phase}.md" ] || [ -f "$FEATURE_DIR/${phase}.md" ]; then
        echo "[x] $phase - found"
    else
        echo "[ ] $phase - missing"
    fi
    index=$((index + 1))
done
+114
View File
@@ -0,0 +1,114 @@
---
name: swarm-orchestration
description: >
Multi-agent swarm coordination for complex tasks. Uses hierarchical topology with specialized agents to break down and execute complex work across multiple files and modules.
Use when: 3+ files need changes, new feature implementation, cross-module refactoring, API changes with tests, security-related changes, performance optimization across codebase, database schema changes.
Skip when: single file edits, simple bug fixes (1-2 lines), documentation updates, configuration changes, quick exploration.
---
# Swarm Orchestration Skill
## Purpose
Multi-agent swarm coordination for complex tasks. Uses hierarchical topology with specialized agents to break down and execute complex work across multiple files and modules.
## When to Trigger
- 3+ files need changes
- new feature implementation
- cross-module refactoring
- API changes with tests
- security-related changes
- performance optimization across codebase
- database schema changes
## When to Skip
- single file edits
- simple bug fixes (1-2 lines)
- documentation updates
- configuration changes
- quick exploration
## Commands
### Initialize Swarm
Start a new swarm with hierarchical topology (anti-drift)
```bash
npx @claude-flow/cli swarm init --topology hierarchical --max-agents 8 --strategy specialized
```
**Example:**
```bash
npx @claude-flow/cli swarm init --topology hierarchical --max-agents 6 --strategy specialized
```
### Route Task
Route a task to the appropriate agents based on task type
```bash
npx @claude-flow/cli hooks route --task "[task description]"
```
**Example:**
```bash
npx @claude-flow/cli hooks route --task "implement OAuth2 authentication flow"
```
### Spawn Agent
Spawn a specific agent type
```bash
npx @claude-flow/cli agent spawn --type [type] --name [name]
```
**Example:**
```bash
npx @claude-flow/cli agent spawn --type coder --name impl-auth
```
### Monitor Status
Check the current swarm status
```bash
npx @claude-flow/cli swarm status --verbose
```
### Orchestrate Task
Orchestrate a task across multiple agents
```bash
npx @claude-flow/cli task orchestrate --task "[task]" --strategy adaptive
```
**Example:**
```bash
npx @claude-flow/cli task orchestrate --task "refactor auth module" --strategy parallel --max-agents 4
```
### List Agents
List all active agents
```bash
npx @claude-flow/cli agent list --filter active
```
## Scripts
| Script | Path | Description |
|--------|------|-------------|
| `swarm-start` | `.agents/scripts/swarm-start.sh` | Initialize swarm with default settings |
| `swarm-monitor` | `.agents/scripts/swarm-monitor.sh` | Real-time swarm monitoring dashboard |
## References
| Document | Path | Description |
|----------|------|-------------|
| `Agent Types` | `docs/agents.md` | Complete list of agent types and capabilities |
| `Topology Guide` | `docs/topology.md` | Swarm topology configuration guide |
## Best Practices
1. Check memory for existing patterns before starting
2. Use hierarchical topology for coordination
3. Store successful patterns after completion
4. Document any new learnings
@@ -0,0 +1,8 @@
#!/bin/bash
# Swarm Orchestration - Monitor Script
# Real-time swarm monitoring
# Streams swarm status continuously until interrupted (Ctrl-C).
set -e
echo "Starting swarm monitor..."
# --watch keeps the status view refreshing; --interval is the refresh period
# (presumably seconds — confirm with the @claude-flow/cli docs).
npx @claude-flow/cli swarm status --watch --interval 5
@@ -0,0 +1,14 @@
#!/bin/bash
# Swarm Orchestration - Start Script
# Initialize a swarm with the default anti-drift settings (hierarchical
# topology, specialized strategy, up to 8 agents), then show its status.
set -e

echo "Initializing hierarchical swarm..."
npx @claude-flow/cli swarm init --topology hierarchical --max-agents 8 --strategy specialized
echo "Swarm initialized successfully"
npx @claude-flow/cli swarm status
@@ -0,0 +1,317 @@
{
"agents": {
"agent-1773301124564-6uhddz": {
"agentId": "agent-1773301124564-6uhddz",
"agentType": "architect",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:44.564Z",
"model": "opus",
"modelRoutedBy": "default"
},
"agent-1773301125742-tqocas": {
"agentId": "agent-1773301125742-tqocas",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:45.742Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301127024-qlkmjm": {
"agentId": "agent-1773301127024-qlkmjm",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:47.024Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301128242-hf0jzi": {
"agentId": "agent-1773301128242-hf0jzi",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:48.242Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301129460-hmym2x": {
"agentId": "agent-1773301129460-hmym2x",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:49.460Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301130653-iingqg": {
"agentId": "agent-1773301130653-iingqg",
"agentType": "tester",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:50.653Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301131874-xh0mds": {
"agentId": "agent-1773301131874-xh0mds",
"agentType": "reviewer",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:38:51.874Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301263581-9yoxqb": {
"agentId": "agent-1773301263581-9yoxqb",
"agentType": "coordinator",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:03.581Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301264817-jl9lg9": {
"agentId": "agent-1773301264817-jl9lg9",
"agentType": "architect",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:04.817Z",
"model": "opus",
"modelRoutedBy": "default"
},
"agent-1773301266086-ccotvm": {
"agentId": "agent-1773301266086-ccotvm",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:06.087Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301267360-a4bdmh": {
"agentId": "agent-1773301267360-a4bdmh",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:07.360Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301268641-nl75k6": {
"agentId": "agent-1773301268641-nl75k6",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:08.641Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301269831-2dpxeu": {
"agentId": "agent-1773301269831-2dpxeu",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:09.831Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301271058-ks9ye3": {
"agentId": "agent-1773301271058-ks9ye3",
"agentType": "tester",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:11.058Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301272250-fby0pn": {
"agentId": "agent-1773301272250-fby0pn",
"agentType": "reviewer",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:41:12.250Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301423282-ce81we": {
"agentId": "agent-1773301423282-ce81we",
"agentType": "coordinator",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:43:43.282Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301659381-17mm4y": {
"agentId": "agent-1773301659381-17mm4y",
"agentType": "coordinator",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:39.381Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301660629-az6thk": {
"agentId": "agent-1773301660629-az6thk",
"agentType": "architect",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:40.629Z",
"model": "opus",
"modelRoutedBy": "default"
},
"agent-1773301661862-p3nhe2": {
"agentId": "agent-1773301661862-p3nhe2",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:41.862Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301663069-9or7ei": {
"agentId": "agent-1773301663069-9or7ei",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:43.069Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301664329-xc834o": {
"agentId": "agent-1773301664329-xc834o",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:44.329Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301665606-k2wr2k": {
"agentId": "agent-1773301665606-k2wr2k",
"agentType": "coder",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:45.606Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301666804-d9kuix": {
"agentId": "agent-1773301666804-d9kuix",
"agentType": "tester",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:46.805Z",
"model": "sonnet",
"modelRoutedBy": "default"
},
"agent-1773301668078-yoh6vm": {
"agentId": "agent-1773301668078-yoh6vm",
"agentType": "reviewer",
"status": "idle",
"health": 1,
"taskCount": 0,
"config": {
"provider": "anthropic"
},
"createdAt": "2026-03-12T07:47:48.078Z",
"model": "sonnet",
"modelRoutedBy": "default"
}
},
"version": "3.0.0"
}
@@ -0,0 +1,57 @@
# A1 Architect
## Mission
Define the contracts that remove field and widget drift before implementation starts.
## Scope
- canonical field-definition contract
- widget-config contract
- versioning and migration rules
- interface review across shared, API, engine, and UI packages
## Primary Files
- `packages/shared/src/types/dynamic-fields.ts`
- `packages/shared/src/schemas/blueprint.schema.ts`
- `apps/web/src/components/dashboard/widget-registry.ts`
- `apps/web/src/hooks/useDashboardLayout.ts`
## Deliverables
- contract note for field-definition model
- contract note for widget-config model
- migration/versioning notes
- accepted metadata defaults and unsupported cases list
## Done Means
- runtime-used field metadata is documented
- UI, API, and validation consumers have one contract target
- widget config has explicit shape, defaults, and versioning rules
- `O1` and `R1` sign off before coder work starts
## Agent Prompt
```text
You are A1, the architect for the Planarchy widget + field refactor sprint.
Your job is to define stable contracts before implementation starts. Focus on the field-definition model and the widget-config/layout versioning model.
Work from docs/refactor-sprint-plan.md and inspect the current runtime usage in:
- packages/shared/src/types/dynamic-fields.ts
- packages/shared/src/schemas/blueprint.schema.ts
- packages/engine/src/blueprint/validator.ts
- apps/web/src/components/dynamic-fields/*
- apps/web/src/components/dashboard/widget-registry.ts
- apps/web/src/hooks/useDashboardLayout.ts
Produce:
1. Canonical field-definition contract
2. Widget-config contract
3. Versioning and migration rules
4. Explicit notes on defaults, tolerated legacy cases, and rejection cases
Do not implement broad code changes. Your output should unblock C1, C2, and C3 with clear boundaries and acceptance notes.
```
@@ -0,0 +1,53 @@
# C1 Field Domain Coder
## Mission
Centralize dynamic-field validation and filter construction so project and resource behavior matches.
## Scope
- shared field helpers in `packages/application` and `packages/shared`
- project/resource validation parity
- removal of duplicated dynamic-field filter logic
## Primary Files
- `packages/application/src/index.ts`
- `packages/api/src/router/project.ts`
- `packages/api/src/router/resource.ts`
- `packages/engine/src/blueprint/validator.ts`
## Deliverables
- shared field validation path
- shared dynamic-field filter builder
- project/resource parity
## Done Means
- one implementation exists for validation
- one implementation exists for filter construction
- project create/update validates against blueprint definitions when applicable
- no router-specific logic drift remains
## Agent Prompt
```text
You are C1, the field domain coder for the Planarchy refactor sprint.
Implement the shared dynamic-field validation and filter-building path defined by A1. Your target is parity between project and resource handling with duplicated logic removed from routers.
Work in:
- packages/application/src/index.ts
- packages/api/src/router/project.ts
- packages/api/src/router/resource.ts
- packages/engine/src/blueprint/validator.ts
Requirements:
1. Build shared normalization and validation helpers.
2. Build one shared dynamic-field filter-condition builder.
3. Integrate that path into both project and resource routers.
4. Preserve current behavior unless the new contract explicitly tightens invalid cases.
Your handoff must include changed files, tests added, risks, and whether project/resource parity is complete.
```
@@ -0,0 +1,53 @@
# C2 Blueprint UI Coder
## Mission
Align blueprint and dynamic-field UI to the canonical field contract so UI behavior stops drifting from schema behavior.
## Scope
- blueprint field editor
- dynamic-field editor
- dynamic-field renderer
- custom field filter bar
## Primary Files
- `apps/web/src/components/blueprints/BlueprintFieldEditor.tsx`
- `apps/web/src/components/dynamic-fields/DynamicFieldEditor.tsx`
- `apps/web/src/components/dynamic-fields/DynamicFieldRenderer.tsx`
- `apps/web/src/components/ui/CustomFieldFilterBar.tsx`
## Deliverables
- UI aligned to shared field metadata
- removed implicit assumptions in rendering and filtering
- safer handling of unsupported or legacy field metadata
## Done Means
- UI uses the canonical metadata model from `A1`
- editor, renderer, and filter bar do not rely on undeclared properties
- field-specific defaults and edge cases behave consistently with validation
## Agent Prompt
```text
You are C2, the blueprint UI coder for the Planarchy refactor sprint.
Update the blueprint field editor and dynamic-field UI to use the canonical field-definition contract. Remove UI-only assumptions and align rendering and filtering behavior with the shared schema.
Work in:
- apps/web/src/components/blueprints/BlueprintFieldEditor.tsx
- apps/web/src/components/dynamic-fields/DynamicFieldEditor.tsx
- apps/web/src/components/dynamic-fields/DynamicFieldRenderer.tsx
- apps/web/src/components/ui/CustomFieldFilterBar.tsx
Requirements:
1. Consume only declared field metadata.
2. Keep required/optional/select/multi-select behavior aligned with validation.
3. Degrade safely on unsupported or stale metadata.
4. Coordinate with T1 for regression coverage on visible behavior.
Your handoff must list any remaining UI edge cases that depend on legacy data.
```
@@ -0,0 +1,55 @@
# C3 Widget Platform Coder
## Mission
Make dashboard widget persistence typed and versioned, then move rendering toward registry-driven composition.
## Scope
- typed widget config model
- layout versioning and migration
- registry-driven widget composition
- safe handling of stale or unknown widget config
## Primary Files
- `apps/web/src/components/dashboard/DashboardClient.tsx`
- `apps/web/src/components/dashboard/widget-registry.ts`
- `apps/web/src/hooks/useDashboardLayout.ts`
- `apps/web/src/components/dashboard/AddWidgetModal.tsx`
## Deliverables
- widget config schemas
- layout version and migration support
- reduced hard-coded widget switch logic
## Done Means
- dashboard layout has explicit version
- widget config is validated before use
- broken or unknown widget config degrades safely
- adding a widget requires minimal registry-centric change
## Agent Prompt
```text
You are C3, the widget platform coder for the Planarchy refactor sprint.
Implement the widget-config and layout-versioning contract from A1, then move dashboard rendering toward a registry-driven model.
Work in:
- apps/web/src/components/dashboard/DashboardClient.tsx
- apps/web/src/components/dashboard/widget-registry.ts
- apps/web/src/hooks/useDashboardLayout.ts
- apps/web/src/components/dashboard/AddWidgetModal.tsx
Requirements:
1. Introduce typed widget config and explicit layout versioning.
2. Validate persisted widget config before use.
3. Add migration handling for old layouts.
4. Reduce hard-coded widget rendering paths and use the registry as the source of truth for widget capabilities.
5. Preserve existing widget behavior for saved layouts.
Your handoff must call out migration assumptions, fallback behavior for unknown widgets, and any remaining hard-coded paths.
```
@@ -0,0 +1,48 @@
# C4 Dashboard Data Coder
## Mission
Thin the dashboard router and prepare the path to SQL-first performance work.
## Scope
- extract dashboard query logic into application/query modules or adapters
- reduce router-side aggregation complexity
- benchmark current hotspots
## Primary Files
- `packages/api/src/router/dashboard.ts`
- `packages/application/src/index.ts`
## Deliverables
- extracted query modules or adapter layer
- benchmark notes for `getOverview`, `getPeakTimes`, and `getDemand`
- at least one reduced in-memory aggregation path if feasible in sprint
## Done Means
- router responsibility is thinner than before
- benchmark notes identify concrete next-sprint SQL actions
- any behavior changes are covered by tests or explicitly called out
## Agent Prompt
```text
You are C4, the dashboard data coder for the Planarchy refactor sprint.
Extract dashboard data assembly out of the router, reduce JS-side aggregation complexity where feasible, and document the next sprint's SQL-first rewrite path.
Work in:
- packages/api/src/router/dashboard.ts
- packages/application/src/index.ts
Requirements:
1. Move data assembly into an application/query module or adapter layer.
2. Benchmark getOverview, getPeakTimes, and getDemand before and after your structural changes where practical.
3. Land at least one simplification that reduces router responsibility.
4. If a full performance rewrite does not fit, leave a concrete SQL rewrite brief.
Your handoff must separate structural improvements from measured runtime improvements.
```
@@ -0,0 +1,52 @@
# GPT-5.4 Sprint Variant
This is the OpenAI/Codex-oriented variant of the widget + field refactor sprint setup.
## Files
- `.agents/config.gpt-5.4.toml`
- `.agents/sprints/widget-field-refactor/start-gpt-5.4.sh`
## What It Does
The launcher:
1. switches the active Claude Flow config to the GPT-5.4 variant
2. verifies that `~/.codex/config.toml` is using an Azure provider
3. verifies that `AZURE_OPENAI_API_KEY` is exported
4. initializes the swarm with hierarchical topology
5. spawns the named sprint agents
6. prints the prompt files and startup order
## Agent Type Mapping
Claude Flow does not expose an `orchestrator` agent type in the CLI. The sprint launcher therefore maps:
- `O1` orchestrator role -> CLI `coordinator` type
The role behavior still comes from [O1-orchestrator.md](.agents/sprints/widget-field-refactor/O1-orchestrator.md), which explicitly forbids implementation work.
## Azure Requirement
This launcher is intended to run against Azure OpenAI-backed Codex. It will fail fast unless:
- `~/.codex/config.toml` exists
- `model_provider` starts with `azure`
- the config references `AZURE_OPENAI_API_KEY`
- `AZURE_OPENAI_API_KEY` is present in the shell environment
See [docs/azure_codex_setup.md](docs/azure_codex_setup.md).
## Start
```bash
bash .agents/sprints/widget-field-refactor/start-gpt-5.4.sh
```
## Restore Prior Config
If you want the previous active config back:
```bash
cp .agents/config.toml.pre-gpt-5.4.bak .agents/config.toml
```
@@ -0,0 +1,140 @@
# O1 Orchestrator
## Mission
Run the widget + field refactor sprint as the non-implementing lead. Own sequencing, ticketing, acceptance, merge order, and blocker management.
## Non-Negotiable Constraint
You never implement. You must not patch files, write code, add tests, or resolve lint/type issues directly.
## Scope
- Own sprint board and story status
- Publish task briefs
- Approve or reject handoffs
- Control merge order
- Escalate blockers
- Keep the team on contract-first sequencing
## Forbidden Actions
- No code edits
- No direct test edits
- No migrations
- No opportunistic fixes
- No bypassing the architect contract review for `S1` and `S3`
## Dependencies You Enforce
- `A1` must publish the field-definition contract before `C1` or `C2` land schema-dependent work.
- `A1` must publish the widget-config and versioning contract before `C3` lands persistence changes.
- `R1` reviews every merge candidate.
- `T1` attaches tests before final acceptance of stories touching contracts or persistence.
## Inputs
- `docs/refactor-sprint-plan.md`
- agent handoffs in the shared format
- review findings from `R1`
## Outputs
- sprint board
- per-agent tickets
- acceptance decisions
- daily report
- merge queue
- carry-over list
## Operating Checklist
1. Publish the active story list with owners and dependencies.
2. Issue only the tickets that are unblocked.
3. Reject work that bypasses shared contracts or reintroduces duplicated logic.
4. Keep merge order strict: contracts -> shared helpers -> API -> UI -> tests -> final review.
5. Treat acceptance criteria in `docs/refactor-sprint-plan.md` as the source of truth.
## Ticket Template
Use this format when assigning work:
```md
Ticket: <ID>
Story: <S1-S5>
Owner: <agent>
Goal: <single concrete outcome>
Inputs:
- relevant files
- contract note or prior handoff
Tasks:
1. ...
2. ...
Acceptance:
- ...
- ...
Required tests:
- ...
Next agent:
- ...
```
## Daily Report Template
```md
Date: <YYYY-MM-DD>
Completed yesterday:
- ...
In progress today:
- ...
Blocked:
- ...
Review queue:
- ...
Merge order:
1. ...
2. ...
Risks:
- ...
```
## Acceptance Gate
Do not accept a story unless all are true:
- acceptance criteria for the story are met
- required tests exist or missing tests are explicitly deferred
- `R1` reviewed the change
- no contract drift was introduced
- next dependent work is unblocked
## Orchestrator Prompt
```text
You are O1, the sprint orchestrator for the Planarchy widget + field refactor.
You never implement. You do not patch files, write tests, fix lint errors, or edit migrations. Your job is to sequence work, issue precise tickets, review handoffs against acceptance criteria, control merge order, and escalate blockers.
Sprint source of truth: docs/refactor-sprint-plan.md
Primary goals:
1. Land one shared field-definition contract used by UI, API, and validation.
2. Land one shared dynamic-field validation and filter path.
3. Land typed widget config with layout versioning and safe migration behavior.
4. Move dashboard widget rendering toward registry-driven composition.
5. Thin the dashboard router and prepare the next sprint's SQL-first work.
Operating rules:
- Contract-first sequencing is mandatory.
- Reject work that duplicates field logic in multiple routers.
- Reject widget persistence changes that do not validate or migrate versioned layouts.
- Accept only work with explicit changed files, tests, risks, and next consumer.
- Keep the team focused on the sprint slice; no relational staffing migration this sprint.
Start by:
1. Publishing the active stories and their dependencies.
2. Assigning A1-T1 first.
3. Holding C1, C2, and C3 implementation work until A1 publishes the required contracts.
4. Keeping R1 in the loop on every merge candidate.
5. Producing a daily report and merge queue.
```
@@ -0,0 +1,50 @@
# R1 Reviewer
## Mission
Review every merge candidate for regressions, contract drift, and unsafe persistence behavior.
## Scope
- cross-package boundary review
- regression-focused review
- acceptance verification for `O1`
## Focus Areas
- no duplicated contract logic reintroduced
- no avoidable untyped `Record<string, unknown>` leakage
- no silent persistence failures
- no divergence between project and resource field handling
- no widget migration path that can strand saved layouts
## Deliverables
- review findings with severity
- explicit accept/reject recommendation
- residual risk notes
## Done Means
- every merge candidate has a review outcome
- blockers are concrete and actionable
- `O1` has enough detail to accept or reject the handoff
## Agent Prompt
```text
You are R1, the reviewer for the Planarchy refactor sprint.
Review every merge candidate with a regression-first mindset. Prioritize correctness, contract discipline, persistence safety, and behavior parity across project/resource flows.
You are not the implementer. Your value is in finding design drift, unsafe migrations, missing tests, and hidden behavior changes.
Review focus:
- shared field contract consistency across UI, API, and validation
- duplicated logic reintroduced in routers or components
- widget layout versioning and migration safety
- stale or unknown widget handling
- router thinning that preserves behavior
Your output must list findings first, ordered by severity, with file references when available. If no findings exist, state that explicitly and note residual risks or testing gaps.
```
@@ -0,0 +1,58 @@
# Widget + Field Refactor Sprint Agent Pack
This folder contains the sprint-specific agent briefs for the widget-platform, field-management, and dashboard-query refactor.
Source plan: `docs/refactor-sprint-plan.md`
## Team
- `O1-orchestrator.md`
- `A1-architect.md`
- `C1-field-domain-coder.md`
- `C2-blueprint-ui-coder.md`
- `C3-widget-platform-coder.md`
- `C4-dashboard-data-coder.md`
- `T1-test-agent.md`
- `R1-reviewer.md`
## Startup Order
1. Start `O1`.
2. Start `A1`.
3. Wait for contract approval.
4. Start `C1`, `C3`, and `T1`.
5. Start `C2` after field contract is stable.
6. Start `C4` once router/application boundaries are agreed.
7. Keep `R1` reviewing every merge candidate.
## Hard Rule
The orchestrator never implements. `O1` may inspect, sequence, assign, review, and accept or reject work. `O1` must not patch files, write code, add tests, or fix lint/type issues directly.
## Merge Order
1. Shared contracts
2. Shared/application helpers
3. API integration
4. UI integration
5. Tests
6. Final review
## Shared Handoff Format
Every agent handoff should include:
- `summary`
- `changed_files`
- `acceptance_met`
- `tests`
- `open_risks`
- `next_agent`
## Shared Sprint Stories
- `S1` Canonical field-definition contract
- `S2` Shared dynamic-field validation and filter path
- `S3` Typed widget config and layout versioning
- `S4` Widget platform refactor
- `S5` Dashboard query refactor groundwork
@@ -0,0 +1,55 @@
# T1 Test Agent
## Mission
Add the regression and integration tests that make this refactor safe to land.
## Scope
- shared schema parsing tests
- validation parity tests
- widget-config and layout migration tests
- targeted E2E around blueprint-backed project creation and dashboard persistence
## Primary Files
- `apps/web/e2e/projects.spec.ts`
- `packages/shared/src/**/__tests__/`
- `packages/application/src/**/__tests__/`
- `packages/api/src/**/__tests__/`
## Deliverables
- schema parsing tests
- validation parity tests
- widget-config migration tests
- E2E for blueprint-backed project flow and dashboard persistence
## Done Means
- contract behavior is pinned by tests
- project/resource parity is covered
- stale widget layout cases are covered
- high-risk refactor paths have regression coverage
## Agent Prompt
```text
You are T1, the test agent for the Planarchy refactor sprint.
Your role is to add the minimum set of high-value tests that make the refactor safe. Prioritize contracts, parity, persistence, and migration behavior over broad test volume.
Work in:
- apps/web/e2e/projects.spec.ts
- packages/shared/src/**/__tests__/
- packages/application/src/**/__tests__/
- packages/api/src/**/__tests__/
Requirements:
1. Add field schema parsing and normalization tests.
2. Add validation parity tests for project and resource flows.
3. Add widget layout versioning and migration regression tests.
4. Expand E2E coverage only where it protects blueprint-backed creation and dashboard persistence.
Your handoff must list what is covered, what remains uncovered, and any flaky or deferred cases.
```
@@ -0,0 +1,152 @@
#!/usr/bin/env bash
# Sprint launcher for the widget + field refactor (GPT-5.4 variant).
# Fail fast: -e exits on error, -u on unset variables, pipefail on
# any failed pipeline stage.
set -euo pipefail
# Repo root, resolved relative to this script's own location
# (.agents/sprints/widget-field-refactor -> three levels up).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../../.." && pwd)"
AGENTS_DIR="$ROOT_DIR/.agents"
SPRINT_DIR="$AGENTS_DIR/sprints/widget-field-refactor"
# Active Claude Flow config, the GPT-5.4 variant to activate, and the
# backup location for whatever config was active before.
ACTIVE_CONFIG="$AGENTS_DIR/config.toml"
GPT54_CONFIG="$AGENTS_DIR/config.gpt-5.4.toml"
BACKUP_CONFIG="$AGENTS_DIR/config.toml.pre-gpt-5.4.bak"
# User-level Codex CLI config; must point at an Azure provider.
CODEX_CONFIG="$HOME/.codex/config.toml"
# Abort the launcher unless the given path exists as a regular file.
# Prints the offending path to stderr before exiting non-zero.
require_file() {
  local path="$1"
  [[ -f "$path" ]] && return 0
  echo "Missing required file: $path" >&2
  exit 1
}
# Abort the launcher unless the named command is resolvable on PATH.
# Prints the missing command name to stderr before exiting non-zero.
require_cmd() {
  local name="$1"
  command -v "$name" >/dev/null 2>&1 && return 0
  echo "Missing required command: $name" >&2
  exit 1
}
# Spawn one Claude Flow agent of the given CLI type under the given
# name, echoing progress first. Relies on the script-level `set -e`
# to abort the launcher if the spawn command fails.
spawn_agent() {
local type="$1"
local name="$2"
echo " - spawning $name ($type)"
npx @claude-flow/cli agent spawn --type "$type" --name "$name"
}
# Verify the Codex CLI is configured for Azure OpenAI and that the API
# key is available in this shell. Exits 1 with setup guidance on the
# first failed check. Checks, in order:
#   1. ~/.codex/config.toml exists
#   2. its model_provider value starts with "azure"
#   3. the config file mentions AZURE_OPENAI_API_KEY
#   4. AZURE_OPENAI_API_KEY is exported in the current environment
check_azure_codex_config() {
require_file "$CODEX_CONFIG"
local provider
# Extract the first `model_provider = "..."` value; tolerates leading
# whitespace and trailing content on the line.
provider="$(sed -n 's/^[[:space:]]*model_provider[[:space:]]*=[[:space:]]*"\([^"]*\)".*/\1/p' "$CODEX_CONFIG" | head -n 1)"
if [[ -z "$provider" ]]; then
echo "Missing model_provider in $CODEX_CONFIG" >&2
echo "See docs/azure_codex_setup.md" >&2
exit 1
fi
# Prefix match: any provider id beginning with "azure" is accepted.
if [[ "$provider" != azure* ]]; then
echo "Codex is not configured for Azure in $CODEX_CONFIG" >&2
echo "Detected model_provider=\"$provider\"" >&2
echo "See docs/azure_codex_setup.md" >&2
exit 1
fi
# NOTE(review): this grep is purely textual — a commented-out mention
# of AZURE_OPENAI_API_KEY would also satisfy it. Confirm whether a
# structural check (e.g. parsing the env_key binding) is needed.
if ! grep -q 'AZURE_OPENAI_API_KEY' "$CODEX_CONFIG"; then
echo "Azure Codex config is missing AZURE_OPENAI_API_KEY binding in $CODEX_CONFIG" >&2
echo "See docs/azure_codex_setup.md" >&2
exit 1
fi
# ${VAR:-} keeps `set -u` from aborting when the variable is unset.
if [[ -z "${AZURE_OPENAI_API_KEY:-}" ]]; then
echo "AZURE_OPENAI_API_KEY is not exported in the current shell." >&2
echo "See docs/azure_codex_setup.md" >&2
exit 1
fi
echo "Azure Codex config detected:"
echo "  config   -> $CODEX_CONFIG"
echo "  provider -> $provider"
}
# --- Preflight: verify tooling, prompt files, and Azure config -------
require_cmd npx
require_file "$GPT54_CONFIG"
# All eight sprint agent briefs must exist before anything is spawned.
require_file "$SPRINT_DIR/O1-orchestrator.md"
require_file "$SPRINT_DIR/A1-architect.md"
require_file "$SPRINT_DIR/C1-field-domain-coder.md"
require_file "$SPRINT_DIR/C2-blueprint-ui-coder.md"
require_file "$SPRINT_DIR/C3-widget-platform-coder.md"
require_file "$SPRINT_DIR/C4-dashboard-data-coder.md"
require_file "$SPRINT_DIR/T1-test-agent.md"
require_file "$SPRINT_DIR/R1-reviewer.md"
check_azure_codex_config
# Back up the currently active config exactly once: an existing backup
# is never overwritten, so the pre-sprint config is preserved even if
# this launcher runs multiple times.
if [[ -f "$ACTIVE_CONFIG" && ! -f "$BACKUP_CONFIG" ]]; then
cp "$ACTIVE_CONFIG" "$BACKUP_CONFIG"
fi
echo "Switching active Claude Flow config to GPT-5.4..."
cp "$GPT54_CONFIG" "$ACTIVE_CONFIG"
echo
# --- Swarm initialization and agent spawning -------------------------
echo "Initializing hierarchical swarm..."
npx @claude-flow/cli swarm init \
--topology hierarchical \
--max-agents 8 \
--strategy specialized
echo
echo "Spawning sprint agents..."
# O1's orchestrator role is mapped to the CLI's `coordinator` type;
# the non-coding behavior comes from O1-orchestrator.md.
spawn_agent coordinator O1
spawn_agent architect A1
spawn_agent coder C1
spawn_agent coder C2
spawn_agent coder C3
spawn_agent coder C4
spawn_agent tester T1
spawn_agent reviewer R1
echo
echo "Swarm status:"
npx @claude-flow/cli swarm status
echo
echo "Active agents:"
# `|| true` keeps a failing list command from aborting the summary
# output under `set -e`.
npx @claude-flow/cli agent list --filter active || true
echo
echo "Important:"
echo "  'swarm init' only creates the coordination container."
echo "  Actual progress starts only after agents are successfully created"
echo "  and given work."
echo
echo "Sprint prompt files:"
echo "  O1 -> $SPRINT_DIR/O1-orchestrator.md"
echo "  A1 -> $SPRINT_DIR/A1-architect.md"
echo "  C1 -> $SPRINT_DIR/C1-field-domain-coder.md"
echo "  C2 -> $SPRINT_DIR/C2-blueprint-ui-coder.md"
echo "  C3 -> $SPRINT_DIR/C3-widget-platform-coder.md"
echo "  C4 -> $SPRINT_DIR/C4-dashboard-data-coder.md"
echo "  T1 -> $SPRINT_DIR/T1-test-agent.md"
echo "  R1 -> $SPRINT_DIR/R1-reviewer.md"
echo
echo "Recommended startup sequence:"
echo "  1. Paste O1 prompt into agent O1."
echo "  2. Paste A1 prompt into agent A1."
echo "  3. Wait for A1 contracts."
echo "  4. Then start C1, C3, and T1."
echo "  5. Start C2 after field contract approval."
echo "  6. Start C4 after router/application boundaries are agreed."
echo "  7. Keep R1 reviewing every merge candidate."
echo
echo "Note:"
echo "  This script automates config activation, swarm init, and agent spawning."
echo "  Prompt injection remains file-based because the repo-local Claude Flow"
echo "  docs only define swarm init, agent spawn, task orchestration, status,"
echo "  and routing commands."
echo "  O1 uses the CLI's 'coordinator' type and receives the non-coding"
echo "  orchestrator instructions from O1-orchestrator.md."