mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 21:53:28 +08:00
Compare commits
22 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5c63fa9006 | ||
|
|
5670fcd34f | ||
|
|
1c9fa0b8f8 | ||
|
|
2bfd2fbbee | ||
|
|
fae9716c0a | ||
|
|
a2087a8193 | ||
|
|
b9b7831ef5 | ||
|
|
660e0d3bad | ||
|
|
a7bc5f2a90 | ||
|
|
22ad036cb5 | ||
|
|
5230892ee8 | ||
|
|
970f8bf884 | ||
|
|
4ec7a6b15a | ||
|
|
0d438dd042 | ||
|
|
7f4f622517 | ||
|
|
c3f1594acd | ||
|
|
19345df79d | ||
|
|
73bda1aad6 | ||
|
|
ecfbbd3da1 | ||
|
|
ee5affbdbd | ||
|
|
d362ae65eb | ||
|
|
9e8006c8ca |
@@ -5,15 +5,13 @@
|
||||
"email": "affaan@example.com"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Battle-tested Claude Code configurations from an Anthropic hackathon winner",
|
||||
"version": "1.0.0"
|
||||
"description": "Battle-tested Claude Code configurations from an Anthropic hackathon winner"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"source": ".",
|
||||
"source": "./",
|
||||
"description": "Complete collection of agents, skills, hooks, commands, and rules evolved over 10+ months of intensive daily use",
|
||||
"version": "1.0.0",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa"
|
||||
},
|
||||
|
||||
@@ -1,16 +1,12 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"version": "1.0.0",
|
||||
"description": "Complete collection of battle-tested Claude Code configs from an Anthropic hackathon winner - agents, skills, hooks, commands, and rules evolved over 10+ months of intensive daily use",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"url": "https://x.com/affaanmustafa"
|
||||
},
|
||||
"homepage": "https://github.com/affaan-m/everything-claude-code",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/affaan-m/everything-claude-code.git"
|
||||
},
|
||||
"repository": "https://github.com/affaan-m/everything-claude-code",
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
"claude-code",
|
||||
@@ -27,7 +23,5 @@
|
||||
"best-practices"
|
||||
],
|
||||
"commands": "./commands",
|
||||
"agents": "./agents",
|
||||
"skills": "./skills",
|
||||
"hooks": "./hooks/hooks.json"
|
||||
"skills": "./skills"
|
||||
}
|
||||
|
||||
4
.claude/package-manager.json
Normal file
4
.claude/package-manager.json
Normal file
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"packageManager": "bun",
|
||||
"setAt": "2026-01-23T02:09:58.819Z"
|
||||
}
|
||||
150
README.md
150
README.md
@@ -1,5 +1,11 @@
|
||||
# Everything Claude Code
|
||||
|
||||
[](https://github.com/affaan-m/everything-claude-code/stargazers)
|
||||
[](LICENSE)
|
||||

|
||||

|
||||

|
||||
|
||||
**The complete collection of Claude Code configs from an Anthropic hackathon winner.**
|
||||
|
||||
Production-ready agents, skills, hooks, commands, rules, and MCP configurations evolved over 10+ months of intensive daily use building real products.
|
||||
@@ -10,23 +16,24 @@ Production-ready agents, skills, hooks, commands, rules, and MCP configurations
|
||||
|
||||
This repo is the raw code only. The guides explain everything.
|
||||
|
||||
### Start Here: The Shorthand Guide
|
||||
|
||||
<img width="592" height="445" alt="image" src="https://github.com/user-attachments/assets/1a471488-59cc-425b-8345-5245c7efbcef" />
|
||||
|
||||
**[The Shorthand Guide to Everything Claude Code](https://x.com/affaanmustafa/status/2012378465664745795)**
|
||||
|
||||
The foundation - what each config type does, how to structure your setup, context window management, and the philosophy behind these configs. **Read this first.**
|
||||
|
||||
---
|
||||
|
||||
### Then: The Longform Guide
|
||||
|
||||
<img width="609" height="428" alt="image" src="https://github.com/user-attachments/assets/c9ca43bc-b149-427f-b551-af6840c368f0" />
|
||||
|
||||
**[The Longform Guide to Everything Claude Code](https://x.com/affaanmustafa/status/2014040193557471352)**
|
||||
|
||||
The advanced techniques - token optimization, memory persistence across sessions, verification loops & evals, parallelization strategies, subagent orchestration, and continuous learning. Everything in this guide has working code in this repo.
|
||||
<table>
|
||||
<tr>
|
||||
<td width="50%">
|
||||
<a href="https://x.com/affaanmustafa/status/2012378465664745795">
|
||||
<img src="https://github.com/user-attachments/assets/1a471488-59cc-425b-8345-5245c7efbcef" alt="The Shorthand Guide to Everything Claude Code" />
|
||||
</a>
|
||||
</td>
|
||||
<td width="50%">
|
||||
<a href="https://x.com/affaanmustafa/status/2014040193557471352">
|
||||
<img src="https://github.com/user-attachments/assets/c9ca43bc-b149-427f-b551-af6840c368f0" alt="The Longform Guide to Everything Claude Code" />
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center"><b>Shorthand Guide</b><br/>Setup, foundations, philosophy. <b>Read this first.</b></td>
|
||||
<td align="center"><b>Longform Guide</b><br/>Token optimization, memory persistence, evals, parallelization.</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
| Topic | What You'll Learn |
|
||||
|-------|-------------------|
|
||||
@@ -37,6 +44,40 @@ The advanced techniques - token optimization, memory persistence across sessions
|
||||
| Parallelization | Git worktrees, cascade method, when to scale instances |
|
||||
| Subagent Orchestration | The context problem, iterative retrieval pattern |
|
||||
|
||||
---
|
||||
|
||||
## Cross-Platform Support
|
||||
|
||||
This plugin now fully supports **Windows, macOS, and Linux**. All hooks and scripts have been rewritten in Node.js for maximum compatibility.
|
||||
|
||||
### Package Manager Detection
|
||||
|
||||
The plugin automatically detects your preferred package manager (npm, pnpm, yarn, or bun) with the following priority:
|
||||
|
||||
1. **Environment variable**: `CLAUDE_PACKAGE_MANAGER`
|
||||
2. **Project config**: `.claude/package-manager.json`
|
||||
3. **package.json**: `packageManager` field
|
||||
4. **Lock file**: Detection from package-lock.json, yarn.lock, pnpm-lock.yaml, or bun.lockb
|
||||
5. **Global config**: `~/.claude/package-manager.json`
|
||||
6. **Fallback**: First available package manager
|
||||
|
||||
To set your preferred package manager:
|
||||
|
||||
```bash
|
||||
# Via environment variable
|
||||
export CLAUDE_PACKAGE_MANAGER=pnpm
|
||||
|
||||
# Via global config
|
||||
node scripts/setup-package-manager.js --global pnpm
|
||||
|
||||
# Via project config
|
||||
node scripts/setup-package-manager.js --project bun
|
||||
|
||||
# Detect current setting
|
||||
node scripts/setup-package-manager.js --detect
|
||||
```
|
||||
|
||||
Or use the `/setup-pm` command in Claude Code.
|
||||
|
||||
---
|
||||
|
||||
@@ -66,6 +107,8 @@ everything-claude-code/
|
||||
| |-- backend-patterns/ # API, database, caching patterns
|
||||
| |-- frontend-patterns/ # React, Next.js patterns
|
||||
| |-- continuous-learning/ # Auto-extract patterns from sessions (Longform Guide)
|
||||
| |-- continuous-learning-v2/ # Instinct-based learning with confidence scoring
|
||||
| |-- iterative-retrieval/ # Progressive context refinement for subagents
|
||||
| |-- strategic-compact/ # Manual compaction suggestions (Longform Guide)
|
||||
| |-- tdd-workflow/ # TDD methodology
|
||||
| |-- security-review/ # Security checklist
|
||||
@@ -82,6 +125,7 @@ everything-claude-code/
|
||||
| |-- learn.md # /learn - Extract patterns mid-session (Longform Guide)
|
||||
| |-- checkpoint.md # /checkpoint - Save verification state (Longform Guide)
|
||||
| |-- verify.md # /verify - Run verification loop (Longform Guide)
|
||||
| |-- setup-pm.md # /setup-pm - Configure package manager (NEW)
|
||||
|
|
||||
|-- rules/ # Always-follow guidelines (copy to ~/.claude/rules/)
|
||||
| |-- security.md # Mandatory security checks
|
||||
@@ -96,6 +140,23 @@ everything-claude-code/
|
||||
| |-- memory-persistence/ # Session lifecycle hooks (Longform Guide)
|
||||
| |-- strategic-compact/ # Compaction suggestions (Longform Guide)
|
||||
|
|
||||
|-- scripts/ # Cross-platform Node.js scripts (NEW)
|
||||
| |-- lib/ # Shared utilities
|
||||
| | |-- utils.js # Cross-platform file/path/system utilities
|
||||
| | |-- package-manager.js # Package manager detection and selection
|
||||
| |-- hooks/ # Hook implementations
|
||||
| | |-- session-start.js # Load context on session start
|
||||
| | |-- session-end.js # Save state on session end
|
||||
| | |-- pre-compact.js # Pre-compaction state saving
|
||||
| | |-- suggest-compact.js # Strategic compaction suggestions
|
||||
| | |-- evaluate-session.js # Extract patterns from sessions
|
||||
| |-- setup-package-manager.js # Interactive PM setup
|
||||
|
|
||||
|-- tests/ # Test suite (NEW)
|
||||
| |-- lib/ # Library tests
|
||||
| |-- hooks/ # Hook tests
|
||||
| |-- run-all.js # Run all tests
|
||||
|
|
||||
|-- contexts/ # Dynamic system prompt injection contexts (Longform Guide)
|
||||
| |-- dev.md # Development mode context
|
||||
| |-- review.md # Code review mode context
|
||||
@@ -113,6 +174,28 @@ everything-claude-code/
|
||||
|
||||
---
|
||||
|
||||
## Ecosystem Tools
|
||||
|
||||
### ecc.tools - Skill Creator
|
||||
|
||||
Automatically generate Claude Code skills from your repository.
|
||||
|
||||
[Install GitHub App](https://github.com/apps/skill-creator) | [ecc.tools](https://ecc.tools)
|
||||
|
||||
Analyzes your repository and creates:
|
||||
- **SKILL.md files** - Ready-to-use skills for Claude Code
|
||||
- **Instinct collections** - For continuous-learning-v2
|
||||
- **Pattern extraction** - Learns from your commit history
|
||||
|
||||
```bash
|
||||
# After installing the GitHub App, skills appear in:
|
||||
~/.claude/skills/generated/
|
||||
```
|
||||
|
||||
Works seamlessly with the `continuous-learning-v2` skill for inherited instincts.
|
||||
|
||||
---
|
||||
|
||||
## Installation
|
||||
|
||||
### Option 1: Install as Plugin (Recommended)
|
||||
@@ -182,15 +265,6 @@ Copy desired MCP servers from `mcp-configs/mcp-servers.json` to your `~/.claude.
|
||||
|
||||
---
|
||||
|
||||
### Read the Guides
|
||||
|
||||
Seriously, read the guides. These configs make 10x more sense with context.
|
||||
|
||||
1. **[Shorthand Guide](https://x.com/affaanmustafa/status/2012378465664745795)** - Setup and foundations
|
||||
2. **[Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352)** - Advanced techniques (token optimization, memory persistence, evals, parallelization)
|
||||
|
||||
---
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### Agents
|
||||
@@ -201,7 +275,7 @@ Subagents handle delegated tasks with limited scope. Example:
|
||||
---
|
||||
name: code-reviewer
|
||||
description: Reviews code for quality, security, and maintainability
|
||||
tools: Read, Grep, Glob, Bash
|
||||
tools: ["Read", "Grep", "Glob", "Bash"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
@@ -249,6 +323,22 @@ Rules are always-follow guidelines. Keep them modular:
|
||||
|
||||
---
|
||||
|
||||
## Running Tests
|
||||
|
||||
The plugin includes a comprehensive test suite:
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
node tests/run-all.js
|
||||
|
||||
# Run individual test files
|
||||
node tests/lib/utils.test.js
|
||||
node tests/lib/package-manager.test.js
|
||||
node tests/hooks/hooks.test.js
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Contributing
|
||||
|
||||
**Contributions are welcome and encouraged.**
|
||||
@@ -302,6 +392,12 @@ These configs work for my workflow. You should:
|
||||
|
||||
---
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#affaan-m/everything-claude-code&Date)
|
||||
|
||||
---
|
||||
|
||||
## Links
|
||||
|
||||
- **Shorthand Guide (Start Here):** [The Shorthand Guide to Everything Claude Code](https://x.com/affaanmustafa/status/2012378465664745795)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: architect
|
||||
description: Software architecture specialist for system design, scalability, and technical decision-making. Use PROACTIVELY when planning new features, refactoring large systems, or making architectural decisions.
|
||||
tools: Read, Grep, Glob
|
||||
tools: ["Read", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: build-error-resolver
|
||||
description: Build and TypeScript error resolution specialist. Use PROACTIVELY when build fails or type errors occur. Fixes build/type errors only with minimal diffs, no architectural edits. Focuses on getting the build green quickly.
|
||||
tools: Read, Write, Edit, Bash, Grep, Glob
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: code-reviewer
|
||||
description: Expert code review specialist. Proactively reviews code for quality, security, and maintainability. Use immediately after writing or modifying code. MUST BE USED for all code changes.
|
||||
tools: Read, Grep, Glob, Bash
|
||||
tools: ["Read", "Grep", "Glob", "Bash"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
654
agents/database-reviewer.md
Normal file
654
agents/database-reviewer.md
Normal file
@@ -0,0 +1,654 @@
|
||||
---
|
||||
name: database-reviewer
|
||||
description: PostgreSQL database specialist for query optimization, schema design, security, and performance. Use PROACTIVELY when writing SQL, creating migrations, designing schemas, or troubleshooting database performance. Incorporates Supabase best practices.
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
# Database Reviewer
|
||||
|
||||
You are an expert PostgreSQL database specialist focused on query optimization, schema design, security, and performance. Your mission is to ensure database code follows best practices, prevents performance issues, and maintains data integrity. This agent incorporates patterns from [Supabase's postgres-best-practices](https://github.com/supabase/agent-skills).
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
1. **Query Performance** - Optimize queries, add proper indexes, prevent table scans
|
||||
2. **Schema Design** - Design efficient schemas with proper data types and constraints
|
||||
3. **Security & RLS** - Implement Row Level Security, least privilege access
|
||||
4. **Connection Management** - Configure pooling, timeouts, limits
|
||||
5. **Concurrency** - Prevent deadlocks, optimize locking strategies
|
||||
6. **Monitoring** - Set up query analysis and performance tracking
|
||||
|
||||
## Tools at Your Disposal
|
||||
|
||||
### Database Analysis Commands
|
||||
```bash
|
||||
# Connect to database
|
||||
psql $DATABASE_URL
|
||||
|
||||
# Check for slow queries (requires pg_stat_statements)
|
||||
psql -c "SELECT query, mean_exec_time, calls FROM pg_stat_statements ORDER BY mean_exec_time DESC LIMIT 10;"
|
||||
|
||||
# Check table sizes
|
||||
psql -c "SELECT relname, pg_size_pretty(pg_total_relation_size(relid)) FROM pg_stat_user_tables ORDER BY pg_total_relation_size(relid) DESC;"
|
||||
|
||||
# Check index usage
|
||||
psql -c "SELECT indexrelname, idx_scan, idx_tup_read FROM pg_stat_user_indexes ORDER BY idx_scan DESC;"
|
||||
|
||||
# Find missing indexes on foreign keys
|
||||
psql -c "SELECT conrelid::regclass, a.attname FROM pg_constraint c JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey) WHERE c.contype = 'f' AND NOT EXISTS (SELECT 1 FROM pg_index i WHERE i.indrelid = c.conrelid AND a.attnum = ANY(i.indkey));"
|
||||
|
||||
# Check for table bloat
|
||||
psql -c "SELECT relname, n_dead_tup, last_vacuum, last_autovacuum FROM pg_stat_user_tables WHERE n_dead_tup > 1000 ORDER BY n_dead_tup DESC;"
|
||||
```
|
||||
|
||||
## Database Review Workflow
|
||||
|
||||
### 1. Query Performance Review (CRITICAL)
|
||||
|
||||
For every SQL query, verify:
|
||||
|
||||
```
|
||||
a) Index Usage
|
||||
- Are WHERE columns indexed?
|
||||
- Are JOIN columns indexed?
|
||||
- Is the index type appropriate (B-tree, GIN, BRIN)?
|
||||
|
||||
b) Query Plan Analysis
|
||||
- Run EXPLAIN ANALYZE on complex queries
|
||||
- Check for Seq Scans on large tables
|
||||
- Verify row estimates match actuals
|
||||
|
||||
c) Common Issues
|
||||
- N+1 query patterns
|
||||
- Missing composite indexes
|
||||
- Wrong column order in indexes
|
||||
```
|
||||
|
||||
### 2. Schema Design Review (HIGH)
|
||||
|
||||
```
|
||||
a) Data Types
|
||||
- bigint for IDs (not int)
|
||||
- text for strings (not varchar(n) unless constraint needed)
|
||||
- timestamptz for timestamps (not timestamp)
|
||||
- numeric for money (not float)
|
||||
- boolean for flags (not varchar)
|
||||
|
||||
b) Constraints
|
||||
- Primary keys defined
|
||||
- Foreign keys with proper ON DELETE
|
||||
- NOT NULL where appropriate
|
||||
- CHECK constraints for validation
|
||||
|
||||
c) Naming
|
||||
- lowercase_snake_case (avoid quoted identifiers)
|
||||
- Consistent naming patterns
|
||||
```
|
||||
|
||||
### 3. Security Review (CRITICAL)
|
||||
|
||||
```
|
||||
a) Row Level Security
|
||||
- RLS enabled on multi-tenant tables?
|
||||
- Policies use (select auth.uid()) pattern?
|
||||
- RLS columns indexed?
|
||||
|
||||
b) Permissions
|
||||
- Least privilege principle followed?
|
||||
- No GRANT ALL to application users?
|
||||
- Public schema permissions revoked?
|
||||
|
||||
c) Data Protection
|
||||
- Sensitive data encrypted?
|
||||
- PII access logged?
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Index Patterns
|
||||
|
||||
### 1. Add Indexes on WHERE and JOIN Columns
|
||||
|
||||
**Impact:** 100-1000x faster queries on large tables
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: No index on foreign key
|
||||
CREATE TABLE orders (
|
||||
id bigint PRIMARY KEY,
|
||||
customer_id bigint REFERENCES customers(id)
|
||||
-- Missing index!
|
||||
);
|
||||
|
||||
-- ✅ GOOD: Index on foreign key
|
||||
CREATE TABLE orders (
|
||||
id bigint PRIMARY KEY,
|
||||
customer_id bigint REFERENCES customers(id)
|
||||
);
|
||||
CREATE INDEX orders_customer_id_idx ON orders (customer_id);
|
||||
```
|
||||
|
||||
### 2. Choose the Right Index Type
|
||||
|
||||
| Index Type | Use Case | Operators |
|
||||
|------------|----------|-----------|
|
||||
| **B-tree** (default) | Equality, range | `=`, `<`, `>`, `BETWEEN`, `IN` |
|
||||
| **GIN** | Arrays, JSONB, full-text | `@>`, `?`, `?&`, `?|`, `@@` |
|
||||
| **BRIN** | Large time-series tables | Range queries on sorted data |
|
||||
| **Hash** | Equality only | `=` (marginally faster than B-tree) |
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: B-tree for JSONB containment
|
||||
CREATE INDEX products_attrs_idx ON products (attributes);
|
||||
SELECT * FROM products WHERE attributes @> '{"color": "red"}';
|
||||
|
||||
-- ✅ GOOD: GIN for JSONB
|
||||
CREATE INDEX products_attrs_idx ON products USING gin (attributes);
|
||||
```
|
||||
|
||||
### 3. Composite Indexes for Multi-Column Queries
|
||||
|
||||
**Impact:** 5-10x faster multi-column queries
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Separate indexes
|
||||
CREATE INDEX orders_status_idx ON orders (status);
|
||||
CREATE INDEX orders_created_idx ON orders (created_at);
|
||||
|
||||
-- ✅ GOOD: Composite index (equality columns first, then range)
|
||||
CREATE INDEX orders_status_created_idx ON orders (status, created_at);
|
||||
```
|
||||
|
||||
**Leftmost Prefix Rule:**
|
||||
- Index `(status, created_at)` works for:
|
||||
- `WHERE status = 'pending'`
|
||||
- `WHERE status = 'pending' AND created_at > '2024-01-01'`
|
||||
- Does NOT work for:
|
||||
- `WHERE created_at > '2024-01-01'` alone
|
||||
|
||||
### 4. Covering Indexes (Index-Only Scans)
|
||||
|
||||
**Impact:** 2-5x faster queries by avoiding table lookups
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Must fetch name from table
|
||||
CREATE INDEX users_email_idx ON users (email);
|
||||
SELECT email, name FROM users WHERE email = 'user@example.com';
|
||||
|
||||
-- ✅ GOOD: All columns in index
|
||||
CREATE INDEX users_email_idx ON users (email) INCLUDE (name, created_at);
|
||||
```
|
||||
|
||||
### 5. Partial Indexes for Filtered Queries
|
||||
|
||||
**Impact:** 5-20x smaller indexes, faster writes and queries
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Full index includes deleted rows
|
||||
CREATE INDEX users_email_idx ON users (email);
|
||||
|
||||
-- ✅ GOOD: Partial index excludes deleted rows
|
||||
CREATE INDEX users_active_email_idx ON users (email) WHERE deleted_at IS NULL;
|
||||
```
|
||||
|
||||
**Common Patterns:**
|
||||
- Soft deletes: `WHERE deleted_at IS NULL`
|
||||
- Status filters: `WHERE status = 'pending'`
|
||||
- Non-null values: `WHERE sku IS NOT NULL`
|
||||
|
||||
---
|
||||
|
||||
## Schema Design Patterns
|
||||
|
||||
### 1. Data Type Selection
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Poor type choices
|
||||
CREATE TABLE users (
|
||||
id int, -- Overflows at 2.1B
|
||||
email varchar(255), -- Artificial limit
|
||||
created_at timestamp, -- No timezone
|
||||
is_active varchar(5), -- Should be boolean
|
||||
balance float -- Precision loss
|
||||
);
|
||||
|
||||
-- ✅ GOOD: Proper types
|
||||
CREATE TABLE users (
|
||||
id bigint GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
|
||||
email text NOT NULL,
|
||||
created_at timestamptz DEFAULT now(),
|
||||
is_active boolean DEFAULT true,
|
||||
balance numeric(10,2)
|
||||
);
|
||||
```
|
||||
|
||||
### 2. Primary Key Strategy
|
||||
|
||||
```sql
|
||||
-- ✅ Single database: IDENTITY (default, recommended)
|
||||
CREATE TABLE users (
|
||||
id bigint GENERATED ALWAYS AS IDENTITY PRIMARY KEY
|
||||
);
|
||||
|
||||
-- ✅ Distributed systems: UUIDv7 (time-ordered)
|
||||
CREATE EXTENSION IF NOT EXISTS pg_uuidv7;
|
||||
CREATE TABLE orders (
|
||||
id uuid DEFAULT uuid_generate_v7() PRIMARY KEY
|
||||
);
|
||||
|
||||
-- ❌ AVOID: Random UUIDs cause index fragmentation
|
||||
CREATE TABLE events (
|
||||
id uuid DEFAULT gen_random_uuid() PRIMARY KEY -- Fragmented inserts!
|
||||
);
|
||||
```
|
||||
|
||||
### 3. Table Partitioning
|
||||
|
||||
**Use When:** Tables > 100M rows, time-series data, need to drop old data
|
||||
|
||||
```sql
|
||||
-- ✅ GOOD: Partitioned by month
|
||||
CREATE TABLE events (
|
||||
id bigint GENERATED ALWAYS AS IDENTITY,
|
||||
created_at timestamptz NOT NULL,
|
||||
data jsonb
|
||||
) PARTITION BY RANGE (created_at);
|
||||
|
||||
CREATE TABLE events_2024_01 PARTITION OF events
|
||||
FOR VALUES FROM ('2024-01-01') TO ('2024-02-01');
|
||||
|
||||
CREATE TABLE events_2024_02 PARTITION OF events
|
||||
FOR VALUES FROM ('2024-02-01') TO ('2024-03-01');
|
||||
|
||||
-- Drop old data instantly
|
||||
DROP TABLE events_2023_01; -- Instant vs DELETE taking hours
|
||||
```
|
||||
|
||||
### 4. Use Lowercase Identifiers
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Quoted mixed-case requires quotes everywhere
|
||||
CREATE TABLE "Users" ("userId" bigint, "firstName" text);
|
||||
SELECT "firstName" FROM "Users"; -- Must quote!
|
||||
|
||||
-- ✅ GOOD: Lowercase works without quotes
|
||||
CREATE TABLE users (user_id bigint, first_name text);
|
||||
SELECT first_name FROM users;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Security & Row Level Security (RLS)
|
||||
|
||||
### 1. Enable RLS for Multi-Tenant Data
|
||||
|
||||
**Impact:** CRITICAL - Database-enforced tenant isolation
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Application-only filtering
|
||||
SELECT * FROM orders WHERE user_id = $current_user_id;
|
||||
-- Bug means all orders exposed!
|
||||
|
||||
-- ✅ GOOD: Database-enforced RLS
|
||||
ALTER TABLE orders ENABLE ROW LEVEL SECURITY;
|
||||
ALTER TABLE orders FORCE ROW LEVEL SECURITY;
|
||||
|
||||
CREATE POLICY orders_user_policy ON orders
|
||||
FOR ALL
|
||||
USING (user_id = current_setting('app.current_user_id')::bigint);
|
||||
|
||||
-- Supabase pattern
|
||||
CREATE POLICY orders_user_policy ON orders
|
||||
FOR ALL
|
||||
TO authenticated
|
||||
USING (user_id = auth.uid());
|
||||
```
|
||||
|
||||
### 2. Optimize RLS Policies
|
||||
|
||||
**Impact:** 5-10x faster RLS queries
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Function called per row
|
||||
CREATE POLICY orders_policy ON orders
|
||||
USING (auth.uid() = user_id); -- Called 1M times for 1M rows!
|
||||
|
||||
-- ✅ GOOD: Wrap in SELECT (cached, called once)
|
||||
CREATE POLICY orders_policy ON orders
|
||||
USING ((SELECT auth.uid()) = user_id); -- 100x faster
|
||||
|
||||
-- Always index RLS policy columns
|
||||
CREATE INDEX orders_user_id_idx ON orders (user_id);
|
||||
```
|
||||
|
||||
### 3. Least Privilege Access
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Overly permissive
|
||||
GRANT ALL PRIVILEGES ON ALL TABLES TO app_user;
|
||||
|
||||
-- ✅ GOOD: Minimal permissions
|
||||
CREATE ROLE app_readonly NOLOGIN;
|
||||
GRANT USAGE ON SCHEMA public TO app_readonly;
|
||||
GRANT SELECT ON public.products, public.categories TO app_readonly;
|
||||
|
||||
CREATE ROLE app_writer NOLOGIN;
|
||||
GRANT USAGE ON SCHEMA public TO app_writer;
|
||||
GRANT SELECT, INSERT, UPDATE ON public.orders TO app_writer;
|
||||
-- No DELETE permission
|
||||
|
||||
REVOKE ALL ON SCHEMA public FROM public;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Connection Management
|
||||
|
||||
### 1. Connection Limits
|
||||
|
||||
**Formula:** `(RAM_in_MB / 5MB_per_connection) - reserved`
|
||||
|
||||
```sql
|
||||
-- 4GB RAM example
|
||||
ALTER SYSTEM SET max_connections = 100;
|
||||
ALTER SYSTEM SET work_mem = '8MB'; -- 8MB * 100 = 800MB max
|
||||
SELECT pg_reload_conf();
|
||||
|
||||
-- Monitor connections
|
||||
SELECT count(*), state FROM pg_stat_activity GROUP BY state;
|
||||
```
|
||||
|
||||
### 2. Idle Timeouts
|
||||
|
||||
```sql
|
||||
ALTER SYSTEM SET idle_in_transaction_session_timeout = '30s';
|
||||
ALTER SYSTEM SET idle_session_timeout = '10min';
|
||||
SELECT pg_reload_conf();
|
||||
```
|
||||
|
||||
### 3. Use Connection Pooling
|
||||
|
||||
- **Transaction mode**: Best for most apps (connection returned after each transaction)
|
||||
- **Session mode**: For prepared statements, temp tables
|
||||
- **Pool size**: `(CPU_cores * 2) + spindle_count`
|
||||
|
||||
---
|
||||
|
||||
## Concurrency & Locking
|
||||
|
||||
### 1. Keep Transactions Short
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Lock held during external API call
|
||||
BEGIN;
|
||||
SELECT * FROM orders WHERE id = 1 FOR UPDATE;
|
||||
-- HTTP call takes 5 seconds...
|
||||
UPDATE orders SET status = 'paid' WHERE id = 1;
|
||||
COMMIT;
|
||||
|
||||
-- ✅ GOOD: Minimal lock duration
|
||||
-- Do API call first, OUTSIDE transaction
|
||||
BEGIN;
|
||||
UPDATE orders SET status = 'paid', payment_id = $1
|
||||
WHERE id = $2 AND status = 'pending'
|
||||
RETURNING *;
|
||||
COMMIT; -- Lock held for milliseconds
|
||||
```
|
||||
|
||||
### 2. Prevent Deadlocks
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Inconsistent lock order causes deadlock
|
||||
-- Transaction A: locks row 1, then row 2
|
||||
-- Transaction B: locks row 2, then row 1
|
||||
-- DEADLOCK!
|
||||
|
||||
-- ✅ GOOD: Consistent lock order
|
||||
BEGIN;
|
||||
SELECT * FROM accounts WHERE id IN (1, 2) ORDER BY id FOR UPDATE;
|
||||
-- Now both rows locked, update in any order
|
||||
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
|
||||
UPDATE accounts SET balance = balance + 100 WHERE id = 2;
|
||||
COMMIT;
|
||||
```
|
||||
|
||||
### 3. Use SKIP LOCKED for Queues
|
||||
|
||||
**Impact:** 10x throughput for worker queues
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Workers wait for each other
|
||||
SELECT * FROM jobs WHERE status = 'pending' LIMIT 1 FOR UPDATE;
|
||||
|
||||
-- ✅ GOOD: Workers skip locked rows
|
||||
UPDATE jobs
|
||||
SET status = 'processing', worker_id = $1, started_at = now()
|
||||
WHERE id = (
|
||||
SELECT id FROM jobs
|
||||
WHERE status = 'pending'
|
||||
ORDER BY created_at
|
||||
LIMIT 1
|
||||
FOR UPDATE SKIP LOCKED
|
||||
)
|
||||
RETURNING *;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Data Access Patterns
|
||||
|
||||
### 1. Batch Inserts
|
||||
|
||||
**Impact:** 10-50x faster bulk inserts
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Individual inserts
|
||||
INSERT INTO events (user_id, action) VALUES (1, 'click');
|
||||
INSERT INTO events (user_id, action) VALUES (2, 'view');
|
||||
-- 1000 round trips
|
||||
|
||||
-- ✅ GOOD: Batch insert
|
||||
INSERT INTO events (user_id, action) VALUES
|
||||
(1, 'click'),
|
||||
(2, 'view'),
|
||||
(3, 'click');
|
||||
-- 1 round trip
|
||||
|
||||
-- ✅ BEST: COPY for large datasets
|
||||
COPY events (user_id, action) FROM '/path/to/data.csv' WITH (FORMAT csv);
|
||||
```
|
||||
|
||||
### 2. Eliminate N+1 Queries
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: N+1 pattern
|
||||
SELECT id FROM users WHERE active = true; -- Returns 100 IDs
|
||||
-- Then 100 queries:
|
||||
SELECT * FROM orders WHERE user_id = 1;
|
||||
SELECT * FROM orders WHERE user_id = 2;
|
||||
-- ... 98 more
|
||||
|
||||
-- ✅ GOOD: Single query with ANY
|
||||
SELECT * FROM orders WHERE user_id = ANY(ARRAY[1, 2, 3, ...]);
|
||||
|
||||
-- ✅ GOOD: JOIN
|
||||
SELECT u.id, u.name, o.*
|
||||
FROM users u
|
||||
LEFT JOIN orders o ON o.user_id = u.id
|
||||
WHERE u.active = true;
|
||||
```
|
||||
|
||||
### 3. Cursor-Based Pagination
|
||||
|
||||
**Impact:** Consistent O(1) performance regardless of page depth
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: OFFSET gets slower with depth
|
||||
SELECT * FROM products ORDER BY id LIMIT 20 OFFSET 199980;
|
||||
-- Scans 200,000 rows!
|
||||
|
||||
-- ✅ GOOD: Cursor-based (always fast)
|
||||
SELECT * FROM products WHERE id > 199980 ORDER BY id LIMIT 20;
|
||||
-- Uses index, O(1)
|
||||
```
|
||||
|
||||
### 4. UPSERT for Insert-or-Update
|
||||
|
||||
```sql
|
||||
-- ❌ BAD: Race condition
|
||||
SELECT * FROM settings WHERE user_id = 123 AND key = 'theme';
|
||||
-- Both threads find nothing, both insert, one fails
|
||||
|
||||
-- ✅ GOOD: Atomic UPSERT
|
||||
INSERT INTO settings (user_id, key, value)
|
||||
VALUES (123, 'theme', 'dark')
|
||||
ON CONFLICT (user_id, key)
|
||||
DO UPDATE SET value = EXCLUDED.value, updated_at = now()
|
||||
RETURNING *;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Monitoring & Diagnostics
|
||||
|
||||
### 1. Enable pg_stat_statements
|
||||
|
||||
```sql
|
||||
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
|
||||
|
||||
-- Find slowest queries
|
||||
SELECT calls, round(mean_exec_time::numeric, 2) as mean_ms, query
|
||||
FROM pg_stat_statements
|
||||
ORDER BY mean_exec_time DESC
|
||||
LIMIT 10;
|
||||
|
||||
-- Find most frequent queries
|
||||
SELECT calls, query
|
||||
FROM pg_stat_statements
|
||||
ORDER BY calls DESC
|
||||
LIMIT 10;
|
||||
```
|
||||
|
||||
### 2. EXPLAIN ANALYZE
|
||||
|
||||
```sql
|
||||
EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT)
|
||||
SELECT * FROM orders WHERE customer_id = 123;
|
||||
```
|
||||
|
||||
| Indicator | Problem | Solution |
|
||||
|-----------|---------|----------|
|
||||
| `Seq Scan` on large table | Missing index | Add index on filter columns |
|
||||
| `Rows Removed by Filter` high | Poor selectivity | Check WHERE clause |
|
||||
| `Buffers: read >> hit` | Data not cached | Increase `shared_buffers` |
|
||||
| `Sort Method: external merge` | `work_mem` too low | Increase `work_mem` |
|
||||
|
||||
### 3. Maintain Statistics
|
||||
|
||||
```sql
|
||||
-- Analyze specific table
|
||||
ANALYZE orders;
|
||||
|
||||
-- Check when last analyzed
|
||||
SELECT relname, last_analyze, last_autoanalyze
|
||||
FROM pg_stat_user_tables
|
||||
ORDER BY last_analyze NULLS FIRST;
|
||||
|
||||
-- Tune autovacuum for high-churn tables
|
||||
ALTER TABLE orders SET (
|
||||
autovacuum_vacuum_scale_factor = 0.05,
|
||||
autovacuum_analyze_scale_factor = 0.02
|
||||
);
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## JSONB Patterns
|
||||
|
||||
### 1. Index JSONB Columns
|
||||
|
||||
```sql
|
||||
-- GIN index for containment operators
|
||||
CREATE INDEX products_attrs_gin ON products USING gin (attributes);
|
||||
SELECT * FROM products WHERE attributes @> '{"color": "red"}';
|
||||
|
||||
-- Expression index for specific keys
|
||||
CREATE INDEX products_brand_idx ON products ((attributes->>'brand'));
|
||||
SELECT * FROM products WHERE attributes->>'brand' = 'Nike';
|
||||
|
||||
-- jsonb_path_ops: 2-3x smaller, only supports @>
|
||||
CREATE INDEX idx ON products USING gin (attributes jsonb_path_ops);
|
||||
```
|
||||
|
||||
### 2. Full-Text Search with tsvector
|
||||
|
||||
```sql
|
||||
-- Add generated tsvector column
|
||||
ALTER TABLE articles ADD COLUMN search_vector tsvector
|
||||
GENERATED ALWAYS AS (
|
||||
to_tsvector('english', coalesce(title,'') || ' ' || coalesce(content,''))
|
||||
) STORED;
|
||||
|
||||
CREATE INDEX articles_search_idx ON articles USING gin (search_vector);
|
||||
|
||||
-- Fast full-text search
|
||||
SELECT * FROM articles
|
||||
WHERE search_vector @@ to_tsquery('english', 'postgresql & performance');
|
||||
|
||||
-- With ranking
|
||||
SELECT *, ts_rank(search_vector, query) as rank
|
||||
FROM articles, to_tsquery('english', 'postgresql') query
|
||||
WHERE search_vector @@ query
|
||||
ORDER BY rank DESC;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Anti-Patterns to Flag
|
||||
|
||||
### ❌ Query Anti-Patterns
|
||||
- `SELECT *` in production code
|
||||
- Missing indexes on WHERE/JOIN columns
|
||||
- OFFSET pagination on large tables
|
||||
- N+1 query patterns
|
||||
- Unparameterized queries (SQL injection risk)
|
||||
|
||||
### ❌ Schema Anti-Patterns
|
||||
- `int` for IDs (use `bigint`)
|
||||
- `varchar(255)` without reason (use `text`)
|
||||
- `timestamp` without timezone (use `timestamptz`)
|
||||
- Random UUIDs as primary keys (use UUIDv7 or IDENTITY)
|
||||
- Mixed-case identifiers requiring quotes
|
||||
|
||||
### ❌ Security Anti-Patterns
|
||||
- `GRANT ALL` to application users
|
||||
- Missing RLS on multi-tenant tables
|
||||
- RLS policies calling functions per-row (not wrapped in SELECT)
|
||||
- Unindexed RLS policy columns
|
||||
|
||||
### ❌ Connection Anti-Patterns
|
||||
- No connection pooling
|
||||
- No idle timeouts
|
||||
- Prepared statements with transaction-mode pooling
|
||||
- Holding locks during external API calls
|
||||
|
||||
---
|
||||
|
||||
## Review Checklist
|
||||
|
||||
### Before Approving Database Changes:
|
||||
- [ ] All WHERE/JOIN columns indexed
|
||||
- [ ] Composite indexes in correct column order
|
||||
- [ ] Proper data types (bigint, text, timestamptz, numeric)
|
||||
- [ ] RLS enabled on multi-tenant tables
|
||||
- [ ] RLS policies use `(SELECT auth.uid())` pattern
|
||||
- [ ] Foreign keys have indexes
|
||||
- [ ] No N+1 query patterns
|
||||
- [ ] EXPLAIN ANALYZE run on complex queries
|
||||
- [ ] Lowercase identifiers used
|
||||
- [ ] Transactions kept short
|
||||
|
||||
---
|
||||
|
||||
**Remember**: Database issues are often the root cause of application performance problems. Optimize queries and schema design early. Use EXPLAIN ANALYZE to verify assumptions. Always index foreign keys and RLS policy columns.
|
||||
|
||||
*Patterns adapted from [Supabase Agent Skills](https://github.com/supabase/agent-skills) under MIT license.*
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: doc-updater
|
||||
description: Documentation and codemap specialist. Use PROACTIVELY for updating codemaps and documentation. Runs /update-codemaps and /update-docs, generates docs/CODEMAPS/*, updates READMEs and guides.
|
||||
tools: Read, Write, Edit, Bash, Grep, Glob
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
@@ -27,8 +27,8 @@ You are a documentation specialist focused on keeping codemaps and documentation
|
||||
|
||||
### Analysis Commands
|
||||
```bash
|
||||
# Analyze TypeScript project structure
|
||||
npx ts-morph
|
||||
# Analyze TypeScript project structure (run custom script using ts-morph library)
|
||||
npx tsx scripts/codemaps/generate.ts
|
||||
|
||||
# Generate dependency graph
|
||||
npx madge --image graph.svg src/
|
||||
|
||||
@@ -1,26 +1,77 @@
|
||||
---
|
||||
name: e2e-runner
|
||||
description: End-to-end testing specialist using Playwright. Use PROACTIVELY for generating, maintaining, and running E2E tests. Manages test journeys, quarantines flaky tests, uploads artifacts (screenshots, videos, traces), and ensures critical user flows work.
|
||||
tools: Read, Write, Edit, Bash, Grep, Glob
|
||||
description: End-to-end testing specialist using Vercel Agent Browser (preferred) with Playwright fallback. Use PROACTIVELY for generating, maintaining, and running E2E tests. Manages test journeys, quarantines flaky tests, uploads artifacts (screenshots, videos, traces), and ensures critical user flows work.
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
# E2E Test Runner
|
||||
|
||||
You are an expert end-to-end testing specialist focused on Playwright test automation. Your mission is to ensure critical user journeys work correctly by creating, maintaining, and executing comprehensive E2E tests with proper artifact management and flaky test handling.
|
||||
You are an expert end-to-end testing specialist. Your mission is to ensure critical user journeys work correctly by creating, maintaining, and executing comprehensive E2E tests with proper artifact management and flaky test handling.
|
||||
|
||||
## Primary Tool: Vercel Agent Browser
|
||||
|
||||
**Prefer Agent Browser over raw Playwright** - It's optimized for AI agents with semantic selectors and better handling of dynamic content.
|
||||
|
||||
### Why Agent Browser?
|
||||
- **Semantic selectors** - Find elements by meaning, not brittle CSS/XPath
|
||||
- **AI-optimized** - Designed for LLM-driven browser automation
|
||||
- **Auto-waiting** - Intelligent waits for dynamic content
|
||||
- **Built on Playwright** - Full Playwright compatibility as fallback
|
||||
|
||||
### Agent Browser Setup
|
||||
```bash
|
||||
# Install agent-browser
|
||||
npm install @anthropic-ai/agent-browser
|
||||
# or
|
||||
pnpm add @anthropic-ai/agent-browser
|
||||
```
|
||||
|
||||
### Agent Browser Usage
|
||||
```typescript
|
||||
import { AgentBrowser } from '@anthropic-ai/agent-browser'
|
||||
|
||||
const browser = new AgentBrowser()
|
||||
|
||||
// Semantic navigation - describe what you want
|
||||
await browser.navigate('https://example.com')
|
||||
await browser.click('the login button')
|
||||
await browser.fill('email input', 'user@example.com')
|
||||
await browser.fill('password field', 'securepassword')
|
||||
await browser.click('submit button')
|
||||
|
||||
// Wait for semantic conditions
|
||||
await browser.waitFor('dashboard to load')
|
||||
await browser.waitFor('user avatar to appear')
|
||||
|
||||
// Take screenshots
|
||||
await browser.screenshot('after-login.png')
|
||||
|
||||
// Extract data semantically
|
||||
const username = await browser.getText('the username in the header')
|
||||
```
|
||||
|
||||
### Agent Browser with Claude Code
|
||||
If you have the `agent-browser` skill installed, use `/agent-browser` for interactive browser automation tasks.
|
||||
|
||||
---
|
||||
|
||||
## Fallback Tool: Playwright
|
||||
|
||||
When Agent Browser isn't available or for complex test suites, fall back to Playwright.
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
1. **Test Journey Creation** - Write Playwright tests for user flows
|
||||
1. **Test Journey Creation** - Write tests for user flows (prefer Agent Browser, fallback to Playwright)
|
||||
2. **Test Maintenance** - Keep tests up to date with UI changes
|
||||
3. **Flaky Test Management** - Identify and quarantine unstable tests
|
||||
4. **Artifact Management** - Capture screenshots, videos, traces
|
||||
5. **CI/CD Integration** - Ensure tests run reliably in pipelines
|
||||
6. **Test Reporting** - Generate HTML reports and JUnit XML
|
||||
|
||||
## Tools at Your Disposal
|
||||
## Playwright Testing Framework (Fallback)
|
||||
|
||||
### Playwright Testing Framework
|
||||
### Tools
|
||||
- **@playwright/test** - Core testing framework
|
||||
- **Playwright Inspector** - Debug tests interactively
|
||||
- **Playwright Trace Viewer** - Analyze test execution
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: planner
|
||||
description: Expert planning specialist for complex features and refactoring. Use PROACTIVELY when users request feature implementation, architectural changes, or complex refactoring. Automatically activated for planning tasks.
|
||||
tools: Read, Grep, Glob
|
||||
tools: ["Read", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: refactor-cleaner
|
||||
description: Dead code cleanup and consolidation specialist. Use PROACTIVELY for removing unused code, duplicates, and refactoring. Runs analysis tools (knip, depcheck, ts-prune) to identify dead code and safely removes it.
|
||||
tools: Read, Write, Edit, Bash, Grep, Glob
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: security-reviewer
|
||||
description: Security vulnerability detection and remediation specialist. Use PROACTIVELY after writing code that handles user input, authentication, API endpoints, or sensitive data. Flags secrets, SSRF, injection, unsafe crypto, and OWASP Top 10 vulnerabilities.
|
||||
tools: Read, Write, Edit, Bash, Grep, Glob
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
---
|
||||
name: tdd-guide
|
||||
description: Test-Driven Development specialist enforcing write-tests-first methodology. Use PROACTIVELY when writing new features, fixing bugs, or refactoring code. Ensures 80%+ test coverage.
|
||||
tools: Read, Write, Edit, Bash, Grep
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
|
||||
80
commands/setup-pm.md
Normal file
80
commands/setup-pm.md
Normal file
@@ -0,0 +1,80 @@
|
||||
---
|
||||
description: Configure your preferred package manager (npm/pnpm/yarn/bun)
|
||||
disable-model-invocation: true
|
||||
---
|
||||
|
||||
# Package Manager Setup
|
||||
|
||||
Configure your preferred package manager for this project or globally.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Detect current package manager
|
||||
node scripts/setup-package-manager.js --detect
|
||||
|
||||
# Set global preference
|
||||
node scripts/setup-package-manager.js --global pnpm
|
||||
|
||||
# Set project preference
|
||||
node scripts/setup-package-manager.js --project bun
|
||||
|
||||
# List available package managers
|
||||
node scripts/setup-package-manager.js --list
|
||||
```
|
||||
|
||||
## Detection Priority
|
||||
|
||||
When determining which package manager to use, the following order is checked:
|
||||
|
||||
1. **Environment variable**: `CLAUDE_PACKAGE_MANAGER`
|
||||
2. **Project config**: `.claude/package-manager.json`
|
||||
3. **package.json**: `packageManager` field
|
||||
4. **Lock file**: Presence of package-lock.json, yarn.lock, pnpm-lock.yaml, or bun.lockb
|
||||
5. **Global config**: `~/.claude/package-manager.json`
|
||||
6. **Fallback**: First available package manager (pnpm > bun > yarn > npm)
|
||||
|
||||
## Configuration Files
|
||||
|
||||
### Global Configuration
|
||||
```json
|
||||
// ~/.claude/package-manager.json
|
||||
{
|
||||
"packageManager": "pnpm"
|
||||
}
|
||||
```
|
||||
|
||||
### Project Configuration
|
||||
```json
|
||||
// .claude/package-manager.json
|
||||
{
|
||||
"packageManager": "bun"
|
||||
}
|
||||
```
|
||||
|
||||
### package.json
|
||||
```json
|
||||
{
|
||||
"packageManager": "pnpm@8.6.0"
|
||||
}
|
||||
```
|
||||
|
||||
## Environment Variable
|
||||
|
||||
Set `CLAUDE_PACKAGE_MANAGER` to override all other detection methods:
|
||||
|
||||
```bash
|
||||
# Windows (PowerShell)
|
||||
$env:CLAUDE_PACKAGE_MANAGER = "pnpm"
|
||||
|
||||
# macOS/Linux
|
||||
export CLAUDE_PACKAGE_MANAGER=pnpm
|
||||
```
|
||||
|
||||
## Run the Detection
|
||||
|
||||
To see current package manager detection results, run:
|
||||
|
||||
```bash
|
||||
node scripts/setup-package-manager.js --detect
|
||||
```
|
||||
@@ -35,6 +35,7 @@ Detailed guidelines are in `~/.claude/rules/`:
|
||||
| agents.md | Agent orchestration, when to use which agent |
|
||||
| patterns.md | API response, repository patterns |
|
||||
| performance.md | Model selection, context management |
|
||||
| hooks.md | Hooks System |
|
||||
|
||||
---
|
||||
|
||||
@@ -58,6 +59,10 @@ Located in `~/.claude/agents/`:
|
||||
|
||||
## Personal Preferences
|
||||
|
||||
### Privacy
|
||||
- Always redact logs; never paste secrets (API keys/tokens/passwords/JWTs)
|
||||
- Review output before sharing - remove any sensitive data
|
||||
|
||||
### Code Style
|
||||
- No emojis in code, comments, or documentation
|
||||
- Prefer immutability - never mutate objects or arrays
|
||||
|
||||
@@ -7,17 +7,17 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\ninput=$(cat)\ncmd=$(echo \"$input\" | jq -r '.tool_input.command // \"\"')\n\n# Block dev servers that aren't run in tmux\necho '[Hook] BLOCKED: Dev server must run in tmux for log access' >&2\necho '[Hook] Use this command instead:' >&2\necho \"[Hook] tmux new-session -d -s dev 'npm run dev'\" >&2\necho '[Hook] Then: tmux attach -t dev' >&2\nexit 1"
|
||||
"command": "node -e \"console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');console.error('[Hook] Use: tmux new-session -d -s dev \\\"npm run dev\\\"');console.error('[Hook] Then: tmux attach -t dev');process.exit(1)\""
|
||||
}
|
||||
],
|
||||
"description": "Block dev servers outside tmux - ensures you can access logs"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"(npm (install|test)|pnpm (install|test)|yarn (install|test)|bun (install|test)|cargo build|make|docker|pytest|vitest|playwright)\"",
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"(npm (install|test)|pnpm (install|test)|yarn (install|test)?|bun (install|test)|cargo build|make|docker|pytest|vitest|playwright)\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\ninput=$(cat)\nif [ -z \"$TMUX\" ]; then\n echo '[Hook] Consider running in tmux for session persistence' >&2\n echo '[Hook] tmux new -s dev | tmux attach -t dev' >&2\nfi\necho \"$input\""
|
||||
"command": "node -e \"if(!process.env.TMUX){console.error('[Hook] Consider running in tmux for session persistence');console.error('[Hook] tmux new -s dev | tmux attach -t dev')}\""
|
||||
}
|
||||
],
|
||||
"description": "Reminder to use tmux for long-running commands"
|
||||
@@ -27,17 +27,17 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Open editor for review before pushing\necho '[Hook] Review changes before push...' >&2\n# Uncomment your preferred editor:\n# zed . 2>/dev/null\n# code . 2>/dev/null\n# cursor . 2>/dev/null\necho '[Hook] Press Enter to continue with push or Ctrl+C to abort...' >&2\nread -r"
|
||||
"command": "node -e \"console.error('[Hook] Review changes before push...');console.error('[Hook] Continuing with push (remove this hook to add interactive review)')\""
|
||||
}
|
||||
],
|
||||
"description": "Pause before git push to review changes"
|
||||
"description": "Reminder before git push to review changes"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Write\" && tool_input.file_path matches \"\\\\.(md|txt)$\" && !(tool_input.file_path matches \"README\\\\.md|CLAUDE\\\\.md|AGENTS\\\\.md|CONTRIBUTING\\\\.md\")",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Block creation of unnecessary documentation files\ninput=$(cat)\nfile_path=$(echo \"$input\" | jq -r '.tool_input.file_path // \"\"')\n\nif [[ \"$file_path\" =~ \\.(md|txt)$ ]] && [[ ! \"$file_path\" =~ (README|CLAUDE|AGENTS|CONTRIBUTING)\\.md$ ]]; then\n echo \"[Hook] BLOCKED: Unnecessary documentation file creation\" >&2\n echo \"[Hook] File: $file_path\" >&2\n echo \"[Hook] Use README.md for documentation instead\" >&2\n exit 1\nfi\n\necho \"$input\""
|
||||
"command": "node -e \"const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{const i=JSON.parse(d);const p=i.tool_input?.file_path||'';if(/\\.(md|txt)$/.test(p)&&!/(README|CLAUDE|AGENTS|CONTRIBUTING)\\.md$/.test(p)){console.error('[Hook] BLOCKED: Unnecessary documentation file creation');console.error('[Hook] File: '+p);console.error('[Hook] Use README.md for documentation instead');process.exit(1)}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Block creation of random .md files - keeps docs consolidated"
|
||||
@@ -47,7 +47,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "./hooks/strategic-compact/suggest-compact.sh"
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/suggest-compact.js\""
|
||||
}
|
||||
],
|
||||
"description": "Suggest manual compaction at logical intervals"
|
||||
@@ -59,7 +59,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "./hooks/memory-persistence/pre-compact.sh"
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/pre-compact.js\""
|
||||
}
|
||||
],
|
||||
"description": "Save state before context compaction"
|
||||
@@ -71,10 +71,10 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "./hooks/memory-persistence/session-start.sh"
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/session-start.js\""
|
||||
}
|
||||
],
|
||||
"description": "Load previous context on new session"
|
||||
"description": "Load previous context and detect package manager on new session"
|
||||
}
|
||||
],
|
||||
"PostToolUse": [
|
||||
@@ -83,17 +83,29 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Auto-detect PR creation and log useful info\ninput=$(cat)\ncmd=$(echo \"$input\" | jq -r '.tool_input.command')\n\nif echo \"$cmd\" | grep -qE 'gh pr create'; then\n output=$(echo \"$input\" | jq -r '.tool_output.output // \"\"')\n pr_url=$(echo \"$output\" | grep -oE 'https://github.com/[^/]+/[^/]+/pull/[0-9]+')\n \n if [ -n \"$pr_url\" ]; then\n echo \"[Hook] PR created: $pr_url\" >&2\n echo \"[Hook] Checking GitHub Actions status...\" >&2\n repo=$(echo \"$pr_url\" | sed -E 's|https://github.com/([^/]+/[^/]+)/pull/[0-9]+|\\1|')\n pr_num=$(echo \"$pr_url\" | sed -E 's|.*/pull/([0-9]+)|\\1|')\n echo \"[Hook] To review PR: gh pr review $pr_num --repo $repo\" >&2\n fi\nfi\n\necho \"$input\""
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(/gh pr create/.test(cmd)){const out=i.tool_output?.output||'';const m=out.match(/https:\\/\\/github.com\\/[^/]+\\/[^/]+\\/pull\\/\\d+/);if(m){console.error('[Hook] PR created: '+m[0]);const repo=m[0].replace(/https:\\/\\/github.com\\/([^/]+\\/[^/]+)\\/pull\\/\\d+/,'$1');const pr=m[0].replace(/.*\\/pull\\/(\\d+)/,'$1');console.error('[Hook] To review: gh pr review '+pr+' --repo '+repo)}}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Log PR URL and provide review command after PR creation"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Bash\" && tool_input.command matches \"(npm run build|pnpm build|yarn build)\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{console.error('[Hook] Build completed - async analysis running in background');console.log(d)})\"",
|
||||
"async": true,
|
||||
"timeout": 30
|
||||
}
|
||||
],
|
||||
"description": "Example: async hook for build analysis (runs in background without blocking)"
|
||||
},
|
||||
{
|
||||
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\\\.(ts|tsx|js|jsx)$\"",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Auto-format with Prettier after editing JS/TS files\ninput=$(cat)\nfile_path=$(echo \"$input\" | jq -r '.tool_input.file_path // \"\"')\n\nif [ -n \"$file_path\" ] && [ -f \"$file_path\" ]; then\n if command -v prettier >/dev/null 2>&1; then\n prettier --write \"$file_path\" 2>&1 | head -5 >&2\n fi\nfi\n\necho \"$input\""
|
||||
"command": "node -e \"const{execSync}=require('child_process');const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{const i=JSON.parse(d);const p=i.tool_input?.file_path;if(p&&fs.existsSync(p)){try{execSync('npx prettier --write \"'+p+'\"',{stdio:['pipe','pipe','pipe']})}catch(e){}}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Auto-format JS/TS files with Prettier after edits"
|
||||
@@ -103,7 +115,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Run TypeScript check after editing TS files\ninput=$(cat)\nfile_path=$(echo \"$input\" | jq -r '.tool_input.file_path // \"\"')\n\nif [ -n \"$file_path\" ] && [ -f \"$file_path\" ]; then\n dir=$(dirname \"$file_path\")\n project_root=\"$dir\"\n while [ \"$project_root\" != \"/\" ] && [ ! -f \"$project_root/package.json\" ]; do\n project_root=$(dirname \"$project_root\")\n done\n \n if [ -f \"$project_root/tsconfig.json\" ]; then\n cd \"$project_root\" && npx tsc --noEmit --pretty false 2>&1 | grep \"$file_path\" | head -10 >&2 || true\n fi\nfi\n\necho \"$input\""
|
||||
"command": "node -e \"const{execSync}=require('child_process');const fs=require('fs');const path=require('path');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{const i=JSON.parse(d);const p=i.tool_input?.file_path;if(p&&fs.existsSync(p)){let dir=path.dirname(p);while(dir!==path.dirname(dir)&&!fs.existsSync(path.join(dir,'tsconfig.json'))){dir=path.dirname(dir)}if(fs.existsSync(path.join(dir,'tsconfig.json'))){try{const r=execSync('npx tsc --noEmit --pretty false 2>&1',{cwd:dir,encoding:'utf8',stdio:['pipe','pipe','pipe']});const lines=r.split('\\n').filter(l=>l.includes(p)).slice(0,10);if(lines.length)console.error(lines.join('\\n'))}catch(e){const lines=(e.stdout||'').split('\\n').filter(l=>l.includes(p)).slice(0,10);if(lines.length)console.error(lines.join('\\n'))}}}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "TypeScript check after editing .ts/.tsx files"
|
||||
@@ -113,7 +125,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Warn about console.log in edited files\ninput=$(cat)\nfile_path=$(echo \"$input\" | jq -r '.tool_input.file_path // \"\"')\n\nif [ -n \"$file_path\" ] && [ -f \"$file_path\" ]; then\n console_logs=$(grep -n \"console\\\\.log\" \"$file_path\" 2>/dev/null || true)\n \n if [ -n \"$console_logs\" ]; then\n echo \"[Hook] WARNING: console.log found in $file_path\" >&2\n echo \"$console_logs\" | head -5 >&2\n echo \"[Hook] Remove console.log before committing\" >&2\n fi\nfi\n\necho \"$input\""
|
||||
"command": "node -e \"const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{const i=JSON.parse(d);const p=i.tool_input?.file_path;if(p&&fs.existsSync(p)){const c=fs.readFileSync(p,'utf8');const lines=c.split('\\n');const matches=[];lines.forEach((l,idx)=>{if(/console\\.log/.test(l))matches.push((idx+1)+': '+l.trim())});if(matches.length){console.error('[Hook] WARNING: console.log found in '+p);matches.slice(0,5).forEach(m=>console.error(m));console.error('[Hook] Remove console.log before committing')}}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Warn about console.log statements after edits"
|
||||
@@ -125,17 +137,19 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "#!/bin/bash\n# Final check for console.logs in modified files\ninput=$(cat)\n\nif git rev-parse --git-dir > /dev/null 2>&1; then\n modified_files=$(git diff --name-only HEAD 2>/dev/null | grep -E '\\.(ts|tsx|js|jsx)$' || true)\n \n if [ -n \"$modified_files\" ]; then\n has_console=false\n while IFS= read -r file; do\n if [ -f \"$file\" ]; then\n if grep -q \"console\\.log\" \"$file\" 2>/dev/null; then\n echo \"[Hook] WARNING: console.log found in $file\" >&2\n has_console=true\n fi\n fi\n done <<< \"$modified_files\"\n \n if [ \"$has_console\" = true ]; then\n echo \"[Hook] Remove console.log statements before committing\" >&2\n fi\n fi\nfi\n\necho \"$input\""
|
||||
"command": "node -e \"const{execSync}=require('child_process');const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{execSync('git rev-parse --git-dir',{stdio:'pipe'})}catch{console.log(d);process.exit(0)}try{const files=execSync('git diff --name-only HEAD',{encoding:'utf8',stdio:['pipe','pipe','pipe']}).split('\\n').filter(f=>/\\.(ts|tsx|js|jsx)$/.test(f)&&fs.existsSync(f));let hasConsole=false;for(const f of files){if(fs.readFileSync(f,'utf8').includes('console.log')){console.error('[Hook] WARNING: console.log found in '+f);hasConsole=true}}if(hasConsole)console.error('[Hook] Remove console.log statements before committing')}catch(e){}console.log(d)})\""
|
||||
}
|
||||
],
|
||||
"description": "Final audit for console.log in modified files before session ends"
|
||||
},
|
||||
"description": "Check for console.log in modified files after each response"
|
||||
}
|
||||
],
|
||||
"SessionEnd": [
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "./hooks/memory-persistence/session-end.sh"
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/session-end.js\""
|
||||
}
|
||||
],
|
||||
"description": "Persist session state on end"
|
||||
@@ -145,7 +159,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "./skills/continuous-learning/evaluate-session.sh"
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/evaluate-session.js\""
|
||||
}
|
||||
],
|
||||
"description": "Evaluate session for extractable patterns"
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
# PreCompact Hook - Save state before context compaction
|
||||
#
|
||||
# Runs before Claude compacts context, giving you a chance to
|
||||
# preserve important state that might get lost in summarization.
|
||||
#
|
||||
# Hook config (in ~/.claude/settings.json):
|
||||
# {
|
||||
# "hooks": {
|
||||
# "PreCompact": [{
|
||||
# "matcher": "*",
|
||||
# "hooks": [{
|
||||
# "type": "command",
|
||||
# "command": "~/.claude/hooks/memory-persistence/pre-compact.sh"
|
||||
# }]
|
||||
# }]
|
||||
# }
|
||||
# }
|
||||
|
||||
SESSIONS_DIR="${HOME}/.claude/sessions"
|
||||
COMPACTION_LOG="${SESSIONS_DIR}/compaction-log.txt"
|
||||
|
||||
mkdir -p "$SESSIONS_DIR"
|
||||
|
||||
# Log compaction event with timestamp
|
||||
echo "[$(date '+%Y-%m-%d %H:%M:%S')] Context compaction triggered" >> "$COMPACTION_LOG"
|
||||
|
||||
# If there's an active session file, note the compaction
|
||||
ACTIVE_SESSION=$(ls -t "$SESSIONS_DIR"/*.tmp 2>/dev/null | head -1)
|
||||
if [ -n "$ACTIVE_SESSION" ]; then
|
||||
echo "" >> "$ACTIVE_SESSION"
|
||||
echo "---" >> "$ACTIVE_SESSION"
|
||||
echo "**[Compaction occurred at $(date '+%H:%M')]** - Context was summarized" >> "$ACTIVE_SESSION"
|
||||
fi
|
||||
|
||||
echo "[PreCompact] State saved before compaction" >&2
|
||||
@@ -1,61 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Stop Hook (Session End) - Persist learnings when session ends
|
||||
#
|
||||
# Runs when Claude session ends. Creates/updates session log file
|
||||
# with timestamp for continuity tracking.
|
||||
#
|
||||
# Hook config (in ~/.claude/settings.json):
|
||||
# {
|
||||
# "hooks": {
|
||||
# "Stop": [{
|
||||
# "matcher": "*",
|
||||
# "hooks": [{
|
||||
# "type": "command",
|
||||
# "command": "~/.claude/hooks/memory-persistence/session-end.sh"
|
||||
# }]
|
||||
# }]
|
||||
# }
|
||||
# }
|
||||
|
||||
SESSIONS_DIR="${HOME}/.claude/sessions"
|
||||
TODAY=$(date '+%Y-%m-%d')
|
||||
SESSION_FILE="${SESSIONS_DIR}/${TODAY}-session.tmp"
|
||||
|
||||
mkdir -p "$SESSIONS_DIR"
|
||||
|
||||
# If session file exists for today, update the end time
|
||||
if [ -f "$SESSION_FILE" ]; then
|
||||
# Update Last Updated timestamp
|
||||
sed -i '' "s/\*\*Last Updated:\*\*.*/\*\*Last Updated:\*\* $(date '+%H:%M')/" "$SESSION_FILE" 2>/dev/null || \
|
||||
sed -i "s/\*\*Last Updated:\*\*.*/\*\*Last Updated:\*\* $(date '+%H:%M')/" "$SESSION_FILE" 2>/dev/null
|
||||
echo "[SessionEnd] Updated session file: $SESSION_FILE" >&2
|
||||
else
|
||||
# Create new session file with template
|
||||
cat > "$SESSION_FILE" << EOF
|
||||
# Session: $(date '+%Y-%m-%d')
|
||||
**Date:** $TODAY
|
||||
**Started:** $(date '+%H:%M')
|
||||
**Last Updated:** $(date '+%H:%M')
|
||||
|
||||
---
|
||||
|
||||
## Current State
|
||||
|
||||
[Session context goes here]
|
||||
|
||||
### Completed
|
||||
- [ ]
|
||||
|
||||
### In Progress
|
||||
- [ ]
|
||||
|
||||
### Notes for Next Session
|
||||
-
|
||||
|
||||
### Context to Load
|
||||
\`\`\`
|
||||
[relevant files]
|
||||
\`\`\`
|
||||
EOF
|
||||
echo "[SessionEnd] Created session file: $SESSION_FILE" >&2
|
||||
fi
|
||||
@@ -1,37 +0,0 @@
|
||||
#!/bin/bash
|
||||
# SessionStart Hook - Load previous context on new session
|
||||
#
|
||||
# Runs when a new Claude session starts. Checks for recent session
|
||||
# files and notifies Claude of available context to load.
|
||||
#
|
||||
# Hook config (in ~/.claude/settings.json):
|
||||
# {
|
||||
# "hooks": {
|
||||
# "SessionStart": [{
|
||||
# "matcher": "*",
|
||||
# "hooks": [{
|
||||
# "type": "command",
|
||||
# "command": "~/.claude/hooks/memory-persistence/session-start.sh"
|
||||
# }]
|
||||
# }]
|
||||
# }
|
||||
# }
|
||||
|
||||
SESSIONS_DIR="${HOME}/.claude/sessions"
|
||||
LEARNED_DIR="${HOME}/.claude/skills/learned"
|
||||
|
||||
# Check for recent session files (last 7 days)
|
||||
recent_sessions=$(find "$SESSIONS_DIR" -name "*.tmp" -mtime -7 2>/dev/null | wc -l | tr -d ' ')
|
||||
|
||||
if [ "$recent_sessions" -gt 0 ]; then
|
||||
latest=$(ls -t "$SESSIONS_DIR"/*.tmp 2>/dev/null | head -1)
|
||||
echo "[SessionStart] Found $recent_sessions recent session(s)" >&2
|
||||
echo "[SessionStart] Latest: $latest" >&2
|
||||
fi
|
||||
|
||||
# Check for learned skills
|
||||
learned_count=$(find "$LEARNED_DIR" -name "*.md" 2>/dev/null | wc -l | tr -d ' ')
|
||||
|
||||
if [ "$learned_count" -gt 0 ]; then
|
||||
echo "[SessionStart] $learned_count learned skill(s) available in $LEARNED_DIR" >&2
|
||||
fi
|
||||
@@ -1,52 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Strategic Compact Suggester
|
||||
# Runs on PreToolUse or periodically to suggest manual compaction at logical intervals
|
||||
#
|
||||
# Why manual over auto-compact:
|
||||
# - Auto-compact happens at arbitrary points, often mid-task
|
||||
# - Strategic compacting preserves context through logical phases
|
||||
# - Compact after exploration, before execution
|
||||
# - Compact after completing a milestone, before starting next
|
||||
#
|
||||
# Hook config (in ~/.claude/settings.json):
|
||||
# {
|
||||
# "hooks": {
|
||||
# "PreToolUse": [{
|
||||
# "matcher": "Edit|Write",
|
||||
# "hooks": [{
|
||||
# "type": "command",
|
||||
# "command": "~/.claude/skills/strategic-compact/suggest-compact.sh"
|
||||
# }]
|
||||
# }]
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# Criteria for suggesting compact:
|
||||
# - Session has been running for extended period
|
||||
# - Large number of tool calls made
|
||||
# - Transitioning from research/exploration to implementation
|
||||
# - Plan has been finalized
|
||||
|
||||
# Track tool call count (increment in a temp file)
|
||||
COUNTER_FILE="/tmp/claude-tool-count-$$"
|
||||
THRESHOLD=${COMPACT_THRESHOLD:-50}
|
||||
|
||||
# Initialize or increment counter
|
||||
if [ -f "$COUNTER_FILE" ]; then
|
||||
count=$(cat "$COUNTER_FILE")
|
||||
count=$((count + 1))
|
||||
echo "$count" > "$COUNTER_FILE"
|
||||
else
|
||||
echo "1" > "$COUNTER_FILE"
|
||||
count=1
|
||||
fi
|
||||
|
||||
# Suggest compact after threshold tool calls
|
||||
if [ "$count" -eq "$THRESHOLD" ]; then
|
||||
echo "[StrategicCompact] $THRESHOLD tool calls reached - consider /compact if transitioning phases" >&2
|
||||
fi
|
||||
|
||||
# Suggest at regular intervals after threshold
|
||||
if [ "$count" -gt "$THRESHOLD" ] && [ $((count % 25)) -eq 0 ]; then
|
||||
echo "[StrategicCompact] $count tool calls - good checkpoint for /compact if context is stale" >&2
|
||||
fi
|
||||
78
scripts/hooks/evaluate-session.js
Normal file
78
scripts/hooks/evaluate-session.js
Normal file
@@ -0,0 +1,78 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Continuous Learning - Session Evaluator
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Runs on Stop hook to extract reusable patterns from Claude Code sessions
|
||||
*
|
||||
* Why Stop hook instead of UserPromptSubmit:
|
||||
* - Stop runs once at session end (lightweight)
|
||||
* - UserPromptSubmit runs every message (heavy, adds latency)
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const {
|
||||
getLearnedSkillsDir,
|
||||
ensureDir,
|
||||
readFile,
|
||||
countInFile,
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
|
||||
async function main() {
  // Locate the continuous-learning config relative to this script's location.
  const configFile = path.join(
    __dirname, '..', '..', 'skills', 'continuous-learning', 'config.json'
  );

  // Defaults used when no config file exists or it cannot be parsed.
  let minSessionLength = 10;
  let learnedSkillsPath = getLearnedSkillsDir();

  const rawConfig = readFile(configFile);
  if (rawConfig) {
    try {
      const parsed = JSON.parse(rawConfig);
      minSessionLength = parsed.min_session_length || 10;
      if (parsed.learned_skills_path) {
        // Expand a leading "~" to the user's home directory.
        learnedSkillsPath = parsed.learned_skills_path.replace(/^~/, require('os').homedir());
      }
    } catch {
      // Malformed JSON: silently fall back to the defaults above.
    }
  }

  ensureDir(learnedSkillsPath);

  // Claude Code exports the transcript location; without it there is nothing to scan.
  const transcriptPath = process.env.CLAUDE_TRANSCRIPT_PATH;
  if (!transcriptPath || !fs.existsSync(transcriptPath)) {
    process.exit(0);
  }

  // Session size is measured by the number of user turns in the transcript.
  const messageCount = countInFile(transcriptPath, /"type":"user"/g);
  if (messageCount < minSessionLength) {
    log(`[ContinuousLearning] Session too short (${messageCount} messages), skipping`);
    process.exit(0);
  }

  // Signal to Claude that this session is worth mining for reusable patterns.
  log(`[ContinuousLearning] Session has ${messageCount} messages - evaluate for extractable patterns`);
  log(`[ContinuousLearning] Save learned skills to: ${learnedSkillsPath}`);

  process.exit(0);
}

main().catch(err => {
  console.error('[ContinuousLearning] Error:', err.message);
  process.exit(0);
});
|
||||
48
scripts/hooks/pre-compact.js
Normal file
48
scripts/hooks/pre-compact.js
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* PreCompact Hook - Save state before context compaction
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Runs before Claude compacts context, giving you a chance to
|
||||
* preserve important state that might get lost in summarization.
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const {
|
||||
getSessionsDir,
|
||||
getDateTimeString,
|
||||
getTimeString,
|
||||
findFiles,
|
||||
ensureDir,
|
||||
appendFile,
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
|
||||
async function main() {
  // All compaction records live under the shared sessions directory.
  const sessionsDir = getSessionsDir();
  ensureDir(sessionsDir);

  // Append a timestamped entry to the rolling compaction log.
  const compactionLog = path.join(sessionsDir, 'compaction-log.txt');
  appendFile(compactionLog, `[${getDateTimeString()}] Context compaction triggered\n`);

  // Annotate the most recent active session file (if any) so later readers
  // know the surrounding context was summarized at this point.
  const sessions = findFiles(sessionsDir, '*.tmp');
  if (sessions.length > 0) {
    const timeStr = getTimeString();
    appendFile(sessions[0].path, `\n---\n**[Compaction occurred at ${timeStr}]** - Context was summarized\n`);
  }

  log('[PreCompact] State saved before compaction');
  process.exit(0);
}

main().catch(err => {
  console.error('[PreCompact] Error:', err.message);
  process.exit(0);
});
|
||||
85
scripts/hooks/session-end.js
Normal file
85
scripts/hooks/session-end.js
Normal file
@@ -0,0 +1,85 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Stop Hook (Session End) - Persist learnings when session ends
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Runs when Claude session ends. Creates/updates session log file
|
||||
* with timestamp for continuity tracking.
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const {
|
||||
getSessionsDir,
|
||||
getDateString,
|
||||
getTimeString,
|
||||
getSessionIdShort,
|
||||
ensureDir,
|
||||
readFile,
|
||||
writeFile,
|
||||
replaceInFile,
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
|
||||
async function main() {
  const sessionsDir = getSessionsDir();
  const today = getDateString();
  const shortId = getSessionIdShort();
  // Include session ID in filename for unique per-session tracking
  const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`);

  ensureDir(sessionsDir);

  const currentTime = getTimeString();

  // If session file exists for today, update the end time
  // (regex matches the rest of the line, so the old timestamp is replaced).
  if (fs.existsSync(sessionFile)) {
    const success = replaceInFile(
      sessionFile,
      /\*\*Last Updated:\*\*.*/,
      `**Last Updated:** ${currentTime}`
    );

    if (success) {
      log(`[SessionEnd] Updated session file: ${sessionFile}`);
    }
  } else {
    // Create new session file with template — a markdown skeleton that the
    // user (or Claude) fills in over the course of the session.
    const template = `# Session: ${today}
**Date:** ${today}
**Started:** ${currentTime}
**Last Updated:** ${currentTime}

---

## Current State

[Session context goes here]

### Completed
- [ ]

### In Progress
- [ ]

### Notes for Next Session
-

### Context to Load
\`\`\`
[relevant files]
\`\`\`
`;

    writeFile(sessionFile, template);
    log(`[SessionEnd] Created session file: ${sessionFile}`);
  }

  // Always exit 0 so this hook never blocks Claude Code.
  process.exit(0);
}

main().catch(err => {
  console.error('[SessionEnd] Error:', err.message);
  process.exit(0);
});
|
||||
62
scripts/hooks/session-start.js
Normal file
62
scripts/hooks/session-start.js
Normal file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* SessionStart Hook - Load previous context on new session
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Runs when a new Claude session starts. Checks for recent session
|
||||
* files and notifies Claude of available context to load.
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const {
|
||||
getSessionsDir,
|
||||
getLearnedSkillsDir,
|
||||
findFiles,
|
||||
ensureDir,
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
const { getPackageManager, getSelectionPrompt } = require('../lib/package-manager');
|
||||
|
||||
async function main() {
  const sessionsDir = getSessionsDir();
  const learnedDir = getLearnedSkillsDir();

  // Both directories must exist before we scan them.
  for (const dir of [sessionsDir, learnedDir]) {
    ensureDir(dir);
  }

  // Surface session files from the past week so prior context can be loaded.
  // Matches both the old (YYYY-MM-DD-session.tmp) and new
  // (YYYY-MM-DD-shortid-session.tmp) naming schemes.
  const recentSessions = findFiles(sessionsDir, '*-session.tmp', { maxAge: 7 });
  if (recentSessions.length > 0) {
    log(`[SessionStart] Found ${recentSessions.length} recent session(s)`);
    log(`[SessionStart] Latest: ${recentSessions[0].path}`);
  }

  // Surface any previously learned skills.
  const learnedSkills = findFiles(learnedDir, '*.md');
  if (learnedSkills.length > 0) {
    log(`[SessionStart] ${learnedSkills.length} learned skill(s) available in ${learnedDir}`);
  }

  // Report which package manager will be used and how it was chosen.
  const pm = getPackageManager();
  log(`[SessionStart] Package manager: ${pm.name} (${pm.source})`);

  // Only prompt for a preference when none was found anywhere.
  if (pm.source === 'fallback' || pm.source === 'default') {
    log('[SessionStart] No package manager preference found.');
    log(getSelectionPrompt());
  }

  process.exit(0);
}

main().catch(err => {
  console.error('[SessionStart] Error:', err.message);
  process.exit(0); // Don't block on errors
});
|
||||
60
scripts/hooks/suggest-compact.js
Normal file
60
scripts/hooks/suggest-compact.js
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Strategic Compact Suggester
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Runs on PreToolUse or periodically to suggest manual compaction at logical intervals
|
||||
*
|
||||
* Why manual over auto-compact:
|
||||
* - Auto-compact happens at arbitrary points, often mid-task
|
||||
* - Strategic compacting preserves context through logical phases
|
||||
* - Compact after exploration, before execution
|
||||
* - Compact after completing a milestone, before starting next
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const {
|
||||
getTempDir,
|
||||
readFile,
|
||||
writeFile,
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
|
||||
async function main() {
  // Per-session counter file, keyed by session ID (or parent PID as a
  // fallback) so concurrent sessions do not share a counter.
  const sessionId = process.env.CLAUDE_SESSION_ID || process.ppid || 'default';
  const counterFile = path.join(getTempDir(), `claude-tool-count-${sessionId}`);
  const threshold = parseInt(process.env.COMPACT_THRESHOLD || '50', 10);

  let count = 1;

  // Read existing count or start at 1. Guard against a corrupted counter
  // file: parseInt on junk yields NaN, and NaN + 1 is NaN, which would
  // otherwise be written back forever and silence every future suggestion.
  const existing = readFile(counterFile);
  if (existing) {
    const previous = parseInt(existing.trim(), 10);
    if (Number.isFinite(previous)) {
      count = previous + 1;
    }
  }

  // Persist the updated count for the next invocation.
  writeFile(counterFile, String(count));

  // First suggestion exactly at the threshold...
  if (count === threshold) {
    log(`[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`);
  }

  // ...then a reminder every 25 calls afterwards.
  if (count > threshold && count % 25 === 0) {
    log(`[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`);
  }

  process.exit(0);
}

main().catch(err => {
  console.error('[StrategicCompact] Error:', err.message);
  process.exit(0);
});
|
||||
390
scripts/lib/package-manager.js
Normal file
390
scripts/lib/package-manager.js
Normal file
@@ -0,0 +1,390 @@
|
||||
/**
|
||||
* Package Manager Detection and Selection
|
||||
* Automatically detects the preferred package manager or lets user choose
|
||||
*
|
||||
* Supports: npm, pnpm, yarn, bun
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { commandExists, getClaudeDir, readFile, writeFile, log, runCommand } = require('./utils');
|
||||
|
||||
// Package manager definitions
|
||||
// Package manager definitions.
// Each entry captures the command-line surface of one supported tool so the
// rest of the codebase never hard-codes tool-specific commands:
//   lockFile   - lock file whose presence identifies the tool in a project
//   installCmd - install all dependencies
//   runCmd     - prefix for running package.json scripts
//   execCmd    - run a package binary without a global install
//   testCmd / buildCmd / devCmd - shortcuts for the common lifecycle scripts
const PACKAGE_MANAGERS = {
  npm: {
    name: 'npm',
    lockFile: 'package-lock.json',
    installCmd: 'npm install',
    runCmd: 'npm run',
    execCmd: 'npx',
    testCmd: 'npm test',
    buildCmd: 'npm run build',
    devCmd: 'npm run dev'
  },
  pnpm: {
    name: 'pnpm',
    lockFile: 'pnpm-lock.yaml',
    installCmd: 'pnpm install',
    runCmd: 'pnpm',
    execCmd: 'pnpm dlx',
    testCmd: 'pnpm test',
    buildCmd: 'pnpm build',
    devCmd: 'pnpm dev'
  },
  yarn: {
    name: 'yarn',
    lockFile: 'yarn.lock',
    installCmd: 'yarn',
    runCmd: 'yarn',
    execCmd: 'yarn dlx',
    testCmd: 'yarn test',
    buildCmd: 'yarn build',
    devCmd: 'yarn dev'
  },
  bun: {
    name: 'bun',
    lockFile: 'bun.lockb',
    installCmd: 'bun install',
    runCmd: 'bun run',
    execCmd: 'bunx',
    testCmd: 'bun test',
    buildCmd: 'bun run build',
    devCmd: 'bun run dev'
  }
};

// Priority order for detection.
// Used both when scanning for lock files and as the fallback preference
// order when no explicit choice exists.
const DETECTION_PRIORITY = ['pnpm', 'bun', 'yarn', 'npm'];
|
||||
|
||||
// Config file path
|
||||
// Absolute path of the global package-manager preference file.
function getConfigPath() {
  const fileName = 'package-manager.json';
  return path.join(getClaudeDir(), fileName);
}
|
||||
|
||||
/**
|
||||
* Load saved package manager configuration
|
||||
*/
|
||||
/**
 * Load the saved global package manager configuration.
 * @returns {object|null} Parsed config, or null if missing/unparseable.
 */
function loadConfig() {
  const content = readFile(getConfigPath());
  if (!content) {
    return null;
  }
  try {
    return JSON.parse(content);
  } catch {
    // Corrupt JSON is treated the same as no config at all.
    return null;
  }
}
|
||||
|
||||
/**
|
||||
* Save package manager configuration
|
||||
*/
|
||||
/**
 * Persist the global package manager configuration as pretty-printed JSON.
 * @param {object} config - Configuration object to write.
 */
function saveConfig(config) {
  const serialized = JSON.stringify(config, null, 2);
  writeFile(getConfigPath(), serialized);
}
|
||||
|
||||
/**
|
||||
* Detect package manager from lock file in project directory
|
||||
*/
|
||||
/**
 * Detect the package manager from a lock file in the project directory.
 * Checks lock files in DETECTION_PRIORITY order.
 * @returns {string|null} Package manager name, or null if no lock file found.
 */
function detectFromLockFile(projectDir = process.cwd()) {
  const match = DETECTION_PRIORITY.find(pmName =>
    fs.existsSync(path.join(projectDir, PACKAGE_MANAGERS[pmName].lockFile))
  );
  return match || null;
}
|
||||
|
||||
/**
|
||||
* Detect package manager from package.json packageManager field
|
||||
*/
|
||||
/**
 * Detect the package manager from package.json's "packageManager" field.
 * @returns {string|null} Recognized package manager name, or null.
 */
function detectFromPackageJson(projectDir = process.cwd()) {
  const content = readFile(path.join(projectDir, 'package.json'));
  if (!content) {
    return null;
  }
  try {
    const { packageManager } = JSON.parse(content);
    if (!packageManager) {
      return null;
    }
    // Field format is "pnpm@8.6.0" or just "pnpm"; keep the name part only.
    const pmName = packageManager.split('@')[0];
    return PACKAGE_MANAGERS[pmName] ? pmName : null;
  } catch {
    // Invalid package.json — treat as no declaration.
    return null;
  }
}
|
||||
|
||||
/**
|
||||
* Get available package managers (installed on system)
|
||||
*/
|
||||
/**
 * List the package managers actually installed on this system,
 * in PACKAGE_MANAGERS declaration order.
 * @returns {string[]} Names of available package managers.
 */
function getAvailablePackageManagers() {
  return Object.keys(PACKAGE_MANAGERS).filter(pmName => commandExists(pmName));
}
|
||||
|
||||
/**
|
||||
* Get the package manager to use for current project
|
||||
*
|
||||
* Detection priority:
|
||||
* 1. Environment variable CLAUDE_PACKAGE_MANAGER
|
||||
* 2. Project-specific config (in .claude/package-manager.json)
|
||||
* 3. package.json packageManager field
|
||||
* 4. Lock file detection
|
||||
* 5. Global user preference (in ~/.claude/package-manager.json)
|
||||
* 6. First available package manager (by priority)
|
||||
*
|
||||
* @param {object} options - { projectDir, fallbackOrder }
|
||||
* @returns {object} - { name, config, source }
|
||||
*/
|
||||
function getPackageManager(options = {}) {
  const { projectDir = process.cwd(), fallbackOrder = DETECTION_PRIORITY } = options;

  // 1. Check environment variable — explicit user override, highest priority.
  const envPm = process.env.CLAUDE_PACKAGE_MANAGER;
  if (envPm && PACKAGE_MANAGERS[envPm]) {
    return {
      name: envPm,
      config: PACKAGE_MANAGERS[envPm],
      source: 'environment'
    };
  }

  // 2. Check project-specific config (.claude/package-manager.json in the
  //    project); invalid JSON or an unknown name falls through silently.
  const projectConfigPath = path.join(projectDir, '.claude', 'package-manager.json');
  const projectConfig = readFile(projectConfigPath);
  if (projectConfig) {
    try {
      const config = JSON.parse(projectConfig);
      if (config.packageManager && PACKAGE_MANAGERS[config.packageManager]) {
        return {
          name: config.packageManager,
          config: PACKAGE_MANAGERS[config.packageManager],
          source: 'project-config'
        };
      }
    } catch {
      // Invalid config
    }
  }

  // 3. Check package.json packageManager field
  const fromPackageJson = detectFromPackageJson(projectDir);
  if (fromPackageJson) {
    return {
      name: fromPackageJson,
      config: PACKAGE_MANAGERS[fromPackageJson],
      source: 'package.json'
    };
  }

  // 4. Check lock file (pnpm-lock.yaml, bun.lockb, yarn.lock, package-lock.json)
  const fromLockFile = detectFromLockFile(projectDir);
  if (fromLockFile) {
    return {
      name: fromLockFile,
      config: PACKAGE_MANAGERS[fromLockFile],
      source: 'lock-file'
    };
  }

  // 5. Check global user preference (~/.claude/package-manager.json)
  const globalConfig = loadConfig();
  if (globalConfig && globalConfig.packageManager && PACKAGE_MANAGERS[globalConfig.packageManager]) {
    return {
      name: globalConfig.packageManager,
      config: PACKAGE_MANAGERS[globalConfig.packageManager],
      source: 'global-config'
    };
  }

  // 6. Use first available package manager in fallbackOrder that is installed.
  const available = getAvailablePackageManagers();
  for (const pmName of fallbackOrder) {
    if (available.includes(pmName)) {
      return {
        name: pmName,
        config: PACKAGE_MANAGERS[pmName],
        source: 'fallback'
      };
    }
  }

  // Default to npm (always available with Node.js)
  return {
    name: 'npm',
    config: PACKAGE_MANAGERS.npm,
    source: 'default'
  };
}
|
||||
|
||||
/**
|
||||
* Set user's preferred package manager (global)
|
||||
*/
|
||||
/**
 * Set the user's preferred package manager (global scope).
 * @param {string} pmName - One of the known package manager names.
 * @returns {object} The saved configuration.
 * @throws {Error} If pmName is not a known package manager.
 */
function setPreferredPackageManager(pmName) {
  if (!PACKAGE_MANAGERS[pmName]) {
    throw new Error(`Unknown package manager: ${pmName}`);
  }

  // Merge onto any existing global config so unrelated keys survive.
  const config = {
    ...(loadConfig() || {}),
    packageManager: pmName,
    setAt: new Date().toISOString()
  };
  saveConfig(config);

  return config;
}
|
||||
|
||||
/**
|
||||
* Set project's preferred package manager
|
||||
*/
|
||||
/**
 * Set the project's preferred package manager
 * (written to <projectDir>/.claude/package-manager.json).
 * @param {string} pmName - One of the known package manager names.
 * @param {string} [projectDir] - Project root; defaults to cwd.
 * @returns {object} The saved configuration.
 * @throws {Error} If pmName is not a known package manager.
 */
function setProjectPackageManager(pmName, projectDir = process.cwd()) {
  if (!PACKAGE_MANAGERS[pmName]) {
    throw new Error(`Unknown package manager: ${pmName}`);
  }

  const config = {
    packageManager: pmName,
    setAt: new Date().toISOString()
  };

  const configPath = path.join(projectDir, '.claude', 'package-manager.json');
  writeFile(configPath, JSON.stringify(config, null, 2));
  return config;
}
|
||||
|
||||
/**
|
||||
* Get the command to run a script
|
||||
* @param {string} script - Script name (e.g., "dev", "build", "test")
|
||||
* @param {object} options - { projectDir }
|
||||
*/
|
||||
function getRunCommand(script, options = {}) {
  const { config } = getPackageManager(options);

  // Well-known lifecycle scripts have dedicated (sometimes shorter) commands;
  // everything else goes through the generic "run" prefix.
  const wellKnown = new Map([
    ['install', config.installCmd],
    ['test', config.testCmd],
    ['build', config.buildCmd],
    ['dev', config.devCmd]
  ]);

  return wellKnown.has(script) ? wellKnown.get(script) : `${config.runCmd} ${script}`;
}
|
||||
|
||||
/**
|
||||
* Get the command to execute a package binary
|
||||
* @param {string} binary - Binary name (e.g., "prettier", "eslint")
|
||||
* @param {string} args - Arguments to pass
|
||||
*/
|
||||
function getExecCommand(binary, args = '', options = {}) {
  const pm = getPackageManager(options);
  // Only add a separating space when there are arguments.
  const suffix = args ? ` ${args}` : '';
  return `${pm.config.execCmd} ${binary}${suffix}`;
}
|
||||
|
||||
/**
|
||||
* Interactive prompt for package manager selection
|
||||
* Returns a message for Claude to show to user
|
||||
*/
|
||||
function getSelectionPrompt() {
  const available = getAvailablePackageManagers();
  const current = getPackageManager();

  // Build the message as a list of lines, then join; the result always ends
  // with a trailing newline, matching historical output.
  const lines = ['[PackageManager] Available package managers:'];
  for (const pmName of available) {
    const indicator = pmName === current.name ? ' (current)' : '';
    lines.push(`  - ${pmName}${indicator}`);
  }
  lines.push('');
  lines.push('To set your preferred package manager:');
  lines.push('  - Global: Set CLAUDE_PACKAGE_MANAGER environment variable');
  lines.push('  - Or add to ~/.claude/package-manager.json: {"packageManager": "pnpm"}');
  lines.push('  - Or add to package.json: {"packageManager": "pnpm@8"}');

  return lines.join('\n') + '\n';
}
|
||||
|
||||
/**
|
||||
* Generate a regex pattern that matches commands for all package managers
|
||||
* @param {string} action - Action pattern (e.g., "run dev", "install", "test")
|
||||
*/
|
||||
function getCommandPattern(action) {
  // Escape regex metacharacters in caller-supplied actions so a script name
  // like "e2e.test" or "build+prod" cannot corrupt (or over-match) the
  // generated pattern. The four well-known actions below are literal and
  // unaffected.
  const escaped = action.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');

  let patterns;
  if (action === 'dev') {
    patterns = ['npm run dev', 'pnpm( run)? dev', 'yarn dev', 'bun run dev'];
  } else if (action === 'install') {
    patterns = ['npm install', 'pnpm install', 'yarn( install)?', 'bun install'];
  } else if (action === 'test') {
    patterns = ['npm test', 'pnpm test', 'yarn test', 'bun test'];
  } else if (action === 'build') {
    patterns = ['npm run build', 'pnpm( run)? build', 'yarn build', 'bun run build'];
  } else {
    // Generic run command for arbitrary scripts.
    patterns = [
      `npm run ${escaped}`,
      `pnpm( run)? ${escaped}`,
      `yarn ${escaped}`,
      `bun run ${escaped}`
    ];
  }

  // Single alternation group covering all four package managers.
  return `(${patterns.join('|')})`;
}
|
||||
|
||||
// Public API of the package-manager module.
module.exports = {
  // Data tables
  PACKAGE_MANAGERS,
  DETECTION_PRIORITY,
  // Resolution
  getPackageManager,
  setPreferredPackageManager,
  setProjectPackageManager,
  getAvailablePackageManagers,
  detectFromLockFile,
  detectFromPackageJson,
  // Command builders
  getRunCommand,
  getExecCommand,
  getSelectionPrompt,
  getCommandPattern
};
|
||||
397
scripts/lib/utils.js
Normal file
397
scripts/lib/utils.js
Normal file
@@ -0,0 +1,397 @@
|
||||
/**
|
||||
* Cross-platform utility functions for Claude Code hooks and scripts
|
||||
* Works on Windows, macOS, and Linux
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const os = require('os');
|
||||
const { execSync, spawnSync } = require('child_process');
|
||||
|
||||
// Platform detection
|
||||
// Evaluated once at module load; process.platform never changes at runtime.
const isWindows = process.platform === 'win32';
const isMacOS = process.platform === 'darwin';
const isLinux = process.platform === 'linux';
|
||||
|
||||
/**
|
||||
* Get the user's home directory (cross-platform)
|
||||
*/
|
||||
// Cross-platform home directory (thin wrapper kept for a stable API).
function getHomeDir() {
  const home = os.homedir();
  return home;
}
|
||||
|
||||
/**
|
||||
* Get the Claude config directory
|
||||
*/
|
||||
// Claude config directory: <home>/.claude
function getClaudeDir() {
  return path.join(os.homedir(), '.claude');
}
|
||||
|
||||
/**
|
||||
* Get the sessions directory
|
||||
*/
|
||||
// Session logs directory: <home>/.claude/sessions
function getSessionsDir() {
  return path.join(os.homedir(), '.claude', 'sessions');
}
|
||||
|
||||
/**
|
||||
* Get the learned skills directory
|
||||
*/
|
||||
// Learned-skills directory: <home>/.claude/skills/learned
function getLearnedSkillsDir() {
  return path.join(os.homedir(), '.claude', 'skills', 'learned');
}
|
||||
|
||||
/**
|
||||
* Get the temp directory (cross-platform)
|
||||
*/
|
||||
// OS temp directory (thin wrapper kept for a stable API).
function getTempDir() {
  const tmp = os.tmpdir();
  return tmp;
}
|
||||
|
||||
/**
|
||||
* Ensure a directory exists (create if not)
|
||||
*/
|
||||
/**
 * Ensure a directory exists, creating it (and parents) if needed.
 * @param {string} dirPath - Directory to create.
 * @returns {string} The same dirPath, for chaining.
 */
function ensureDir(dirPath) {
  if (fs.existsSync(dirPath)) {
    return dirPath;
  }
  fs.mkdirSync(dirPath, { recursive: true });
  return dirPath;
}
|
||||
|
||||
/**
|
||||
* Get current date in YYYY-MM-DD format
|
||||
*/
|
||||
// Current LOCAL date as YYYY-MM-DD (not UTC — matches session file naming).
function getDateString() {
  const now = new Date();
  const pad = (n) => String(n).padStart(2, '0');
  return [now.getFullYear(), pad(now.getMonth() + 1), pad(now.getDate())].join('-');
}
|
||||
|
||||
/**
|
||||
* Get current time in HH:MM format
|
||||
*/
|
||||
// Current LOCAL time as HH:MM (24-hour clock).
function getTimeString() {
  const now = new Date();
  const pad = (n) => String(n).padStart(2, '0');
  return `${pad(now.getHours())}:${pad(now.getMinutes())}`;
}
|
||||
|
||||
/**
|
||||
* Get short session ID from CLAUDE_SESSION_ID environment variable
|
||||
* Returns the last 8 characters for uniqueness with brevity
|
||||
* @param {string} fallback - Fallback value if no session ID (default: 'default')
|
||||
*/
|
||||
/**
 * Short session ID: last 8 characters of CLAUDE_SESSION_ID
 * (unique enough for filenames without being unwieldy).
 * @param {string} fallback - Returned when no session ID is set (default: 'default').
 */
function getSessionIdShort(fallback = 'default') {
  const sessionId = process.env.CLAUDE_SESSION_ID;
  return sessionId ? sessionId.slice(-8) : fallback;
}
|
||||
|
||||
/**
|
||||
* Get current datetime in YYYY-MM-DD HH:MM:SS format
|
||||
*/
|
||||
// Current LOCAL datetime as "YYYY-MM-DD HH:MM:SS".
function getDateTimeString() {
  const now = new Date();
  const pad = (n) => String(n).padStart(2, '0');
  const date = [now.getFullYear(), pad(now.getMonth() + 1), pad(now.getDate())].join('-');
  const time = [pad(now.getHours()), pad(now.getMinutes()), pad(now.getSeconds())].join(':');
  return `${date} ${time}`;
}
|
||||
|
||||
/**
|
||||
* Find files matching a pattern in a directory (cross-platform alternative to find)
|
||||
* @param {string} dir - Directory to search
|
||||
* @param {string} pattern - File pattern (e.g., "*.tmp", "*.md")
|
||||
* @param {object} options - Options { maxAge: days, recursive: boolean }
|
||||
*/
|
||||
/**
 * Find files matching a glob-like pattern (cross-platform `find` alternative).
 *
 * @param {string} dir - Directory to search.
 * @param {string} pattern - Glob pattern; '*' matches any run, '?' any char.
 * @param {object} options - { maxAge: max age in days, recursive: boolean }
 * @returns {Array<{path: string, mtime: number}>} Matches, newest first.
 */
function findFiles(dir, pattern, options = {}) {
  const { maxAge = null, recursive = false } = options;
  const results = [];

  if (!fs.existsSync(dir)) {
    return results;
  }

  // Translate the glob into a regex. Escape ALL regex metacharacters first
  // (the previous version only escaped '.', so patterns containing '+', '('
  // or '[' produced broken or over-matching regexes), then expand the glob
  // wildcards '*' and '?'.
  const regexPattern = pattern
    .replace(/[.+^${}()|[\]\\]/g, '\\$&')
    .replace(/\*/g, '.*')
    .replace(/\?/g, '.');
  const regex = new RegExp(`^${regexPattern}$`);

  function searchDir(currentDir) {
    let entries;
    try {
      entries = fs.readdirSync(currentDir, { withFileTypes: true });
    } catch {
      return; // Unreadable directory (permissions): skip silently.
    }

    for (const entry of entries) {
      const fullPath = path.join(currentDir, entry.name);

      if (entry.isFile() && regex.test(entry.name)) {
        // Stat each candidate individually so one vanished file no longer
        // aborts the scan of its whole directory (the old single try block
        // around the loop had that failure mode).
        try {
          const stats = fs.statSync(fullPath);
          const ageInDays = (Date.now() - stats.mtimeMs) / (1000 * 60 * 60 * 24);
          if (maxAge === null || ageInDays <= maxAge) {
            results.push({ path: fullPath, mtime: stats.mtimeMs });
          }
        } catch {
          // File disappeared between readdir and stat; ignore it.
        }
      } else if (entry.isDirectory() && recursive) {
        searchDir(fullPath);
      }
    }
  }

  searchDir(dir);

  // Sort by modification time (newest first)
  results.sort((a, b) => b.mtime - a.mtime);

  return results;
}
|
||||
|
||||
/**
|
||||
* Read JSON from stdin (for hook input)
|
||||
*/
|
||||
/**
 * Read JSON from stdin (for hook input).
 * Resolves to {} when stdin is empty; rejects on invalid JSON or stream error.
 */
async function readStdinJson() {
  const chunks = [];
  process.stdin.setEncoding('utf8');

  // Async iteration rejects automatically on stream 'error' events.
  for await (const chunk of process.stdin) {
    chunks.push(chunk);
  }

  const data = chunks.join('');
  return data.trim() ? JSON.parse(data) : {};
}
|
||||
|
||||
/**
|
||||
* Log to stderr (visible to user in Claude Code)
|
||||
*/
|
||||
function log(message) {
  // stderr keeps stdout clean for structured output returned to Claude.
  console.error(message);
}
|
||||
|
||||
/**
|
||||
* Output to stdout (returned to Claude)
|
||||
*/
|
||||
/**
 * Output to stdout (returned to Claude).
 * Objects (including null) are JSON-serialized; everything else is printed as-is.
 */
function output(data) {
  const text = typeof data === 'object' ? JSON.stringify(data) : data;
  console.log(text);
}
|
||||
|
||||
/**
|
||||
* Read a text file safely
|
||||
*/
|
||||
/**
 * Read a UTF-8 text file safely.
 * @returns {string|null} File contents, or null if missing/unreadable.
 */
function readFile(filePath) {
  let content = null;
  try {
    content = fs.readFileSync(filePath, 'utf8');
  } catch {
    // Missing or unreadable file -> null
  }
  return content;
}
|
||||
|
||||
/**
|
||||
* Write a text file
|
||||
*/
|
||||
/**
 * Write a UTF-8 text file, creating parent directories on demand.
 */
function writeFile(filePath, content) {
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  fs.writeFileSync(filePath, content, 'utf8');
}
|
||||
|
||||
/**
|
||||
* Append to a text file
|
||||
*/
|
||||
/**
 * Append UTF-8 text to a file, creating parent directories on demand.
 */
function appendFile(filePath, content) {
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  fs.appendFileSync(filePath, content, 'utf8');
}
|
||||
|
||||
/**
|
||||
* Check if a command exists in PATH
|
||||
* Uses execFileSync to prevent command injection
|
||||
*/
|
||||
/**
 * Check whether a command exists in PATH.
 * Uses spawnSync with an argument array (no shell) to prevent injection.
 * @param {string} cmd - Bare command name.
 * @returns {boolean}
 */
function commandExists(cmd) {
  // Reject anything but plain command names (alphanumeric, dash,
  // underscore, dot) to rule out argument/shell injection entirely.
  if (!/^[a-zA-Z0-9_.-]+$/.test(cmd)) {
    return false;
  }

  const locator = process.platform === 'win32' ? 'where' : 'which';
  try {
    return spawnSync(locator, [cmd], { stdio: 'pipe' }).status === 0;
  } catch {
    return false;
  }
}
|
||||
|
||||
/**
|
||||
* Run a command and return output
|
||||
*
|
||||
* SECURITY NOTE: This function executes shell commands. Only use with
|
||||
* trusted, hardcoded commands. Never pass user-controlled input directly.
|
||||
* For user input, use spawnSync with argument arrays instead.
|
||||
*
|
||||
* @param {string} cmd - Command to execute (should be trusted/hardcoded)
|
||||
* @param {object} options - execSync options
|
||||
*/
|
||||
/**
 * Run a shell command and return its trimmed output.
 *
 * SECURITY NOTE: executes through the shell. Only use with trusted,
 * hardcoded commands — never user-controlled input (use spawnSync with
 * argument arrays for that).
 *
 * @param {string} cmd - Command to execute (trusted/hardcoded).
 * @param {object} options - execSync options overriding the defaults.
 * @returns {{success: boolean, output: string}}
 */
function runCommand(cmd, options = {}) {
  const execOptions = {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    ...options
  };
  try {
    return { success: true, output: execSync(cmd, execOptions).trim() };
  } catch (err) {
    return { success: false, output: err.stderr || err.message };
  }
}
|
||||
|
||||
/**
|
||||
* Check if current directory is a git repository
|
||||
*/
|
||||
// True when the current working directory is inside a git repository.
function isGitRepo() {
  const { success } = runCommand('git rev-parse --git-dir');
  return success;
}
|
||||
|
||||
/**
|
||||
* Get git modified files
|
||||
*/
|
||||
/**
 * List files modified since HEAD (per `git diff --name-only HEAD`),
 * optionally filtered by regex patterns.
 * @param {string[]} patterns - Regex source strings; empty means no filter.
 * @returns {string[]} Matching file paths ([] outside a repo or on error).
 */
function getGitModifiedFiles(patterns = []) {
  if (!isGitRepo()) return [];

  const diff = runCommand('git diff --name-only HEAD');
  if (!diff.success) return [];

  const files = diff.output.split('\n').filter(Boolean);
  if (patterns.length === 0) {
    return files;
  }

  // Compile each pattern once, then keep files matching any of them.
  const regexes = patterns.map(p => new RegExp(p));
  return files.filter(file => regexes.some(re => re.test(file)));
}
|
||||
|
||||
/**
|
||||
* Replace text in a file (cross-platform sed alternative)
|
||||
*/
|
||||
/**
 * Replace text in a file (cross-platform sed alternative).
 *
 * @param {string} filePath - File to edit in place.
 * @param {string|RegExp} search - Pattern for String.prototype.replace.
 * @param {string} replace - Replacement text.
 * @returns {boolean} true only when the file was readable AND the content
 *   actually changed. (The previous version returned true even when the
 *   pattern never matched, so callers logged updates that didn't happen,
 *   and it rewrote the file unconditionally.) Note: a replacement identical
 *   to the matched text also reports false, since the content is unchanged.
 */
function replaceInFile(filePath, search, replace) {
  let content;
  try {
    content = fs.readFileSync(filePath, 'utf8');
  } catch {
    return false; // Missing or unreadable file.
  }

  const updated = content.replace(search, replace);
  if (updated === content) {
    return false; // Pattern not found; leave the file untouched.
  }

  fs.writeFileSync(filePath, updated, 'utf8');
  return true;
}
|
||||
|
||||
/**
|
||||
* Count occurrences of a pattern in a file
|
||||
*/
|
||||
/**
 * Count occurrences of a pattern in a file.
 *
 * @param {string} filePath - File to scan.
 * @param {string|RegExp} pattern - Pattern to count.
 * @returns {number} Occurrence count (0 if the file is unreadable).
 */
function countInFile(filePath, pattern) {
  let content;
  try {
    content = fs.readFileSync(filePath, 'utf8');
  } catch {
    return 0;
  }

  // Normalize to a GLOBAL regex: String.match without the 'g' flag returns
  // only the first match, so the old code capped counts at 1 whenever the
  // caller passed a non-global RegExp.
  const regex = pattern instanceof RegExp
    ? (pattern.global ? pattern : new RegExp(pattern.source, pattern.flags + 'g'))
    : new RegExp(pattern, 'g');

  const matches = content.match(regex);
  return matches ? matches.length : 0;
}
|
||||
|
||||
/**
|
||||
* Search for pattern in file and return matching lines with line numbers
|
||||
*/
|
||||
/**
 * Search for a pattern in a file; return matching lines with line numbers.
 *
 * @param {string} filePath - File to scan.
 * @param {string|RegExp} pattern - Pattern to match per line.
 * @returns {Array<{lineNumber: number, content: string}>} Matches in order
 *   ([] if the file is unreadable).
 */
function grepFile(filePath, pattern) {
  let content;
  try {
    content = fs.readFileSync(filePath, 'utf8');
  } catch {
    return [];
  }

  // Strip 'g'/'y' flags: .test() on a global/sticky regex advances
  // lastIndex between calls, which made the old code silently skip
  // matching lines whenever a caller passed a /g regex.
  const regex = pattern instanceof RegExp
    ? new RegExp(pattern.source, pattern.flags.replace(/[gy]/g, ''))
    : new RegExp(pattern);

  const results = [];
  content.split('\n').forEach((line, index) => {
    if (regex.test(line)) {
      results.push({ lineNumber: index + 1, content: line });
    }
  });

  return results;
}
|
||||
|
||||
// Public API of the cross-platform utils module.
module.exports = {
  // Platform info
  isWindows,
  isMacOS,
  isLinux,

  // Directories
  getHomeDir,
  getClaudeDir,
  getSessionsDir,
  getLearnedSkillsDir,
  getTempDir,
  ensureDir,

  // Date/Time
  getDateString,
  getTimeString,
  getDateTimeString,
  getSessionIdShort,

  // File operations
  findFiles,
  readFile,
  writeFile,
  appendFile,
  replaceInFile,
  countInFile,
  grepFile,

  // Hook I/O
  readStdinJson,
  log,
  output,

  // System
  commandExists,
  runCommand,
  isGitRepo,
  getGitModifiedFiles
};
|
||||
206
scripts/setup-package-manager.js
Normal file
206
scripts/setup-package-manager.js
Normal file
@@ -0,0 +1,206 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Package Manager Setup Script
|
||||
*
|
||||
* Interactive script to configure preferred package manager.
|
||||
* Can be run directly or via the /setup-pm command.
|
||||
*
|
||||
* Usage:
|
||||
* node scripts/setup-package-manager.js [pm-name]
|
||||
* node scripts/setup-package-manager.js --detect
|
||||
* node scripts/setup-package-manager.js --global pnpm
|
||||
* node scripts/setup-package-manager.js --project bun
|
||||
*/
|
||||
|
||||
const {
|
||||
PACKAGE_MANAGERS,
|
||||
getPackageManager,
|
||||
setPreferredPackageManager,
|
||||
setProjectPackageManager,
|
||||
getAvailablePackageManagers,
|
||||
detectFromLockFile,
|
||||
detectFromPackageJson,
|
||||
getSelectionPrompt
|
||||
} = require('./lib/package-manager');
|
||||
const { log } = require('./lib/utils');
|
||||
|
||||
function showHelp() {
|
||||
console.log(`
|
||||
Package Manager Setup for Claude Code
|
||||
|
||||
Usage:
|
||||
node scripts/setup-package-manager.js [options] [package-manager]
|
||||
|
||||
Options:
|
||||
--detect Detect and show current package manager
|
||||
--global <pm> Set global preference (saves to ~/.claude/package-manager.json)
|
||||
--project <pm> Set project preference (saves to .claude/package-manager.json)
|
||||
--list List available package managers
|
||||
--help Show this help message
|
||||
|
||||
Package Managers:
|
||||
npm Node Package Manager (default with Node.js)
|
||||
pnpm Fast, disk space efficient package manager
|
||||
yarn Classic Yarn package manager
|
||||
bun All-in-one JavaScript runtime & toolkit
|
||||
|
||||
Examples:
|
||||
# Detect current package manager
|
||||
node scripts/setup-package-manager.js --detect
|
||||
|
||||
# Set pnpm as global preference
|
||||
node scripts/setup-package-manager.js --global pnpm
|
||||
|
||||
# Set bun for current project
|
||||
node scripts/setup-package-manager.js --project bun
|
||||
|
||||
# List available package managers
|
||||
node scripts/setup-package-manager.js --list
|
||||
`);
|
||||
}
|
||||
|
||||
function detectAndShow() {
|
||||
const pm = getPackageManager();
|
||||
const available = getAvailablePackageManagers();
|
||||
const fromLock = detectFromLockFile();
|
||||
const fromPkg = detectFromPackageJson();
|
||||
|
||||
console.log('\n=== Package Manager Detection ===\n');
|
||||
|
||||
console.log('Current selection:');
|
||||
console.log(` Package Manager: ${pm.name}`);
|
||||
console.log(` Source: ${pm.source}`);
|
||||
console.log('');
|
||||
|
||||
console.log('Detection results:');
|
||||
console.log(` From package.json: ${fromPkg || 'not specified'}`);
|
||||
console.log(` From lock file: ${fromLock || 'not found'}`);
|
||||
console.log(` Environment var: ${process.env.CLAUDE_PACKAGE_MANAGER || 'not set'}`);
|
||||
console.log('');
|
||||
|
||||
console.log('Available package managers:');
|
||||
for (const pmName of Object.keys(PACKAGE_MANAGERS)) {
|
||||
const installed = available.includes(pmName);
|
||||
const indicator = installed ? '✓' : '✗';
|
||||
const current = pmName === pm.name ? ' (current)' : '';
|
||||
console.log(` ${indicator} ${pmName}${current}`);
|
||||
}
|
||||
|
||||
console.log('');
|
||||
console.log('Commands:');
|
||||
console.log(` Install: ${pm.config.installCmd}`);
|
||||
console.log(` Run script: ${pm.config.runCmd} [script-name]`);
|
||||
console.log(` Execute binary: ${pm.config.execCmd} [binary-name]`);
|
||||
console.log('');
|
||||
}
|
||||
|
||||
function listAvailable() {
|
||||
const available = getAvailablePackageManagers();
|
||||
const pm = getPackageManager();
|
||||
|
||||
console.log('\nAvailable Package Managers:\n');
|
||||
|
||||
for (const pmName of Object.keys(PACKAGE_MANAGERS)) {
|
||||
const config = PACKAGE_MANAGERS[pmName];
|
||||
const installed = available.includes(pmName);
|
||||
const current = pmName === pm.name ? ' (current)' : '';
|
||||
|
||||
console.log(`${pmName}${current}`);
|
||||
console.log(` Installed: ${installed ? 'Yes' : 'No'}`);
|
||||
console.log(` Lock file: ${config.lockFile}`);
|
||||
console.log(` Install: ${config.installCmd}`);
|
||||
console.log(` Run: ${config.runCmd}`);
|
||||
console.log('');
|
||||
}
|
||||
}
|
||||
|
||||
function setGlobal(pmName) {
|
||||
if (!PACKAGE_MANAGERS[pmName]) {
|
||||
console.error(`Error: Unknown package manager "${pmName}"`);
|
||||
console.error(`Available: ${Object.keys(PACKAGE_MANAGERS).join(', ')}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const available = getAvailablePackageManagers();
|
||||
if (!available.includes(pmName)) {
|
||||
console.warn(`Warning: ${pmName} is not installed on your system`);
|
||||
}
|
||||
|
||||
try {
|
||||
setPreferredPackageManager(pmName);
|
||||
console.log(`\n✓ Global preference set to: ${pmName}`);
|
||||
console.log(' Saved to: ~/.claude/package-manager.json');
|
||||
console.log('');
|
||||
} catch (err) {
|
||||
console.error(`Error: ${err.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
function setProject(pmName) {
|
||||
if (!PACKAGE_MANAGERS[pmName]) {
|
||||
console.error(`Error: Unknown package manager "${pmName}"`);
|
||||
console.error(`Available: ${Object.keys(PACKAGE_MANAGERS).join(', ')}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
try {
|
||||
setProjectPackageManager(pmName);
|
||||
console.log(`\n✓ Project preference set to: ${pmName}`);
|
||||
console.log(' Saved to: .claude/package-manager.json');
|
||||
console.log('');
|
||||
} catch (err) {
|
||||
console.error(`Error: ${err.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Main
|
||||
const args = process.argv.slice(2);
|
||||
|
||||
if (args.length === 0 || args.includes('--help') || args.includes('-h')) {
|
||||
showHelp();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
if (args.includes('--detect')) {
|
||||
detectAndShow();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
if (args.includes('--list')) {
|
||||
listAvailable();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const globalIdx = args.indexOf('--global');
|
||||
if (globalIdx !== -1) {
|
||||
const pmName = args[globalIdx + 1];
|
||||
if (!pmName) {
|
||||
console.error('Error: --global requires a package manager name');
|
||||
process.exit(1);
|
||||
}
|
||||
setGlobal(pmName);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const projectIdx = args.indexOf('--project');
|
||||
if (projectIdx !== -1) {
|
||||
const pmName = args[projectIdx + 1];
|
||||
if (!pmName) {
|
||||
console.error('Error: --project requires a package manager name');
|
||||
process.exit(1);
|
||||
}
|
||||
setProject(pmName);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// If just a package manager name is provided, set it globally
|
||||
const pmName = args[0];
|
||||
if (PACKAGE_MANAGERS[pmName]) {
|
||||
setGlobal(pmName);
|
||||
} else {
|
||||
console.error(`Error: Unknown option or package manager "${pmName}"`);
|
||||
showHelp();
|
||||
process.exit(1);
|
||||
}
|
||||
@@ -395,21 +395,26 @@ export function hasPermission(user: User, permission: Permission): boolean {
|
||||
}
|
||||
|
||||
export function requirePermission(permission: Permission) {
|
||||
return async (request: Request) => {
|
||||
const user = await requireAuth(request)
|
||||
return (handler: (request: Request, user: User) => Promise<Response>) => {
|
||||
return async (request: Request) => {
|
||||
const user = await requireAuth(request)
|
||||
|
||||
if (!hasPermission(user, permission)) {
|
||||
throw new ApiError(403, 'Insufficient permissions')
|
||||
if (!hasPermission(user, permission)) {
|
||||
throw new ApiError(403, 'Insufficient permissions')
|
||||
}
|
||||
|
||||
return handler(request, user)
|
||||
}
|
||||
|
||||
return user
|
||||
}
|
||||
}
|
||||
|
||||
// Usage
|
||||
export const DELETE = requirePermission('delete')(async (request: Request) => {
|
||||
// Handler with permission check
|
||||
})
|
||||
// Usage - HOF wraps the handler
|
||||
export const DELETE = requirePermission('delete')(
|
||||
async (request: Request, user: User) => {
|
||||
// Handler receives authenticated user with verified permission
|
||||
return new Response('Deleted', { status: 200 })
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
257
skills/continuous-learning-v2/SKILL.md
Normal file
257
skills/continuous-learning-v2/SKILL.md
Normal file
@@ -0,0 +1,257 @@
|
||||
---
|
||||
name: continuous-learning-v2
|
||||
description: Instinct-based learning system that observes sessions via hooks, creates atomic instincts with confidence scoring, and evolves them into skills/commands/agents.
|
||||
version: 2.0.0
|
||||
---
|
||||
|
||||
# Continuous Learning v2 - Instinct-Based Architecture
|
||||
|
||||
An advanced learning system that turns your Claude Code sessions into reusable knowledge through atomic "instincts" - small learned behaviors with confidence scoring.
|
||||
|
||||
## What's New in v2
|
||||
|
||||
| Feature | v1 | v2 |
|
||||
|---------|----|----|
|
||||
| Observation | Stop hook (session end) | PreToolUse/PostToolUse (100% reliable) |
|
||||
| Analysis | Main context | Background agent (Haiku) |
|
||||
| Granularity | Full skills | Atomic "instincts" |
|
||||
| Confidence | None | 0.3-0.9 weighted |
|
||||
| Evolution | Direct to skill | Instincts → cluster → skill/command/agent |
|
||||
| Sharing | None | Export/import instincts |
|
||||
|
||||
## The Instinct Model
|
||||
|
||||
An instinct is a small learned behavior:
|
||||
|
||||
```yaml
|
||||
---
|
||||
id: prefer-functional-style
|
||||
trigger: "when writing new functions"
|
||||
confidence: 0.7
|
||||
domain: "code-style"
|
||||
source: "session-observation"
|
||||
---
|
||||
|
||||
# Prefer Functional Style
|
||||
|
||||
## Action
|
||||
Use functional patterns over classes when appropriate.
|
||||
|
||||
## Evidence
|
||||
- Observed 5 instances of functional pattern preference
|
||||
- User corrected class-based approach to functional on 2025-01-15
|
||||
```
|
||||
|
||||
**Properties:**
|
||||
- **Atomic** — one trigger, one action
|
||||
- **Confidence-weighted** — 0.3 = tentative, 0.9 = near certain
|
||||
- **Domain-tagged** — code-style, testing, git, debugging, workflow, etc.
|
||||
- **Evidence-backed** — tracks what observations created it
|
||||
|
||||
## How It Works
|
||||
|
||||
```
|
||||
Session Activity
|
||||
│
|
||||
│ Hooks capture prompts + tool use (100% reliable)
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ observations.jsonl │
|
||||
│ (prompts, tool calls, outcomes) │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
│ Observer agent reads (background, Haiku)
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ PATTERN DETECTION │
|
||||
│ • User corrections → instinct │
|
||||
│ • Error resolutions → instinct │
|
||||
│ • Repeated workflows → instinct │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
│ Creates/updates
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ instincts/personal/ │
|
||||
│ • prefer-functional.md (0.7) │
|
||||
│ • always-test-first.md (0.9) │
|
||||
│ • use-zod-validation.md (0.6) │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
│ /evolve clusters
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ evolved/ │
|
||||
│ • commands/new-feature.md │
|
||||
│ • skills/testing-workflow.md │
|
||||
│ • agents/refactor-specialist.md │
|
||||
└─────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Enable Observation Hooks
|
||||
|
||||
Add to your `~/.claude/settings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"hooks": {
|
||||
"PreToolUse": [{
|
||||
"matcher": "*",
|
||||
"hooks": [{
|
||||
"type": "command",
|
||||
"command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh pre"
|
||||
}]
|
||||
}],
|
||||
"PostToolUse": [{
|
||||
"matcher": "*",
|
||||
"hooks": [{
|
||||
"type": "command",
|
||||
"command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh post"
|
||||
}]
|
||||
}]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Initialize Directory Structure
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.claude/homunculus/{instincts/{personal,inherited},evolved/{agents,skills,commands}}
|
||||
touch ~/.claude/homunculus/observations.jsonl
|
||||
```
|
||||
|
||||
### 3. Run the Observer Agent (Optional)
|
||||
|
||||
The observer can run in the background analyzing observations:
|
||||
|
||||
```bash
|
||||
# Start background observer
|
||||
~/.claude/skills/continuous-learning-v2/agents/start-observer.sh
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `/instinct-status` | Show all learned instincts with confidence |
|
||||
| `/evolve` | Cluster related instincts into skills/commands |
|
||||
| `/instinct-export` | Export instincts for sharing |
|
||||
| `/instinct-import <file>` | Import instincts from others |
|
||||
|
||||
## Configuration
|
||||
|
||||
Edit `config.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "2.0",
|
||||
"observation": {
|
||||
"enabled": true,
|
||||
"store_path": "~/.claude/homunculus/observations.jsonl",
|
||||
"max_file_size_mb": 10,
|
||||
"archive_after_days": 7
|
||||
},
|
||||
"instincts": {
|
||||
"personal_path": "~/.claude/homunculus/instincts/personal/",
|
||||
"inherited_path": "~/.claude/homunculus/instincts/inherited/",
|
||||
"min_confidence": 0.3,
|
||||
"auto_approve_threshold": 0.7,
|
||||
"confidence_decay_rate": 0.05
|
||||
},
|
||||
"observer": {
|
||||
"enabled": true,
|
||||
"model": "haiku",
|
||||
"run_interval_minutes": 5,
|
||||
"patterns_to_detect": [
|
||||
"user_corrections",
|
||||
"error_resolutions",
|
||||
"repeated_workflows",
|
||||
"tool_preferences"
|
||||
]
|
||||
},
|
||||
"evolution": {
|
||||
"cluster_threshold": 3,
|
||||
"evolved_path": "~/.claude/homunculus/evolved/"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
~/.claude/homunculus/
|
||||
├── identity.json # Your profile, technical level
|
||||
├── observations.jsonl # Current session observations
|
||||
├── observations.archive/ # Processed observations
|
||||
├── instincts/
|
||||
│ ├── personal/ # Auto-learned instincts
|
||||
│ └── inherited/ # Imported from others
|
||||
└── evolved/
|
||||
├── agents/ # Generated specialist agents
|
||||
├── skills/ # Generated skills
|
||||
└── commands/ # Generated commands
|
||||
```
|
||||
|
||||
## Integration with Skill Creator
|
||||
|
||||
When you use the [Skill Creator GitHub App](https://skill-creator.app), it now generates **both**:
|
||||
- Traditional SKILL.md files (for backward compatibility)
|
||||
- Instinct collections (for v2 learning system)
|
||||
|
||||
Instincts from repo analysis have `source: "repo-analysis"` and include the source repository URL.
|
||||
|
||||
## Confidence Scoring
|
||||
|
||||
Confidence evolves over time:
|
||||
|
||||
| Score | Meaning | Behavior |
|
||||
|-------|---------|----------|
|
||||
| 0.3 | Tentative | Suggested but not enforced |
|
||||
| 0.5 | Moderate | Applied when relevant |
|
||||
| 0.7 | Strong | Auto-approved for application |
|
||||
| 0.9 | Near-certain | Core behavior |
|
||||
|
||||
**Confidence increases** when:
|
||||
- Pattern is repeatedly observed
|
||||
- User doesn't correct the suggested behavior
|
||||
- Similar instincts from other sources agree
|
||||
|
||||
**Confidence decreases** when:
|
||||
- User explicitly corrects the behavior
|
||||
- Pattern isn't observed for extended periods
|
||||
- Contradicting evidence appears
|
||||
|
||||
## Why Hooks vs Skills for Observation?
|
||||
|
||||
> "v1 relied on skills to observe. Skills are probabilistic—they fire ~50-80% of the time based on Claude's judgment."
|
||||
|
||||
Hooks fire **100% of the time**, deterministically. This means:
|
||||
- Every tool call is observed
|
||||
- No patterns are missed
|
||||
- Learning is comprehensive
|
||||
|
||||
## Backward Compatibility
|
||||
|
||||
v2 is fully compatible with v1:
|
||||
- Existing `~/.claude/skills/learned/` skills still work
|
||||
- Stop hook still runs (but now also feeds into v2)
|
||||
- Gradual migration path: run both in parallel
|
||||
|
||||
## Privacy
|
||||
|
||||
- Observations stay **local** on your machine
|
||||
- Only **instincts** (patterns) can be exported
|
||||
- No actual code or conversation content is shared
|
||||
- You control what gets exported
|
||||
|
||||
## Related
|
||||
|
||||
- [Skill Creator](https://skill-creator.app) - Generate instincts from repo history
|
||||
- [Homunculus](https://github.com/humanplane/homunculus) - Inspiration for v2 architecture
|
||||
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Continuous learning section
|
||||
|
||||
---
|
||||
|
||||
*Instinct-based learning: teaching Claude your patterns, one observation at a time.*
|
||||
137
skills/continuous-learning-v2/agents/observer.md
Normal file
137
skills/continuous-learning-v2/agents/observer.md
Normal file
@@ -0,0 +1,137 @@
|
||||
---
|
||||
name: observer
|
||||
description: Background agent that analyzes session observations to detect patterns and create instincts. Uses Haiku for cost-efficiency.
|
||||
model: haiku
|
||||
run_mode: background
|
||||
---
|
||||
|
||||
# Observer Agent
|
||||
|
||||
A background agent that analyzes observations from Claude Code sessions to detect patterns and create instincts.
|
||||
|
||||
## When to Run
|
||||
|
||||
- After significant session activity (20+ tool calls)
|
||||
- When user runs `/analyze-patterns`
|
||||
- On a scheduled interval (configurable, default 5 minutes)
|
||||
- When triggered by observation hook (SIGUSR1)
|
||||
|
||||
## Input
|
||||
|
||||
Reads observations from `~/.claude/homunculus/observations.jsonl`:
|
||||
|
||||
```jsonl
|
||||
{"timestamp":"2025-01-22T10:30:00Z","event":"tool_start","session":"abc123","tool":"Edit","input":"..."}
|
||||
{"timestamp":"2025-01-22T10:30:01Z","event":"tool_complete","session":"abc123","tool":"Edit","output":"..."}
|
||||
{"timestamp":"2025-01-22T10:30:05Z","event":"tool_start","session":"abc123","tool":"Bash","input":"npm test"}
|
||||
{"timestamp":"2025-01-22T10:30:10Z","event":"tool_complete","session":"abc123","tool":"Bash","output":"All tests pass"}
|
||||
```
|
||||
|
||||
## Pattern Detection
|
||||
|
||||
Look for these patterns in observations:
|
||||
|
||||
### 1. User Corrections
|
||||
When a user's follow-up message corrects Claude's previous action:
|
||||
- "No, use X instead of Y"
|
||||
- "Actually, I meant..."
|
||||
- Immediate undo/redo patterns
|
||||
|
||||
→ Create instinct: "When doing X, prefer Y"
|
||||
|
||||
### 2. Error Resolutions
|
||||
When an error is followed by a fix:
|
||||
- Tool output contains error
|
||||
- Next few tool calls fix it
|
||||
- Same error type resolved similarly multiple times
|
||||
|
||||
→ Create instinct: "When encountering error X, try Y"
|
||||
|
||||
### 3. Repeated Workflows
|
||||
When the same sequence of tools is used multiple times:
|
||||
- Same tool sequence with similar inputs
|
||||
- File patterns that change together
|
||||
- Time-clustered operations
|
||||
|
||||
→ Create workflow instinct: "When doing X, follow steps Y, Z, W"
|
||||
|
||||
### 4. Tool Preferences
|
||||
When certain tools are consistently preferred:
|
||||
- Always uses Grep before Edit
|
||||
- Prefers Read over Bash cat
|
||||
- Uses specific Bash commands for certain tasks
|
||||
|
||||
→ Create instinct: "When needing X, use tool Y"
|
||||
|
||||
## Output
|
||||
|
||||
Creates/updates instincts in `~/.claude/homunculus/instincts/personal/`:
|
||||
|
||||
```yaml
|
||||
---
|
||||
id: prefer-grep-before-edit
|
||||
trigger: "when searching for code to modify"
|
||||
confidence: 0.65
|
||||
domain: "workflow"
|
||||
source: "session-observation"
|
||||
---
|
||||
|
||||
# Prefer Grep Before Edit
|
||||
|
||||
## Action
|
||||
Always use Grep to find the exact location before using Edit.
|
||||
|
||||
## Evidence
|
||||
- Observed 8 times in session abc123
|
||||
- Pattern: Grep → Read → Edit sequence
|
||||
- Last observed: 2025-01-22
|
||||
```
|
||||
|
||||
## Confidence Calculation
|
||||
|
||||
Initial confidence based on observation frequency:
|
||||
- 1-2 observations: 0.3 (tentative)
|
||||
- 3-5 observations: 0.5 (moderate)
|
||||
- 6-10 observations: 0.7 (strong)
|
||||
- 11+ observations: 0.85 (very strong)
|
||||
|
||||
Confidence adjusts over time:
|
||||
- +0.05 for each confirming observation
|
||||
- -0.1 for each contradicting observation
|
||||
- -0.02 per week without observation (decay)
|
||||
|
||||
## Important Guidelines
|
||||
|
||||
1. **Be Conservative**: Only create instincts for clear patterns (3+ observations)
|
||||
2. **Be Specific**: Narrow triggers are better than broad ones
|
||||
3. **Track Evidence**: Always include what observations led to the instinct
|
||||
4. **Respect Privacy**: Never include actual code snippets, only patterns
|
||||
5. **Merge Similar**: If a new instinct is similar to existing, update rather than duplicate
|
||||
|
||||
## Example Analysis Session
|
||||
|
||||
Given observations:
|
||||
```jsonl
|
||||
{"event":"tool_start","tool":"Grep","input":"pattern: useState"}
|
||||
{"event":"tool_complete","tool":"Grep","output":"Found in 3 files"}
|
||||
{"event":"tool_start","tool":"Read","input":"src/hooks/useAuth.ts"}
|
||||
{"event":"tool_complete","tool":"Read","output":"[file content]"}
|
||||
{"event":"tool_start","tool":"Edit","input":"src/hooks/useAuth.ts..."}
|
||||
```
|
||||
|
||||
Analysis:
|
||||
- Detected workflow: Grep → Read → Edit
|
||||
- Frequency: Seen 5 times this session
|
||||
- Create instinct:
|
||||
- trigger: "when modifying code"
|
||||
- action: "Search with Grep, confirm with Read, then Edit"
|
||||
- confidence: 0.6
|
||||
- domain: "workflow"
|
||||
|
||||
## Integration with Skill Creator
|
||||
|
||||
When instincts are imported from Skill Creator (repo analysis), they have:
|
||||
- `source: "repo-analysis"`
|
||||
- `source_repo: "https://github.com/..."`
|
||||
|
||||
These should be treated as team/project conventions with higher initial confidence (0.7+).
|
||||
134
skills/continuous-learning-v2/agents/start-observer.sh
Executable file
134
skills/continuous-learning-v2/agents/start-observer.sh
Executable file
@@ -0,0 +1,134 @@
|
||||
#!/bin/bash
|
||||
# Continuous Learning v2 - Observer Agent Launcher
|
||||
#
|
||||
# Starts the background observer agent that analyzes observations
|
||||
# and creates instincts. Uses Haiku model for cost efficiency.
|
||||
#
|
||||
# Usage:
|
||||
# start-observer.sh # Start observer in background
|
||||
# start-observer.sh stop # Stop running observer
|
||||
# start-observer.sh status # Check if observer is running
|
||||
|
||||
set -e
|
||||
|
||||
CONFIG_DIR="${HOME}/.claude/homunculus"
|
||||
PID_FILE="${CONFIG_DIR}/.observer.pid"
|
||||
LOG_FILE="${CONFIG_DIR}/observer.log"
|
||||
OBSERVATIONS_FILE="${CONFIG_DIR}/observations.jsonl"
|
||||
|
||||
mkdir -p "$CONFIG_DIR"
|
||||
|
||||
case "${1:-start}" in
|
||||
stop)
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
pid=$(cat "$PID_FILE")
|
||||
if kill -0 "$pid" 2>/dev/null; then
|
||||
echo "Stopping observer (PID: $pid)..."
|
||||
kill "$pid"
|
||||
rm -f "$PID_FILE"
|
||||
echo "Observer stopped."
|
||||
else
|
||||
echo "Observer not running (stale PID file)."
|
||||
rm -f "$PID_FILE"
|
||||
fi
|
||||
else
|
||||
echo "Observer not running."
|
||||
fi
|
||||
exit 0
|
||||
;;
|
||||
|
||||
status)
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
pid=$(cat "$PID_FILE")
|
||||
if kill -0 "$pid" 2>/dev/null; then
|
||||
echo "Observer is running (PID: $pid)"
|
||||
echo "Log: $LOG_FILE"
|
||||
echo "Observations: $(wc -l < "$OBSERVATIONS_FILE" 2>/dev/null || echo 0) lines"
|
||||
exit 0
|
||||
else
|
||||
echo "Observer not running (stale PID file)"
|
||||
rm -f "$PID_FILE"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Observer not running"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
|
||||
start)
|
||||
# Check if already running
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
pid=$(cat "$PID_FILE")
|
||||
if kill -0 "$pid" 2>/dev/null; then
|
||||
echo "Observer already running (PID: $pid)"
|
||||
exit 0
|
||||
fi
|
||||
rm -f "$PID_FILE"
|
||||
fi
|
||||
|
||||
echo "Starting observer agent..."
|
||||
|
||||
# The observer loop
|
||||
(
|
||||
trap 'rm -f "$PID_FILE"; exit 0' TERM INT
|
||||
|
||||
analyze_observations() {
|
||||
# Only analyze if we have enough observations
|
||||
obs_count=$(wc -l < "$OBSERVATIONS_FILE" 2>/dev/null || echo 0)
|
||||
if [ "$obs_count" -lt 10 ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
echo "[$(date)] Analyzing $obs_count observations..." >> "$LOG_FILE"
|
||||
|
||||
# Use Claude Code with Haiku to analyze observations
|
||||
# This spawns a quick analysis session
|
||||
if command -v claude &> /dev/null; then
|
||||
claude --model haiku --max-turns 3 --print \
|
||||
"Read $OBSERVATIONS_FILE and identify patterns. If you find 3+ occurrences of the same pattern, create an instinct file in $CONFIG_DIR/instincts/personal/ following the format in the observer agent spec. Be conservative - only create instincts for clear patterns." \
|
||||
>> "$LOG_FILE" 2>&1 || true
|
||||
fi
|
||||
|
||||
# Archive processed observations
|
||||
if [ -f "$OBSERVATIONS_FILE" ]; then
|
||||
archive_dir="${CONFIG_DIR}/observations.archive"
|
||||
mkdir -p "$archive_dir"
|
||||
mv "$OBSERVATIONS_FILE" "$archive_dir/processed-$(date +%Y%m%d-%H%M%S).jsonl"
|
||||
touch "$OBSERVATIONS_FILE"
|
||||
fi
|
||||
}
|
||||
|
||||
# Handle SIGUSR1 for on-demand analysis
|
||||
trap 'analyze_observations' USR1
|
||||
|
||||
echo "$$" > "$PID_FILE"
|
||||
echo "[$(date)] Observer started (PID: $$)" >> "$LOG_FILE"
|
||||
|
||||
while true; do
|
||||
# Check every 5 minutes
|
||||
sleep 300
|
||||
|
||||
analyze_observations
|
||||
done
|
||||
) &
|
||||
|
||||
disown
|
||||
|
||||
# Wait a moment for PID file
|
||||
sleep 1
|
||||
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
echo "Observer started (PID: $(cat "$PID_FILE"))"
|
||||
echo "Log: $LOG_FILE"
|
||||
else
|
||||
echo "Failed to start observer"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|status}"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
186
skills/continuous-learning-v2/commands/evolve.md
Normal file
186
skills/continuous-learning-v2/commands/evolve.md
Normal file
@@ -0,0 +1,186 @@
|
||||
---
|
||||
name: evolve
|
||||
description: Cluster related instincts into skills, commands, or agents
|
||||
command: /evolve
|
||||
implementation: python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py evolve
|
||||
---
|
||||
|
||||
# Evolve Command
|
||||
|
||||
## Implementation
|
||||
|
||||
```bash
|
||||
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py evolve [--generate]
|
||||
```
|
||||
|
||||
Analyzes instincts and clusters related ones into higher-level structures:
|
||||
- **Commands**: When instincts describe user-invoked actions
|
||||
- **Skills**: When instincts describe auto-triggered behaviors
|
||||
- **Agents**: When instincts describe complex, multi-step processes
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/evolve # Analyze all instincts and suggest evolutions
|
||||
/evolve --domain testing # Only evolve instincts in testing domain
|
||||
/evolve --dry-run # Show what would be created without creating
|
||||
/evolve --threshold 5 # Require 5+ related instincts to cluster
|
||||
```
|
||||
|
||||
## Evolution Rules
|
||||
|
||||
### → Command (User-Invoked)
|
||||
When instincts describe actions a user would explicitly request:
|
||||
- Multiple instincts about "when user asks to..."
|
||||
- Instincts with triggers like "when creating a new X"
|
||||
- Instincts that follow a repeatable sequence
|
||||
|
||||
Example:
|
||||
- `new-table-step1`: "when adding a database table, create migration"
|
||||
- `new-table-step2`: "when adding a database table, update schema"
|
||||
- `new-table-step3`: "when adding a database table, regenerate types"
|
||||
|
||||
→ Creates: `/new-table` command
|
||||
|
||||
### → Skill (Auto-Triggered)
|
||||
When instincts describe behaviors that should happen automatically:
|
||||
- Pattern-matching triggers
|
||||
- Error handling responses
|
||||
- Code style enforcement
|
||||
|
||||
Example:
|
||||
- `prefer-functional`: "when writing functions, prefer functional style"
|
||||
- `use-immutable`: "when modifying state, use immutable patterns"
|
||||
- `avoid-classes`: "when designing modules, avoid class-based design"
|
||||
|
||||
→ Creates: `functional-patterns` skill
|
||||
|
||||
### → Agent (Needs Depth/Isolation)
|
||||
When instincts describe complex, multi-step processes that benefit from isolation:
|
||||
- Debugging workflows
|
||||
- Refactoring sequences
|
||||
- Research tasks
|
||||
|
||||
Example:
|
||||
- `debug-step1`: "when debugging, first check logs"
|
||||
- `debug-step2`: "when debugging, isolate the failing component"
|
||||
- `debug-step3`: "when debugging, create minimal reproduction"
|
||||
- `debug-step4`: "when debugging, verify fix with test"
|
||||
|
||||
→ Creates: `debugger` agent
|
||||
|
||||
## What to Do
|
||||
|
||||
1. Read all instincts from `~/.claude/homunculus/instincts/`
|
||||
2. Group instincts by:
|
||||
- Domain similarity
|
||||
- Trigger pattern overlap
|
||||
- Action sequence relationship
|
||||
3. For each cluster of 3+ related instincts:
|
||||
- Determine evolution type (command/skill/agent)
|
||||
- Generate the appropriate file
|
||||
- Save to `~/.claude/homunculus/evolved/{commands,skills,agents}/`
|
||||
4. Link evolved structure back to source instincts
|
||||
|
||||
## Output Format
|
||||
|
||||
```
|
||||
🧬 Evolve Analysis
|
||||
==================
|
||||
|
||||
Found 3 clusters ready for evolution:
|
||||
|
||||
## Cluster 1: Database Migration Workflow
|
||||
Instincts: new-table-migration, update-schema, regenerate-types
|
||||
Type: Command
|
||||
Confidence: 85% (based on 12 observations)
|
||||
|
||||
Would create: /new-table command
|
||||
Files:
|
||||
- ~/.claude/homunculus/evolved/commands/new-table.md
|
||||
|
||||
## Cluster 2: Functional Code Style
|
||||
Instincts: prefer-functional, use-immutable, avoid-classes, pure-functions
|
||||
Type: Skill
|
||||
Confidence: 78% (based on 8 observations)
|
||||
|
||||
Would create: functional-patterns skill
|
||||
Files:
|
||||
- ~/.claude/homunculus/evolved/skills/functional-patterns.md
|
||||
|
||||
## Cluster 3: Debugging Process
|
||||
Instincts: debug-check-logs, debug-isolate, debug-reproduce, debug-verify
|
||||
Type: Agent
|
||||
Confidence: 72% (based on 6 observations)
|
||||
|
||||
Would create: debugger agent
|
||||
Files:
|
||||
- ~/.claude/homunculus/evolved/agents/debugger.md
|
||||
|
||||
---
|
||||
Run `/evolve --execute` to create these files.
|
||||
```
|
||||
|
||||
## Flags
|
||||
|
||||
- `--execute`: Actually create the evolved structures (default is preview)
|
||||
- `--dry-run`: Preview without creating
|
||||
- `--domain <name>`: Only evolve instincts in specified domain
|
||||
- `--threshold <n>`: Minimum instincts required to form cluster (default: 3)
|
||||
- `--type <command|skill|agent>`: Only create specified type
|
||||
|
||||
## Generated File Format
|
||||
|
||||
### Command
|
||||
```markdown
|
||||
---
|
||||
name: new-table
|
||||
description: Create a new database table with migration, schema update, and type generation
|
||||
command: /new-table
|
||||
evolved_from:
|
||||
- new-table-migration
|
||||
- update-schema
|
||||
- regenerate-types
|
||||
---
|
||||
|
||||
# New Table Command
|
||||
|
||||
[Generated content based on clustered instincts]
|
||||
|
||||
## Steps
|
||||
1. ...
|
||||
2. ...
|
||||
```
|
||||
|
||||
### Skill
|
||||
```markdown
|
||||
---
|
||||
name: functional-patterns
|
||||
description: Enforce functional programming patterns
|
||||
evolved_from:
|
||||
- prefer-functional
|
||||
- use-immutable
|
||||
- avoid-classes
|
||||
---
|
||||
|
||||
# Functional Patterns Skill
|
||||
|
||||
[Generated content based on clustered instincts]
|
||||
```
|
||||
|
||||
### Agent
|
||||
```markdown
|
||||
---
|
||||
name: debugger
|
||||
description: Systematic debugging agent
|
||||
model: sonnet
|
||||
evolved_from:
|
||||
- debug-check-logs
|
||||
- debug-isolate
|
||||
- debug-reproduce
|
||||
---
|
||||
|
||||
# Debugger Agent
|
||||
|
||||
[Generated content based on clustered instincts]
|
||||
```
|
||||
91
skills/continuous-learning-v2/commands/instinct-export.md
Normal file
91
skills/continuous-learning-v2/commands/instinct-export.md
Normal file
@@ -0,0 +1,91 @@
|
||||
---
|
||||
name: instinct-export
|
||||
description: Export instincts for sharing with teammates or other projects
|
||||
command: /instinct-export
|
||||
---
|
||||
|
||||
# Instinct Export Command
|
||||
|
||||
Exports instincts to a shareable format. Perfect for:
|
||||
- Sharing with teammates
|
||||
- Transferring to a new machine
|
||||
- Contributing to project conventions
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/instinct-export # Export all personal instincts
|
||||
/instinct-export --domain testing # Export only testing instincts
|
||||
/instinct-export --min-confidence 0.7 # Only export high-confidence instincts
|
||||
/instinct-export --output team-instincts.yaml
|
||||
```
|
||||
|
||||
## What to Do
|
||||
|
||||
1. Read instincts from `~/.claude/homunculus/instincts/personal/`
|
||||
2. Filter based on flags
|
||||
3. Strip sensitive information:
|
||||
- Remove session IDs
|
||||
- Remove file paths (keep only patterns)
|
||||
- Remove timestamps older than "last week"
|
||||
4. Generate export file
|
||||
|
||||
## Output Format
|
||||
|
||||
Creates a YAML file:
|
||||
|
||||
```yaml
|
||||
# Instincts Export
|
||||
# Generated: 2025-01-22
|
||||
# Source: personal
|
||||
# Count: 12 instincts
|
||||
|
||||
version: "2.0"
|
||||
exported_by: "continuous-learning-v2"
|
||||
export_date: "2025-01-22T10:30:00Z"
|
||||
|
||||
instincts:
|
||||
- id: prefer-functional-style
|
||||
trigger: "when writing new functions"
|
||||
action: "Use functional patterns over classes"
|
||||
confidence: 0.8
|
||||
domain: code-style
|
||||
observations: 8
|
||||
|
||||
- id: test-first-workflow
|
||||
trigger: "when adding new functionality"
|
||||
action: "Write test first, then implementation"
|
||||
confidence: 0.9
|
||||
domain: testing
|
||||
observations: 12
|
||||
|
||||
- id: grep-before-edit
|
||||
trigger: "when modifying code"
|
||||
action: "Search with Grep, confirm with Read, then Edit"
|
||||
confidence: 0.7
|
||||
domain: workflow
|
||||
observations: 6
|
||||
```
|
||||
|
||||
## Privacy Considerations
|
||||
|
||||
Exports include:
|
||||
- ✅ Trigger patterns
|
||||
- ✅ Actions
|
||||
- ✅ Confidence scores
|
||||
- ✅ Domains
|
||||
- ✅ Observation counts
|
||||
|
||||
Exports do NOT include:
|
||||
- ❌ Actual code snippets
|
||||
- ❌ File paths
|
||||
- ❌ Session transcripts
|
||||
- ❌ Personal identifiers
|
||||
|
||||
## Flags
|
||||
|
||||
- `--domain <name>`: Export only specified domain
|
||||
- `--min-confidence <n>`: Minimum confidence threshold (default: 0.3)
|
||||
- `--output <file>`: Output file path (default: instincts-export-YYYYMMDD.yaml)
|
||||
- `--format <yaml|json|md>`: Output format (default: yaml)
|
||||
- `--include-evidence`: Include evidence text (default: excluded)
|
||||
135
skills/continuous-learning-v2/commands/instinct-import.md
Normal file
135
skills/continuous-learning-v2/commands/instinct-import.md
Normal file
@@ -0,0 +1,135 @@
|
||||
---
|
||||
name: instinct-import
|
||||
description: Import instincts from teammates, Skill Creator, or other sources
|
||||
command: /instinct-import
|
||||
implementation: python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py import <file>
|
||||
---
|
||||
|
||||
# Instinct Import Command
|
||||
|
||||
## Implementation
|
||||
|
||||
```bash
|
||||
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py import <file-or-url> [--dry-run] [--force] [--min-confidence 0.7]
|
||||
```
|
||||
|
||||
Import instincts from:
|
||||
- Teammates' exports
|
||||
- Skill Creator (repo analysis)
|
||||
- Community collections
|
||||
- Previous machine backups
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/instinct-import team-instincts.yaml
|
||||
/instinct-import https://github.com/org/repo/instincts.yaml
|
||||
/instinct-import --from-skill-creator acme/webapp
|
||||
```
|
||||
|
||||
## What to Do
|
||||
|
||||
1. Fetch the instinct file (local path or URL)
|
||||
2. Parse and validate the format
|
||||
3. Check for duplicates with existing instincts
|
||||
4. Merge or add new instincts
|
||||
5. Save to `~/.claude/homunculus/instincts/inherited/`
|
||||
|
||||
## Import Process
|
||||
|
||||
```
|
||||
📥 Importing instincts from: team-instincts.yaml
|
||||
================================================
|
||||
|
||||
Found 12 instincts to import.
|
||||
|
||||
Analyzing conflicts...
|
||||
|
||||
## New Instincts (8)
|
||||
These will be added:
|
||||
✓ use-zod-validation (confidence: 0.7)
|
||||
✓ prefer-named-exports (confidence: 0.65)
|
||||
✓ test-async-functions (confidence: 0.8)
|
||||
...
|
||||
|
||||
## Duplicate Instincts (3)
|
||||
Already have similar instincts:
|
||||
⚠️ prefer-functional-style
|
||||
Local: 0.8 confidence, 12 observations
|
||||
Import: 0.7 confidence
|
||||
→ Keep local (higher confidence)
|
||||
|
||||
⚠️ test-first-workflow
|
||||
Local: 0.75 confidence
|
||||
Import: 0.9 confidence
|
||||
→ Update to import (higher confidence)
|
||||
|
||||
## Conflicting Instincts (1)
|
||||
These contradict local instincts:
|
||||
❌ use-classes-for-services
|
||||
Conflicts with: avoid-classes
|
||||
→ Skip (requires manual resolution)
|
||||
|
||||
---
|
||||
Import 8 new, update 1, skip 3?
|
||||
```
|
||||
|
||||
## Merge Strategies
|
||||
|
||||
### For Duplicates
|
||||
When importing an instinct that matches an existing one:
|
||||
- **Higher confidence wins**: Keep the one with higher confidence
|
||||
- **Merge evidence**: Combine observation counts
|
||||
- **Update timestamp**: Mark as recently validated
|
||||
|
||||
### For Conflicts
|
||||
When importing an instinct that contradicts an existing one:
|
||||
- **Skip by default**: Don't import conflicting instincts
|
||||
- **Flag for review**: Mark both as needing attention
|
||||
- **Manual resolution**: User decides which to keep
|
||||
|
||||
## Source Tracking
|
||||
|
||||
Imported instincts are marked with:
|
||||
```yaml
|
||||
source: "inherited"
|
||||
imported_from: "team-instincts.yaml"
|
||||
imported_at: "2025-01-22T10:30:00Z"
|
||||
original_source: "session-observation" # or "repo-analysis"
|
||||
```
|
||||
|
||||
## Skill Creator Integration
|
||||
|
||||
When importing from Skill Creator:
|
||||
|
||||
```
|
||||
/instinct-import --from-skill-creator acme/webapp
|
||||
```
|
||||
|
||||
This fetches instincts generated from repo analysis:
|
||||
- Source: `repo-analysis`
|
||||
- Higher initial confidence (0.7+)
|
||||
- Linked to source repository
|
||||
|
||||
## Flags
|
||||
|
||||
- `--dry-run`: Preview without importing
|
||||
- `--force`: Import even if conflicts exist
|
||||
- `--merge-strategy <higher|local|import>`: How to handle duplicates
|
||||
- `--from-skill-creator <owner/repo>`: Import from Skill Creator analysis
|
||||
- `--min-confidence <n>`: Only import instincts above threshold
|
||||
|
||||
## Output
|
||||
|
||||
After import:
|
||||
```
|
||||
✅ Import complete!
|
||||
|
||||
Added: 8 instincts
|
||||
Updated: 1 instinct
|
||||
Skipped: 3 instincts (2 duplicates, 1 conflict)
|
||||
|
||||
New instincts saved to: ~/.claude/homunculus/instincts/inherited/
|
||||
|
||||
Run /instinct-status to see all instincts.
|
||||
```
|
||||
79
skills/continuous-learning-v2/commands/instinct-status.md
Normal file
79
skills/continuous-learning-v2/commands/instinct-status.md
Normal file
@@ -0,0 +1,79 @@
|
||||
---
|
||||
name: instinct-status
|
||||
description: Show all learned instincts with their confidence levels
|
||||
command: /instinct-status
|
||||
implementation: python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py status
|
||||
---
|
||||
|
||||
# Instinct Status Command
|
||||
|
||||
Shows all learned instincts with their confidence scores, grouped by domain.
|
||||
|
||||
## Implementation
|
||||
|
||||
```bash
|
||||
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py status
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/instinct-status
|
||||
/instinct-status --domain code-style
|
||||
/instinct-status --low-confidence
|
||||
```
|
||||
|
||||
## What to Do
|
||||
|
||||
1. Read all instinct files from `~/.claude/homunculus/instincts/personal/`
|
||||
2. Read inherited instincts from `~/.claude/homunculus/instincts/inherited/`
|
||||
3. Display them grouped by domain with confidence bars
|
||||
|
||||
## Output Format
|
||||
|
||||
```
|
||||
📊 Instinct Status
|
||||
==================
|
||||
|
||||
## Code Style (4 instincts)
|
||||
|
||||
### prefer-functional-style
|
||||
Trigger: when writing new functions
|
||||
Action: Use functional patterns over classes
|
||||
Confidence: ████████░░ 80%
|
||||
Source: session-observation | Last updated: 2025-01-22
|
||||
|
||||
### use-path-aliases
|
||||
Trigger: when importing modules
|
||||
Action: Use @/ path aliases instead of relative imports
|
||||
Confidence: ██████░░░░ 60%
|
||||
Source: repo-analysis (github.com/acme/webapp)
|
||||
|
||||
## Testing (2 instincts)
|
||||
|
||||
### test-first-workflow
|
||||
Trigger: when adding new functionality
|
||||
Action: Write test first, then implementation
|
||||
Confidence: █████████░ 90%
|
||||
Source: session-observation
|
||||
|
||||
## Workflow (3 instincts)
|
||||
|
||||
### grep-before-edit
|
||||
Trigger: when modifying code
|
||||
Action: Search with Grep, confirm with Read, then Edit
|
||||
Confidence: ███████░░░ 70%
|
||||
Source: session-observation
|
||||
|
||||
---
|
||||
Total: 9 instincts (4 personal, 5 inherited)
|
||||
Observer: Running (last analysis: 5 min ago)
|
||||
```
|
||||
|
||||
## Flags
|
||||
|
||||
- `--domain <name>`: Filter by domain (code-style, testing, git, etc.)
|
||||
- `--low-confidence`: Show only instincts with confidence < 0.5
|
||||
- `--high-confidence`: Show only instincts with confidence >= 0.7
|
||||
- `--source <type>`: Filter by source (session-observation, repo-analysis, inherited)
|
||||
- `--json`: Output as JSON for programmatic use
|
||||
41
skills/continuous-learning-v2/config.json
Normal file
41
skills/continuous-learning-v2/config.json
Normal file
@@ -0,0 +1,41 @@
|
||||
{
|
||||
"version": "2.0",
|
||||
"observation": {
|
||||
"enabled": true,
|
||||
"store_path": "~/.claude/homunculus/observations.jsonl",
|
||||
"max_file_size_mb": 10,
|
||||
"archive_after_days": 7,
|
||||
"capture_tools": ["Edit", "Write", "Bash", "Read", "Grep", "Glob"],
|
||||
"ignore_tools": ["TodoWrite"]
|
||||
},
|
||||
"instincts": {
|
||||
"personal_path": "~/.claude/homunculus/instincts/personal/",
|
||||
"inherited_path": "~/.claude/homunculus/instincts/inherited/",
|
||||
"min_confidence": 0.3,
|
||||
"auto_approve_threshold": 0.7,
|
||||
"confidence_decay_rate": 0.02,
|
||||
"max_instincts": 100
|
||||
},
|
||||
"observer": {
|
||||
"enabled": false,
|
||||
"model": "haiku",
|
||||
"run_interval_minutes": 5,
|
||||
"min_observations_to_analyze": 20,
|
||||
"patterns_to_detect": [
|
||||
"user_corrections",
|
||||
"error_resolutions",
|
||||
"repeated_workflows",
|
||||
"tool_preferences",
|
||||
"file_patterns"
|
||||
]
|
||||
},
|
||||
"evolution": {
|
||||
"cluster_threshold": 3,
|
||||
"evolved_path": "~/.claude/homunculus/evolved/",
|
||||
"auto_evolve": false
|
||||
},
|
||||
"integration": {
|
||||
"skill_creator_api": "https://skill-creator.app/api",
|
||||
"backward_compatible_v1": true
|
||||
}
|
||||
}
|
||||
137
skills/continuous-learning-v2/hooks/observe.sh
Executable file
137
skills/continuous-learning-v2/hooks/observe.sh
Executable file
@@ -0,0 +1,137 @@
|
||||
#!/bin/bash
|
||||
# Continuous Learning v2 - Observation Hook
|
||||
#
|
||||
# Captures tool use events for pattern analysis.
|
||||
# Claude Code passes hook data via stdin as JSON.
|
||||
#
|
||||
# Hook config (in ~/.claude/settings.json):
|
||||
# {
|
||||
# "hooks": {
|
||||
# "PreToolUse": [{
|
||||
# "matcher": "*",
|
||||
# "hooks": [{ "type": "command", "command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh" }]
|
||||
# }],
|
||||
# "PostToolUse": [{
|
||||
# "matcher": "*",
|
||||
# "hooks": [{ "type": "command", "command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh" }]
|
||||
# }]
|
||||
# }
|
||||
# }
|
||||
|
||||
set -e
|
||||
|
||||
CONFIG_DIR="${HOME}/.claude/homunculus"
|
||||
OBSERVATIONS_FILE="${CONFIG_DIR}/observations.jsonl"
|
||||
MAX_FILE_SIZE_MB=10
|
||||
|
||||
# Ensure directory exists
|
||||
mkdir -p "$CONFIG_DIR"
|
||||
|
||||
# Skip if disabled
|
||||
if [ -f "$CONFIG_DIR/disabled" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Read JSON from stdin (Claude Code hook format)
|
||||
INPUT_JSON=$(cat)
|
||||
|
||||
# Exit if no input
|
||||
if [ -z "$INPUT_JSON" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Parse using python (more reliable than jq for complex JSON)
|
||||
PARSED=$(python3 << EOF
|
||||
import json
|
||||
import sys
|
||||
|
||||
try:
|
||||
data = json.loads('''$INPUT_JSON''')
|
||||
|
||||
# Extract fields - Claude Code hook format
|
||||
hook_type = data.get('hook_type', 'unknown') # PreToolUse or PostToolUse
|
||||
tool_name = data.get('tool_name', data.get('tool', 'unknown'))
|
||||
tool_input = data.get('tool_input', data.get('input', {}))
|
||||
tool_output = data.get('tool_output', data.get('output', ''))
|
||||
session_id = data.get('session_id', 'unknown')
|
||||
|
||||
# Truncate large inputs/outputs
|
||||
if isinstance(tool_input, dict):
|
||||
tool_input_str = json.dumps(tool_input)[:5000]
|
||||
else:
|
||||
tool_input_str = str(tool_input)[:5000]
|
||||
|
||||
if isinstance(tool_output, dict):
|
||||
tool_output_str = json.dumps(tool_output)[:5000]
|
||||
else:
|
||||
tool_output_str = str(tool_output)[:5000]
|
||||
|
||||
# Determine event type
|
||||
event = 'tool_start' if 'Pre' in hook_type else 'tool_complete'
|
||||
|
||||
print(json.dumps({
|
||||
'parsed': True,
|
||||
'event': event,
|
||||
'tool': tool_name,
|
||||
'input': tool_input_str if event == 'tool_start' else None,
|
||||
'output': tool_output_str if event == 'tool_complete' else None,
|
||||
'session': session_id
|
||||
}))
|
||||
except Exception as e:
|
||||
print(json.dumps({'parsed': False, 'error': str(e)}))
|
||||
EOF
|
||||
)
|
||||
|
||||
# Check if parsing succeeded
|
||||
PARSED_OK=$(echo "$PARSED" | python3 -c "import json,sys; print(json.load(sys.stdin).get('parsed', False))")
|
||||
|
||||
if [ "$PARSED_OK" != "True" ]; then
|
||||
# Fallback: log raw input for debugging
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
echo "{\"timestamp\":\"$timestamp\",\"event\":\"parse_error\",\"raw\":$(echo "$INPUT_JSON" | python3 -c 'import json,sys; print(json.dumps(sys.stdin.read()[:1000]))')}" >> "$OBSERVATIONS_FILE"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Archive if file too large
|
||||
if [ -f "$OBSERVATIONS_FILE" ]; then
|
||||
file_size_mb=$(du -m "$OBSERVATIONS_FILE" 2>/dev/null | cut -f1)
|
||||
if [ "${file_size_mb:-0}" -ge "$MAX_FILE_SIZE_MB" ]; then
|
||||
archive_dir="${CONFIG_DIR}/observations.archive"
|
||||
mkdir -p "$archive_dir"
|
||||
mv "$OBSERVATIONS_FILE" "$archive_dir/observations-$(date +%Y%m%d-%H%M%S).jsonl"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Build and write observation
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
|
||||
python3 << EOF
|
||||
import json
|
||||
|
||||
parsed = json.loads('''$PARSED''')
|
||||
observation = {
|
||||
'timestamp': '$timestamp',
|
||||
'event': parsed['event'],
|
||||
'tool': parsed['tool'],
|
||||
'session': parsed['session']
|
||||
}
|
||||
|
||||
if parsed['input']:
|
||||
observation['input'] = parsed['input']
|
||||
if parsed['output']:
|
||||
observation['output'] = parsed['output']
|
||||
|
||||
with open('$OBSERVATIONS_FILE', 'a') as f:
|
||||
f.write(json.dumps(observation) + '\n')
|
||||
EOF
|
||||
|
||||
# Signal observer if running
|
||||
OBSERVER_PID_FILE="${CONFIG_DIR}/.observer.pid"
|
||||
if [ -f "$OBSERVER_PID_FILE" ]; then
|
||||
observer_pid=$(cat "$OBSERVER_PID_FILE")
|
||||
if kill -0 "$observer_pid" 2>/dev/null; then
|
||||
kill -USR1 "$observer_pid" 2>/dev/null || true
|
||||
fi
|
||||
fi
|
||||
|
||||
exit 0
|
||||
494
skills/continuous-learning-v2/scripts/instinct-cli.py
Executable file
494
skills/continuous-learning-v2/scripts/instinct-cli.py
Executable file
@@ -0,0 +1,494 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Instinct CLI - Manage instincts for Continuous Learning v2
|
||||
|
||||
Commands:
|
||||
status - Show all instincts and their status
|
||||
import - Import instincts from file or URL
|
||||
export - Export instincts to file
|
||||
evolve - Cluster instincts into skills/commands/agents
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import urllib.request
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from collections import defaultdict
|
||||
from typing import Optional
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Configuration
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
HOMUNCULUS_DIR = Path.home() / ".claude" / "homunculus"
|
||||
INSTINCTS_DIR = HOMUNCULUS_DIR / "instincts"
|
||||
PERSONAL_DIR = INSTINCTS_DIR / "personal"
|
||||
INHERITED_DIR = INSTINCTS_DIR / "inherited"
|
||||
EVOLVED_DIR = HOMUNCULUS_DIR / "evolved"
|
||||
OBSERVATIONS_FILE = HOMUNCULUS_DIR / "observations.jsonl"
|
||||
|
||||
# Ensure directories exist
|
||||
for d in [PERSONAL_DIR, INHERITED_DIR, EVOLVED_DIR / "skills", EVOLVED_DIR / "commands", EVOLVED_DIR / "agents"]:
|
||||
d.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Instinct Parser
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def parse_instinct_file(content: str) -> list[dict]:
|
||||
"""Parse YAML-like instinct file format."""
|
||||
instincts = []
|
||||
current = {}
|
||||
in_frontmatter = False
|
||||
content_lines = []
|
||||
|
||||
for line in content.split('\n'):
|
||||
if line.strip() == '---':
|
||||
if in_frontmatter:
|
||||
# End of frontmatter
|
||||
in_frontmatter = False
|
||||
if current:
|
||||
current['content'] = '\n'.join(content_lines).strip()
|
||||
instincts.append(current)
|
||||
current = {}
|
||||
content_lines = []
|
||||
else:
|
||||
# Start of frontmatter
|
||||
in_frontmatter = True
|
||||
if current:
|
||||
current['content'] = '\n'.join(content_lines).strip()
|
||||
instincts.append(current)
|
||||
current = {}
|
||||
content_lines = []
|
||||
elif in_frontmatter:
|
||||
# Parse YAML-like frontmatter
|
||||
if ':' in line:
|
||||
key, value = line.split(':', 1)
|
||||
key = key.strip()
|
||||
value = value.strip().strip('"').strip("'")
|
||||
if key == 'confidence':
|
||||
current[key] = float(value)
|
||||
else:
|
||||
current[key] = value
|
||||
else:
|
||||
content_lines.append(line)
|
||||
|
||||
# Don't forget the last instinct
|
||||
if current:
|
||||
current['content'] = '\n'.join(content_lines).strip()
|
||||
instincts.append(current)
|
||||
|
||||
return [i for i in instincts if i.get('id')]
|
||||
|
||||
|
||||
def load_all_instincts() -> list[dict]:
|
||||
"""Load all instincts from personal and inherited directories."""
|
||||
instincts = []
|
||||
|
||||
for directory in [PERSONAL_DIR, INHERITED_DIR]:
|
||||
if not directory.exists():
|
||||
continue
|
||||
for file in directory.glob("*.yaml"):
|
||||
try:
|
||||
content = file.read_text()
|
||||
parsed = parse_instinct_file(content)
|
||||
for inst in parsed:
|
||||
inst['_source_file'] = str(file)
|
||||
inst['_source_type'] = directory.name
|
||||
instincts.extend(parsed)
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse {file}: {e}", file=sys.stderr)
|
||||
|
||||
return instincts
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Status Command
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def cmd_status(args):
|
||||
"""Show status of all instincts."""
|
||||
instincts = load_all_instincts()
|
||||
|
||||
if not instincts:
|
||||
print("No instincts found.")
|
||||
print(f"\nInstinct directories:")
|
||||
print(f" Personal: {PERSONAL_DIR}")
|
||||
print(f" Inherited: {INHERITED_DIR}")
|
||||
return
|
||||
|
||||
# Group by domain
|
||||
by_domain = defaultdict(list)
|
||||
for inst in instincts:
|
||||
domain = inst.get('domain', 'general')
|
||||
by_domain[domain].append(inst)
|
||||
|
||||
# Print header
|
||||
print(f"\n{'='*60}")
|
||||
print(f" INSTINCT STATUS - {len(instincts)} total")
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
# Summary by source
|
||||
personal = [i for i in instincts if i.get('_source_type') == 'personal']
|
||||
inherited = [i for i in instincts if i.get('_source_type') == 'inherited']
|
||||
print(f" Personal: {len(personal)}")
|
||||
print(f" Inherited: {len(inherited)}")
|
||||
print()
|
||||
|
||||
# Print by domain
|
||||
for domain in sorted(by_domain.keys()):
|
||||
domain_instincts = by_domain[domain]
|
||||
print(f"## {domain.upper()} ({len(domain_instincts)})")
|
||||
print()
|
||||
|
||||
for inst in sorted(domain_instincts, key=lambda x: -x.get('confidence', 0.5)):
|
||||
conf = inst.get('confidence', 0.5)
|
||||
conf_bar = '█' * int(conf * 10) + '░' * (10 - int(conf * 10))
|
||||
trigger = inst.get('trigger', 'unknown trigger')
|
||||
source = inst.get('source', 'unknown')
|
||||
|
||||
print(f" {conf_bar} {int(conf*100):3d}% {inst.get('id', 'unnamed')}")
|
||||
print(f" trigger: {trigger}")
|
||||
|
||||
# Extract action from content
|
||||
content = inst.get('content', '')
|
||||
action_match = re.search(r'## Action\s*\n\s*(.+?)(?:\n\n|\n##|$)', content, re.DOTALL)
|
||||
if action_match:
|
||||
action = action_match.group(1).strip().split('\n')[0]
|
||||
print(f" action: {action[:60]}{'...' if len(action) > 60 else ''}")
|
||||
|
||||
print()
|
||||
|
||||
# Observations stats
|
||||
if OBSERVATIONS_FILE.exists():
|
||||
obs_count = sum(1 for _ in open(OBSERVATIONS_FILE))
|
||||
print(f"─────────────────────────────────────────────────────────")
|
||||
print(f" Observations: {obs_count} events logged")
|
||||
print(f" File: {OBSERVATIONS_FILE}")
|
||||
|
||||
print(f"\n{'='*60}\n")
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Import Command
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def cmd_import(args):
|
||||
"""Import instincts from file or URL."""
|
||||
source = args.source
|
||||
|
||||
# Fetch content
|
||||
if source.startswith('http://') or source.startswith('https://'):
|
||||
print(f"Fetching from URL: {source}")
|
||||
try:
|
||||
with urllib.request.urlopen(source) as response:
|
||||
content = response.read().decode('utf-8')
|
||||
except Exception as e:
|
||||
print(f"Error fetching URL: {e}", file=sys.stderr)
|
||||
return 1
|
||||
else:
|
||||
path = Path(source).expanduser()
|
||||
if not path.exists():
|
||||
print(f"File not found: {path}", file=sys.stderr)
|
||||
return 1
|
||||
content = path.read_text()
|
||||
|
||||
# Parse instincts
|
||||
new_instincts = parse_instinct_file(content)
|
||||
if not new_instincts:
|
||||
print("No valid instincts found in source.")
|
||||
return 1
|
||||
|
||||
print(f"\nFound {len(new_instincts)} instincts to import.\n")
|
||||
|
||||
# Load existing
|
||||
existing = load_all_instincts()
|
||||
existing_ids = {i.get('id') for i in existing}
|
||||
|
||||
# Categorize
|
||||
to_add = []
|
||||
duplicates = []
|
||||
to_update = []
|
||||
|
||||
for inst in new_instincts:
|
||||
inst_id = inst.get('id')
|
||||
if inst_id in existing_ids:
|
||||
# Check if we should update
|
||||
existing_inst = next((e for e in existing if e.get('id') == inst_id), None)
|
||||
if existing_inst:
|
||||
if inst.get('confidence', 0) > existing_inst.get('confidence', 0):
|
||||
to_update.append(inst)
|
||||
else:
|
||||
duplicates.append(inst)
|
||||
else:
|
||||
to_add.append(inst)
|
||||
|
||||
# Filter by minimum confidence
|
||||
min_conf = args.min_confidence or 0.0
|
||||
to_add = [i for i in to_add if i.get('confidence', 0.5) >= min_conf]
|
||||
to_update = [i for i in to_update if i.get('confidence', 0.5) >= min_conf]
|
||||
|
||||
# Display summary
|
||||
if to_add:
|
||||
print(f"NEW ({len(to_add)}):")
|
||||
for inst in to_add:
|
||||
print(f" + {inst.get('id')} (confidence: {inst.get('confidence', 0.5):.2f})")
|
||||
|
||||
if to_update:
|
||||
print(f"\nUPDATE ({len(to_update)}):")
|
||||
for inst in to_update:
|
||||
print(f" ~ {inst.get('id')} (confidence: {inst.get('confidence', 0.5):.2f})")
|
||||
|
||||
if duplicates:
|
||||
print(f"\nSKIP ({len(duplicates)} - already exists with equal/higher confidence):")
|
||||
for inst in duplicates[:5]:
|
||||
print(f" - {inst.get('id')}")
|
||||
if len(duplicates) > 5:
|
||||
print(f" ... and {len(duplicates) - 5} more")
|
||||
|
||||
if args.dry_run:
|
||||
print("\n[DRY RUN] No changes made.")
|
||||
return 0
|
||||
|
||||
if not to_add and not to_update:
|
||||
print("\nNothing to import.")
|
||||
return 0
|
||||
|
||||
# Confirm
|
||||
if not args.force:
|
||||
response = input(f"\nImport {len(to_add)} new, update {len(to_update)}? [y/N] ")
|
||||
if response.lower() != 'y':
|
||||
print("Cancelled.")
|
||||
return 0
|
||||
|
||||
# Write to inherited directory
|
||||
timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
|
||||
source_name = Path(source).stem if not source.startswith('http') else 'web-import'
|
||||
output_file = INHERITED_DIR / f"{source_name}-{timestamp}.yaml"
|
||||
|
||||
all_to_write = to_add + to_update
|
||||
output_content = f"# Imported from {source}\n# Date: {datetime.now().isoformat()}\n\n"
|
||||
|
||||
for inst in all_to_write:
|
||||
output_content += "---\n"
|
||||
output_content += f"id: {inst.get('id')}\n"
|
||||
output_content += f"trigger: \"{inst.get('trigger', 'unknown')}\"\n"
|
||||
output_content += f"confidence: {inst.get('confidence', 0.5)}\n"
|
||||
output_content += f"domain: {inst.get('domain', 'general')}\n"
|
||||
output_content += f"source: inherited\n"
|
||||
output_content += f"imported_from: \"{source}\"\n"
|
||||
if inst.get('source_repo'):
|
||||
output_content += f"source_repo: {inst.get('source_repo')}\n"
|
||||
output_content += "---\n\n"
|
||||
output_content += inst.get('content', '') + "\n\n"
|
||||
|
||||
output_file.write_text(output_content)
|
||||
|
||||
print(f"\n✅ Import complete!")
|
||||
print(f" Added: {len(to_add)}")
|
||||
print(f" Updated: {len(to_update)}")
|
||||
print(f" Saved to: {output_file}")
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Export Command
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def cmd_export(args):
|
||||
"""Export instincts to file."""
|
||||
instincts = load_all_instincts()
|
||||
|
||||
if not instincts:
|
||||
print("No instincts to export.")
|
||||
return 1
|
||||
|
||||
# Filter by domain if specified
|
||||
if args.domain:
|
||||
instincts = [i for i in instincts if i.get('domain') == args.domain]
|
||||
|
||||
# Filter by minimum confidence
|
||||
if args.min_confidence:
|
||||
instincts = [i for i in instincts if i.get('confidence', 0.5) >= args.min_confidence]
|
||||
|
||||
if not instincts:
|
||||
print("No instincts match the criteria.")
|
||||
return 1
|
||||
|
||||
# Generate output
|
||||
output = f"# Instincts export\n# Date: {datetime.now().isoformat()}\n# Total: {len(instincts)}\n\n"
|
||||
|
||||
for inst in instincts:
|
||||
output += "---\n"
|
||||
for key in ['id', 'trigger', 'confidence', 'domain', 'source', 'source_repo']:
|
||||
if inst.get(key):
|
||||
value = inst[key]
|
||||
if key == 'trigger':
|
||||
output += f'{key}: "{value}"\n'
|
||||
else:
|
||||
output += f"{key}: {value}\n"
|
||||
output += "---\n\n"
|
||||
output += inst.get('content', '') + "\n\n"
|
||||
|
||||
# Write to file or stdout
|
||||
if args.output:
|
||||
Path(args.output).write_text(output)
|
||||
print(f"Exported {len(instincts)} instincts to {args.output}")
|
||||
else:
|
||||
print(output)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Evolve Command
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def cmd_evolve(args):
|
||||
"""Analyze instincts and suggest evolutions to skills/commands/agents."""
|
||||
instincts = load_all_instincts()
|
||||
|
||||
if len(instincts) < 3:
|
||||
print("Need at least 3 instincts to analyze patterns.")
|
||||
print(f"Currently have: {len(instincts)}")
|
||||
return 1
|
||||
|
||||
print(f"\n{'='*60}")
|
||||
print(f" EVOLVE ANALYSIS - {len(instincts)} instincts")
|
||||
print(f"{'='*60}\n")
|
||||
|
||||
# Group by domain
|
||||
by_domain = defaultdict(list)
|
||||
for inst in instincts:
|
||||
domain = inst.get('domain', 'general')
|
||||
by_domain[domain].append(inst)
|
||||
|
||||
# High-confidence instincts by domain (candidates for skills)
|
||||
high_conf = [i for i in instincts if i.get('confidence', 0) >= 0.8]
|
||||
print(f"High confidence instincts (>=80%): {len(high_conf)}")
|
||||
|
||||
# Find clusters (instincts with similar triggers)
|
||||
trigger_clusters = defaultdict(list)
|
||||
for inst in instincts:
|
||||
trigger = inst.get('trigger', '')
|
||||
# Normalize trigger
|
||||
trigger_key = trigger.lower()
|
||||
for keyword in ['when', 'creating', 'writing', 'adding', 'implementing', 'testing']:
|
||||
trigger_key = trigger_key.replace(keyword, '').strip()
|
||||
trigger_clusters[trigger_key].append(inst)
|
||||
|
||||
# Find clusters with 3+ instincts (good skill candidates)
|
||||
skill_candidates = []
|
||||
for trigger, cluster in trigger_clusters.items():
|
||||
if len(cluster) >= 2:
|
||||
avg_conf = sum(i.get('confidence', 0.5) for i in cluster) / len(cluster)
|
||||
skill_candidates.append({
|
||||
'trigger': trigger,
|
||||
'instincts': cluster,
|
||||
'avg_confidence': avg_conf,
|
||||
'domains': list(set(i.get('domain', 'general') for i in cluster))
|
||||
})
|
||||
|
||||
# Sort by cluster size and confidence
|
||||
skill_candidates.sort(key=lambda x: (-len(x['instincts']), -x['avg_confidence']))
|
||||
|
||||
print(f"\nPotential skill clusters found: {len(skill_candidates)}")
|
||||
|
||||
if skill_candidates:
|
||||
print(f"\n## SKILL CANDIDATES\n")
|
||||
for i, cand in enumerate(skill_candidates[:5], 1):
|
||||
print(f"{i}. Cluster: \"{cand['trigger']}\"")
|
||||
print(f" Instincts: {len(cand['instincts'])}")
|
||||
print(f" Avg confidence: {cand['avg_confidence']:.0%}")
|
||||
print(f" Domains: {', '.join(cand['domains'])}")
|
||||
print(f" Instincts:")
|
||||
for inst in cand['instincts'][:3]:
|
||||
print(f" - {inst.get('id')}")
|
||||
print()
|
||||
|
||||
# Command candidates (workflow instincts with high confidence)
|
||||
workflow_instincts = [i for i in instincts if i.get('domain') == 'workflow' and i.get('confidence', 0) >= 0.7]
|
||||
if workflow_instincts:
|
||||
print(f"\n## COMMAND CANDIDATES ({len(workflow_instincts)})\n")
|
||||
for inst in workflow_instincts[:5]:
|
||||
trigger = inst.get('trigger', 'unknown')
|
||||
# Suggest command name
|
||||
cmd_name = trigger.replace('when ', '').replace('implementing ', '').replace('a ', '')
|
||||
cmd_name = cmd_name.replace(' ', '-')[:20]
|
||||
print(f" /{cmd_name}")
|
||||
print(f" From: {inst.get('id')}")
|
||||
print(f" Confidence: {inst.get('confidence', 0.5):.0%}")
|
||||
print()
|
||||
|
||||
# Agent candidates (complex multi-step patterns)
|
||||
agent_candidates = [c for c in skill_candidates if len(c['instincts']) >= 3 and c['avg_confidence'] >= 0.75]
|
||||
if agent_candidates:
|
||||
print(f"\n## AGENT CANDIDATES ({len(agent_candidates)})\n")
|
||||
for cand in agent_candidates[:3]:
|
||||
agent_name = cand['trigger'].replace(' ', '-')[:20] + '-agent'
|
||||
print(f" {agent_name}")
|
||||
print(f" Covers {len(cand['instincts'])} instincts")
|
||||
print(f" Avg confidence: {cand['avg_confidence']:.0%}")
|
||||
print()
|
||||
|
||||
if args.generate:
|
||||
print("\n[Would generate evolved structures here]")
|
||||
print(" Skills would be saved to:", EVOLVED_DIR / "skills")
|
||||
print(" Commands would be saved to:", EVOLVED_DIR / "commands")
|
||||
print(" Agents would be saved to:", EVOLVED_DIR / "agents")
|
||||
|
||||
print(f"\n{'='*60}\n")
|
||||
return 0
|
||||
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Main
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(description='Instinct CLI for Continuous Learning v2')
|
||||
subparsers = parser.add_subparsers(dest='command', help='Available commands')
|
||||
|
||||
# Status
|
||||
status_parser = subparsers.add_parser('status', help='Show instinct status')
|
||||
|
||||
# Import
|
||||
import_parser = subparsers.add_parser('import', help='Import instincts')
|
||||
import_parser.add_argument('source', help='File path or URL')
|
||||
import_parser.add_argument('--dry-run', action='store_true', help='Preview without importing')
|
||||
import_parser.add_argument('--force', action='store_true', help='Skip confirmation')
|
||||
import_parser.add_argument('--min-confidence', type=float, help='Minimum confidence threshold')
|
||||
|
||||
# Export
|
||||
export_parser = subparsers.add_parser('export', help='Export instincts')
|
||||
export_parser.add_argument('--output', '-o', help='Output file')
|
||||
export_parser.add_argument('--domain', help='Filter by domain')
|
||||
export_parser.add_argument('--min-confidence', type=float, help='Minimum confidence')
|
||||
|
||||
# Evolve
|
||||
evolve_parser = subparsers.add_parser('evolve', help='Analyze and evolve instincts')
|
||||
evolve_parser.add_argument('--generate', action='store_true', help='Generate evolved structures')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.command == 'status':
|
||||
return cmd_status(args)
|
||||
elif args.command == 'import':
|
||||
return cmd_import(args)
|
||||
elif args.command == 'export':
|
||||
return cmd_export(args)
|
||||
elif args.command == 'evolve':
|
||||
return cmd_evolve(args)
|
||||
else:
|
||||
parser.print_help()
|
||||
return 1
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main() or 0)
|
||||
@@ -78,3 +78,33 @@ Add to your `~/.claude/settings.json`:
|
||||
|
||||
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Section on continuous learning
|
||||
- `/learn` command - Manual pattern extraction mid-session
|
||||
|
||||
---
|
||||
|
||||
## Comparison Notes (Research: Jan 2025)
|
||||
|
||||
### vs Homunculus (github.com/humanplane/homunculus)
|
||||
|
||||
Homunculus v2 takes a more sophisticated approach:
|
||||
|
||||
| Feature | Our Approach | Homunculus v2 |
|
||||
|---------|--------------|---------------|
|
||||
| Observation | Stop hook (end of session) | PreToolUse/PostToolUse hooks (100% reliable) |
|
||||
| Analysis | Main context | Background agent (Haiku) |
|
||||
| Granularity | Full skills | Atomic "instincts" |
|
||||
| Confidence | None | 0.3-0.9 weighted |
|
||||
| Evolution | Direct to skill | Instincts → cluster → skill/command/agent |
|
||||
| Sharing | None | Export/import instincts |
|
||||
|
||||
**Key insight from homunculus:**
|
||||
> "v1 relied on skills to observe. Skills are probabilistic—they fire ~50-80% of the time. v2 uses hooks for observation (100% reliable) and instincts as the atomic unit of learned behavior."
|
||||
|
||||
### Potential v2 Enhancements
|
||||
|
||||
1. **Instinct-based learning** - Smaller, atomic behaviors with confidence scoring
|
||||
2. **Background observer** - Haiku agent analyzing in parallel
|
||||
3. **Confidence decay** - Instincts lose confidence if contradicted
|
||||
4. **Domain tagging** - code-style, testing, git, debugging, etc.
|
||||
5. **Evolution path** - Cluster related instincts into skills/commands
|
||||
|
||||
See: `/Users/affoon/Documents/tasks/12-continuous-learning-v2.md` for full spec.
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: eval-harness
|
||||
description: Formal evaluation framework for Claude Code sessions implementing eval-driven development (EDD) principles
|
||||
tools: Read, Write, Edit, Bash, Grep, Glob
|
||||
---
|
||||
|
||||
# Eval Harness Skill
|
||||
|
||||
A formal evaluation framework for Claude Code sessions, implementing eval-driven development (EDD) principles.
|
||||
|
||||
202
skills/iterative-retrieval/SKILL.md
Normal file
202
skills/iterative-retrieval/SKILL.md
Normal file
@@ -0,0 +1,202 @@
|
||||
---
|
||||
name: iterative-retrieval
|
||||
description: Pattern for progressively refining context retrieval to solve the subagent context problem
|
||||
---
|
||||
|
||||
# Iterative Retrieval Pattern
|
||||
|
||||
Solves the "context problem" in multi-agent workflows where subagents don't know what context they need until they start working.
|
||||
|
||||
## The Problem
|
||||
|
||||
Subagents are spawned with limited context. They don't know:
|
||||
- Which files contain relevant code
|
||||
- What patterns exist in the codebase
|
||||
- What terminology the project uses
|
||||
|
||||
Standard approaches fail:
|
||||
- **Send everything**: Exceeds context limits
|
||||
- **Send nothing**: Agent lacks critical information
|
||||
- **Guess what's needed**: Often wrong
|
||||
|
||||
## The Solution: Iterative Retrieval
|
||||
|
||||
A 4-phase loop that progressively refines context:
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────┐
|
||||
│ │
|
||||
│ ┌──────────┐ ┌──────────┐ │
|
||||
│ │ DISPATCH │─────▶│ EVALUATE │ │
|
||||
│ └──────────┘ └──────────┘ │
|
||||
│ ▲ │ │
|
||||
│ │ ▼ │
|
||||
│ ┌──────────┐ ┌──────────┐ │
|
||||
│ │ LOOP │◀─────│ REFINE │ │
|
||||
│ └──────────┘ └──────────┘ │
|
||||
│ │
|
||||
│ Max 3 cycles, then proceed │
|
||||
└─────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Phase 1: DISPATCH
|
||||
|
||||
Initial broad query to gather candidate files:
|
||||
|
||||
```javascript
|
||||
// Start with high-level intent
|
||||
const initialQuery = {
|
||||
patterns: ['src/**/*.ts', 'lib/**/*.ts'],
|
||||
keywords: ['authentication', 'user', 'session'],
|
||||
excludes: ['*.test.ts', '*.spec.ts']
|
||||
};
|
||||
|
||||
// Dispatch to retrieval agent
|
||||
const candidates = await retrieveFiles(initialQuery);
|
||||
```
|
||||
|
||||
### Phase 2: EVALUATE
|
||||
|
||||
Assess retrieved content for relevance:
|
||||
|
||||
```javascript
|
||||
function evaluateRelevance(files, task) {
|
||||
return files.map(file => ({
|
||||
path: file.path,
|
||||
relevance: scoreRelevance(file.content, task),
|
||||
reason: explainRelevance(file.content, task),
|
||||
missingContext: identifyGaps(file.content, task)
|
||||
}));
|
||||
}
|
||||
```
|
||||
|
||||
Scoring criteria:
|
||||
- **High (0.8-1.0)**: Directly implements target functionality
|
||||
- **Medium (0.5-0.7)**: Contains related patterns or types
|
||||
- **Low (0.2-0.4)**: Tangentially related
|
||||
- **None (0-0.2)**: Not relevant, exclude
|
||||
|
||||
### Phase 3: REFINE
|
||||
|
||||
Update search criteria based on evaluation:
|
||||
|
||||
```javascript
|
||||
function refineQuery(evaluation, previousQuery) {
|
||||
return {
|
||||
// Add new patterns discovered in high-relevance files
|
||||
patterns: [...previousQuery.patterns, ...extractPatterns(evaluation)],
|
||||
|
||||
// Add terminology found in codebase
|
||||
keywords: [...previousQuery.keywords, ...extractKeywords(evaluation)],
|
||||
|
||||
// Exclude confirmed irrelevant paths
|
||||
excludes: [...previousQuery.excludes, ...evaluation
|
||||
.filter(e => e.relevance < 0.2)
|
||||
.map(e => e.path)
|
||||
],
|
||||
|
||||
// Target specific gaps
|
||||
focusAreas: evaluation
|
||||
.flatMap(e => e.missingContext)
|
||||
.filter(unique)
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 4: LOOP
|
||||
|
||||
Repeat with refined criteria (max 3 cycles):
|
||||
|
||||
```javascript
|
||||
async function iterativeRetrieve(task, maxCycles = 3) {
|
||||
let query = createInitialQuery(task);
|
||||
let bestContext = [];
|
||||
|
||||
for (let cycle = 0; cycle < maxCycles; cycle++) {
|
||||
const candidates = await retrieveFiles(query);
|
||||
const evaluation = evaluateRelevance(candidates, task);
|
||||
|
||||
// Check if we have sufficient context
|
||||
const highRelevance = evaluation.filter(e => e.relevance >= 0.7);
|
||||
if (highRelevance.length >= 3 && !hasCriticalGaps(evaluation)) {
|
||||
return highRelevance;
|
||||
}
|
||||
|
||||
// Refine and continue
|
||||
query = refineQuery(evaluation, query);
|
||||
bestContext = mergeContext(bestContext, highRelevance);
|
||||
}
|
||||
|
||||
return bestContext;
|
||||
}
|
||||
```
|
||||
|
||||
## Practical Examples
|
||||
|
||||
### Example 1: Bug Fix Context
|
||||
|
||||
```
|
||||
Task: "Fix the authentication token expiry bug"
|
||||
|
||||
Cycle 1:
|
||||
DISPATCH: Search for "token", "auth", "expiry" in src/**
|
||||
EVALUATE: Found auth.ts (0.9), tokens.ts (0.8), user.ts (0.3)
|
||||
REFINE: Add "refresh", "jwt" keywords; exclude user.ts
|
||||
|
||||
Cycle 2:
|
||||
DISPATCH: Search refined terms
|
||||
EVALUATE: Found session-manager.ts (0.95), jwt-utils.ts (0.85)
|
||||
REFINE: Sufficient context (2 high-relevance files)
|
||||
|
||||
Result: auth.ts, tokens.ts, session-manager.ts, jwt-utils.ts
|
||||
```
|
||||
|
||||
### Example 2: Feature Implementation
|
||||
|
||||
```
|
||||
Task: "Add rate limiting to API endpoints"
|
||||
|
||||
Cycle 1:
|
||||
DISPATCH: Search "rate", "limit", "api" in routes/**
|
||||
EVALUATE: No matches - codebase uses "throttle" terminology
|
||||
REFINE: Add "throttle", "middleware" keywords
|
||||
|
||||
Cycle 2:
|
||||
DISPATCH: Search refined terms
|
||||
EVALUATE: Found throttle.ts (0.9), middleware/index.ts (0.7)
|
||||
REFINE: Need router patterns
|
||||
|
||||
Cycle 3:
|
||||
DISPATCH: Search "router", "express" patterns
|
||||
EVALUATE: Found router-setup.ts (0.8)
|
||||
REFINE: Sufficient context
|
||||
|
||||
Result: throttle.ts, middleware/index.ts, router-setup.ts
|
||||
```
|
||||
|
||||
## Integration with Agents
|
||||
|
||||
Use in agent prompts:
|
||||
|
||||
```markdown
|
||||
When retrieving context for this task:
|
||||
1. Start with broad keyword search
|
||||
2. Evaluate each file's relevance (0-1 scale)
|
||||
3. Identify what context is still missing
|
||||
4. Refine search criteria and repeat (max 3 cycles)
|
||||
5. Return files with relevance >= 0.7
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Start broad, narrow progressively** - Don't over-specify initial queries
|
||||
2. **Learn codebase terminology** - First cycle often reveals naming conventions
|
||||
3. **Track what's missing** - Explicit gap identification drives refinement
|
||||
4. **Stop at "good enough"** - 3 high-relevance files beat 10 mediocre ones
|
||||
5. **Exclude confidently** - Low-relevance files won't become relevant
|
||||
|
||||
## Related
|
||||
|
||||
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Subagent orchestration section
|
||||
- `continuous-learning` skill - For patterns that improve over time
|
||||
- Agent definitions in `~/.claude/agents/`
|
||||
146
skills/postgres-patterns/SKILL.md
Normal file
146
skills/postgres-patterns/SKILL.md
Normal file
@@ -0,0 +1,146 @@
|
||||
---
|
||||
name: postgres-patterns
|
||||
description: PostgreSQL database patterns for query optimization, schema design, indexing, and security. Based on Supabase best practices.
|
||||
---
|
||||
|
||||
# PostgreSQL Patterns
|
||||
|
||||
Quick reference for PostgreSQL best practices. For detailed guidance, use the `database-reviewer` agent.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing SQL queries or migrations
|
||||
- Designing database schemas
|
||||
- Troubleshooting slow queries
|
||||
- Implementing Row Level Security
|
||||
- Setting up connection pooling
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Index Cheat Sheet
|
||||
|
||||
| Query Pattern | Index Type | Example |
|
||||
|--------------|------------|---------|
|
||||
| `WHERE col = value` | B-tree (default) | `CREATE INDEX idx ON t (col)` |
|
||||
| `WHERE col > value` | B-tree | `CREATE INDEX idx ON t (col)` |
|
||||
| `WHERE a = x AND b > y` | Composite | `CREATE INDEX idx ON t (a, b)` |
|
||||
| `WHERE jsonb @> '{}'` | GIN | `CREATE INDEX idx ON t USING gin (col)` |
|
||||
| `WHERE tsv @@ query` | GIN | `CREATE INDEX idx ON t USING gin (col)` |
|
||||
| Time-series ranges | BRIN | `CREATE INDEX idx ON t USING brin (col)` |
|
||||
|
||||
### Data Type Quick Reference
|
||||
|
||||
| Use Case | Correct Type | Avoid |
|
||||
|----------|-------------|-------|
|
||||
| IDs | `bigint` | `int`, random UUID |
|
||||
| Strings | `text` | `varchar(255)` |
|
||||
| Timestamps | `timestamptz` | `timestamp` |
|
||||
| Money | `numeric(10,2)` | `float` |
|
||||
| Flags | `boolean` | `varchar`, `int` |
|
||||
|
||||
### Common Patterns
|
||||
|
||||
**Composite Index Order:**
|
||||
```sql
|
||||
-- Equality columns first, then range columns
|
||||
CREATE INDEX idx ON orders (status, created_at);
|
||||
-- Works for: WHERE status = 'pending' AND created_at > '2024-01-01'
|
||||
```
|
||||
|
||||
**Covering Index:**
|
||||
```sql
|
||||
CREATE INDEX idx ON users (email) INCLUDE (name, created_at);
|
||||
-- Avoids table lookup for SELECT email, name, created_at
|
||||
```
|
||||
|
||||
**Partial Index:**
|
||||
```sql
|
||||
CREATE INDEX idx ON users (email) WHERE deleted_at IS NULL;
|
||||
-- Smaller index, only includes active users
|
||||
```
|
||||
|
||||
**RLS Policy (Optimized):**
|
||||
```sql
|
||||
CREATE POLICY policy ON orders
|
||||
USING ((SELECT auth.uid()) = user_id); -- Wrap in SELECT!
|
||||
```
|
||||
|
||||
**UPSERT:**
|
||||
```sql
|
||||
INSERT INTO settings (user_id, key, value)
|
||||
VALUES (123, 'theme', 'dark')
|
||||
ON CONFLICT (user_id, key)
|
||||
DO UPDATE SET value = EXCLUDED.value;
|
||||
```
|
||||
|
||||
**Cursor Pagination:**
|
||||
```sql
|
||||
SELECT * FROM products WHERE id > $last_id ORDER BY id LIMIT 20;
|
||||
-- O(1) vs OFFSET which is O(n)
|
||||
```
|
||||
|
||||
**Queue Processing:**
|
||||
```sql
|
||||
UPDATE jobs SET status = 'processing'
|
||||
WHERE id = (
|
||||
SELECT id FROM jobs WHERE status = 'pending'
|
||||
ORDER BY created_at LIMIT 1
|
||||
FOR UPDATE SKIP LOCKED
|
||||
) RETURNING *;
|
||||
```
|
||||
|
||||
### Anti-Pattern Detection
|
||||
|
||||
```sql
|
||||
-- Find unindexed foreign keys
|
||||
SELECT conrelid::regclass, a.attname
|
||||
FROM pg_constraint c
|
||||
JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = ANY(c.conkey)
|
||||
WHERE c.contype = 'f'
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM pg_index i
|
||||
WHERE i.indrelid = c.conrelid AND a.attnum = ANY(i.indkey)
|
||||
);
|
||||
|
||||
-- Find slow queries
|
||||
SELECT query, mean_exec_time, calls
|
||||
FROM pg_stat_statements
|
||||
WHERE mean_exec_time > 100
|
||||
ORDER BY mean_exec_time DESC;
|
||||
|
||||
-- Check table bloat
|
||||
SELECT relname, n_dead_tup, last_vacuum
|
||||
FROM pg_stat_user_tables
|
||||
WHERE n_dead_tup > 1000
|
||||
ORDER BY n_dead_tup DESC;
|
||||
```
|
||||
|
||||
### Configuration Template
|
||||
|
||||
```sql
|
||||
-- Connection limits (adjust for RAM)
|
||||
ALTER SYSTEM SET max_connections = 100;
|
||||
ALTER SYSTEM SET work_mem = '8MB';
|
||||
|
||||
-- Timeouts
|
||||
ALTER SYSTEM SET idle_in_transaction_session_timeout = '30s';
|
||||
ALTER SYSTEM SET statement_timeout = '30s';
|
||||
|
||||
-- Monitoring
|
||||
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
|
||||
|
||||
-- Security defaults
|
||||
REVOKE ALL ON SCHEMA public FROM public;
|
||||
|
||||
SELECT pg_reload_conf();
|
||||
```
|
||||
|
||||
## Related
|
||||
|
||||
- Agent: `database-reviewer` - Full database review workflow
|
||||
- Skill: `clickhouse-io` - ClickHouse analytics patterns
|
||||
- Skill: `backend-patterns` - API and backend patterns
|
||||
|
||||
---
|
||||
|
||||
*Based on [Supabase Agent Skills](https://github.com/supabase/agent-skills) (MIT License)*
|
||||
361
skills/security-review/cloud-infrastructure-security.md
Normal file
361
skills/security-review/cloud-infrastructure-security.md
Normal file
@@ -0,0 +1,361 @@
|
||||
| name | description |
|
||||
|------|-------------|
|
||||
| cloud-infrastructure-security | Use this skill when deploying to cloud platforms, configuring infrastructure, managing IAM policies, setting up logging/monitoring, or implementing CI/CD pipelines. Provides cloud security checklist aligned with best practices. |
|
||||
|
||||
# Cloud & Infrastructure Security Skill
|
||||
|
||||
This skill ensures cloud infrastructure, CI/CD pipelines, and deployment configurations follow security best practices and comply with industry standards.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Deploying applications to cloud platforms (AWS, Vercel, Railway, Cloudflare)
|
||||
- Configuring IAM roles and permissions
|
||||
- Setting up CI/CD pipelines
|
||||
- Implementing infrastructure as code (Terraform, CloudFormation)
|
||||
- Configuring logging and monitoring
|
||||
- Managing secrets in cloud environments
|
||||
- Setting up CDN and edge security
|
||||
- Implementing disaster recovery and backup strategies
|
||||
|
||||
## Cloud Security Checklist
|
||||
|
||||
### 1. IAM & Access Control
|
||||
|
||||
#### Principle of Least Privilege
|
||||
|
||||
```yaml
|
||||
# ✅ CORRECT: Minimal permissions
|
||||
iam_role:
|
||||
permissions:
|
||||
- s3:GetObject # Only read access
|
||||
- s3:ListBucket
|
||||
resources:
|
||||
- arn:aws:s3:::my-bucket/* # Specific bucket only
|
||||
|
||||
# ❌ WRONG: Overly broad permissions
|
||||
iam_role:
|
||||
permissions:
|
||||
- s3:* # All S3 actions
|
||||
resources:
|
||||
- "*" # All resources
|
||||
```
|
||||
|
||||
#### Multi-Factor Authentication (MFA)
|
||||
|
||||
```bash
|
||||
# ALWAYS enable MFA for root/admin accounts
|
||||
aws iam enable-mfa-device \
|
||||
--user-name admin \
|
||||
--serial-number arn:aws:iam::123456789:mfa/admin \
|
||||
--authentication-code1 123456 \
|
||||
--authentication-code2 789012
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] No root account usage in production
|
||||
- [ ] MFA enabled for all privileged accounts
|
||||
- [ ] Service accounts use roles, not long-lived credentials
|
||||
- [ ] IAM policies follow least privilege
|
||||
- [ ] Regular access reviews conducted
|
||||
- [ ] Unused credentials rotated or removed
|
||||
|
||||
### 2. Secrets Management
|
||||
|
||||
#### Cloud Secrets Managers
|
||||
|
||||
```typescript
|
||||
// ✅ CORRECT: Use cloud secrets manager
|
||||
import { SecretsManager } from '@aws-sdk/client-secrets-manager';
|
||||
|
||||
const client = new SecretsManager({ region: 'us-east-1' });
|
||||
const secret = await client.getSecretValue({ SecretId: 'prod/api-key' });
|
||||
const apiKey = JSON.parse(secret.SecretString).key;
|
||||
|
||||
// ❌ WRONG: Hardcoded or in environment variables only
|
||||
const apiKey = process.env.API_KEY; // Not rotated, not audited
|
||||
```
|
||||
|
||||
#### Secrets Rotation
|
||||
|
||||
```bash
|
||||
# Set up automatic rotation for database credentials
|
||||
aws secretsmanager rotate-secret \
|
||||
--secret-id prod/db-password \
|
||||
--rotation-lambda-arn arn:aws:lambda:region:account:function:rotate \
|
||||
--rotation-rules AutomaticallyAfterDays=30
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] All secrets stored in cloud secrets manager (AWS Secrets Manager, Vercel Secrets)
|
||||
- [ ] Automatic rotation enabled for database credentials
|
||||
- [ ] API keys rotated at least quarterly
|
||||
- [ ] No secrets in code, logs, or error messages
|
||||
- [ ] Audit logging enabled for secret access
|
||||
|
||||
### 3. Network Security
|
||||
|
||||
#### VPC and Firewall Configuration
|
||||
|
||||
```terraform
|
||||
# ✅ CORRECT: Restricted security group
|
||||
resource "aws_security_group" "app" {
|
||||
name = "app-sg"
|
||||
|
||||
ingress {
|
||||
from_port = 443
|
||||
to_port = 443
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["10.0.0.0/16"] # Internal VPC only
|
||||
}
|
||||
|
||||
egress {
|
||||
from_port = 443
|
||||
to_port = 443
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"] # Only HTTPS outbound
|
||||
}
|
||||
}
|
||||
|
||||
# ❌ WRONG: Open to the internet
|
||||
resource "aws_security_group" "bad" {
|
||||
ingress {
|
||||
from_port = 0
|
||||
to_port = 65535
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"] # All ports, all IPs!
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] Database not publicly accessible
|
||||
- [ ] SSH/RDP ports restricted to VPN/bastion only
|
||||
- [ ] Security groups follow least privilege
|
||||
- [ ] Network ACLs configured
|
||||
- [ ] VPC flow logs enabled
|
||||
|
||||
### 4. Logging & Monitoring
|
||||
|
||||
#### CloudWatch/Logging Configuration
|
||||
|
||||
```typescript
|
||||
// ✅ CORRECT: Comprehensive logging
|
||||
import { CloudWatchLogsClient, CreateLogStreamCommand } from '@aws-sdk/client-cloudwatch-logs';
|
||||
|
||||
const logSecurityEvent = async (event: SecurityEvent) => {
|
||||
await cloudwatch.putLogEvents({
|
||||
logGroupName: '/aws/security/events',
|
||||
logStreamName: 'authentication',
|
||||
logEvents: [{
|
||||
timestamp: Date.now(),
|
||||
message: JSON.stringify({
|
||||
type: event.type,
|
||||
userId: event.userId,
|
||||
ip: event.ip,
|
||||
result: event.result,
|
||||
// Never log sensitive data
|
||||
})
|
||||
}]
|
||||
});
|
||||
};
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] CloudWatch/logging enabled for all services
|
||||
- [ ] Failed authentication attempts logged
|
||||
- [ ] Admin actions audited
|
||||
- [ ] Log retention configured (90+ days for compliance)
|
||||
- [ ] Alerts configured for suspicious activity
|
||||
- [ ] Logs centralized and tamper-proof
|
||||
|
||||
### 5. CI/CD Pipeline Security
|
||||
|
||||
#### Secure Pipeline Configuration
|
||||
|
||||
```yaml
|
||||
# ✅ CORRECT: Secure GitHub Actions workflow
|
||||
name: Deploy
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read # Minimal permissions
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
# Scan for secrets
|
||||
- name: Secret scanning
|
||||
uses: trufflesecurity/trufflehog@main
|
||||
|
||||
# Dependency audit
|
||||
- name: Audit dependencies
|
||||
run: npm audit --audit-level=high
|
||||
|
||||
# Use OIDC, not long-lived tokens
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::123456789:role/GitHubActionsRole
|
||||
aws-region: us-east-1
|
||||
```
|
||||
|
||||
#### Supply Chain Security
|
||||
|
||||
```json
|
||||
// package.json - Use lock files and integrity checks
|
||||
{
|
||||
"scripts": {
|
||||
"install": "npm ci", // Use ci for reproducible builds
|
||||
"audit": "npm audit --audit-level=moderate",
|
||||
"check": "npm outdated"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] OIDC used instead of long-lived credentials
|
||||
- [ ] Secrets scanning in pipeline
|
||||
- [ ] Dependency vulnerability scanning
|
||||
- [ ] Container image scanning (if applicable)
|
||||
- [ ] Branch protection rules enforced
|
||||
- [ ] Code review required before merge
|
||||
- [ ] Signed commits enforced
|
||||
|
||||
### 6. Cloudflare & CDN Security
|
||||
|
||||
#### Cloudflare Security Configuration
|
||||
|
||||
```typescript
|
||||
// ✅ CORRECT: Cloudflare Workers with security headers
|
||||
export default {
|
||||
async fetch(request: Request): Promise<Response> {
|
||||
const response = await fetch(request);
|
||||
|
||||
// Add security headers
|
||||
const headers = new Headers(response.headers);
|
||||
headers.set('X-Frame-Options', 'DENY');
|
||||
headers.set('X-Content-Type-Options', 'nosniff');
|
||||
headers.set('Referrer-Policy', 'strict-origin-when-cross-origin');
|
||||
headers.set('Permissions-Policy', 'geolocation=(), microphone=()');
|
||||
|
||||
return new Response(response.body, {
|
||||
status: response.status,
|
||||
headers
|
||||
});
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
#### WAF Rules
|
||||
|
||||
```bash
|
||||
# Enable Cloudflare WAF managed rules
|
||||
# - OWASP Core Ruleset
|
||||
# - Cloudflare Managed Ruleset
|
||||
# - Rate limiting rules
|
||||
# - Bot protection
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] WAF enabled with OWASP rules
|
||||
- [ ] Rate limiting configured
|
||||
- [ ] Bot protection active
|
||||
- [ ] DDoS protection enabled
|
||||
- [ ] Security headers configured
|
||||
- [ ] SSL/TLS strict mode enabled
|
||||
|
||||
### 7. Backup & Disaster Recovery
|
||||
|
||||
#### Automated Backups
|
||||
|
||||
```terraform
|
||||
# ✅ CORRECT: Automated RDS backups
|
||||
resource "aws_db_instance" "main" {
|
||||
allocated_storage = 20
|
||||
engine = "postgres"
|
||||
|
||||
backup_retention_period = 30 # 30 days retention
|
||||
backup_window = "03:00-04:00"
|
||||
maintenance_window = "mon:04:00-mon:05:00"
|
||||
|
||||
enabled_cloudwatch_logs_exports = ["postgresql"]
|
||||
|
||||
deletion_protection = true # Prevent accidental deletion
|
||||
}
|
||||
```
|
||||
|
||||
#### Verification Steps
|
||||
|
||||
- [ ] Automated daily backups configured
|
||||
- [ ] Backup retention meets compliance requirements
|
||||
- [ ] Point-in-time recovery enabled
|
||||
- [ ] Backup testing performed quarterly
|
||||
- [ ] Disaster recovery plan documented
|
||||
- [ ] RPO and RTO defined and tested
|
||||
|
||||
## Pre-Deployment Cloud Security Checklist
|
||||
|
||||
Before ANY production cloud deployment:
|
||||
|
||||
- [ ] **IAM**: Root account not used, MFA enabled, least privilege policies
|
||||
- [ ] **Secrets**: All secrets in cloud secrets manager with rotation
|
||||
- [ ] **Network**: Security groups restricted, no public databases
|
||||
- [ ] **Logging**: CloudWatch/logging enabled with retention
|
||||
- [ ] **Monitoring**: Alerts configured for anomalies
|
||||
- [ ] **CI/CD**: OIDC auth, secrets scanning, dependency audits
|
||||
- [ ] **CDN/WAF**: Cloudflare WAF enabled with OWASP rules
|
||||
- [ ] **Encryption**: Data encrypted at rest and in transit
|
||||
- [ ] **Backups**: Automated backups with tested recovery
|
||||
- [ ] **Compliance**: GDPR/HIPAA requirements met (if applicable)
|
||||
- [ ] **Documentation**: Infrastructure documented, runbooks created
|
||||
- [ ] **Incident Response**: Security incident plan in place
|
||||
|
||||
## Common Cloud Security Misconfigurations
|
||||
|
||||
### S3 Bucket Exposure
|
||||
|
||||
```bash
|
||||
# ❌ WRONG: Public bucket
|
||||
aws s3api put-bucket-acl --bucket my-bucket --acl public-read
|
||||
|
||||
# ✅ CORRECT: Private bucket with specific access
|
||||
aws s3api put-bucket-acl --bucket my-bucket --acl private
|
||||
aws s3api put-bucket-policy --bucket my-bucket --policy file://policy.json
|
||||
```
|
||||
|
||||
### RDS Public Access
|
||||
|
||||
```terraform
|
||||
# ❌ WRONG
|
||||
resource "aws_db_instance" "bad" {
|
||||
publicly_accessible = true # NEVER do this!
|
||||
}
|
||||
|
||||
# ✅ CORRECT
|
||||
resource "aws_db_instance" "good" {
|
||||
publicly_accessible = false
|
||||
vpc_security_group_ids = [aws_security_group.db.id]
|
||||
}
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
- [AWS Security Best Practices](https://aws.amazon.com/security/best-practices/)
|
||||
- [CIS AWS Foundations Benchmark](https://www.cisecurity.org/benchmark/amazon_web_services)
|
||||
- [Cloudflare Security Documentation](https://developers.cloudflare.com/security/)
|
||||
- [OWASP Cloud Security](https://owasp.org/www-project-cloud-security/)
|
||||
- [Terraform Security Best Practices](https://www.terraform.io/docs/cloud/guides/recommended-practices/)
|
||||
|
||||
**Remember**: Cloud misconfigurations are the leading cause of data breaches. A single exposed S3 bucket or overly permissive IAM policy can compromise your entire infrastructure. Always follow the principle of least privilege and defense in depth.
|
||||
337
tests/hooks/hooks.test.js
Normal file
337
tests/hooks/hooks.test.js
Normal file
@@ -0,0 +1,337 @@
|
||||
/**
|
||||
* Tests for hook scripts
|
||||
*
|
||||
* Run with: node tests/hooks/hooks.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const { execSync, spawn } = require('child_process');
|
||||
|
||||
// Test helper
|
||||
/**
 * Run a synchronous test case and report the outcome.
 *
 * Prints a "✓ name" line on success, or a "✗ name" line plus the error
 * message on failure.
 *
 * @param {string} name - Human-readable test name.
 * @param {Function} fn - Test body; a thrown error marks failure.
 * @returns {boolean} true when fn ran without throwing.
 */
function test(name, fn) {
  try {
    fn();
  } catch (err) {
    console.log(`  ✗ ${name}`);
    console.log(`    Error: ${err.message}`);
    return false;
  }
  console.log(`  ✓ ${name}`);
  return true;
}
|
||||
|
||||
// Async test helper
|
||||
/**
 * Run an asynchronous test case and report the outcome.
 *
 * Awaits the body; prints a "✓ name" line on success, or a "✗ name"
 * line plus the error message on failure.
 *
 * @param {string} name - Human-readable test name.
 * @param {Function} fn - Async test body; a rejection marks failure.
 * @returns {Promise<boolean>} true when fn resolved without throwing.
 */
async function asyncTest(name, fn) {
  let failure = null;
  try {
    await fn();
  } catch (err) {
    failure = err;
  }
  if (failure === null) {
    console.log(`  ✓ ${name}`);
    return true;
  }
  console.log(`  ✗ ${name}`);
  console.log(`    Error: ${failure.message}`);
  return false;
}
|
||||
|
||||
// Run a script and capture output
|
||||
/**
 * Execute a Node script as a child process and capture its output.
 *
 * @param {string} scriptPath - Path to the script to run with `node`.
 * @param {string} [input=''] - Text written to the child's stdin before it is closed.
 * @param {object} [env={}] - Extra environment variables merged over process.env.
 * @returns {Promise<{code: number, stdout: string, stderr: string}>} Exit code
 *   and the accumulated stdout/stderr text.
 */
function runScript(scriptPath, input = '', env = {}) {
  return new Promise((resolve, reject) => {
    const child = spawn('node', [scriptPath], {
      env: { ...process.env, ...env },
      stdio: ['pipe', 'pipe', 'pipe'],
    });

    const outChunks = [];
    const errChunks = [];
    child.stdout.on('data', (chunk) => outChunks.push(chunk));
    child.stderr.on('data', (chunk) => errChunks.push(chunk));

    // Feed stdin (if any) and always close it so the child can exit.
    if (input) {
      child.stdin.write(input);
    }
    child.stdin.end();

    child.on('error', reject);
    child.on('close', (code) => {
      resolve({ code, stdout: outChunks.join(''), stderr: errChunks.join('') });
    });
  });
}
|
||||
|
||||
// Create a temporary test directory
|
||||
/**
 * Create a fresh, empty temporary directory for a test run.
 *
 * Uses fs.mkdtempSync (random suffix) instead of a Date.now()-based name,
 * so two directories created within the same millisecond can never collide.
 *
 * @returns {string} Absolute path of the newly created directory.
 */
function createTestDir() {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'hooks-test-'));
}
|
||||
|
||||
// Clean up test directory
|
||||
/**
 * Remove a test directory and everything inside it.
 *
 * force: true makes this a no-op when the path is already gone, so it is
 * safe to call from cleanup paths that may run more than once.
 *
 * @param {string} testDir - Directory previously created by createTestDir().
 */
function cleanupTestDir(testDir) {
  const removal = { recursive: true, force: true };
  fs.rmSync(testDir, removal);
}
|
||||
|
||||
// Test suite

/**
 * Integration tests for the hook scripts in scripts/hooks/, plus schema
 * checks on hooks/hooks.json.
 *
 * Relies on helpers defined earlier in this file:
 *   - asyncTest(name, fn) / test(name, fn): run a test body, print pass/fail,
 *     return a boolean (assumed from usage -- confirm against their definitions)
 *   - runScript(scriptPath, stdin, env): spawns a hook script as a child
 *     process; usage here implies it resolves to { code, stdout, stderr }
 *   - createTestDir() / cleanupTestDir(): temp-dir lifecycle
 *
 * NOTE(review): these are integration tests with machine-global side effects —
 * they write real files under ~/.claude/sessions and os.tmpdir().
 *
 * Exits the process with code 1 if any test failed, 0 otherwise.
 */
async function runTests() {
  console.log('\n=== Testing Hook Scripts ===\n');

  // Tally of test outcomes; printed in the summary at the end.
  let passed = 0;
  let failed = 0;

  // Hook scripts live two levels up from this test file.
  const scriptsDir = path.join(__dirname, '..', '..', 'scripts', 'hooks');

  // session-start.js tests
  console.log('session-start.js:');

  if (await asyncTest('runs without error', async () => {
    const result = await runScript(path.join(scriptsDir, 'session-start.js'));
    assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`);
  })) passed++; else failed++;

  if (await asyncTest('outputs session info to stderr', async () => {
    const result = await runScript(path.join(scriptsDir, 'session-start.js'));
    // Either marker is accepted; the script's exact banner text may vary.
    assert.ok(
      result.stderr.includes('[SessionStart]') ||
      result.stderr.includes('Package manager'),
      'Should output session info'
    );
  })) passed++; else failed++;

  // session-end.js tests
  console.log('\nsession-end.js:');

  if (await asyncTest('runs without error', async () => {
    const result = await runScript(path.join(scriptsDir, 'session-end.js'));
    assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`);
  })) passed++; else failed++;

  if (await asyncTest('creates or updates session file', async () => {
    // Run the script
    await runScript(path.join(scriptsDir, 'session-end.js'));

    // Check if session file was created (default session ID)
    // Use local time to match the script's getDateString() function
    const sessionsDir = path.join(os.homedir(), '.claude', 'sessions');
    const now = new Date();
    const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
    const sessionFile = path.join(sessionsDir, `${today}-default-session.tmp`);

    // NOTE(review): can flake if the local date rolls over between the script
    // run and this check (midnight boundary).
    assert.ok(fs.existsSync(sessionFile), 'Session file should exist');
  })) passed++; else failed++;

  if (await asyncTest('includes session ID in filename', async () => {
    const testSessionId = 'test-session-abc12345';
    const expectedShortId = 'abc12345'; // Last 8 chars

    // Run with custom session ID
    await runScript(path.join(scriptsDir, 'session-end.js'), '', {
      CLAUDE_SESSION_ID: testSessionId
    });

    // Check if session file was created with session ID
    // Use local time to match the script's getDateString() function
    const sessionsDir = path.join(os.homedir(), '.claude', 'sessions');
    const now = new Date();
    const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
    const sessionFile = path.join(sessionsDir, `${today}-${expectedShortId}-session.tmp`);

    assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`);
  })) passed++; else failed++;

  // pre-compact.js tests
  console.log('\npre-compact.js:');

  if (await asyncTest('runs without error', async () => {
    const result = await runScript(path.join(scriptsDir, 'pre-compact.js'));
    assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`);
  })) passed++; else failed++;

  if (await asyncTest('outputs PreCompact message', async () => {
    const result = await runScript(path.join(scriptsDir, 'pre-compact.js'));
    assert.ok(result.stderr.includes('[PreCompact]'), 'Should output PreCompact message');
  })) passed++; else failed++;

  if (await asyncTest('creates compaction log', async () => {
    await runScript(path.join(scriptsDir, 'pre-compact.js'));
    const logFile = path.join(os.homedir(), '.claude', 'sessions', 'compaction-log.txt');
    assert.ok(fs.existsSync(logFile), 'Compaction log should exist');
  })) passed++; else failed++;

  // suggest-compact.js tests
  console.log('\nsuggest-compact.js:');

  if (await asyncTest('runs without error', async () => {
    // Unique session ID so the per-session tool counter starts fresh.
    const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', {
      CLAUDE_SESSION_ID: 'test-session-' + Date.now()
    });
    assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`);
  })) passed++; else failed++;

  if (await asyncTest('increments counter on each call', async () => {
    const sessionId = 'test-counter-' + Date.now();

    // Run multiple times
    for (let i = 0; i < 3; i++) {
      await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', {
        CLAUDE_SESSION_ID: sessionId
      });
    }

    // Check counter file
    const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`);
    const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
    assert.strictEqual(count, 3, `Counter should be 3, got ${count}`);

    // Cleanup
    // NOTE(review): skipped if the assertion above throws — consider try/finally.
    fs.unlinkSync(counterFile);
  })) passed++; else failed++;

  if (await asyncTest('suggests compact at threshold', async () => {
    const sessionId = 'test-threshold-' + Date.now();
    const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`);

    // Set counter to threshold - 1
    // (the next run increments 49 -> 50, crossing COMPACT_THRESHOLD)
    fs.writeFileSync(counterFile, '49');

    const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', {
      CLAUDE_SESSION_ID: sessionId,
      COMPACT_THRESHOLD: '50'
    });

    assert.ok(
      result.stderr.includes('50 tool calls reached'),
      'Should suggest compact at threshold'
    );

    // Cleanup
    fs.unlinkSync(counterFile);
  })) passed++; else failed++;

  // evaluate-session.js tests
  console.log('\nevaluate-session.js:');

  if (await asyncTest('runs without error when no transcript', async () => {
    const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'));
    assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`);
  })) passed++; else failed++;

  if (await asyncTest('skips short sessions', async () => {
    const testDir = createTestDir();
    const transcriptPath = path.join(testDir, 'transcript.jsonl');

    // Create a short transcript (less than 10 user messages)
    const transcript = Array(5).fill('{"type":"user","content":"test"}\n').join('');
    fs.writeFileSync(transcriptPath, transcript);

    const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), '', {
      CLAUDE_TRANSCRIPT_PATH: transcriptPath
    });

    assert.ok(
      result.stderr.includes('Session too short'),
      'Should indicate session is too short'
    );

    // NOTE(review): not reached if the assertion throws; the temp dir leaks.
    cleanupTestDir(testDir);
  })) passed++; else failed++;

  if (await asyncTest('processes sessions with enough messages', async () => {
    const testDir = createTestDir();
    const transcriptPath = path.join(testDir, 'transcript.jsonl');

    // Create a longer transcript (more than 10 user messages)
    const transcript = Array(15).fill('{"type":"user","content":"test"}\n').join('');
    fs.writeFileSync(transcriptPath, transcript);

    const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), '', {
      CLAUDE_TRANSCRIPT_PATH: transcriptPath
    });

    assert.ok(
      result.stderr.includes('15 messages'),
      'Should report message count'
    );

    cleanupTestDir(testDir);
  })) passed++; else failed++;

  // hooks.json validation
  console.log('\nhooks.json Validation:');

  if (test('hooks.json is valid JSON', () => {
    const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
    const content = fs.readFileSync(hooksPath, 'utf8');
    JSON.parse(content); // Will throw if invalid
  })) passed++; else failed++;

  if (test('hooks.json has required event types', () => {
    const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
    const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));

    assert.ok(hooks.hooks.PreToolUse, 'Should have PreToolUse hooks');
    assert.ok(hooks.hooks.PostToolUse, 'Should have PostToolUse hooks');
    assert.ok(hooks.hooks.SessionStart, 'Should have SessionStart hooks');
    assert.ok(hooks.hooks.Stop, 'Should have Stop hooks');
    assert.ok(hooks.hooks.PreCompact, 'Should have PreCompact hooks');
  })) passed++; else failed++;

  if (test('all hook commands use node', () => {
    const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
    const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));

    // Walk every entry's hooks[] and assert command-type hooks invoke node.
    const checkHooks = (hookArray) => {
      for (const entry of hookArray) {
        for (const hook of entry.hooks) {
          if (hook.type === 'command') {
            assert.ok(
              hook.command.startsWith('node'),
              `Hook command should start with 'node': ${hook.command.substring(0, 50)}...`
            );
          }
        }
      }
    };

    for (const [eventType, hookArray] of Object.entries(hooks.hooks)) {
      checkHooks(hookArray);
    }
  })) passed++; else failed++;

  if (test('script references use CLAUDE_PLUGIN_ROOT variable', () => {
    const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
    const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));

    const checkHooks = (hookArray) => {
      for (const entry of hookArray) {
        for (const hook of entry.hooks) {
          if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) {
            // Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command
            const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}');
            assert.ok(
              hasPluginRoot,
              `Script paths should use CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...`
            );
          }
        }
      }
    };

    for (const [eventType, hookArray] of Object.entries(hooks.hooks)) {
      checkHooks(hookArray);
    }
  })) passed++; else failed++;

  // Summary
  console.log('\n=== Test Results ===');
  console.log(`Passed: ${passed}`);
  console.log(`Failed: ${failed}`);
  console.log(`Total: ${passed + failed}\n`);

  // Non-zero exit so CI surfaces failures.
  process.exit(failed > 0 ? 1 : 0);
}

runTests();
|
||||
352
tests/lib/package-manager.test.js
Normal file
352
tests/lib/package-manager.test.js
Normal file
@@ -0,0 +1,352 @@
|
||||
/**
|
||||
* Tests for scripts/lib/package-manager.js
|
||||
*
|
||||
* Run with: node tests/lib/package-manager.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
|
||||
// Import the modules
|
||||
const pm = require('../../scripts/lib/package-manager');
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
|
||||
// Test helper

/**
 * Run a single synchronous test case and report its outcome.
 *
 * @param {string} name - Human-readable test description.
 * @param {Function} fn - Test body; throwing (e.g. a failed assertion) marks failure.
 * @returns {boolean} true when fn completed without throwing, false otherwise.
 */
function test(name, fn) {
  let failure = null;
  try {
    fn();
  } catch (err) {
    failure = err;
  }
  if (failure === null) {
    console.log(` ✓ ${name}`);
    return true;
  }
  console.log(` ✗ ${name}`);
  console.log(` Error: ${failure.message}`);
  return false;
}
|
||||
|
||||
// Create a temporary test directory

/**
 * Create a unique temporary directory for a single test case.
 *
 * Uses fs.mkdtempSync (which appends random characters to the prefix) instead
 * of a plain `pm-test-${Date.now()}` name: two calls within the same
 * millisecond would otherwise collide on the same path, letting one test's
 * cleanup delete another test's fixtures.
 *
 * @returns {string} Absolute path of the newly created directory.
 */
function createTestDir() {
  return fs.mkdtempSync(path.join(os.tmpdir(), 'pm-test-'));
}
|
||||
|
||||
// Clean up test directory

/**
 * Delete a test directory tree created by createTestDir().
 * With `force: true` this silently succeeds when the path no longer exists,
 * so finally-blocks can call it unconditionally.
 *
 * @param {string} testDir - Path of the directory to remove.
 */
function cleanupTestDir(testDir) {
  fs.rmSync(testDir, {
    recursive: true,
    force: true,
  });
}
|
||||
|
||||
// Test suite

/**
 * Unit tests for scripts/lib/package-manager.js (imported above as `pm`).
 *
 * Covers: the PACKAGE_MANAGERS table, lock-file and package.json detection,
 * availability probing, the resolved getPackageManager() result, the
 * run/exec command builders, the command-matching regex pattern, and the
 * selection prompt text.
 *
 * Tests that depend on the active package manager pin it through the
 * CLAUDE_PACKAGE_MANAGER environment variable and restore the previous
 * value in a finally block so test order does not matter.
 *
 * Exits the process with code 1 if any test failed, 0 otherwise.
 */
function runTests() {
  console.log('\n=== Testing package-manager.js ===\n');

  // Tally of test outcomes; printed in the summary at the end.
  let passed = 0;
  let failed = 0;

  // PACKAGE_MANAGERS constant tests
  console.log('PACKAGE_MANAGERS Constant:');

  if (test('PACKAGE_MANAGERS has all expected managers', () => {
    assert.ok(pm.PACKAGE_MANAGERS.npm, 'Should have npm');
    assert.ok(pm.PACKAGE_MANAGERS.pnpm, 'Should have pnpm');
    assert.ok(pm.PACKAGE_MANAGERS.yarn, 'Should have yarn');
    assert.ok(pm.PACKAGE_MANAGERS.bun, 'Should have bun');
  })) passed++; else failed++;

  if (test('Each manager has required properties', () => {
    const requiredProps = ['name', 'lockFile', 'installCmd', 'runCmd', 'execCmd', 'testCmd', 'buildCmd', 'devCmd'];
    for (const [name, config] of Object.entries(pm.PACKAGE_MANAGERS)) {
      for (const prop of requiredProps) {
        // assert.ok also rejects empty-string values, not just missing keys.
        assert.ok(config[prop], `${name} should have ${prop}`);
      }
    }
  })) passed++; else failed++;

  // detectFromLockFile tests
  console.log('\ndetectFromLockFile:');

  if (test('detects npm from package-lock.json', () => {
    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'package-lock.json'), '{}');
      const result = pm.detectFromLockFile(testDir);
      assert.strictEqual(result, 'npm');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('detects pnpm from pnpm-lock.yaml', () => {
    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'pnpm-lock.yaml'), '');
      const result = pm.detectFromLockFile(testDir);
      assert.strictEqual(result, 'pnpm');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('detects yarn from yarn.lock', () => {
    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'yarn.lock'), '');
      const result = pm.detectFromLockFile(testDir);
      assert.strictEqual(result, 'yarn');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('detects bun from bun.lockb', () => {
    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'bun.lockb'), '');
      const result = pm.detectFromLockFile(testDir);
      assert.strictEqual(result, 'bun');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('returns null when no lock file exists', () => {
    const testDir = createTestDir();
    try {
      const result = pm.detectFromLockFile(testDir);
      assert.strictEqual(result, null);
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('respects detection priority (pnpm > npm)', () => {
    const testDir = createTestDir();
    try {
      // Create both lock files
      fs.writeFileSync(path.join(testDir, 'package-lock.json'), '{}');
      fs.writeFileSync(path.join(testDir, 'pnpm-lock.yaml'), '');
      const result = pm.detectFromLockFile(testDir);
      // pnpm has higher priority in DETECTION_PRIORITY
      assert.strictEqual(result, 'pnpm');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  // detectFromPackageJson tests
  console.log('\ndetectFromPackageJson:');

  if (test('detects package manager from packageManager field', () => {
    const testDir = createTestDir();
    try {
      // Corepack-style "name@version" value; only the name should be returned.
      fs.writeFileSync(path.join(testDir, 'package.json'), JSON.stringify({
        name: 'test',
        packageManager: 'pnpm@8.6.0'
      }));
      const result = pm.detectFromPackageJson(testDir);
      assert.strictEqual(result, 'pnpm');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('handles packageManager without version', () => {
    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'package.json'), JSON.stringify({
        name: 'test',
        packageManager: 'yarn'
      }));
      const result = pm.detectFromPackageJson(testDir);
      assert.strictEqual(result, 'yarn');
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('returns null when no packageManager field', () => {
    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'package.json'), JSON.stringify({
        name: 'test'
      }));
      const result = pm.detectFromPackageJson(testDir);
      assert.strictEqual(result, null);
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  if (test('returns null when no package.json exists', () => {
    const testDir = createTestDir();
    try {
      const result = pm.detectFromPackageJson(testDir);
      assert.strictEqual(result, null);
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;

  // getAvailablePackageManagers tests
  console.log('\ngetAvailablePackageManagers:');

  if (test('returns array of available managers', () => {
    const available = pm.getAvailablePackageManagers();
    assert.ok(Array.isArray(available), 'Should return array');
    // npm should always be available with Node.js
    assert.ok(available.includes('npm'), 'npm should be available');
  })) passed++; else failed++;

  // getPackageManager tests
  console.log('\ngetPackageManager:');

  if (test('returns object with name, config, and source', () => {
    const result = pm.getPackageManager();
    assert.ok(result.name, 'Should have name');
    assert.ok(result.config, 'Should have config');
    assert.ok(result.source, 'Should have source');
  })) passed++; else failed++;

  if (test('respects environment variable', () => {
    // Save and restore the env var so this test does not leak state.
    const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
    try {
      process.env.CLAUDE_PACKAGE_MANAGER = 'yarn';
      const result = pm.getPackageManager();
      assert.strictEqual(result.name, 'yarn');
      assert.strictEqual(result.source, 'environment');
    } finally {
      if (originalEnv !== undefined) {
        process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
      } else {
        delete process.env.CLAUDE_PACKAGE_MANAGER;
      }
    }
  })) passed++; else failed++;

  if (test('detects from lock file in project', () => {
    // Env var must be absent so detection falls through to the lock file.
    const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
    delete process.env.CLAUDE_PACKAGE_MANAGER;

    const testDir = createTestDir();
    try {
      fs.writeFileSync(path.join(testDir, 'bun.lockb'), '');
      const result = pm.getPackageManager({ projectDir: testDir });
      assert.strictEqual(result.name, 'bun');
      assert.strictEqual(result.source, 'lock-file');
    } finally {
      cleanupTestDir(testDir);
      // The var was deleted above, so only restore when it had a value.
      if (originalEnv !== undefined) {
        process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
      }
    }
  })) passed++; else failed++;

  // getRunCommand tests
  console.log('\ngetRunCommand:');

  if (test('returns correct install command', () => {
    const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
    try {
      process.env.CLAUDE_PACKAGE_MANAGER = 'pnpm';
      const cmd = pm.getRunCommand('install');
      assert.strictEqual(cmd, 'pnpm install');
    } finally {
      if (originalEnv !== undefined) {
        process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
      } else {
        delete process.env.CLAUDE_PACKAGE_MANAGER;
      }
    }
  })) passed++; else failed++;

  if (test('returns correct test command', () => {
    const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
    try {
      process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
      const cmd = pm.getRunCommand('test');
      assert.strictEqual(cmd, 'npm test');
    } finally {
      if (originalEnv !== undefined) {
        process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
      } else {
        delete process.env.CLAUDE_PACKAGE_MANAGER;
      }
    }
  })) passed++; else failed++;

  // getExecCommand tests
  console.log('\ngetExecCommand:');

  if (test('returns correct exec command for npm', () => {
    const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
    try {
      process.env.CLAUDE_PACKAGE_MANAGER = 'npm';
      const cmd = pm.getExecCommand('prettier', '--write .');
      assert.strictEqual(cmd, 'npx prettier --write .');
    } finally {
      if (originalEnv !== undefined) {
        process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
      } else {
        delete process.env.CLAUDE_PACKAGE_MANAGER;
      }
    }
  })) passed++; else failed++;

  if (test('returns correct exec command for pnpm', () => {
    const originalEnv = process.env.CLAUDE_PACKAGE_MANAGER;
    try {
      process.env.CLAUDE_PACKAGE_MANAGER = 'pnpm';
      const cmd = pm.getExecCommand('eslint', '.');
      assert.strictEqual(cmd, 'pnpm dlx eslint .');
    } finally {
      if (originalEnv !== undefined) {
        process.env.CLAUDE_PACKAGE_MANAGER = originalEnv;
      } else {
        delete process.env.CLAUDE_PACKAGE_MANAGER;
      }
    }
  })) passed++; else failed++;

  // getCommandPattern tests
  console.log('\ngetCommandPattern:');

  if (test('generates pattern for dev command', () => {
    const pattern = pm.getCommandPattern('dev');
    assert.ok(pattern.includes('npm run dev'), 'Should include npm');
    assert.ok(pattern.includes('pnpm'), 'Should include pnpm');
    assert.ok(pattern.includes('yarn dev'), 'Should include yarn');
    assert.ok(pattern.includes('bun run dev'), 'Should include bun');
  })) passed++; else failed++;

  if (test('pattern matches actual commands', () => {
    // The pattern string must be a valid regex source.
    const pattern = pm.getCommandPattern('test');
    const regex = new RegExp(pattern);

    assert.ok(regex.test('npm test'), 'Should match npm test');
    assert.ok(regex.test('pnpm test'), 'Should match pnpm test');
    assert.ok(regex.test('yarn test'), 'Should match yarn test');
    assert.ok(regex.test('bun test'), 'Should match bun test');
    assert.ok(!regex.test('cargo test'), 'Should not match cargo test');
  })) passed++; else failed++;

  // getSelectionPrompt tests
  console.log('\ngetSelectionPrompt:');

  if (test('returns informative prompt', () => {
    const prompt = pm.getSelectionPrompt();
    assert.ok(prompt.includes('Available package managers'), 'Should list available managers');
    assert.ok(prompt.includes('CLAUDE_PACKAGE_MANAGER'), 'Should mention env var');
  })) passed++; else failed++;

  // Summary
  console.log('\n=== Test Results ===');
  console.log(`Passed: ${passed}`);
  console.log(`Failed: ${failed}`);
  console.log(`Total: ${passed + failed}\n`);

  // Non-zero exit so CI surfaces failures.
  process.exit(failed > 0 ? 1 : 0);
}

runTests();
|
||||
291
tests/lib/utils.test.js
Normal file
291
tests/lib/utils.test.js
Normal file
@@ -0,0 +1,291 @@
|
||||
/**
|
||||
* Tests for scripts/lib/utils.js
|
||||
*
|
||||
* Run with: node tests/lib/utils.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
|
||||
// Import the module
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
|
||||
// Test helper

/**
 * Execute one synchronous test case, printing a check or cross mark.
 *
 * @param {string} name - Description shown in the test output.
 * @param {Function} fn - Test body; any throw counts as a failure.
 * @returns {boolean} true on success, false on failure.
 */
function test(name, fn) {
  try {
    fn();
  } catch (err) {
    console.log(` ✗ ${name}`);
    console.log(` Error: ${err.message}`);
    return false;
  }
  console.log(` ✓ ${name}`);
  return true;
}
|
||||
|
||||
// Test suite

/**
 * Unit tests for scripts/lib/utils.js (imported above as `utils`).
 *
 * Covers: platform flags, directory helpers (home/.claude/sessions/temp,
 * ensureDir), date/time formatters, session-ID shortening, file helpers
 * (read/write/append/replace/count/grep), findFiles globbing, and system
 * helpers (commandExists, runCommand).
 *
 * Env-var tests save CLAUDE_SESSION_ID up front and restore it in a
 * finally block so test order does not matter.
 *
 * Exits the process with code 1 if any test failed, 0 otherwise.
 */
function runTests() {
  console.log('\n=== Testing utils.js ===\n');

  // Tally of test outcomes; printed in the summary at the end.
  let passed = 0;
  let failed = 0;

  // Platform detection tests
  console.log('Platform Detection:');

  if (test('isWindows/isMacOS/isLinux are booleans', () => {
    assert.strictEqual(typeof utils.isWindows, 'boolean');
    assert.strictEqual(typeof utils.isMacOS, 'boolean');
    assert.strictEqual(typeof utils.isLinux, 'boolean');
  })) passed++; else failed++;

  if (test('exactly one platform should be true', () => {
    const platforms = [utils.isWindows, utils.isMacOS, utils.isLinux];
    const trueCount = platforms.filter(p => p).length;
    // Note: Could be 0 on other platforms like FreeBSD
    assert.ok(trueCount <= 1, 'More than one platform is true');
  })) passed++; else failed++;

  // Directory functions tests
  console.log('\nDirectory Functions:');

  if (test('getHomeDir returns valid path', () => {
    const home = utils.getHomeDir();
    assert.strictEqual(typeof home, 'string');
    assert.ok(home.length > 0, 'Home dir should not be empty');
    assert.ok(fs.existsSync(home), 'Home dir should exist');
  })) passed++; else failed++;

  if (test('getClaudeDir returns path under home', () => {
    const claudeDir = utils.getClaudeDir();
    const homeDir = utils.getHomeDir();
    assert.ok(claudeDir.startsWith(homeDir), 'Claude dir should be under home');
    assert.ok(claudeDir.includes('.claude'), 'Should contain .claude');
  })) passed++; else failed++;

  if (test('getSessionsDir returns path under Claude dir', () => {
    const sessionsDir = utils.getSessionsDir();
    const claudeDir = utils.getClaudeDir();
    assert.ok(sessionsDir.startsWith(claudeDir), 'Sessions should be under Claude dir');
    assert.ok(sessionsDir.includes('sessions'), 'Should contain sessions');
  })) passed++; else failed++;

  if (test('getTempDir returns valid temp directory', () => {
    const tempDir = utils.getTempDir();
    assert.strictEqual(typeof tempDir, 'string');
    assert.ok(tempDir.length > 0, 'Temp dir should not be empty');
  })) passed++; else failed++;

  if (test('ensureDir creates directory', () => {
    const testDir = path.join(utils.getTempDir(), `utils-test-${Date.now()}`);
    try {
      utils.ensureDir(testDir);
      assert.ok(fs.existsSync(testDir), 'Directory should be created');
    } finally {
      fs.rmSync(testDir, { recursive: true, force: true });
    }
  })) passed++; else failed++;

  // Date/Time functions tests
  console.log('\nDate/Time Functions:');

  if (test('getDateString returns YYYY-MM-DD format', () => {
    const date = utils.getDateString();
    assert.ok(/^\d{4}-\d{2}-\d{2}$/.test(date), `Expected YYYY-MM-DD, got ${date}`);
  })) passed++; else failed++;

  if (test('getTimeString returns HH:MM format', () => {
    const time = utils.getTimeString();
    assert.ok(/^\d{2}:\d{2}$/.test(time), `Expected HH:MM, got ${time}`);
  })) passed++; else failed++;

  if (test('getDateTimeString returns full datetime format', () => {
    const dt = utils.getDateTimeString();
    assert.ok(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$/.test(dt), `Expected YYYY-MM-DD HH:MM:SS, got ${dt}`);
  })) passed++; else failed++;

  // Session ID tests
  console.log('\nSession ID Functions:');

  if (test('getSessionIdShort returns default when no env var', () => {
    const originalEnv = process.env.CLAUDE_SESSION_ID;
    delete process.env.CLAUDE_SESSION_ID;
    try {
      const shortId = utils.getSessionIdShort();
      assert.strictEqual(shortId, 'default');
    } finally {
      // NOTE(review): truthy check would skip restoring an empty-string value.
      if (originalEnv) process.env.CLAUDE_SESSION_ID = originalEnv;
    }
  })) passed++; else failed++;

  if (test('getSessionIdShort returns last 8 characters', () => {
    const originalEnv = process.env.CLAUDE_SESSION_ID;
    process.env.CLAUDE_SESSION_ID = 'test-session-abc12345';
    try {
      const shortId = utils.getSessionIdShort();
      assert.strictEqual(shortId, 'abc12345');
    } finally {
      if (originalEnv) {
        process.env.CLAUDE_SESSION_ID = originalEnv;
      } else {
        delete process.env.CLAUDE_SESSION_ID;
      }
    }
  })) passed++; else failed++;

  if (test('getSessionIdShort uses custom fallback', () => {
    const originalEnv = process.env.CLAUDE_SESSION_ID;
    delete process.env.CLAUDE_SESSION_ID;
    try {
      const shortId = utils.getSessionIdShort('custom');
      assert.strictEqual(shortId, 'custom');
    } finally {
      if (originalEnv) process.env.CLAUDE_SESSION_ID = originalEnv;
    }
  })) passed++; else failed++;

  if (test('getSessionIdShort handles short session IDs', () => {
    const originalEnv = process.env.CLAUDE_SESSION_ID;
    process.env.CLAUDE_SESSION_ID = 'short';
    try {
      // IDs shorter than 8 chars come back unchanged.
      const shortId = utils.getSessionIdShort();
      assert.strictEqual(shortId, 'short');
    } finally {
      if (originalEnv) {
        process.env.CLAUDE_SESSION_ID = originalEnv;
      } else {
        delete process.env.CLAUDE_SESSION_ID;
      }
    }
  })) passed++; else failed++;

  // File operations tests
  console.log('\nFile Operations:');

  if (test('readFile returns null for non-existent file', () => {
    const content = utils.readFile('/non/existent/file/path.txt');
    assert.strictEqual(content, null);
  })) passed++; else failed++;

  if (test('writeFile and readFile work together', () => {
    const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`);
    const testContent = 'Hello, World!';
    try {
      utils.writeFile(testFile, testContent);
      const read = utils.readFile(testFile);
      assert.strictEqual(read, testContent);
    } finally {
      // NOTE(review): unlinkSync throws if writeFile failed to create the file.
      fs.unlinkSync(testFile);
    }
  })) passed++; else failed++;

  if (test('appendFile adds content to file', () => {
    const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`);
    try {
      utils.writeFile(testFile, 'Line 1\n');
      utils.appendFile(testFile, 'Line 2\n');
      const content = utils.readFile(testFile);
      assert.strictEqual(content, 'Line 1\nLine 2\n');
    } finally {
      fs.unlinkSync(testFile);
    }
  })) passed++; else failed++;

  if (test('replaceInFile replaces text', () => {
    const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`);
    try {
      utils.writeFile(testFile, 'Hello, World!');
      utils.replaceInFile(testFile, /World/, 'Universe');
      const content = utils.readFile(testFile);
      assert.strictEqual(content, 'Hello, Universe!');
    } finally {
      fs.unlinkSync(testFile);
    }
  })) passed++; else failed++;

  if (test('countInFile counts occurrences', () => {
    const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`);
    try {
      utils.writeFile(testFile, 'foo bar foo baz foo');
      const count = utils.countInFile(testFile, /foo/g);
      assert.strictEqual(count, 3);
    } finally {
      fs.unlinkSync(testFile);
    }
  })) passed++; else failed++;

  if (test('grepFile finds matching lines', () => {
    const testFile = path.join(utils.getTempDir(), `utils-test-${Date.now()}.txt`);
    try {
      utils.writeFile(testFile, 'line 1 foo\nline 2 bar\nline 3 foo');
      const matches = utils.grepFile(testFile, /foo/);
      // lineNumber appears to be 1-based, judging by these expectations.
      assert.strictEqual(matches.length, 2);
      assert.strictEqual(matches[0].lineNumber, 1);
      assert.strictEqual(matches[1].lineNumber, 3);
    } finally {
      fs.unlinkSync(testFile);
    }
  })) passed++; else failed++;

  // findFiles tests
  console.log('\nfindFiles:');

  if (test('findFiles returns empty for non-existent directory', () => {
    const results = utils.findFiles('/non/existent/dir', '*.txt');
    assert.strictEqual(results.length, 0);
  })) passed++; else failed++;

  if (test('findFiles finds matching files', () => {
    const testDir = path.join(utils.getTempDir(), `utils-test-${Date.now()}`);
    try {
      fs.mkdirSync(testDir);
      fs.writeFileSync(path.join(testDir, 'test1.txt'), 'content');
      fs.writeFileSync(path.join(testDir, 'test2.txt'), 'content');
      fs.writeFileSync(path.join(testDir, 'test.md'), 'content');

      const txtFiles = utils.findFiles(testDir, '*.txt');
      assert.strictEqual(txtFiles.length, 2);

      const mdFiles = utils.findFiles(testDir, '*.md');
      assert.strictEqual(mdFiles.length, 1);
    } finally {
      fs.rmSync(testDir, { recursive: true });
    }
  })) passed++; else failed++;

  // System functions tests
  console.log('\nSystem Functions:');

  if (test('commandExists finds node', () => {
    // node must exist — it is running this test.
    const exists = utils.commandExists('node');
    assert.strictEqual(exists, true);
  })) passed++; else failed++;

  if (test('commandExists returns false for fake command', () => {
    const exists = utils.commandExists('nonexistent_command_12345');
    assert.strictEqual(exists, false);
  })) passed++; else failed++;

  if (test('runCommand executes simple command', () => {
    const result = utils.runCommand('node --version');
    assert.strictEqual(result.success, true);
    // e.g. "v20.11.0"
    assert.ok(result.output.startsWith('v'), 'Should start with v');
  })) passed++; else failed++;

  if (test('runCommand handles failed command', () => {
    const result = utils.runCommand('node --invalid-flag-12345');
    assert.strictEqual(result.success, false);
  })) passed++; else failed++;

  // Summary
  console.log('\n=== Test Results ===');
  console.log(`Passed: ${passed}`);
  console.log(`Failed: ${failed}`);
  console.log(`Total: ${passed + failed}\n`);

  // Non-zero exit so CI surfaces failures.
  process.exit(failed > 0 ? 1 : 0);
}

runTests();
|
||||
76
tests/run-all.js
Normal file
76
tests/run-all.js
Normal file
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/env node
/**
 * Run all tests
 *
 * Executes each known test file in a child `node` process, aggregates
 * the "Passed:"/"Failed:" counters each file prints, renders a summary
 * box, and exits with status 1 if anything failed.
 *
 * Usage: node tests/run-all.js
 */

const { execSync } = require('child_process');
const path = require('path');
const fs = require('fs');

const testsDir = __dirname;
const testFiles = [
  'lib/utils.test.js',
  'lib/package-manager.test.js',
  'hooks/hooks.test.js'
];

/**
 * Pull the "Passed: N" / "Failed: N" counters out of a test file's
 * captured output.
 *
 * @param {string} output - stdout (plus stderr, on failure) from the child.
 * @returns {{passed: number, failed: number, reported: boolean}}
 *   `reported` is false when neither counter was found, which means the
 *   test file crashed before printing its results.
 */
function parseResults(output) {
  const passedMatch = output.match(/Passed:\s*(\d+)/);
  const failedMatch = output.match(/Failed:\s*(\d+)/);
  return {
    passed: passedMatch ? parseInt(passedMatch[1], 10) : 0,
    failed: failedMatch ? parseInt(failedMatch[1], 10) : 0,
    reported: Boolean(passedMatch || failedMatch)
  };
}

console.log('╔══════════════════════════════════════════════════════════╗');
console.log('║          Everything Claude Code - Test Suite             ║');
console.log('╚══════════════════════════════════════════════════════════╝');
console.log();

let totalPassed = 0;
let totalFailed = 0;

for (const testFile of testFiles) {
  const testPath = path.join(testsDir, testFile);

  if (!fs.existsSync(testPath)) {
    console.log(`⚠ Skipping ${testFile} (file not found)`);
    continue;
  }

  console.log(`\n━━━ Running ${testFile} ━━━`);

  try {
    const output = execSync(`node "${testPath}"`, {
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe']
    });
    console.log(output);

    const { passed, failed } = parseResults(output);
    totalPassed += passed;
    totalFailed += failed;
  } catch (err) {
    console.log(err.stdout || '');
    console.log(err.stderr || '');

    // A child that exits non-zero lands here; it usually still printed
    // its counters, so parse them from the combined output.
    const { passed, failed, reported } = parseResults(
      (err.stdout || '') + (err.stderr || '')
    );
    totalPassed += passed;
    totalFailed += failed;

    // BUG FIX: previously, a test file that crashed before printing
    // "Failed: N" (syntax error, missing module, ...) contributed zero
    // failures and the whole run could exit 0. Count the crash itself
    // as one failure so the run is red.
    if (!reported) totalFailed += 1;
  }
}

const totalTests = totalPassed + totalFailed;

console.log('\n╔══════════════════════════════════════════════════════════╗');
console.log('║                     Final Results                        ║');
console.log('╠══════════════════════════════════════════════════════════╣');
console.log(`║  Total Tests: ${String(totalTests).padStart(4)}                                   ║`);
console.log(`║  Passed:      ${String(totalPassed).padStart(4)} ✓                                 ║`);
console.log(`║  Failed:      ${String(totalFailed).padStart(4)} ${totalFailed > 0 ? '✗' : ' '}                                 ║`);
console.log('╚══════════════════════════════════════════════════════════╝');

process.exit(totalFailed > 0 ? 1 : 0);
|
||||
Reference in New Issue
Block a user