mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
Merge origin/main into feat/insaits-security-hook
This commit is contained in:
@@ -0,0 +1,162 @@
|
||||
# Curated instincts for affaan-m/everything-claude-code
|
||||
# Import with: /instinct-import .claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml
|
||||
|
||||
---
|
||||
id: everything-claude-code-conventional-commits
|
||||
trigger: "when making a commit in everything-claude-code"
|
||||
confidence: 0.9
|
||||
domain: git
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Conventional Commits
|
||||
|
||||
## Action
|
||||
|
||||
Use conventional commit prefixes such as `feat:`, `fix:`, `docs:`, `test:`, `chore:`, and `refactor:`.
|
||||
|
||||
## Evidence
|
||||
|
||||
- Mainline history consistently uses conventional commit subjects.
|
||||
- Release and changelog automation expects readable commit categorization.
|
||||
|
||||
---
|
||||
id: everything-claude-code-commit-length
|
||||
trigger: "when writing a commit subject in everything-claude-code"
|
||||
confidence: 0.8
|
||||
domain: git
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Commit Length
|
||||
|
||||
## Action
|
||||
|
||||
Keep commit subjects concise and close to the repository norm of about 70 characters.
|
||||
|
||||
## Evidence
|
||||
|
||||
- Recent history clusters around ~70 characters, not ~50.
|
||||
- Short, descriptive subjects read well in release notes and PR summaries.
|
||||
|
||||
---
|
||||
id: everything-claude-code-js-file-naming
|
||||
trigger: "when creating a new JavaScript or TypeScript module in everything-claude-code"
|
||||
confidence: 0.85
|
||||
domain: code-style
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code JS File Naming
|
||||
|
||||
## Action
|
||||
|
||||
Prefer camelCase for JavaScript and TypeScript module filenames, and keep skill or command directories in kebab-case.
|
||||
|
||||
## Evidence
|
||||
|
||||
- `scripts/` and test helpers mostly use camelCase module names.
|
||||
- `skills/` and `commands/` directories use kebab-case consistently.
|
||||
|
||||
---
|
||||
id: everything-claude-code-test-runner
|
||||
trigger: "when adding or updating tests in everything-claude-code"
|
||||
confidence: 0.9
|
||||
domain: testing
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Test Runner
|
||||
|
||||
## Action
|
||||
|
||||
Use the repository's existing Node-based test flow: targeted `*.test.js` files first, then `node tests/run-all.js` or `npm test` for broader verification.
|
||||
|
||||
## Evidence
|
||||
|
||||
- The repo uses `tests/run-all.js` as the central test orchestrator.
|
||||
- Test files follow the `*.test.js` naming pattern across hook, CI, and integration coverage.
|
||||
|
||||
---
|
||||
id: everything-claude-code-hooks-change-set
|
||||
trigger: "when modifying hooks or hook-adjacent behavior in everything-claude-code"
|
||||
confidence: 0.88
|
||||
domain: workflow
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Hooks Change Set
|
||||
|
||||
## Action
|
||||
|
||||
Update the hook script, its configuration, its tests, and its user-facing documentation together.
|
||||
|
||||
## Evidence
|
||||
|
||||
- Hook fixes routinely span `hooks/hooks.json`, `scripts/hooks/`, `tests/hooks/`, `tests/integration/`, and `hooks/README.md`.
|
||||
- Partial hook changes are a common source of regressions and stale docs.
|
||||
|
||||
---
|
||||
id: everything-claude-code-cross-platform-sync
|
||||
trigger: "when shipping a user-visible feature across ECC surfaces"
|
||||
confidence: 0.9
|
||||
domain: workflow
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Cross Platform Sync
|
||||
|
||||
## Action
|
||||
|
||||
Treat the root repo as the source of truth, then mirror shipped changes to `.cursor/`, `.codex/`, `.opencode/`, and `.agents/` only where the feature actually exists.
|
||||
|
||||
## Evidence
|
||||
|
||||
- ECC maintains multiple harness-specific surfaces with overlapping but not identical files.
|
||||
- The safest workflow is root-first followed by explicit parity updates.
|
||||
|
||||
---
|
||||
id: everything-claude-code-release-sync
|
||||
trigger: "when preparing a release for everything-claude-code"
|
||||
confidence: 0.86
|
||||
domain: workflow
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Release Sync
|
||||
|
||||
## Action
|
||||
|
||||
Keep package versions, plugin manifests, and release-facing docs synchronized before publishing.
|
||||
|
||||
## Evidence
|
||||
|
||||
- Release work spans `package.json`, `.claude-plugin/*`, `.opencode/package.json`, and release-note content.
|
||||
- Version drift causes broken update paths and confusing install surfaces.
|
||||
|
||||
---
|
||||
id: everything-claude-code-learning-curation
|
||||
trigger: "when importing or evolving instincts for everything-claude-code"
|
||||
confidence: 0.84
|
||||
domain: workflow
|
||||
source: repo-curation
|
||||
source_repo: affaan-m/everything-claude-code
|
||||
---
|
||||
|
||||
# Everything Claude Code Learning Curation
|
||||
|
||||
## Action
|
||||
|
||||
Prefer a small set of accurate instincts over bulk-generated, duplicated, or contradictory instincts.
|
||||
|
||||
## Evidence
|
||||
|
||||
- Auto-generated instinct dumps can duplicate rules, widen triggers too far, or preserve placeholder detector output.
|
||||
- Curated instincts are easier to import, audit, and trust during continuous-learning workflows.
|
||||
97
.claude/skills/everything-claude-code/SKILL.md
Normal file
97
.claude/skills/everything-claude-code/SKILL.md
Normal file
@@ -0,0 +1,97 @@
|
||||
# Everything Claude Code
|
||||
|
||||
Use this skill when working inside the `everything-claude-code` repository and you need repo-specific guidance instead of generic coding advice.
|
||||
|
||||
Optional companion instincts live at `.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml` for teams using `continuous-learning-v2`.
|
||||
|
||||
## When to Use
|
||||
|
||||
Activate this skill when the task touches one or more of these areas:
|
||||
- cross-platform parity across Claude Code, Cursor, Codex, and OpenCode
|
||||
- hook scripts, hook docs, or hook tests
|
||||
- skills, commands, agents, or rules that must stay synchronized across surfaces
|
||||
- release work such as version bumps, changelog updates, or plugin metadata updates
|
||||
- continuous-learning or instinct workflows inside this repository
|
||||
|
||||
## How It Works
|
||||
|
||||
### 1. Follow the repo's development contract
|
||||
|
||||
- Use conventional commits such as `feat:`, `fix:`, `docs:`, `test:`, `chore:`.
|
||||
- Keep commit subjects concise and close to the repo norm of about 70 characters.
|
||||
- Prefer camelCase for JavaScript and TypeScript module filenames.
|
||||
- Use kebab-case for skill directories and command filenames.
|
||||
- Keep test files on the existing `*.test.js` pattern.
|
||||
|
||||
### 2. Treat the root repo as the source of truth
|
||||
|
||||
Start from the root implementation, then mirror changes where they are intentionally shipped.
|
||||
|
||||
Typical mirror targets:
|
||||
- `.cursor/`
|
||||
- `.codex/`
|
||||
- `.opencode/`
|
||||
- `.agents/`
|
||||
|
||||
Do not assume every `.claude/` artifact needs a cross-platform copy. Only mirror files that are part of the shipped multi-platform surface.
|
||||
|
||||
### 3. Update hooks with tests and docs together
|
||||
|
||||
When changing hook behavior:
|
||||
1. update `hooks/hooks.json` or the relevant script in `scripts/hooks/`
|
||||
2. update matching tests in `tests/hooks/` or `tests/integration/`
|
||||
3. update `hooks/README.md` if behavior or configuration changed
|
||||
4. verify parity for `.cursor/hooks/` and `.opencode/plugins/` when applicable
|
||||
|
||||
### 4. Keep release metadata in sync
|
||||
|
||||
When preparing a release, verify the same version is reflected anywhere it is surfaced:
|
||||
- `package.json`
|
||||
- `.claude-plugin/plugin.json`
|
||||
- `.claude-plugin/marketplace.json`
|
||||
- `.opencode/package.json`
|
||||
- release notes or changelog entries when the release process expects them
|
||||
|
||||
### 5. Be explicit about continuous-learning changes
|
||||
|
||||
If the task touches `skills/continuous-learning-v2/` or imported instincts:
|
||||
- prefer accurate, low-noise instincts over auto-generated bulk output
|
||||
- keep instinct files importable by `instinct-cli.py`
|
||||
- remove duplicated or contradictory instincts instead of layering more guidance on top
|
||||
|
||||
## Examples
|
||||
|
||||
### Naming examples
|
||||
|
||||
```text
|
||||
skills/continuous-learning-v2/SKILL.md
|
||||
commands/update-docs.md
|
||||
scripts/hooks/session-start.js
|
||||
tests/hooks/hooks.test.js
|
||||
```
|
||||
|
||||
### Commit examples
|
||||
|
||||
```text
|
||||
fix: harden session summary extraction on Stop hook
|
||||
docs: align Codex config examples with current schema
|
||||
test: cover Windows formatter fallback behavior
|
||||
```
|
||||
|
||||
### Skill update checklist
|
||||
|
||||
```text
|
||||
1. Update the root skill or command.
|
||||
2. Mirror it only where that surface is shipped.
|
||||
3. Run targeted tests first, then the broader suite if behavior changed.
|
||||
4. Review docs and release notes for user-visible changes.
|
||||
```
|
||||
|
||||
### Release checklist
|
||||
|
||||
```text
|
||||
1. Bump package and plugin versions.
|
||||
2. Run npm test.
|
||||
3. Verify platform-specific manifests.
|
||||
4. Publish the release notes with a human-readable summary.
|
||||
```
|
||||
@@ -39,6 +39,20 @@ Available skills:
|
||||
|
||||
Configure in `~/.codex/config.toml` under `[mcp_servers]`. See `.codex/config.toml` for reference configuration with GitHub, Context7, Memory, and Sequential Thinking servers.
|
||||
|
||||
## Multi-Agent Support
|
||||
|
||||
Codex now supports multi-agent workflows behind the experimental `features.multi_agent` flag.
|
||||
|
||||
- Enable it in `.codex/config.toml` with `[features] multi_agent = true`
|
||||
- Define project-local roles under `[agents.<name>]`
|
||||
- Point each role at a TOML layer under `.codex/agents/`
|
||||
- Use `/agent` inside Codex CLI to inspect and steer child agents
|
||||
|
||||
Sample role configs in this repo:
|
||||
- `.codex/agents/explorer.toml` — read-only evidence gathering
|
||||
- `.codex/agents/reviewer.toml` — correctness/security review
|
||||
- `.codex/agents/docs-researcher.toml` — API and release-note verification
|
||||
|
||||
## Key Differences from Claude Code
|
||||
|
||||
| Feature | Claude Code | Codex CLI |
|
||||
@@ -47,7 +61,7 @@ Configure in `~/.codex/config.toml` under `[mcp_servers]`. See `.codex/config.to
|
||||
| Context file | CLAUDE.md + AGENTS.md | AGENTS.md only |
|
||||
| Skills | Skills loaded via plugin | `.agents/skills/` directory |
|
||||
| Commands | `/slash` commands | Instruction-based |
|
||||
| Agents | Subagent Task tool | Single agent model |
|
||||
| Agents | Subagent Task tool | Multi-agent via `/agent` and `[agents.<name>]` roles |
|
||||
| Security | Hook-based enforcement | Instruction + sandbox |
|
||||
| MCP | Full support | Command-based only |
|
||||
|
||||
|
||||
9
.codex/agents/docs-researcher.toml
Normal file
9
.codex/agents/docs-researcher.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
model = "gpt-5.4"
|
||||
model_reasoning_effort = "medium"
|
||||
sandbox_mode = "read-only"
|
||||
|
||||
developer_instructions = """
|
||||
Verify APIs, framework behavior, and release-note claims against primary documentation before changes land.
|
||||
Cite the exact docs or file paths that support each claim.
|
||||
Do not invent undocumented behavior.
|
||||
"""
|
||||
9
.codex/agents/explorer.toml
Normal file
9
.codex/agents/explorer.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
model = "o4-mini"
|
||||
model_reasoning_effort = "medium"
|
||||
sandbox_mode = "read-only"
|
||||
|
||||
developer_instructions = """
|
||||
Stay in exploration mode.
|
||||
Trace the real execution path, cite files and symbols, and avoid proposing fixes unless the parent agent asks for them.
|
||||
Prefer targeted search and file reads over broad scans.
|
||||
"""
|
||||
9
.codex/agents/reviewer.toml
Normal file
9
.codex/agents/reviewer.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
model = "gpt-5.4"
|
||||
model_reasoning_effort = "high"
|
||||
sandbox_mode = "read-only"
|
||||
|
||||
developer_instructions = """
|
||||
Review like an owner.
|
||||
Prioritize correctness, security, behavioral regressions, and missing tests.
|
||||
Lead with concrete findings and avoid style-only feedback unless it hides a real bug.
|
||||
"""
|
||||
@@ -1,42 +1,37 @@
|
||||
# Everything Claude Code (ECC) — Codex CLI Reference Configuration
|
||||
#:schema https://developers.openai.com/codex/config-schema.json
|
||||
|
||||
# Everything Claude Code (ECC) — Codex Reference Configuration
|
||||
#
|
||||
# Copy this file to ~/.codex/config.toml to apply globally.
|
||||
# Or keep it in your project root for project-level config.
|
||||
# Copy this file to ~/.codex/config.toml for global defaults, or keep it in
|
||||
# the project root as .codex/config.toml for project-local settings.
|
||||
#
|
||||
# Docs: https://github.com/openai/codex
|
||||
# Official docs:
|
||||
# - https://developers.openai.com/codex/config-reference
|
||||
# - https://developers.openai.com/codex/multi-agent
|
||||
|
||||
# Model selection
|
||||
model = "o4-mini"
|
||||
model_provider = "openai"
|
||||
|
||||
# Permissions
|
||||
[permissions]
|
||||
# "untrusted" = no writes, "on-request" = ask per action, "never" = full auto
|
||||
# Top-level runtime settings (current Codex schema)
|
||||
approval_policy = "on-request"
|
||||
# "off", "workspace-read", "workspace-write", "danger-full-access"
|
||||
sandbox_mode = "workspace-write"
|
||||
web_search = "live"
|
||||
|
||||
# Notifications (macOS)
|
||||
[notify]
|
||||
command = "terminal-notifier -title 'Codex ECC' -message 'Task completed!' -sound default"
|
||||
on_complete = true
|
||||
# External notifications receive a JSON payload on stdin.
|
||||
notify = [
|
||||
"terminal-notifier",
|
||||
"-title", "Codex ECC",
|
||||
"-message", "Task completed!",
|
||||
"-sound", "default",
|
||||
]
|
||||
|
||||
# History - persistent instructions applied to every session
|
||||
[history]
|
||||
# These are prepended to every conversation
|
||||
persistent_instructions = """
|
||||
Follow ECC principles:
|
||||
1. Test-Driven Development (TDD) - write tests first, 80%+ coverage required
|
||||
2. Immutability - always create new objects, never mutate
|
||||
3. Security-First - validate all inputs, no hardcoded secrets
|
||||
4. Conventional commits: feat|fix|refactor|docs|test|chore|perf|ci: description
|
||||
5. File organization: many small files (200-400 lines, 800 max)
|
||||
6. Error handling: handle at every level, never swallow errors
|
||||
7. Input validation: schema-based validation at system boundaries
|
||||
"""
|
||||
# Prefer AGENTS.md and project-local .codex/AGENTS.md for instructions.
|
||||
# model_instructions_file replaces built-in instructions instead of AGENTS.md,
|
||||
# so leave it unset unless you intentionally want a single override file.
|
||||
# model_instructions_file = "/absolute/path/to/instructions.md"
|
||||
|
||||
# MCP Servers
|
||||
# Codex supports command-based MCP servers
|
||||
# MCP servers
|
||||
[mcp_servers.github]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-github"]
|
||||
@@ -62,19 +57,41 @@ args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
# command = "npx"
|
||||
# args = ["-y", "firecrawl-mcp"]
|
||||
#
|
||||
# [mcp_servers.railway]
|
||||
# [mcp_servers.cloudflare]
|
||||
# command = "npx"
|
||||
# args = ["-y", "@anthropic/railway-mcp"]
|
||||
# args = ["-y", "@cloudflare/mcp-server-cloudflare"]
|
||||
|
||||
# Features
|
||||
[features]
|
||||
web_search_request = true
|
||||
# Codex multi-agent support is experimental as of March 2026.
|
||||
multi_agent = true
|
||||
|
||||
# Profiles — switch with CODEX_PROFILE=<name>
|
||||
# Profiles — switch with `codex -p <name>`
|
||||
[profiles.strict]
|
||||
approval_policy = "on-request"
|
||||
sandbox_mode = "workspace-read"
|
||||
sandbox_mode = "read-only"
|
||||
web_search = "cached"
|
||||
|
||||
[profiles.yolo]
|
||||
approval_policy = "never"
|
||||
sandbox_mode = "workspace-write"
|
||||
web_search = "live"
|
||||
|
||||
# Optional project-local multi-agent roles.
|
||||
# Keep these commented in global config, because config_file paths are resolved
|
||||
# relative to the config.toml file and must exist at load time.
|
||||
#
|
||||
# [agents]
|
||||
# max_threads = 6
|
||||
# max_depth = 1
|
||||
#
|
||||
# [agents.explorer]
|
||||
# description = "Read-only codebase explorer for gathering evidence before changes are proposed."
|
||||
# config_file = "agents/explorer.toml"
|
||||
#
|
||||
# [agents.reviewer]
|
||||
# description = "PR reviewer focused on correctness, security, and missing tests."
|
||||
# config_file = "agents/reviewer.toml"
|
||||
#
|
||||
# [agents.docs_researcher]
|
||||
# description = "Documentation specialist that verifies APIs, framework behavior, and release notes."
|
||||
# config_file = "agents/docs-researcher.toml"
|
||||
|
||||
3
.github/workflows/ci.yml
vendored
3
.github/workflows/ci.yml
vendored
@@ -156,6 +156,9 @@ jobs:
|
||||
with:
|
||||
node-version: '20.x'
|
||||
|
||||
- name: Install validation dependencies
|
||||
run: npm ci --ignore-scripts
|
||||
|
||||
- name: Validate agents
|
||||
run: node scripts/ci/validate-agents.js
|
||||
continue-on-error: false
|
||||
|
||||
3
.github/workflows/reusable-validate.yml
vendored
3
.github/workflows/reusable-validate.yml
vendored
@@ -24,6 +24,9 @@ jobs:
|
||||
with:
|
||||
node-version: ${{ inputs.node-version }}
|
||||
|
||||
- name: Install validation dependencies
|
||||
run: npm ci --ignore-scripts
|
||||
|
||||
- name: Validate agents
|
||||
run: node scripts/ci/validate-agents.js
|
||||
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -23,6 +23,7 @@ node_modules/
|
||||
|
||||
# Build output
|
||||
dist/
|
||||
coverage/
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
|
||||
@@ -297,6 +297,15 @@ Then in your `opencode.json`:
|
||||
}
|
||||
```
|
||||
|
||||
This only loads the published ECC OpenCode plugin module (hooks/events and exported plugin tools).
|
||||
It does **not** automatically inject ECC's full `agent`, `command`, or `instructions` config into your project.
|
||||
|
||||
If you want the full ECC OpenCode workflow surface, use the repository's bundled `.opencode/opencode.json` as your base config or copy these pieces into your project:
|
||||
- `.opencode/commands/`
|
||||
- `.opencode/prompts/`
|
||||
- `.opencode/instructions/INSTRUCTIONS.md`
|
||||
- the `agent` and `command` sections from `.opencode/opencode.json`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Configuration Not Loading
|
||||
@@ -322,6 +331,7 @@ Then in your `opencode.json`:
|
||||
1. Verify the command is defined in `opencode.json` or as `.md` file in `.opencode/commands/`
|
||||
2. Check the referenced agent exists
|
||||
3. Ensure the template uses `$ARGUMENTS` for user input
|
||||
4. If you installed only `plugin: ["ecc-universal"]`, note that npm plugin install does not auto-add ECC commands or agents to your project config
|
||||
|
||||
## Best Practices
|
||||
|
||||
|
||||
@@ -32,7 +32,16 @@ Add to your `opencode.json`:
|
||||
"plugin": ["ecc-universal"]
|
||||
}
|
||||
```
|
||||
After installation, the `ecc-install` CLI becomes available:
|
||||
|
||||
This loads the ECC OpenCode plugin module from npm:
|
||||
- hook/event integrations
|
||||
- bundled custom tools exported by the plugin
|
||||
|
||||
It does **not** auto-register the full ECC command/agent/instruction catalog in your project config. For the full OpenCode setup, either:
|
||||
- run OpenCode inside this repository, or
|
||||
- copy the relevant `.opencode/commands/`, `.opencode/prompts/`, `.opencode/instructions/`, and the `instructions`, `agent`, and `command` config entries into your own project
|
||||
|
||||
After installation, the `ecc-install` CLI is also available:
|
||||
|
||||
```bash
|
||||
npx ecc-install typescript
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
/**
|
||||
* Everything Claude Code (ECC) Plugin for OpenCode
|
||||
*
|
||||
* This package provides a complete OpenCode plugin with:
|
||||
* - 13 specialized agents (planner, architect, code-reviewer, etc.)
|
||||
* - 31 commands (/plan, /tdd, /code-review, etc.)
|
||||
* This package provides the published ECC OpenCode plugin module:
|
||||
* - Plugin hooks (auto-format, TypeScript check, console.log warning, env injection, etc.)
|
||||
* - Custom tools (run-tests, check-coverage, security-audit, format-code, lint-check, git-summary)
|
||||
* - 37 skills (coding-standards, security-review, tdd-workflow, etc.)
|
||||
* - Bundled reference config/assets for the wider ECC OpenCode setup
|
||||
*
|
||||
* Usage:
|
||||
*
|
||||
@@ -22,6 +20,10 @@
|
||||
* }
|
||||
* ```
|
||||
*
|
||||
* That enables the published plugin module only. For ECC commands, agents,
|
||||
* prompts, and instructions, use this repository's `.opencode/opencode.json`
|
||||
* as a base or copy the bundled `.opencode/` assets into your project.
|
||||
*
|
||||
* Option 2: Clone and use directly
|
||||
* ```bash
|
||||
* git clone https://github.com/affaan-m/everything-claude-code
|
||||
@@ -51,6 +53,7 @@ export const metadata = {
|
||||
agents: 13,
|
||||
commands: 31,
|
||||
skills: 37,
|
||||
configAssets: true,
|
||||
hookEvents: [
|
||||
"file.edited",
|
||||
"tool.execute.before",
|
||||
|
||||
12
AGENTS.md
12
AGENTS.md
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — Agent Instructions
|
||||
|
||||
This is a **production-ready AI coding plugin** providing 13 specialized agents, 50+ skills, 33 commands, and automated hook workflows for software development.
|
||||
This is a **production-ready AI coding plugin** providing 16 specialized agents, 65+ skills, 40 commands, and automated hook workflows for software development.
|
||||
|
||||
## Core Principles
|
||||
|
||||
@@ -27,6 +27,9 @@ This is a **production-ready AI coding plugin** providing 13 specialized agents,
|
||||
| go-build-resolver | Go build errors | Go build failures |
|
||||
| database-reviewer | PostgreSQL/Supabase specialist | Schema design, query optimization |
|
||||
| python-reviewer | Python code review | Python projects |
|
||||
| chief-of-staff | Communication triage and drafts | Multi-channel email, Slack, LINE, Messenger |
|
||||
| loop-operator | Autonomous loop execution | Run loops safely, monitor stalls, intervene |
|
||||
| harness-optimizer | Harness config tuning | Reliability, cost, throughput |
|
||||
|
||||
## Agent Orchestration
|
||||
|
||||
@@ -36,6 +39,9 @@ Use agents proactively without user prompt:
|
||||
- Bug fix or new feature → **tdd-guide**
|
||||
- Architectural decision → **architect**
|
||||
- Security-sensitive code → **security-reviewer**
|
||||
- Multi-channel communication triage → **chief-of-staff**
|
||||
- Autonomous loops / loop monitoring → **loop-operator**
|
||||
- Harness config reliability and cost → **harness-optimizer**
|
||||
|
||||
Use parallel execution for independent operations — launch multiple agents simultaneously.
|
||||
|
||||
@@ -123,8 +129,8 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
|
||||
|
||||
```
|
||||
agents/ — 16 specialized subagents
|
||||
skills/ — 50+ workflow skills and domain knowledge
|
||||
commands/ — 33 slash commands
|
||||
skills/ — 65+ workflow skills and domain knowledge
|
||||
commands/ — 40 slash commands
|
||||
hooks/ — Trigger-based automations
|
||||
rules/ — Always-follow guidelines (common + per-language)
|
||||
scripts/ — Cross-platform Node.js utilities
|
||||
|
||||
@@ -10,6 +10,7 @@ Thanks for wanting to contribute! This repo is a community resource for Claude C
|
||||
- [Contributing Agents](#contributing-agents)
|
||||
- [Contributing Hooks](#contributing-hooks)
|
||||
- [Contributing Commands](#contributing-commands)
|
||||
- [Cross-Harness and Translations](#cross-harness-and-translations)
|
||||
- [Pull Request Process](#pull-request-process)
|
||||
|
||||
---
|
||||
@@ -348,6 +349,29 @@ What the user receives.
|
||||
|
||||
---
|
||||
|
||||
## Cross-Harness and Translations
|
||||
|
||||
### Skill subsets (Codex and Cursor)
|
||||
|
||||
ECC ships skill subsets for other harnesses:
|
||||
|
||||
- **Codex:** `.agents/skills/` — skills listed in `agents/openai.yaml` are loaded by Codex.
|
||||
- **Cursor:** `.cursor/skills/` — a subset of skills is bundled for Cursor.
|
||||
|
||||
When you **add a new skill** that should be available on Codex or Cursor:
|
||||
|
||||
1. Add the skill under `skills/your-skill-name/` as usual.
|
||||
2. If it should be available on **Codex**, add it to `.agents/skills/` (copy the skill directory or add a reference) and ensure it is referenced in `agents/openai.yaml` if required.
|
||||
3. If it should be available on **Cursor**, add it under `.cursor/skills/` per Cursor's layout.
|
||||
|
||||
Check existing skills in those directories for the expected structure. Keeping these subsets in sync is manual; mention in your PR if you updated them.
|
||||
|
||||
### Translations
|
||||
|
||||
Translations live under `docs/` (e.g. `docs/zh-CN`, `docs/zh-TW`, `docs/ja-JP`). If you change agents, commands, or skills that are translated, consider updating the corresponding translation files or opening an issue so maintainers or translators can update them.
|
||||
|
||||
---
|
||||
|
||||
## Pull Request Process
|
||||
|
||||
### 1. PR Title Format
|
||||
|
||||
65
README.md
65
README.md
@@ -14,9 +14,10 @@
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
> **50K+ stars** | **6K+ forks** | **30 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**
|
||||
> **50K+ stars** | **6K+ forks** | **30 contributors** | **5 languages supported** | **Anthropic Hackathon Winner**
|
||||
|
||||
---
|
||||
|
||||
@@ -155,9 +156,9 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cd everything-claude-code
|
||||
|
||||
# Recommended: use the installer (handles common + language rules safely)
|
||||
./install.sh typescript # or python or golang
|
||||
./install.sh typescript # or python or golang or perl
|
||||
# You can pass multiple languages:
|
||||
# ./install.sh typescript python golang
|
||||
# ./install.sh typescript python golang perl
|
||||
# or target cursor:
|
||||
# ./install.sh --target cursor typescript
|
||||
# or target antigravity:
|
||||
@@ -310,6 +311,9 @@ everything-claude-code/
|
||||
| |-- liquid-glass-design/ # iOS 26 Liquid Glass design system (NEW)
|
||||
| |-- foundation-models-on-device/ # Apple on-device LLM with FoundationModels (NEW)
|
||||
| |-- swift-concurrency-6-2/ # Swift 6.2 Approachable Concurrency (NEW)
|
||||
| |-- perl-patterns/ # Modern Perl 5.36+ idioms and best practices (NEW)
|
||||
| |-- perl-security/ # Perl security patterns, taint mode, safe I/O (NEW)
|
||||
| |-- perl-testing/ # Perl TDD with Test2::V0, prove, Devel::Cover (NEW)
|
||||
| |-- autonomous-loops/ # Autonomous loop patterns: sequential pipelines, PR loops, DAG orchestration (NEW)
|
||||
| |-- plankton-code-quality/ # Write-time code quality enforcement with Plankton hooks (NEW)
|
||||
|
|
||||
@@ -361,6 +365,8 @@ everything-claude-code/
|
||||
| |-- typescript/ # TypeScript/JavaScript specific
|
||||
| |-- python/ # Python specific
|
||||
| |-- golang/ # Go specific
|
||||
| |-- perl/ # Perl specific (NEW)
|
||||
| |-- swift/ # Swift specific
|
||||
|
|
||||
|-- hooks/ # Trigger-based automations
|
||||
| |-- README.md # Hook documentation, recipes, and customization guide
|
||||
@@ -563,6 +569,7 @@ This gives you instant access to all commands, agents, skills, and hooks.
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
>
|
||||
> # Option B: Project-level rules (applies to current project only)
|
||||
> mkdir -p .claude/rules
|
||||
@@ -588,6 +595,7 @@ cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
|
||||
# Copy commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -670,6 +678,7 @@ rules/
|
||||
typescript/ # TS/JS specific patterns and tools
|
||||
python/ # Python specific patterns and tools
|
||||
golang/ # Go specific patterns and tools
|
||||
perl/ # Perl specific patterns and tools
|
||||
```
|
||||
|
||||
See [`rules/README.md`](rules/README.md) for installation and structure details.
|
||||
@@ -824,7 +833,7 @@ Please contribute! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
|
||||
### Ideas for Contributions
|
||||
|
||||
- Language-specific skills (Rust, C#, Swift, Kotlin) — Go, Python, Java already included
|
||||
- Language-specific skills (Rust, C#, Kotlin, Java) — Go, Python, Perl, Swift, and TypeScript already included
|
||||
- Framework-specific configs (Rails, Laravel, FastAPI, NestJS) — Django, Spring Boot already included
|
||||
- DevOps agents (Kubernetes, Terraform, AWS, Docker)
|
||||
- Testing strategies (different frameworks, visual regression)
|
||||
@@ -841,7 +850,7 @@ ECC provides **full Cursor IDE support** with hooks, rules, agents, skills, comm
|
||||
```bash
|
||||
# Install for your language(s)
|
||||
./install.sh --target cursor typescript
|
||||
./install.sh --target cursor python golang swift
|
||||
./install.sh --target cursor python golang swift perl
|
||||
```
|
||||
|
||||
### What's Included
|
||||
@@ -850,7 +859,7 @@ ECC provides **full Cursor IDE support** with hooks, rules, agents, skills, comm
|
||||
|-----------|-------|---------|
|
||||
| Hook Events | 15 | sessionStart, beforeShellExecution, afterFileEdit, beforeMCPExecution, beforeSubmitPrompt, and 10 more |
|
||||
| Hook Scripts | 16 | Thin Node.js scripts delegating to `scripts/hooks/` via shared adapter |
|
||||
| Rules | 29 | 9 common (alwaysApply) + 20 language-specific (TypeScript, Python, Go, Swift) |
|
||||
| Rules | 34 | 9 common (alwaysApply) + 25 language-specific (TypeScript, Python, Go, Swift, Perl) |
|
||||
| Agents | Shared | Via AGENTS.md at root (read by Cursor natively) |
|
||||
| Skills | Shared + Bundled | Via AGENTS.md at root and `.cursor/skills/` for translated additions |
|
||||
| Commands | Shared | `.cursor/commands/` if installed |
|
||||
@@ -893,27 +902,29 @@ ECC provides **first-class Codex support** for both the macOS app and CLI, with
|
||||
### Quick Start (Codex App + CLI)
|
||||
|
||||
```bash
|
||||
# Copy the reference config to your home directory
|
||||
cp .codex/config.toml ~/.codex/config.toml
|
||||
|
||||
# Run Codex CLI in the repo — AGENTS.md is auto-detected
|
||||
# Run Codex CLI in the repo — AGENTS.md and .codex/ are auto-detected
|
||||
codex
|
||||
|
||||
# Optional: copy the global-safe defaults to your home directory
|
||||
cp .codex/config.toml ~/.codex/config.toml
|
||||
```
|
||||
|
||||
Codex macOS app:
|
||||
- Open this repository as your workspace.
|
||||
- The root `AGENTS.md` is auto-detected.
|
||||
- Optional: copy `.codex/config.toml` to `~/.codex/config.toml` for CLI/app behavior consistency.
|
||||
- `.codex/config.toml` and `.codex/agents/*.toml` work best when kept project-local.
|
||||
- Optional: copy `.codex/config.toml` to `~/.codex/config.toml` for global defaults; keep the multi-agent role files project-local unless you also copy `.codex/agents/`.
|
||||
|
||||
### What's Included
|
||||
|
||||
| Component | Count | Details |
|
||||
|-----------|-------|---------|
|
||||
| Config | 1 | `.codex/config.toml` — model, permissions, MCP servers, persistent instructions |
|
||||
| Config | 1 | `.codex/config.toml` — top-level approvals/sandbox/web_search, MCP servers, notifications, profiles |
|
||||
| AGENTS.md | 2 | Root (universal) + `.codex/AGENTS.md` (Codex-specific supplement) |
|
||||
| Skills | 16 | `.agents/skills/` — SKILL.md + agents/openai.yaml per skill |
|
||||
| MCP Servers | 4 | GitHub, Context7, Memory, Sequential Thinking (command-based) |
|
||||
| Profiles | 2 | `strict` (read-only sandbox) and `yolo` (full auto-approve) |
|
||||
| Agent Roles | 3 | `.codex/agents/` — explorer, reviewer, docs-researcher |
|
||||
|
||||
### Skills
|
||||
|
||||
@@ -940,7 +951,24 @@ Skills at `.agents/skills/` are auto-loaded by Codex:
|
||||
|
||||
### Key Limitation
|
||||
|
||||
Codex does **not yet provide Claude-style hook execution parity**. ECC enforcement there is instruction-based via `AGENTS.md` and `persistent_instructions`, plus sandbox permissions.
|
||||
Codex does **not yet provide Claude-style hook execution parity**. ECC enforcement there is instruction-based via `AGENTS.md`, optional `model_instructions_file` overrides, and sandbox/approval settings.
|
||||
|
||||
### Multi-Agent Support
|
||||
|
||||
Current Codex builds support experimental multi-agent workflows.
|
||||
|
||||
- Enable `features.multi_agent = true` in `.codex/config.toml`
|
||||
- Define roles under `[agents.<name>]`
|
||||
- Point each role at a file under `.codex/agents/`
|
||||
- Use `/agent` in the CLI to inspect or steer child agents
|
||||
|
||||
ECC ships three sample role configs:
|
||||
|
||||
| Role | Purpose |
|
||||
|------|---------|
|
||||
| `explorer` | Read-only codebase evidence gathering before edits |
|
||||
| `reviewer` | Correctness, security, and missing-test review |
|
||||
| `docs_researcher` | Documentation and API verification before release/docs changes |
|
||||
|
||||
---
|
||||
|
||||
@@ -1050,6 +1078,13 @@ Then add to your `opencode.json`:
|
||||
}
|
||||
```
|
||||
|
||||
That npm plugin entry enables ECC's published OpenCode plugin module (hooks/events and plugin tools).
|
||||
It does **not** automatically add ECC's full command/agent/instruction catalog to your project config.
|
||||
|
||||
For the full ECC OpenCode setup, either:
|
||||
- run OpenCode inside this repository, or
|
||||
- copy the bundled `.opencode/` config assets into your project and wire the `instructions`, `agent`, and `command` entries in `opencode.json`
|
||||
|
||||
### Documentation
|
||||
|
||||
- **Migration Guide**: `.opencode/MIGRATION.md`
|
||||
@@ -1070,7 +1105,7 @@ ECC is the **first plugin to maximize every major AI coding tool**. Here's how e
|
||||
| **Skills** | 65 | Shared | 10 (native format) | 37 |
|
||||
| **Hook Events** | 8 types | 15 types | None yet | 11 types |
|
||||
| **Hook Scripts** | 20+ scripts | 16 scripts (DRY adapter) | N/A | Plugin hooks |
|
||||
| **Rules** | 29 (common + lang) | 29 (YAML frontmatter) | Instruction-based | 13 instructions |
|
||||
| **Rules** | 34 (common + lang) | 34 (YAML frontmatter) | Instruction-based | 13 instructions |
|
||||
| **Custom Tools** | Via hooks | Via hooks | N/A | 6 native tools |
|
||||
| **MCP Servers** | 14 | Shared (mcp.json) | 4 (command-based) | Full |
|
||||
| **Config Format** | settings.json | hooks.json + rules/ | config.toml | opencode.json |
|
||||
@@ -1083,7 +1118,7 @@ ECC is the **first plugin to maximize every major AI coding tool**. Here's how e
|
||||
- **AGENTS.md** at root is the universal cross-tool file (read by all 4 tools)
|
||||
- **DRY adapter pattern** lets Cursor reuse Claude Code's hook scripts without duplication
|
||||
- **Skills format** (SKILL.md with YAML frontmatter) works across Claude Code, Codex, and OpenCode
|
||||
- Codex's lack of hooks is compensated by `persistent_instructions` and sandbox permissions
|
||||
- Codex's lack of hooks is compensated by `AGENTS.md`, optional `model_instructions_file` overrides, and sandbox permissions
|
||||
|
||||
---
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@
|
||||

|
||||

|
||||

|
||||

|
||||

|
||||
|
||||
---
|
||||
@@ -81,8 +82,12 @@
|
||||
# 首先克隆仓库
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
|
||||
# 复制规则(应用于所有项目)
|
||||
cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
```
|
||||
|
||||
### 第三步:开始使用
|
||||
@@ -175,6 +180,9 @@ everything-claude-code/
|
||||
| |-- golang-patterns/ # Go 惯用语和最佳实践(新增)
|
||||
| |-- golang-testing/ # Go 测试模式、TDD、基准测试(新增)
|
||||
| |-- cpp-testing/ # C++ 测试模式、GoogleTest、CMake/CTest(新增)
|
||||
| |-- perl-patterns/ # 现代 Perl 5.36+ 惯用语和最佳实践(新增)
|
||||
| |-- perl-security/ # Perl 安全模式、污染模式、安全 I/O(新增)
|
||||
| |-- perl-testing/ # 使用 Test2::V0、prove、Devel::Cover 的 Perl TDD(新增)
|
||||
|
|
||||
|-- commands/ # 用于快速执行的斜杠命令
|
||||
| |-- tdd.md # /tdd - 测试驱动开发
|
||||
@@ -197,12 +205,20 @@ everything-claude-code/
|
||||
| |-- evolve.md # /evolve - 将直觉聚类到技能中(新增)
|
||||
|
|
||||
|-- rules/ # 始终遵循的指南(复制到 ~/.claude/rules/)
|
||||
| |-- security.md # 强制性安全检查
|
||||
| |-- coding-style.md # 不可变性、文件组织
|
||||
| |-- testing.md # TDD、80% 覆盖率要求
|
||||
| |-- git-workflow.md # 提交格式、PR 流程
|
||||
| |-- agents.md # 何时委托给子代理
|
||||
| |-- performance.md # 模型选择、上下文管理
|
||||
| |-- README.md # 结构概述和安装指南
|
||||
| |-- common/ # 与语言无关的原则
|
||||
| | |-- coding-style.md # 不可变性、文件组织
|
||||
| | |-- git-workflow.md # 提交格式、PR 流程
|
||||
| | |-- testing.md # TDD、80% 覆盖率要求
|
||||
| | |-- performance.md # 模型选择、上下文管理
|
||||
| | |-- patterns.md # 设计模式、骨架项目
|
||||
| | |-- hooks.md # 钩子架构、TodoWrite
|
||||
| | |-- agents.md # 何时委托给子代理
|
||||
| | |-- security.md # 强制性安全检查
|
||||
| |-- typescript/ # TypeScript/JavaScript 特定
|
||||
| |-- python/ # Python 特定
|
||||
| |-- golang/ # Go 特定
|
||||
| |-- perl/ # Perl 特定(新增)
|
||||
|
|
||||
|-- hooks/ # 基于触发器的自动化
|
||||
| |-- hooks.json # 所有钩子配置(PreToolUse、PostToolUse、Stop 等)
|
||||
@@ -356,8 +372,12 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# 将代理复制到你的 Claude 配置
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# 复制规则
|
||||
cp everything-claude-code/rules/*.md ~/.claude/rules/
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
|
||||
# 复制命令
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -425,13 +445,15 @@ model: opus
|
||||
|
||||
### 规则
|
||||
|
||||
规则是始终遵循的指南。保持模块化:
|
||||
规则是始终遵循的指南,分为 `common/`(通用)+ 语言特定目录:
|
||||
|
||||
```
|
||||
~/.claude/rules/
|
||||
security.md # 无硬编码秘密
|
||||
coding-style.md # 不可变性、文件限制
|
||||
testing.md # TDD、覆盖率要求
|
||||
common/ # 通用原则(必装)
|
||||
typescript/ # TS/JS 特定模式和工具
|
||||
python/ # Python 特定模式和工具
|
||||
golang/ # Go 特定模式和工具
|
||||
perl/ # Perl 特定模式和工具
|
||||
```
|
||||
|
||||
---
|
||||
@@ -466,7 +488,7 @@ node tests/hooks/hooks.test.js
|
||||
|
||||
### 贡献想法
|
||||
|
||||
- 特定语言的技能(Python、Rust 模式)- 现已包含 Go!
|
||||
- 特定语言的技能(Rust、C#、Kotlin、Java)- 现已包含 Go、Python、Perl、Swift 和 TypeScript!
|
||||
- 特定框架的配置(Django、Rails、Laravel)
|
||||
- DevOps 代理(Kubernetes、Terraform、AWS)
|
||||
- 测试策略(不同框架)
|
||||
|
||||
422
TROUBLESHOOTING.md
Normal file
422
TROUBLESHOOTING.md
Normal file
@@ -0,0 +1,422 @@
|
||||
# Troubleshooting Guide
|
||||
|
||||
Common issues and solutions for Everything Claude Code (ECC) plugin.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Memory & Context Issues](#memory--context-issues)
|
||||
- [Agent Harness Failures](#agent-harness-failures)
|
||||
- [Hook & Workflow Errors](#hook--workflow-errors)
|
||||
- [Installation & Setup](#installation--setup)
|
||||
- [Performance Issues](#performance-issues)
|
||||
- [Common Error Messages](#common-error-messages)
|
||||
- [Getting Help](#getting-help)
|
||||
|
||||
---
|
||||
|
||||
## Memory & Context Issues
|
||||
|
||||
### Context Window Overflow
|
||||
|
||||
**Symptom:** "Context too long" errors or incomplete responses
|
||||
|
||||
**Causes:**
|
||||
- Large file uploads exceeding token limits
|
||||
- Accumulated conversation history
|
||||
- Multiple large tool outputs in single session
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# 1. Clear conversation history and start fresh
|
||||
# Use Claude Code: "New Chat" or Cmd/Ctrl+Shift+N
|
||||
|
||||
# 2. Reduce file size before analysis
|
||||
head -n 100 large-file.log > sample.log
|
||||
|
||||
# 3. Use streaming for large outputs
|
||||
head -n 50 large-file.txt
|
||||
|
||||
# 4. Split tasks into smaller chunks
|
||||
# Instead of: "Analyze all 50 files"
|
||||
# Use: "Analyze files in src/components/ directory"
|
||||
```
|
||||
|
||||
### Memory Persistence Failures
|
||||
|
||||
**Symptom:** Agent doesn't remember previous context or observations
|
||||
|
||||
**Causes:**
|
||||
- Disabled continuous-learning hooks
|
||||
- Corrupted observation files
|
||||
- Project detection failures
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Check if observations are being recorded
|
||||
ls ~/.claude/homunculus/projects/*/observations.jsonl
|
||||
|
||||
# Find the current project's hash id
|
||||
python3 - <<'PY'
|
||||
import json, os
|
||||
registry_path = os.path.expanduser("~/.claude/homunculus/projects.json")
|
||||
with open(registry_path) as f:
|
||||
registry = json.load(f)
|
||||
for project_id, meta in registry.items():
|
||||
if meta.get("root") == os.getcwd():
|
||||
print(project_id)
|
||||
break
|
||||
else:
|
||||
raise SystemExit("Project hash not found in ~/.claude/homunculus/projects.json")
|
||||
PY
|
||||
|
||||
# View recent observations for that project
|
||||
tail -20 ~/.claude/homunculus/projects/<project-hash>/observations.jsonl
|
||||
|
||||
# Back up a corrupted observations file before recreating it
|
||||
mv ~/.claude/homunculus/projects/<project-hash>/observations.jsonl \
|
||||
~/.claude/homunculus/projects/<project-hash>/observations.jsonl.bak.$(date +%Y%m%d-%H%M%S)
|
||||
|
||||
# Verify hooks are enabled
|
||||
grep -r "observe" ~/.claude/settings.json
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Agent Harness Failures
|
||||
|
||||
### Agent Not Found
|
||||
|
||||
**Symptom:** "Agent not loaded" or "Unknown agent" errors
|
||||
|
||||
**Causes:**
|
||||
- Plugin not installed correctly
|
||||
- Agent path misconfiguration
|
||||
- Marketplace vs manual install mismatch
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Check plugin installation
|
||||
ls ~/.claude/plugins/cache/
|
||||
|
||||
# Verify agent exists (marketplace install)
|
||||
ls ~/.claude/plugins/cache/*/agents/
|
||||
|
||||
# For manual install, agents should be in:
|
||||
ls ~/.claude/agents/ # Custom agents only
|
||||
|
||||
# Reload plugin
|
||||
# Claude Code → Settings → Extensions → Reload
|
||||
```
|
||||
|
||||
### Workflow Execution Hangs
|
||||
|
||||
**Symptom:** Agent starts but never completes
|
||||
|
||||
**Causes:**
|
||||
- Infinite loops in agent logic
|
||||
- Blocked on user input
|
||||
- Network timeout waiting for API
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# 1. Check for stuck processes
|
||||
ps aux | grep claude
|
||||
|
||||
# 2. Enable debug mode
|
||||
export CLAUDE_DEBUG=1
|
||||
|
||||
# 3. Set shorter timeouts
|
||||
export CLAUDE_TIMEOUT=30
|
||||
|
||||
# 4. Check network connectivity
|
||||
curl -I https://api.anthropic.com
|
||||
```
|
||||
|
||||
### Tool Use Errors
|
||||
|
||||
**Symptom:** "Tool execution failed" or permission denied
|
||||
|
||||
**Causes:**
|
||||
- Missing dependencies (npm, python, etc.)
|
||||
- Insufficient file permissions
|
||||
- Path not found
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Verify required tools are installed
|
||||
which node python3 npm git
|
||||
|
||||
# Fix permissions on hook scripts
|
||||
chmod +x ~/.claude/plugins/cache/*/hooks/*.sh
|
||||
chmod +x ~/.claude/plugins/cache/*/skills/*/hooks/*.sh
|
||||
|
||||
# Check PATH includes necessary binaries
|
||||
echo $PATH
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Hook & Workflow Errors
|
||||
|
||||
### Hooks Not Firing
|
||||
|
||||
**Symptom:** Pre/post hooks don't execute
|
||||
|
||||
**Causes:**
|
||||
- Hooks not registered in settings.json
|
||||
- Invalid hook syntax
|
||||
- Hook script not executable
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Check hooks are registered
|
||||
grep -A 10 '"hooks"' ~/.claude/settings.json
|
||||
|
||||
# Verify hook files exist and are executable
|
||||
ls -la ~/.claude/plugins/cache/*/hooks/
|
||||
|
||||
# Test hook manually
|
||||
bash ~/.claude/plugins/cache/*/hooks/pre-bash.sh <<< '{"command":"echo test"}'
|
||||
|
||||
# Re-register hooks (if using plugin)
|
||||
# Disable and re-enable plugin in Claude Code settings
|
||||
```
|
||||
|
||||
### Python/Node Version Mismatches
|
||||
|
||||
**Symptom:** "python3 not found" or "node: command not found"
|
||||
|
||||
**Causes:**
|
||||
- Missing Python/Node installation
|
||||
- PATH not configured
|
||||
- Wrong Python version (Windows)
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Install Python 3 (if missing)
|
||||
# macOS: brew install python3
|
||||
# Ubuntu: sudo apt install python3
|
||||
# Windows: Download from python.org
|
||||
|
||||
# Install Node.js (if missing)
|
||||
# macOS: brew install node
|
||||
# Ubuntu: sudo apt install nodejs npm
|
||||
# Windows: Download from nodejs.org
|
||||
|
||||
# Verify installations
|
||||
python3 --version
|
||||
node --version
|
||||
npm --version
|
||||
|
||||
# Windows: Ensure python (not python3) works
|
||||
python --version
|
||||
```
|
||||
|
||||
### Dev Server Blocker False Positives
|
||||
|
||||
**Symptom:** Hook blocks legitimate commands mentioning "dev"
|
||||
|
||||
**Causes:**
|
||||
- Heredoc content triggering pattern match
|
||||
- Non-dev commands with "dev" in arguments
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# This is fixed in v1.8.0+ (PR #371)
|
||||
# Upgrade plugin to latest version
|
||||
|
||||
# Workaround: Wrap dev servers in tmux
|
||||
tmux new-session -d -s dev "npm run dev"
|
||||
tmux attach -t dev
|
||||
|
||||
# Disable hook temporarily if needed
|
||||
# Edit ~/.claude/settings.json and remove pre-bash hook
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Installation & Setup
|
||||
|
||||
### Plugin Not Loading
|
||||
|
||||
**Symptom:** Plugin features unavailable after install
|
||||
|
||||
**Causes:**
|
||||
- Marketplace cache not updated
|
||||
- Claude Code version incompatibility
|
||||
- Corrupted plugin files
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Inspect the plugin cache before changing it
|
||||
ls -la ~/.claude/plugins/cache/
|
||||
|
||||
# Back up the plugin cache instead of deleting it in place
|
||||
mv ~/.claude/plugins/cache ~/.claude/plugins/cache.backup.$(date +%Y%m%d-%H%M%S)
|
||||
mkdir -p ~/.claude/plugins/cache
|
||||
|
||||
# Reinstall from marketplace
|
||||
# Claude Code → Extensions → Everything Claude Code → Uninstall
|
||||
# Then reinstall from marketplace
|
||||
|
||||
# Check Claude Code version
|
||||
claude --version
|
||||
# Requires Claude Code 2.0+
|
||||
|
||||
# Manual install (if marketplace fails)
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cp -r everything-claude-code ~/.claude/plugins/ecc
|
||||
```
|
||||
|
||||
### Package Manager Detection Fails
|
||||
|
||||
**Symptom:** Wrong package manager used (npm instead of pnpm)
|
||||
|
||||
**Causes:**
|
||||
- No lock file present
|
||||
- CLAUDE_PACKAGE_MANAGER not set
|
||||
- Multiple lock files confusing detection
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Set preferred package manager globally
|
||||
export CLAUDE_PACKAGE_MANAGER=pnpm
|
||||
# Add to ~/.bashrc or ~/.zshrc
|
||||
|
||||
# Or set per-project
|
||||
echo '{"packageManager": "pnpm"}' > .claude/package-manager.json
|
||||
|
||||
# Or use package.json field
|
||||
npm pkg set packageManager="pnpm@8.15.0"
|
||||
|
||||
# Warning: removing lock files can change installed dependency versions.
|
||||
# Commit or back up the lock file first, then run a fresh install and re-run CI.
|
||||
# Only do this when intentionally switching package managers.
|
||||
rm package-lock.json # If using pnpm/yarn/bun
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Performance Issues
|
||||
|
||||
### Slow Response Times
|
||||
|
||||
**Symptom:** Agent takes 30+ seconds to respond
|
||||
|
||||
**Causes:**
|
||||
- Large observation files
|
||||
- Too many active hooks
|
||||
- Network latency to API
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Archive large observations instead of deleting them
|
||||
archive_dir="$HOME/.claude/homunculus/archive/$(date +%Y%m%d)"
|
||||
mkdir -p "$archive_dir"
|
||||
find ~/.claude/homunculus/projects -name "observations.jsonl" -size +10M -exec sh -c '
|
||||
for file in "$@"; do
|
||||
base=$(basename "$(dirname "$file")")
|
||||
gzip -c "$file" > "'"$archive_dir"'/${base}-observations.jsonl.gz"
|
||||
: > "$file"
|
||||
done
|
||||
' sh {} +
|
||||
|
||||
# Disable unused hooks temporarily
|
||||
# Edit ~/.claude/settings.json
|
||||
|
||||
# Keep active observation files small
|
||||
# Large archives should live under ~/.claude/homunculus/archive/
|
||||
```
|
||||
|
||||
### High CPU Usage
|
||||
|
||||
**Symptom:** Claude Code consuming 100% CPU
|
||||
|
||||
**Causes:**
|
||||
- Infinite observation loops
|
||||
- File watching on large directories
|
||||
- Memory leaks in hooks
|
||||
|
||||
**Solutions:**
|
||||
```bash
|
||||
# Check for runaway processes
|
||||
ps aux | grep claude | sort -nrk 3
|
||||
|
||||
# Disable continuous learning temporarily
|
||||
touch ~/.claude/homunculus/disabled
|
||||
|
||||
# Restart Claude Code
|
||||
# Cmd/Ctrl+Q then reopen
|
||||
|
||||
# Check observation file size
|
||||
du -sh ~/.claude/homunculus/*/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Common Error Messages
|
||||
|
||||
### "EACCES: permission denied"
|
||||
|
||||
```bash
|
||||
# Fix hook permissions
|
||||
find ~/.claude/plugins -name "*.sh" -exec chmod +x {} \;
|
||||
|
||||
# Fix observation directory permissions
|
||||
chmod -R u+rwX ~/.claude/homunculus  # owner-only: observations may contain sensitive session data
|
||||
```
|
||||
|
||||
### "MODULE_NOT_FOUND"
|
||||
|
||||
```bash
|
||||
# Install plugin dependencies
|
||||
cd ~/.claude/plugins/cache/everything-claude-code
|
||||
npm install
|
||||
|
||||
# Or for manual install
|
||||
cd ~/.claude/plugins/ecc
|
||||
npm install
|
||||
```
|
||||
|
||||
### "spawn UNKNOWN"
|
||||
|
||||
```bash
|
||||
# Windows-specific: Ensure scripts use correct line endings
|
||||
# Convert CRLF to LF
|
||||
find ~/.claude/plugins -name "*.sh" -exec dos2unix {} \;
|
||||
|
||||
# Or install dos2unix
|
||||
# macOS: brew install dos2unix
|
||||
# Ubuntu: sudo apt install dos2unix
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you're still experiencing issues:
|
||||
|
||||
1. **Check GitHub Issues**: [github.com/affaan-m/everything-claude-code/issues](https://github.com/affaan-m/everything-claude-code/issues)
|
||||
2. **Enable Debug Logging**:
|
||||
```bash
|
||||
export CLAUDE_DEBUG=1
|
||||
export CLAUDE_LOG_LEVEL=debug
|
||||
```
|
||||
3. **Collect Diagnostic Info**:
|
||||
```bash
|
||||
claude --version
|
||||
node --version
|
||||
python3 --version
|
||||
echo $CLAUDE_PACKAGE_MANAGER
|
||||
ls -la ~/.claude/plugins/cache/
|
||||
```
|
||||
4. **Open an Issue**: Include debug logs, error messages, and diagnostic info
|
||||
|
||||
---
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [README.md](./README.md) - Installation and features
|
||||
- [CONTRIBUTING.md](./CONTRIBUTING.md) - Development guidelines
|
||||
- [docs/](./docs/) - Detailed documentation
|
||||
- [examples/](./examples/) - Usage examples
|
||||
@@ -337,8 +337,10 @@ For PMX, prioritize these E2E tests:
|
||||
|
||||
## Related Agents
|
||||
|
||||
This command invokes the `e2e-runner` agent located at:
|
||||
`~/.claude/agents/e2e-runner.md`
|
||||
This command invokes the `e2e-runner` agent provided by ECC.
|
||||
|
||||
For manual installs, the source file lives at:
|
||||
`agents/e2e-runner.md`
|
||||
|
||||
## Quick Commands
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
---
|
||||
description: Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project).
|
||||
description: "Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project)."
|
||||
---
|
||||
|
||||
# /learn-eval - Extract, Evaluate, then Save
|
||||
|
||||
Extends `/learn` with a quality gate and save-location decision before writing any skill file.
|
||||
Extends `/learn` with a quality gate, save-location decision, and knowledge-placement awareness before writing any skill file.
|
||||
|
||||
## What to Extract
|
||||
|
||||
@@ -51,36 +51,61 @@ origin: auto-extracted
|
||||
[Trigger conditions]
|
||||
```
|
||||
|
||||
5. **Self-evaluate before saving** using this rubric:
|
||||
5. **Quality gate — Checklist + Holistic verdict**
|
||||
|
||||
| Dimension | 1 | 3 | 5 |
|
||||
|-----------|---|---|---|
|
||||
| Specificity | Abstract principles only, no code examples | Representative code example present | Rich examples covering all usage patterns |
|
||||
| Actionability | Unclear what to do | Main steps are understandable | Immediately actionable, edge cases covered |
|
||||
| Scope Fit | Too broad or too narrow | Mostly appropriate, some boundary ambiguity | Name, trigger, and content perfectly aligned |
|
||||
| Non-redundancy | Nearly identical to another skill | Some overlap but unique perspective exists | Completely unique value |
|
||||
| Coverage | Covers only a fraction of the target task | Main cases covered, common variants missing | Main cases, edge cases, and pitfalls covered |
|
||||
### 5a. Required checklist (verify by actually reading files)
|
||||
|
||||
- Score each dimension 1–5
|
||||
- If any dimension scores 1–2, improve the draft and re-score until all dimensions are ≥ 3
|
||||
- Show the user the scores table and the final draft
|
||||
Execute **all** of the following before evaluating the draft:
|
||||
|
||||
6. Ask user to confirm:
|
||||
- Show: proposed save path + scores table + final draft
|
||||
- Wait for explicit confirmation before writing
|
||||
- [ ] Grep `~/.claude/skills/` and relevant project `.claude/skills/` files by keyword to check for content overlap
|
||||
- [ ] Check MEMORY.md (both project and global) for overlap
|
||||
- [ ] Consider whether appending to an existing skill would suffice
|
||||
- [ ] Confirm this is a reusable pattern, not a one-off fix
|
||||
|
||||
7. Save to the determined location
|
||||
### 5b. Holistic verdict
|
||||
|
||||
## Output Format for Step 5 (scores table)
|
||||
Synthesize the checklist results and draft quality, then choose **one** of the following:
|
||||
|
||||
| Dimension | Score | Rationale |
|
||||
|-----------|-------|-----------|
|
||||
| Specificity | N/5 | ... |
|
||||
| Actionability | N/5 | ... |
|
||||
| Scope Fit | N/5 | ... |
|
||||
| Non-redundancy | N/5 | ... |
|
||||
| Coverage | N/5 | ... |
|
||||
| **Total** | **N/25** | |
|
||||
| Verdict | Meaning | Next Action |
|
||||
|---------|---------|-------------|
|
||||
| **Save** | Unique, specific, well-scoped | Proceed to Step 6 |
|
||||
| **Improve then Save** | Valuable but needs refinement | List improvements → revise → re-evaluate (once) |
|
||||
| **Absorb into [X]** | Should be appended to an existing skill | Show target skill and additions → Step 6 |
|
||||
| **Drop** | Trivial, redundant, or too abstract | Explain reasoning and stop |
|
||||
|
||||
**Guideline dimensions** (informing the verdict, not scored):
|
||||
|
||||
- **Specificity & Actionability**: Contains code examples or commands that are immediately usable
|
||||
- **Scope Fit**: Name, trigger conditions, and content are aligned and focused on a single pattern
|
||||
- **Uniqueness**: Provides value not covered by existing skills (informed by checklist results)
|
||||
- **Reusability**: Realistic trigger scenarios exist in future sessions
|
||||
|
||||
6. **Verdict-specific confirmation flow**
|
||||
|
||||
- **Improve then Save**: Present the required improvements + revised draft + updated checklist/verdict after one re-evaluation; if the revised verdict is **Save**, save after user confirmation, otherwise follow the new verdict
|
||||
- **Save**: Present save path + checklist results + 1-line verdict rationale + full draft → save after user confirmation
|
||||
- **Absorb into [X]**: Present target path + additions (diff format) + checklist results + verdict rationale → append after user confirmation
|
||||
- **Drop**: Show checklist results + reasoning only (no confirmation needed)
|
||||
|
||||
7. Save / Absorb to the determined location
|
||||
|
||||
## Output Format for Step 5
|
||||
|
||||
```
|
||||
### Checklist
|
||||
- [x] skills/ grep: no overlap (or: overlap found → details)
|
||||
- [x] MEMORY.md: no overlap (or: overlap found → details)
|
||||
- [x] Existing skill append: new file appropriate (or: should append to [X])
|
||||
- [x] Reusability: confirmed (or: one-off → Drop)
|
||||
|
||||
### Verdict: Save / Improve then Save / Absorb into [X] / Drop
|
||||
|
||||
**Rationale:** (1-2 sentences explaining the verdict)
|
||||
```
|
||||
|
||||
## Design Rationale
|
||||
|
||||
This version replaces the previous 5-dimension numeric scoring rubric (Specificity, Actionability, Scope Fit, Non-redundancy, Coverage scored 1-5) with a checklist-based holistic verdict system. Modern frontier models (Opus 4.6+) have strong contextual judgment — forcing rich qualitative signals into numeric scores loses nuance and can produce misleading totals. The holistic approach lets the model weigh all factors naturally, producing more accurate save/drop decisions while the explicit checklist ensures no critical check is skipped.
|
||||
|
||||
## Notes
|
||||
|
||||
@@ -88,4 +113,4 @@ origin: auto-extracted
|
||||
- Don't extract one-time issues (specific API outages, etc.)
|
||||
- Focus on patterns that will save time in future sessions
|
||||
- Keep skills focused — one pattern per skill
|
||||
- If Coverage score is low, add related variants before saving
|
||||
- When the verdict is Absorb, append to the existing skill rather than creating a new file
|
||||
|
||||
@@ -109,5 +109,7 @@ After planning:
|
||||
|
||||
## Related Agents
|
||||
|
||||
This command invokes the `planner` agent located at:
|
||||
`~/.claude/agents/planner.md`
|
||||
This command invokes the `planner` agent provided by ECC.
|
||||
|
||||
For manual installs, the source file lives at:
|
||||
`agents/planner.md`
|
||||
|
||||
155
commands/resume-session.md
Normal file
155
commands/resume-session.md
Normal file
@@ -0,0 +1,155 @@
|
||||
---
|
||||
description: Load the most recent session file from ~/.claude/sessions/ and resume work with full context from where the last session ended.
|
||||
---
|
||||
|
||||
# Resume Session Command
|
||||
|
||||
Load the last saved session state and orient fully before doing any work.
|
||||
This command is the counterpart to `/save-session`.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Starting a new session to continue work from a previous day
|
||||
- After starting a fresh session due to context limits
|
||||
- When handing off a session file from another source (just provide the file path)
|
||||
- Any time you have a session file and want Claude to fully absorb it before proceeding
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
/resume-session # loads most recent file in ~/.claude/sessions/
|
||||
/resume-session 2024-01-15 # loads most recent session for that date
|
||||
/resume-session ~/.claude/sessions/2024-01-15-session.tmp # loads a specific legacy-format file
|
||||
/resume-session ~/.claude/sessions/2024-01-15-abc123de-session.tmp # loads a current short-id session file
|
||||
```
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Find the session file
|
||||
|
||||
If no argument provided:
|
||||
|
||||
1. Check `~/.claude/sessions/`
|
||||
2. Pick the most recently modified `*-session.tmp` file
|
||||
3. If the folder does not exist or has no matching files, tell the user:
|
||||
```
|
||||
No session files found in ~/.claude/sessions/
|
||||
Run /save-session at the end of a session to create one.
|
||||
```
|
||||
Then stop.
|
||||
|
||||
If an argument is provided:
|
||||
|
||||
- If it looks like a date (`YYYY-MM-DD`), search `~/.claude/sessions/` for files matching
|
||||
`YYYY-MM-DD-session.tmp` (legacy format) or `YYYY-MM-DD-<shortid>-session.tmp` (current format)
|
||||
and load the most recently modified variant for that date
|
||||
- If it looks like a file path, read that file directly
|
||||
- If not found, report clearly and stop
|
||||
|
||||
### Step 2: Read the entire session file
|
||||
|
||||
Read the complete file. Do not summarize yet.
|
||||
|
||||
### Step 3: Confirm understanding
|
||||
|
||||
Respond with a structured briefing in this exact format:
|
||||
|
||||
```
|
||||
SESSION LOADED: [actual resolved path to the file]
|
||||
════════════════════════════════════════════════
|
||||
|
||||
PROJECT: [project name / topic from file]
|
||||
|
||||
WHAT WE'RE BUILDING:
|
||||
[2-3 sentence summary in your own words]
|
||||
|
||||
CURRENT STATE:
|
||||
✅ Working: [count] items confirmed
|
||||
🔄 In Progress: [list files that are in progress]
|
||||
🗒️ Not Started: [list planned but untouched]
|
||||
|
||||
WHAT NOT TO RETRY:
|
||||
[list every failed approach with its reason — this is critical]
|
||||
|
||||
OPEN QUESTIONS / BLOCKERS:
|
||||
[list any blockers or unanswered questions]
|
||||
|
||||
NEXT STEP:
|
||||
[exact next step if defined in the file]
|
||||
[if not defined: "No next step defined — recommend reviewing 'What Has NOT Been Tried Yet' together before starting"]
|
||||
|
||||
════════════════════════════════════════════════
|
||||
Ready to continue. What would you like to do?
|
||||
```
|
||||
|
||||
### Step 4: Wait for the user
|
||||
|
||||
Do NOT start working automatically. Do NOT touch any files. Wait for the user to say what to do next.
|
||||
|
||||
If the next step is clearly defined in the session file and the user says "continue" or "yes" or similar — proceed with that exact next step.
|
||||
|
||||
If no next step is defined — ask the user where to start, and optionally suggest an approach from the "What Has NOT Been Tried Yet" section.
|
||||
|
||||
---
|
||||
|
||||
## Edge Cases
|
||||
|
||||
**Multiple sessions for the same date** (`2024-01-15-session.tmp`, `2024-01-15-abc123de-session.tmp`):
|
||||
Load the most recently modified matching file for that date, regardless of whether it uses the legacy no-id format or the current short-id format.
|
||||
|
||||
**Session file references files that no longer exist:**
|
||||
Note this during the briefing — "⚠️ `path/to/file.ts` referenced in session but not found on disk."
|
||||
|
||||
**Session file is from more than 7 days ago:**
|
||||
Note the gap — "⚠️ This session is from N days ago (threshold: 7 days). Things may have changed." — then proceed normally.
|
||||
|
||||
**User provides a file path directly (e.g., forwarded from a teammate):**
|
||||
Read it and follow the same briefing process — the format is the same regardless of source.
|
||||
|
||||
**Session file is empty or malformed:**
|
||||
Report: "Session file found but appears empty or unreadable. You may need to create a new one with /save-session."
|
||||
|
||||
---
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
SESSION LOADED: /Users/you/.claude/sessions/2024-01-15-abc123de-session.tmp
|
||||
════════════════════════════════════════════════
|
||||
|
||||
PROJECT: my-app — JWT Authentication
|
||||
|
||||
WHAT WE'RE BUILDING:
|
||||
User authentication with JWT tokens stored in httpOnly cookies.
|
||||
Register and login endpoints are partially done. Route protection
|
||||
via middleware hasn't been started yet.
|
||||
|
||||
CURRENT STATE:
|
||||
✅ Working: 3 items (register endpoint, JWT generation, password hashing)
|
||||
🔄 In Progress: app/api/auth/login/route.ts (token works, cookie not set yet)
|
||||
🗒️ Not Started: middleware.ts, app/login/page.tsx
|
||||
|
||||
WHAT NOT TO RETRY:
|
||||
❌ Next-Auth — conflicts with custom Prisma adapter, threw adapter error on every request
|
||||
❌ localStorage for JWT — causes SSR hydration mismatch, incompatible with Next.js
|
||||
|
||||
OPEN QUESTIONS / BLOCKERS:
|
||||
- Does cookies().set() work inside a Route Handler or only Server Actions?
|
||||
|
||||
NEXT STEP:
|
||||
In app/api/auth/login/route.ts — set the JWT as an httpOnly cookie using
|
||||
cookies().set('token', jwt, { httpOnly: true, secure: true, sameSite: 'strict' })
|
||||
then test with Postman for a Set-Cookie header in the response.
|
||||
|
||||
════════════════════════════════════════════════
|
||||
Ready to continue. What would you like to do?
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- Never modify the session file when loading it — it's a read-only historical record
|
||||
- The briefing format is fixed — do not skip sections even if they are empty
|
||||
- "What Not To Retry" must always be shown, even if it just says "None" — it's too important to miss
|
||||
- After resuming, the user may want to run `/save-session` again at the end of the new session to create a new dated file
|
||||
275
commands/save-session.md
Normal file
275
commands/save-session.md
Normal file
@@ -0,0 +1,275 @@
|
||||
---
|
||||
description: Save current session state to a dated file in ~/.claude/sessions/ so work can be resumed in a future session with full context.
|
||||
---
|
||||
|
||||
# Save Session Command
|
||||
|
||||
Capture everything that happened in this session — what was built, what worked, what failed, what's left — and write it to a dated file so the next session can pick up exactly where this one left off.
|
||||
|
||||
## When to Use
|
||||
|
||||
- End of a work session before closing Claude Code
|
||||
- Before hitting context limits (run this first, then start a fresh session)
|
||||
- After solving a complex problem you want to remember
|
||||
- Any time you need to hand off context to a future session
|
||||
|
||||
## Process
|
||||
|
||||
### Step 1: Gather context
|
||||
|
||||
Before writing the file, collect:
|
||||
|
||||
- Read all files modified during this session (use git diff or recall from conversation)
|
||||
- Review what was discussed, attempted, and decided
|
||||
- Note any errors encountered and how they were resolved (or not)
|
||||
- Check current test/build status if relevant
|
||||
|
||||
### Step 2: Create the sessions folder if it doesn't exist
|
||||
|
||||
Create the canonical sessions folder in the user's Claude home directory:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.claude/sessions
|
||||
```
|
||||
|
||||
### Step 3: Write the session file
|
||||
|
||||
Create `~/.claude/sessions/YYYY-MM-DD-<short-id>-session.tmp`, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`:
|
||||
|
||||
- Allowed characters: lowercase `a-z`, digits `0-9`, hyphens `-`
|
||||
- Minimum length: 8 characters
|
||||
- No uppercase letters, no underscores, no spaces
|
||||
|
||||
Valid examples: `abc123de`, `a1b2c3d4`, `frontend-worktree-1`
|
||||
Invalid examples: `ABC123de` (uppercase), `short` (under 8 chars), `test_id1` (underscore)
|
||||
|
||||
Full valid filename example: `2024-01-15-abc123de-session.tmp`
|
||||
|
||||
The legacy filename `YYYY-MM-DD-session.tmp` is still valid, but new session files should prefer the short-id form to avoid same-day collisions.
|
||||
|
||||
### Step 4: Populate the file with all sections below
|
||||
|
||||
Write every section honestly. Do not skip sections — write "Nothing yet" or "N/A" if a section genuinely has no content. An incomplete file is worse than an honest empty section.
|
||||
|
||||
### Step 5: Show the file to the user
|
||||
|
||||
After writing, display the full contents and ask:
|
||||
|
||||
```
|
||||
Session saved to [actual resolved path to the session file]
|
||||
|
||||
Does this look accurate? Anything to correct or add before we close?
|
||||
```
|
||||
|
||||
Wait for confirmation. Make edits if requested.
|
||||
|
||||
---
|
||||
|
||||
## Session File Format
|
||||
|
||||
```markdown
|
||||
# Session: YYYY-MM-DD
|
||||
|
||||
**Started:** [approximate time if known]
|
||||
**Last Updated:** [current time]
|
||||
**Project:** [project name or path]
|
||||
**Topic:** [one-line summary of what this session was about]
|
||||
|
||||
---
|
||||
|
||||
## What We Are Building
|
||||
|
||||
[1-3 paragraphs describing the feature, bug fix, or task. Include enough
|
||||
context that someone with zero memory of this session can understand the goal.
|
||||
Include: what it does, why it's needed, how it fits into the larger system.]
|
||||
|
||||
---
|
||||
|
||||
## What WORKED (with evidence)
|
||||
|
||||
[List only things that are confirmed working. For each item include WHY you
|
||||
know it works — test passed, ran in browser, Postman returned 200, etc.
|
||||
Without evidence, move it to "Not Tried Yet" instead.]
|
||||
|
||||
- **[thing that works]** — confirmed by: [specific evidence]
|
||||
- **[thing that works]** — confirmed by: [specific evidence]
|
||||
|
||||
If nothing is confirmed working yet: "Nothing confirmed working yet — all approaches still in progress or untested."
|
||||
|
||||
---
|
||||
|
||||
## What Did NOT Work (and why)
|
||||
|
||||
[This is the most important section. List every approach tried that failed.
|
||||
For each failure write the EXACT reason so the next session doesn't retry it.
|
||||
Be specific: "threw X error because Y" is useful. "didn't work" is not.]
|
||||
|
||||
- **[approach tried]** — failed because: [exact reason / error message]
|
||||
- **[approach tried]** — failed because: [exact reason / error message]
|
||||
|
||||
If nothing failed: "No failed approaches yet."
|
||||
|
||||
---
|
||||
|
||||
## What Has NOT Been Tried Yet
|
||||
|
||||
[Approaches that seem promising but haven't been attempted. Ideas from the
|
||||
conversation. Alternative solutions worth exploring. Be specific enough that
|
||||
the next session knows exactly what to try.]
|
||||
|
||||
- [approach / idea]
|
||||
- [approach / idea]
|
||||
|
||||
If nothing is queued: "No specific untried approaches identified."
|
||||
|
||||
---
|
||||
|
||||
## Current State of Files
|
||||
|
||||
[Every file touched this session. Be precise about what state each file is in.]
|
||||
|
||||
| File | Status | Notes |
|
||||
| ----------------- | -------------- | -------------------------- |
|
||||
| `path/to/file.ts` | ✅ Complete | [what it does] |
|
||||
| `path/to/file.ts` | 🔄 In Progress | [what's done, what's left] |
|
||||
| `path/to/file.ts` | ❌ Broken | [what's wrong] |
|
||||
| `path/to/file.ts` | 🗒️ Not Started | [planned but not touched] |
|
||||
|
||||
If no files were touched: "No files modified this session."
|
||||
|
||||
---
|
||||
|
||||
## Decisions Made
|
||||
|
||||
[Architecture choices, tradeoffs accepted, approaches chosen and why.
|
||||
These prevent the next session from relitigating settled decisions.]
|
||||
|
||||
- **[decision]** — reason: [why this was chosen over alternatives]
|
||||
|
||||
If no significant decisions: "No major decisions made this session."
|
||||
|
||||
---
|
||||
|
||||
## Blockers & Open Questions
|
||||
|
||||
[Anything unresolved that the next session needs to address or investigate.
|
||||
Questions that came up but weren't answered. External dependencies being waited on.]
|
||||
|
||||
- [blocker / open question]
|
||||
|
||||
If none: "No active blockers."
|
||||
|
||||
---
|
||||
|
||||
## Exact Next Step
|
||||
|
||||
[If known: The single most important thing to do when resuming. Be precise
|
||||
enough that resuming requires zero thinking about where to start.]
|
||||
|
||||
[If not known: "Next step not determined — review 'What Has NOT Been Tried Yet'
|
||||
and 'Blockers' sections to decide on direction before starting."]
|
||||
|
||||
---
|
||||
|
||||
## Environment & Setup Notes
|
||||
|
||||
[Only fill this if relevant — commands needed to run the project, env vars
|
||||
required, services that need to be running, etc. Skip if standard setup.]
|
||||
|
||||
[If none: omit this section entirely.]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Example Output
|
||||
|
||||
```markdown
|
||||
# Session: 2024-01-15
|
||||
|
||||
**Started:** ~2pm
|
||||
**Last Updated:** 5:30pm
|
||||
**Project:** my-app
|
||||
**Topic:** Building JWT authentication with httpOnly cookies
|
||||
|
||||
---
|
||||
|
||||
## What We Are Building
|
||||
|
||||
User authentication system for the Next.js app. Users register with email/password,
|
||||
receive a JWT stored in an httpOnly cookie (not localStorage), and protected routes
|
||||
check for a valid token via middleware. The goal is session persistence across browser
|
||||
refreshes without exposing the token to JavaScript.
|
||||
|
||||
---
|
||||
|
||||
## What WORKED (with evidence)
|
||||
|
||||
- **`/api/auth/register` endpoint** — confirmed by: Postman POST returns 200 with user
|
||||
object, row visible in Supabase dashboard, bcrypt hash stored correctly
|
||||
- **JWT generation in `lib/auth.ts`** — confirmed by: unit test passes
|
||||
(`npm test -- auth.test.ts`), decoded token at jwt.io shows correct payload
|
||||
- **Password hashing** — confirmed by: `bcrypt.compare()` returns true in test
|
||||
|
||||
---
|
||||
|
||||
## What Did NOT Work (and why)
|
||||
|
||||
- **Next-Auth library** — failed because: conflicts with our custom Prisma adapter,
|
||||
threw "Cannot use adapter with credentials provider in this configuration" on every
|
||||
request. Not worth debugging — too opinionated for our setup.
|
||||
- **Storing JWT in localStorage** — failed because: SSR renders happen before
|
||||
localStorage is available, caused React hydration mismatch error on every page load.
|
||||
This approach is fundamentally incompatible with Next.js SSR.
|
||||
|
||||
---
|
||||
|
||||
## What Has NOT Been Tried Yet
|
||||
|
||||
- Store JWT as httpOnly cookie in the login route response (most likely solution)
|
||||
- Use `cookies()` from `next/headers` to read token in server components
|
||||
- Write middleware.ts to protect routes by checking cookie existence
|
||||
|
||||
---
|
||||
|
||||
## Current State of Files
|
||||
|
||||
| File | Status | Notes |
|
||||
| -------------------------------- | -------------- | ----------------------------------------------- |
|
||||
| `app/api/auth/register/route.ts` | ✅ Complete | Works, tested |
|
||||
| `app/api/auth/login/route.ts` | 🔄 In Progress | Token generates but not setting cookie yet |
|
||||
| `lib/auth.ts` | ✅ Complete | JWT helpers, all tested |
|
||||
| `middleware.ts` | 🗒️ Not Started | Route protection, needs cookie read logic first |
|
||||
| `app/login/page.tsx` | 🗒️ Not Started | UI not started |
|
||||
|
||||
---
|
||||
|
||||
## Decisions Made
|
||||
|
||||
- **httpOnly cookie over localStorage** — reason: prevents XSS token theft, works with SSR
|
||||
- **Custom auth over Next-Auth** — reason: Next-Auth conflicts with our Prisma setup, not worth the fight
|
||||
|
||||
---
|
||||
|
||||
## Blockers & Open Questions
|
||||
|
||||
- Does `cookies().set()` work inside a Route Handler or only in Server Actions? Need to verify.
|
||||
|
||||
---
|
||||
|
||||
## Exact Next Step
|
||||
|
||||
In `app/api/auth/login/route.ts`, after generating the JWT, set it as an httpOnly
|
||||
cookie using `cookies().set('token', jwt, { httpOnly: true, secure: true, sameSite: 'strict' })`.
|
||||
Then test with Postman — the response should include a `Set-Cookie` header.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- Each session gets its own file — never append to a previous session's file
|
||||
- The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it
|
||||
- If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly
|
||||
- The file is meant to be read by Claude at the start of the next session via `/resume-session`
|
||||
- Use the canonical global session store: `~/.claude/sessions/`
|
||||
- Prefer the short-id filename form (`YYYY-MM-DD-<short-id>-session.tmp`) for any new session file
|
||||
@@ -319,8 +319,10 @@ Never skip the RED phase. Never write code before tests.
|
||||
|
||||
## Related Agents
|
||||
|
||||
This command invokes the `tdd-guide` agent located at:
|
||||
`~/.claude/agents/tdd-guide.md`
|
||||
This command invokes the `tdd-guide` agent provided by ECC.
|
||||
|
||||
And can reference the `tdd-workflow` skill at:
|
||||
`~/.claude/skills/tdd-workflow/`
|
||||
The related `tdd-workflow` skill is also bundled with ECC.
|
||||
|
||||
For manual installs, the source files live at:
|
||||
- `agents/tdd-guide.md`
|
||||
- `skills/tdd-workflow/SKILL.md`
|
||||
|
||||
146
docs/ARCHITECTURE-IMPROVEMENTS.md
Normal file
146
docs/ARCHITECTURE-IMPROVEMENTS.md
Normal file
@@ -0,0 +1,146 @@
|
||||
# Architecture Improvement Recommendations
|
||||
|
||||
This document captures architect-level improvements for the Everything Claude Code (ECC) project. It is written from the perspective of a Claude Code coding architect aiming to improve maintainability, consistency, and long-term quality.
|
||||
|
||||
---
|
||||
|
||||
## 1. Documentation and Single Source of Truth
|
||||
|
||||
### 1.1 Agent / Command / Skill Count Sync
|
||||
|
||||
**Issue:** AGENTS.md states "13 specialized agents, 50+ skills, 33 commands" while the repo has **16 agents**, **65+ skills**, and **40 commands**. README and other docs also vary. This causes confusion for contributors and users.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- **Single source of truth:** Derive counts (and optionally tables) from the filesystem or a small manifest. Options:
|
||||
- **Option A:** Add a script (e.g. `scripts/ci/catalog.js`) that scans `agents/*.md`, `commands/*.md`, and `skills/*/SKILL.md` and outputs JSON/Markdown. CI and docs can consume this.
|
||||
- **Option B:** Maintain one `docs/catalog.json` (or YAML) that lists agents, commands, and skills with metadata; scripts and docs read from it. Requires discipline to update on add/remove.
|
||||
- **Short-term:** Manually sync AGENTS.md, README.md, and CLAUDE.md with actual counts and list any new agents (e.g. chief-of-staff, loop-operator, harness-optimizer) in the agent table.
|
||||
|
||||
**Impact:** High — affects first impression and contributor trust.
|
||||
|
||||
---
|
||||
|
||||
### 1.2 Command → Agent / Skill Map
|
||||
|
||||
**Issue:** There is no single machine- or human-readable map of "which command uses which agent(s) or skill(s)." This lives in README tables and individual command `.md` files, which can drift.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- Add a **command registry** (e.g. in `docs/` or as frontmatter in command files) that lists for each command: name, description, primary agent(s), skills referenced. Can be generated from command file content or maintained by hand.
|
||||
- Expose a "map" in docs (e.g. `docs/COMMAND-AGENT-MAP.md`) or in the generated catalog for discoverability and for tooling (e.g. "which commands use tdd-guide?").
|
||||
|
||||
**Impact:** Medium — improves discoverability and refactoring safety.
|
||||
|
||||
---
|
||||
|
||||
## 2. Testing and Quality
|
||||
|
||||
### 2.1 Test Discovery vs Hardcoded List
|
||||
|
||||
**Issue:** `tests/run-all.js` uses a **hardcoded list** of test files. New test files are not run unless someone updates `run-all.js`, so coverage can be incomplete by omission.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- **Glob-based discovery:** Discover test files by pattern (e.g. `**/*.test.js` under `tests/`) and run them, with an optional allowlist/denylist for special cases. This makes new tests automatically part of the suite.
|
||||
- Keep a single entry point (`tests/run-all.js`) that runs discovered tests and aggregates results.
|
||||
|
||||
**Impact:** High — prevents regression where new tests exist but are never executed.
|
||||
|
||||
---
|
||||
|
||||
### 2.2 Test Coverage Metrics
|
||||
|
||||
**Issue:** There is no coverage tool (e.g. nyc/c8/istanbul). The project cannot assert "80%+ coverage" for its own scripts; coverage is implicit.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- Introduce a coverage tool for Node scripts (e.g. `c8` or `nyc`) and run it in CI. Start with a baseline (e.g. 60%) and raise over time; or at least report coverage in CI without failing so the team can see trends.
|
||||
- Focus on `scripts/` (lib + hooks + ci) as the primary target; exclude one-off scripts if needed.
|
||||
|
||||
**Impact:** Medium — aligns the project with its own AGENTS.md guidance (80%+ coverage) and surfaces untested paths.
|
||||
|
||||
---
|
||||
|
||||
## 3. Schema and Validation
|
||||
|
||||
### 3.1 Use Hooks JSON Schema in CI
|
||||
|
||||
**Issue:** `schemas/hooks.schema.json` exists and defines the hook configuration shape, but `scripts/ci/validate-hooks.js` does **not** use it. Validation is duplicated (VALID_EVENTS, structure) and can drift from the schema.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- Use a JSON Schema validator (e.g. `ajv`) in `validate-hooks.js` to validate `hooks/hooks.json` against `schemas/hooks.schema.json`. Keep the validator as the single source of truth for structure; retain only hook-specific checks (e.g. inline JS syntax) in the script.
|
||||
- Ensures schema and validator stay in sync and allows IDE/editor validation via `$schema` in hooks.json.
|
||||
|
||||
**Impact:** Medium — reduces drift and improves contributor experience when editing hooks.
|
||||
|
||||
---
|
||||
|
||||
## 4. Cross-Harness and i18n
|
||||
|
||||
### 4.1 Skill/Agent Subset Sync (.agents/skills, .cursor/skills)
|
||||
|
||||
**Issue:** `.agents/skills/` (Codex) and `.cursor/skills/` are subsets of `skills/`. Adding or removing a skill in the main repo requires manually updating these subsets, which can be forgotten.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- Document in CONTRIBUTING.md that adding a skill may require updating `.agents/skills` and `.cursor/skills` (and how to do it).
|
||||
- Optionally: a CI check or script that compares `skills/` to the subsets and fails or warns if a skill is in one set but not the other when it should be (e.g. by convention or by a small manifest).
|
||||
|
||||
**Impact:** Low–Medium — reduces cross-harness drift.
|
||||
|
||||
---
|
||||
|
||||
### 4.2 Translation Drift (docs/ zh-CN, zh-TW, ja-JP)
|
||||
|
||||
**Issue:** Translations in `docs/` duplicate agents, commands, skills. As the English source evolves, translations can become outdated without clear process or tooling.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- Document a **translation process:** when to update (e.g. on release), who owns each locale, and how to detect stale content (e.g. diff file lists or key sections).
|
||||
- Consider: translation status file (e.g. `docs/i18n-status.md`) or CI that checks translation file existence/timestamps and warns if English was updated more recently than a translation.
|
||||
- Long-term: consider extraction/placeholder format (e.g. i18n keys) so translations reference the same structure as the English source.
|
||||
|
||||
**Impact:** Medium — improves experience for non-English users and reduces confusion from outdated translations.
|
||||
|
||||
---
|
||||
|
||||
## 5. Hooks and Scripts
|
||||
|
||||
### 5.1 Hook Runtime Consistency
|
||||
|
||||
**Issue:** Most hooks invoke Node scripts via `run-with-flags.js`; one path uses `run-with-flags-shell.sh` + `observe.sh`. The mixed runtime is documented but could be simplified over time.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
- Prefer Node for new hooks when possible (cross-platform, single runtime). If shell is required, document why and keep the surface small.
|
||||
- Ensure `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` are respected in all code paths (including shell) so behavior is consistent.
|
||||
|
||||
**Impact:** Low — maintains current design; improves if more hooks migrate to Node.
|
||||
|
||||
---
|
||||
|
||||
## 6. Summary Table
|
||||
|
||||
| Area | Improvement | Priority | Effort |
|
||||
|-------------------|--------------------------------------|----------|---------|
|
||||
| Doc sync | Sync AGENTS.md/README counts & table | High | Low |
|
||||
| Single source | Catalog script or manifest | High | Medium |
|
||||
| Test discovery | Glob-based test runner | High | Low |
|
||||
| Coverage | Add c8/nyc and CI coverage | Medium | Medium |
|
||||
| Hook schema in CI | Validate hooks.json via schema | Medium | Low |
|
||||
| Command map | Command → agent/skill registry | Medium | Medium |
|
||||
| Subset sync | Document/CI for .agents/.cursor | Low–Med | Low–Med |
|
||||
| Translations | Process + stale detection | Medium | Medium |
|
||||
| Hook runtime | Prefer Node; document shell use | Low | Low |
|
||||
|
||||
---
|
||||
|
||||
## 7. Quick Wins (Immediate)
|
||||
|
||||
1. **Update AGENTS.md:** Set agent count to 16; add chief-of-staff, loop-operator, harness-optimizer to the agent table; align skill/command counts with repo.
|
||||
2. **Test discovery:** Change `run-all.js` to discover `**/*.test.js` under `tests/` (with optional allowlist) so new tests are always run.
|
||||
3. **Wire hooks schema:** In `validate-hooks.js`, validate `hooks/hooks.json` against `schemas/hooks.schema.json` using ajv (or similar) and keep only hook-specific checks in the script.
|
||||
|
||||
These three can be done in one or two sessions and materially improve consistency and reliability.
|
||||
61
docs/COMMAND-AGENT-MAP.md
Normal file
61
docs/COMMAND-AGENT-MAP.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# Command → Agent / Skill Map
|
||||
|
||||
This document lists each slash command and the primary agent(s) or skills it invokes. Use it to discover which commands use which agents and to keep refactoring consistent.
|
||||
|
||||
| Command | Primary agent(s) | Notes |
|
||||
|---------|------------------|--------|
|
||||
| `/plan` | planner | Implementation planning before code |
|
||||
| `/tdd` | tdd-guide | Test-driven development |
|
||||
| `/code-review` | code-reviewer | Quality and security review |
|
||||
| `/build-fix` | build-error-resolver | Fix build/type errors |
|
||||
| `/e2e` | e2e-runner | Playwright E2E tests |
|
||||
| `/refactor-clean` | refactor-cleaner | Dead code removal |
|
||||
| `/update-docs` | doc-updater | Documentation sync |
|
||||
| `/update-codemaps` | doc-updater | Codemaps / architecture docs |
|
||||
| `/go-review` | go-reviewer | Go code review |
|
||||
| `/go-test` | tdd-guide | Go TDD workflow |
|
||||
| `/go-build` | go-build-resolver | Fix Go build errors |
|
||||
| `/python-review` | python-reviewer | Python code review |
|
||||
| `/harness-audit` | — | Harness scorecard (no single agent) |
|
||||
| `/loop-start` | loop-operator | Start autonomous loop |
|
||||
| `/loop-status` | loop-operator | Inspect loop status |
|
||||
| `/quality-gate` | — | Quality pipeline (hook-like) |
|
||||
| `/model-route` | — | Model recommendation (no agent) |
|
||||
| `/orchestrate` | planner, tdd-guide, code-reviewer, security-reviewer, architect | Multi-agent handoff |
|
||||
| `/multi-plan` | architect (Codex/Gemini prompts) | Multi-model planning |
|
||||
| `/multi-execute` | architect / frontend prompts | Multi-model execution |
|
||||
| `/multi-backend` | architect | Backend multi-service |
|
||||
| `/multi-frontend` | architect | Frontend multi-service |
|
||||
| `/multi-workflow` | architect | General multi-service |
|
||||
| `/learn` | — | continuous-learning skill, instincts |
|
||||
| `/learn-eval` | — | continuous-learning-v2, evaluate then save |
|
||||
| `/instinct-status` | — | continuous-learning-v2 |
|
||||
| `/instinct-import` | — | continuous-learning-v2 |
|
||||
| `/instinct-export` | — | continuous-learning-v2 |
|
||||
| `/evolve` | — | continuous-learning-v2, cluster instincts |
|
||||
| `/promote` | — | continuous-learning-v2 |
|
||||
| `/projects` | — | continuous-learning-v2 |
|
||||
| `/skill-create` | — | skill-create-output script, git history |
|
||||
| `/checkpoint` | — | verification-loop skill |
|
||||
| `/verify` | — | verification-loop skill |
|
||||
| `/eval` | — | eval-harness skill |
|
||||
| `/test-coverage` | — | Coverage analysis |
|
||||
| `/sessions` | — | Session history |
|
||||
| `/setup-pm` | — | Package manager setup script |
|
||||
| `/claw` | — | NanoClaw CLI (scripts/claw.js) |
|
||||
| `/pm2` | — | PM2 service lifecycle |
|
||||
| `/security-scan` | security-reviewer (skill) | AgentShield via security-scan skill |
|
||||
|
||||
## Skills referenced by commands
|
||||
|
||||
- **continuous-learning**, **continuous-learning-v2**: `/learn`, `/learn-eval`, `/instinct-*`, `/evolve`, `/promote`, `/projects`
|
||||
- **verification-loop**: `/checkpoint`, `/verify`
|
||||
- **eval-harness**: `/eval`
|
||||
- **security-scan**: `/security-scan` (runs AgentShield)
|
||||
- **strategic-compact**: suggested at compaction points (hooks)
|
||||
|
||||
## How to use this map
|
||||
|
||||
- **Discoverability:** Find which command triggers which agent (e.g. "use `/code-review` for code-reviewer").
|
||||
- **Refactoring:** When renaming or removing an agent, search this doc and the command files for references.
|
||||
- **CI/docs:** The catalog script (`node scripts/ci/catalog.js`) outputs agent/command/skill counts; this map complements it with command–agent relationships.
|
||||
907
package-lock.json
generated
907
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -83,10 +83,13 @@
|
||||
"postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'",
|
||||
"lint": "eslint . && markdownlint '**/*.md' --ignore node_modules",
|
||||
"claw": "node scripts/claw.js",
|
||||
"test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js"
|
||||
"test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js",
|
||||
"coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"ajv": "^8.18.0",
|
||||
"c8": "^10.1.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
"markdownlint-cli": "^0.47.0"
|
||||
|
||||
@@ -17,7 +17,8 @@ rules/
|
||||
├── typescript/ # TypeScript/JavaScript specific
|
||||
├── python/ # Python specific
|
||||
├── golang/ # Go specific
|
||||
└── swift/ # Swift specific
|
||||
├── swift/ # Swift specific
|
||||
└── perl/ # Perl specific
|
||||
```
|
||||
|
||||
- **common/** contains universal principles — no language-specific code examples.
|
||||
@@ -33,6 +34,7 @@ rules/
|
||||
./install.sh python
|
||||
./install.sh golang
|
||||
./install.sh swift
|
||||
./install.sh perl
|
||||
|
||||
# Install multiple languages at once
|
||||
./install.sh typescript python
|
||||
@@ -55,6 +57,7 @@ cp -r rules/typescript ~/.claude/rules/typescript
|
||||
cp -r rules/python ~/.claude/rules/python
|
||||
cp -r rules/golang ~/.claude/rules/golang
|
||||
cp -r rules/swift ~/.claude/rules/swift
|
||||
cp -r rules/perl ~/.claude/rules/perl
|
||||
|
||||
# Attention ! ! ! Configure according to your actual project requirements; the configuration here is for reference only.
|
||||
```
|
||||
@@ -88,7 +91,7 @@ To add support for a new language (e.g., `rust/`):
|
||||
When language-specific rules and common rules conflict, **language-specific rules take precedence** (specific overrides general). This follows the standard layered configuration pattern (similar to CSS specificity or `.gitignore` precedence).
|
||||
|
||||
- `rules/common/` defines universal defaults applicable to all projects.
|
||||
- `rules/golang/`, `rules/python/`, `rules/typescript/`, etc. override those defaults where language idioms differ.
|
||||
- `rules/golang/`, `rules/python/`, `rules/perl/`, `rules/typescript/`, etc. override those defaults where language idioms differ.
|
||||
|
||||
### Example
|
||||
|
||||
|
||||
46
rules/perl/coding-style.md
Normal file
46
rules/perl/coding-style.md
Normal file
@@ -0,0 +1,46 @@
|
||||
---
|
||||
paths:
|
||||
- "**/*.pl"
|
||||
- "**/*.pm"
|
||||
- "**/*.t"
|
||||
- "**/*.psgi"
|
||||
- "**/*.cgi"
|
||||
---
|
||||
# Perl Coding Style
|
||||
|
||||
> This file extends [common/coding-style.md](../common/coding-style.md) with Perl-specific content.
|
||||
|
||||
## Standards
|
||||
|
||||
- Always `use v5.36` (enables `strict`, `warnings`, `say`, subroutine signatures)
|
||||
- Use subroutine signatures — never unpack `@_` manually
|
||||
- Prefer `say` over `print` with explicit newlines
|
||||
|
||||
## Immutability
|
||||
|
||||
- Use **Moo** with `is => 'ro'` and `Types::Standard` for all attributes
|
||||
- Never use blessed hashrefs directly — always use Moo/Moose accessors
|
||||
- **OO override note**: Moo `has` attributes with `builder` or `default` are acceptable for computed read-only values
|
||||
|
||||
## Formatting
|
||||
|
||||
Use **perltidy** with these settings:
|
||||
|
||||
```
|
||||
-i=4 # 4-space indent
|
||||
-l=100 # 100 char line length
|
||||
-ce # cuddled else
|
||||
-bar # opening brace always right
|
||||
```
|
||||
|
||||
## Linting
|
||||
|
||||
Use **perlcritic** at severity 3 with themes: `core`, `pbp`, `security`.
|
||||
|
||||
```bash
|
||||
perlcritic --severity 3 --theme 'core || pbp || security' lib/
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
See skill: `perl-patterns` for comprehensive modern Perl idioms and best practices.
|
||||
22
rules/perl/hooks.md
Normal file
22
rules/perl/hooks.md
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
paths:
|
||||
- "**/*.pl"
|
||||
- "**/*.pm"
|
||||
- "**/*.t"
|
||||
- "**/*.psgi"
|
||||
- "**/*.cgi"
|
||||
---
|
||||
# Perl Hooks
|
||||
|
||||
> This file extends [common/hooks.md](../common/hooks.md) with Perl-specific content.
|
||||
|
||||
## PostToolUse Hooks
|
||||
|
||||
Configure in `~/.claude/settings.json`:
|
||||
|
||||
- **perltidy**: Auto-format `.pl` and `.pm` files after edit
|
||||
- **perlcritic**: Run lint check after editing `.pm` files
|
||||
|
||||
## Warnings
|
||||
|
||||
- Warn about `print` in non-script `.pm` files — use `say` or a logging module (e.g., `Log::Any`)
|
||||
76
rules/perl/patterns.md
Normal file
76
rules/perl/patterns.md
Normal file
@@ -0,0 +1,76 @@
|
||||
---
|
||||
paths:
|
||||
- "**/*.pl"
|
||||
- "**/*.pm"
|
||||
- "**/*.t"
|
||||
- "**/*.psgi"
|
||||
- "**/*.cgi"
|
||||
---
|
||||
# Perl Patterns
|
||||
|
||||
> This file extends [common/patterns.md](../common/patterns.md) with Perl-specific content.
|
||||
|
||||
## Repository Pattern
|
||||
|
||||
Use **DBI** or **DBIx::Class** behind an interface:
|
||||
|
||||
```perl
|
||||
package MyApp::Repo::User;
|
||||
use Moo;
|
||||
|
||||
has dbh => (is => 'ro', required => 1);
|
||||
|
||||
sub find_by_id ($self, $id) {
|
||||
my $sth = $self->dbh->prepare('SELECT * FROM users WHERE id = ?');
|
||||
$sth->execute($id);
|
||||
return $sth->fetchrow_hashref;
|
||||
}
|
||||
```
|
||||
|
||||
## DTOs / Value Objects
|
||||
|
||||
Use **Moo** classes with **Types::Standard** (equivalent to Python dataclasses):
|
||||
|
||||
```perl
|
||||
package MyApp::DTO::User;
|
||||
use Moo;
|
||||
use Types::Standard qw(Str Int);
|
||||
|
||||
has name => (is => 'ro', isa => Str, required => 1);
|
||||
has email => (is => 'ro', isa => Str, required => 1);
|
||||
has age => (is => 'ro', isa => Int);
|
||||
```
|
||||
|
||||
## Resource Management
|
||||
|
||||
- Always use **three-arg open** with `autodie`
|
||||
- Use **Path::Tiny** for file operations
|
||||
|
||||
```perl
|
||||
use autodie;
|
||||
use Path::Tiny;
|
||||
|
||||
my $content = path('config.json')->slurp_utf8;
|
||||
```
|
||||
|
||||
## Module Interface
|
||||
|
||||
Use `Exporter 'import'` with `@EXPORT_OK` — never `@EXPORT`:
|
||||
|
||||
```perl
|
||||
use Exporter 'import';
|
||||
our @EXPORT_OK = qw(parse_config validate_input);
|
||||
```
|
||||
|
||||
## Dependency Management
|
||||
|
||||
Use **cpanfile** + **carton** for reproducible installs:
|
||||
|
||||
```bash
|
||||
carton install
|
||||
carton exec prove -lr t/
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
See skill: `perl-patterns` for comprehensive modern Perl patterns and idioms.
|
||||
69
rules/perl/security.md
Normal file
69
rules/perl/security.md
Normal file
@@ -0,0 +1,69 @@
|
||||
---
|
||||
paths:
|
||||
- "**/*.pl"
|
||||
- "**/*.pm"
|
||||
- "**/*.t"
|
||||
- "**/*.psgi"
|
||||
- "**/*.cgi"
|
||||
---
|
||||
# Perl Security
|
||||
|
||||
> This file extends [common/security.md](../common/security.md) with Perl-specific content.
|
||||
|
||||
## Taint Mode
|
||||
|
||||
- Use `-T` flag on all CGI/web-facing scripts
|
||||
- Sanitize `%ENV` (`$ENV{PATH}`, `$ENV{CDPATH}`, etc.) before any external command
|
||||
|
||||
## Input Validation
|
||||
|
||||
- Use allowlist regex for untainting — never `/(.*)/s`
|
||||
- Validate all user input with explicit patterns:
|
||||
|
||||
```perl
|
||||
if ($input =~ /\A([a-zA-Z0-9_-]+)\z/) {
|
||||
my $clean = $1;
|
||||
}
|
||||
```
|
||||
|
||||
## File I/O
|
||||
|
||||
- **Three-arg open only** — never two-arg open
|
||||
- Prevent path traversal with `Cwd::realpath`:
|
||||
|
||||
```perl
|
||||
use Cwd 'realpath';
|
||||
my $safe_path = realpath($user_path);
|
||||
die "Path traversal" unless $safe_path =~ m{\A/allowed/directory/};
|
||||
```
|
||||
|
||||
## Process Execution
|
||||
|
||||
- Use **list-form `system()`** — never single-string form
|
||||
- Use **IPC::Run3** for capturing output
|
||||
- Never use backticks with variable interpolation
|
||||
|
||||
```perl
|
||||
system('grep', '-r', $pattern, $directory); # safe
|
||||
```
|
||||
|
||||
## SQL Injection Prevention
|
||||
|
||||
Always use DBI placeholders — never interpolate into SQL:
|
||||
|
||||
```perl
|
||||
my $sth = $dbh->prepare('SELECT * FROM users WHERE email = ?');
|
||||
$sth->execute($email);
|
||||
```
|
||||
|
||||
## Security Scanning
|
||||
|
||||
Run **perlcritic** with the security theme at severity 4+:
|
||||
|
||||
```bash
|
||||
perlcritic --severity 4 --theme security lib/
|
||||
```
|
||||
|
||||
## Reference
|
||||
|
||||
See skill: `perl-security` for comprehensive Perl security patterns, taint mode, and safe I/O.
|
||||
54
rules/perl/testing.md
Normal file
54
rules/perl/testing.md
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
paths:
|
||||
- "**/*.pl"
|
||||
- "**/*.pm"
|
||||
- "**/*.t"
|
||||
- "**/*.psgi"
|
||||
- "**/*.cgi"
|
||||
---
|
||||
# Perl Testing
|
||||
|
||||
> This file extends [common/testing.md](../common/testing.md) with Perl-specific content.
|
||||
|
||||
## Framework
|
||||
|
||||
Use **Test2::V0** for new projects (not Test::More):
|
||||
|
||||
```perl
|
||||
use Test2::V0;
|
||||
|
||||
is($result, 42, 'answer is correct');
|
||||
|
||||
done_testing;
|
||||
```
|
||||
|
||||
## Runner
|
||||
|
||||
```bash
|
||||
prove -l t/ # adds lib/ to @INC
|
||||
prove -lr -j8 t/ # recursive, 8 parallel jobs
|
||||
```
|
||||
|
||||
Always use `-l` to ensure `lib/` is on `@INC`.
|
||||
|
||||
## Coverage
|
||||
|
||||
Use **Devel::Cover** — target 80%+:
|
||||
|
||||
```bash
|
||||
cover -test
|
||||
```
|
||||
|
||||
## Mocking
|
||||
|
||||
- **Test::MockModule** — mock methods on existing modules
|
||||
- **Test::MockObject** — create test doubles from scratch
|
||||
|
||||
## Pitfalls
|
||||
|
||||
- Always end test files with `done_testing`
|
||||
- Never forget the `-l` flag with `prove`
|
||||
|
||||
## Reference
|
||||
|
||||
See skill: `perl-testing` for detailed Perl TDD patterns with Test2::V0, prove, and Devel::Cover.
|
||||
@@ -9,19 +9,128 @@ paths:
|
||||
|
||||
> This file extends [common/coding-style.md](../common/coding-style.md) with TypeScript/JavaScript specific content.
|
||||
|
||||
## Types and Interfaces
|
||||
|
||||
Use types to make public APIs, shared models, and component props explicit, readable, and reusable.
|
||||
|
||||
### Public APIs
|
||||
|
||||
- Add parameter and return types to exported functions, shared utilities, and public class methods
|
||||
- Let TypeScript infer obvious local variable types
|
||||
- Extract repeated inline object shapes into named types or interfaces
|
||||
|
||||
```typescript
|
||||
// WRONG: Exported function without explicit types
|
||||
export function formatUser(user) {
|
||||
return `${user.firstName} ${user.lastName}`
|
||||
}
|
||||
|
||||
// CORRECT: Explicit types on public APIs
|
||||
interface User {
|
||||
firstName: string
|
||||
lastName: string
|
||||
}
|
||||
|
||||
export function formatUser(user: User): string {
|
||||
return `${user.firstName} ${user.lastName}`
|
||||
}
|
||||
```
|
||||
|
||||
### Interfaces vs. Type Aliases
|
||||
|
||||
- Use `interface` for object shapes that may be extended or implemented
|
||||
- Use `type` for unions, intersections, tuples, mapped types, and utility types
|
||||
- Prefer string literal unions over `enum` unless an `enum` is required for interoperability
|
||||
|
||||
```typescript
|
||||
interface User {
|
||||
id: string
|
||||
email: string
|
||||
}
|
||||
|
||||
type UserRole = 'admin' | 'member'
|
||||
type UserWithRole = User & {
|
||||
role: UserRole
|
||||
}
|
||||
```
|
||||
|
||||
### Avoid `any`
|
||||
|
||||
- Avoid `any` in application code
|
||||
- Use `unknown` for external or untrusted input, then narrow it safely
|
||||
- Use generics when a value's type depends on the caller
|
||||
|
||||
```typescript
|
||||
// WRONG: any removes type safety
|
||||
function getErrorMessage(error: any) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
// CORRECT: unknown forces safe narrowing
|
||||
function getErrorMessage(error: unknown): string {
|
||||
if (error instanceof Error) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
return 'Unexpected error'
|
||||
}
|
||||
```
|
||||
|
||||
### React Props
|
||||
|
||||
- Define component props with a named `interface` or `type`
|
||||
- Type callback props explicitly
|
||||
- Do not use `React.FC` unless there is a specific reason to do so
|
||||
|
||||
```typescript
|
||||
interface User {
|
||||
id: string
|
||||
email: string
|
||||
}
|
||||
|
||||
interface UserCardProps {
|
||||
user: User
|
||||
onSelect: (id: string) => void
|
||||
}
|
||||
|
||||
function UserCard({ user, onSelect }: UserCardProps) {
|
||||
return <button onClick={() => onSelect(user.id)}>{user.email}</button>
|
||||
}
|
||||
```
|
||||
|
||||
### JavaScript Files
|
||||
|
||||
- In `.js` and `.jsx` files, use JSDoc when types improve clarity and a TypeScript migration is not practical
|
||||
- Keep JSDoc aligned with runtime behavior
|
||||
|
||||
```javascript
|
||||
/**
|
||||
* @param {{ firstName: string, lastName: string }} user
|
||||
* @returns {string}
|
||||
*/
|
||||
export function formatUser(user) {
|
||||
return `${user.firstName} ${user.lastName}`
|
||||
}
|
||||
```
|
||||
|
||||
## Immutability
|
||||
|
||||
Use spread operator for immutable updates:
|
||||
|
||||
```typescript
|
||||
interface User {
|
||||
id: string
|
||||
name: string
|
||||
}
|
||||
|
||||
// WRONG: Mutation
|
||||
function updateUser(user, name) {
|
||||
user.name = name // MUTATION!
|
||||
function updateUser(user: User, name: string): User {
|
||||
user.name = name // MUTATION!
|
||||
return user
|
||||
}
|
||||
|
||||
// CORRECT: Immutability
|
||||
function updateUser(user, name) {
|
||||
function updateUser(user: Readonly<User>, name: string): User {
|
||||
return {
|
||||
...user,
|
||||
name
|
||||
@@ -31,31 +140,56 @@ function updateUser(user, name) {
|
||||
|
||||
## Error Handling
|
||||
|
||||
Use async/await with try-catch:
|
||||
Use async/await with try-catch and narrow unknown errors safely:
|
||||
|
||||
```typescript
|
||||
try {
|
||||
const result = await riskyOperation()
|
||||
return result
|
||||
} catch (error) {
|
||||
console.error('Operation failed:', error)
|
||||
throw new Error('Detailed user-friendly message')
|
||||
interface User {
|
||||
id: string
|
||||
email: string
|
||||
}
|
||||
|
||||
declare function riskyOperation(userId: string): Promise<User>
|
||||
|
||||
function getErrorMessage(error: unknown): string {
|
||||
if (error instanceof Error) {
|
||||
return error.message
|
||||
}
|
||||
|
||||
return 'Unexpected error'
|
||||
}
|
||||
|
||||
const logger = {
|
||||
error: (message: string, error: unknown) => {
|
||||
// Replace with your production logger (for example, pino or winston).
|
||||
}
|
||||
}
|
||||
|
||||
async function loadUser(userId: string): Promise<User> {
|
||||
try {
|
||||
const result = await riskyOperation(userId)
|
||||
return result
|
||||
} catch (error: unknown) {
|
||||
logger.error('Operation failed', error)
|
||||
throw new Error(getErrorMessage(error))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Input Validation
|
||||
|
||||
Use Zod for schema-based validation:
|
||||
Use Zod for schema-based validation and infer types from the schema:
|
||||
|
||||
```typescript
|
||||
import { z } from 'zod'
|
||||
|
||||
const schema = z.object({
|
||||
const userSchema = z.object({
|
||||
email: z.string().email(),
|
||||
age: z.number().int().min(0).max(150)
|
||||
})
|
||||
|
||||
const validated = schema.parse(input)
|
||||
type UserInput = z.infer<typeof userSchema>
|
||||
|
||||
const validated: UserInput = userSchema.parse(input)
|
||||
```
|
||||
|
||||
## Console.log
|
||||
|
||||
@@ -1,9 +1,17 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Claude Code Hooks Configuration",
|
||||
"description": "Configuration for Claude Code hooks. Event types are validated at runtime and must be one of: PreToolUse, PostToolUse, PreCompact, SessionStart, SessionEnd, Stop, Notification, SubagentStop",
|
||||
"description": "Configuration for Claude Code hooks. Supports current Claude Code hook events and hook action types.",
|
||||
"$defs": {
|
||||
"hookItem": {
|
||||
"stringArray": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
},
|
||||
"minItems": 1
|
||||
},
|
||||
"commandHookItem": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"type",
|
||||
@@ -12,28 +20,17 @@
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"PreToolUse",
|
||||
"PostToolUse",
|
||||
"PreCompact",
|
||||
"SessionStart",
|
||||
"SessionEnd",
|
||||
"Stop",
|
||||
"Notification",
|
||||
"SubagentStop"
|
||||
],
|
||||
"description": "Hook event type that triggers this hook"
|
||||
"const": "command",
|
||||
"description": "Run a local command"
|
||||
},
|
||||
"command": {
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
},
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
"$ref": "#/$defs/stringArray"
|
||||
}
|
||||
]
|
||||
},
|
||||
@@ -46,17 +43,94 @@
|
||||
"minimum": 0,
|
||||
"description": "Timeout in seconds for async hooks"
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": true
|
||||
},
|
||||
"httpHookItem": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"type",
|
||||
"url"
|
||||
],
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"const": "http"
|
||||
},
|
||||
"url": {
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
},
|
||||
"headers": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"allowedEnvVars": {
|
||||
"$ref": "#/$defs/stringArray"
|
||||
},
|
||||
"timeout": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
}
|
||||
},
|
||||
"additionalProperties": true
|
||||
},
|
||||
"promptHookItem": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"type",
|
||||
"prompt"
|
||||
],
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": ["prompt", "agent"]
|
||||
},
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
},
|
||||
"model": {
|
||||
"type": "string",
|
||||
"minLength": 1
|
||||
},
|
||||
"timeout": {
|
||||
"type": "number",
|
||||
"minimum": 0
|
||||
}
|
||||
},
|
||||
"additionalProperties": true
|
||||
},
|
||||
"hookItem": {
|
||||
"oneOf": [
|
||||
{
|
||||
"$ref": "#/$defs/commandHookItem"
|
||||
},
|
||||
{
|
||||
"$ref": "#/$defs/httpHookItem"
|
||||
},
|
||||
{
|
||||
"$ref": "#/$defs/promptHookItem"
|
||||
}
|
||||
]
|
||||
},
|
||||
"matcherEntry": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"matcher",
|
||||
"hooks"
|
||||
],
|
||||
"properties": {
|
||||
"matcher": {
|
||||
"type": "string"
|
||||
"oneOf": [
|
||||
{
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"type": "object"
|
||||
}
|
||||
]
|
||||
},
|
||||
"hooks": {
|
||||
"type": "array",
|
||||
@@ -79,6 +153,28 @@
|
||||
},
|
||||
"hooks": {
|
||||
"type": "object",
|
||||
"propertyNames": {
|
||||
"enum": [
|
||||
"SessionStart",
|
||||
"UserPromptSubmit",
|
||||
"PreToolUse",
|
||||
"PermissionRequest",
|
||||
"PostToolUse",
|
||||
"PostToolUseFailure",
|
||||
"Notification",
|
||||
"SubagentStart",
|
||||
"Stop",
|
||||
"SubagentStop",
|
||||
"PreCompact",
|
||||
"InstructionsLoaded",
|
||||
"TeammateIdle",
|
||||
"TaskCompleted",
|
||||
"ConfigChange",
|
||||
"WorktreeCreate",
|
||||
"WorktreeRemove",
|
||||
"SessionEnd"
|
||||
]
|
||||
},
|
||||
"additionalProperties": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
@@ -98,4 +194,4 @@
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,25 @@
|
||||
"agents": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" }
|
||||
},
|
||||
"features": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"agents": { "type": "integer", "minimum": 0 },
|
||||
"commands": { "type": "integer", "minimum": 0 },
|
||||
"skills": { "type": "integer", "minimum": 0 },
|
||||
"configAssets": { "type": "boolean" },
|
||||
"hookEvents": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" }
|
||||
},
|
||||
"customTools": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" }
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"additionalProperties": false
|
||||
}
|
||||
|
||||
83
scripts/ci/catalog.js
Normal file
83
scripts/ci/catalog.js
Normal file
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Catalog agents, commands, and skills from the repo.
|
||||
* Outputs JSON with counts and lists for CI/docs sync.
|
||||
*
|
||||
* Usage: node scripts/ci/catalog.js [--json|--md]
|
||||
* Default: --json to stdout
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const ROOT = path.join(__dirname, '../..');
|
||||
const AGENTS_DIR = path.join(ROOT, 'agents');
|
||||
const COMMANDS_DIR = path.join(ROOT, 'commands');
|
||||
const SKILLS_DIR = path.join(ROOT, 'skills');
|
||||
|
||||
function listAgents() {
|
||||
if (!fs.existsSync(AGENTS_DIR)) return [];
|
||||
try {
|
||||
return fs.readdirSync(AGENTS_DIR)
|
||||
.filter(f => f.endsWith('.md'))
|
||||
.map(f => f.slice(0, -3))
|
||||
.sort();
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to read agents directory (${AGENTS_DIR}): ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function listCommands() {
|
||||
if (!fs.existsSync(COMMANDS_DIR)) return [];
|
||||
try {
|
||||
return fs.readdirSync(COMMANDS_DIR)
|
||||
.filter(f => f.endsWith('.md'))
|
||||
.map(f => f.slice(0, -3))
|
||||
.sort();
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to read commands directory (${COMMANDS_DIR}): ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function listSkills() {
|
||||
if (!fs.existsSync(SKILLS_DIR)) return [];
|
||||
try {
|
||||
const entries = fs.readdirSync(SKILLS_DIR, { withFileTypes: true });
|
||||
return entries
|
||||
.filter(e => e.isDirectory() && fs.existsSync(path.join(SKILLS_DIR, e.name, 'SKILL.md')))
|
||||
.map(e => e.name)
|
||||
.sort();
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to read skills directory (${SKILLS_DIR}): ${error.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
function run() {
|
||||
const agents = listAgents();
|
||||
const commands = listCommands();
|
||||
const skills = listSkills();
|
||||
|
||||
const catalog = {
|
||||
agents: { count: agents.length, list: agents },
|
||||
commands: { count: commands.length, list: commands },
|
||||
skills: { count: skills.length, list: skills }
|
||||
};
|
||||
|
||||
const format = process.argv[2] === '--md' ? 'md' : 'json';
|
||||
if (format === 'md') {
|
||||
console.log('# ECC Catalog (generated)\n');
|
||||
console.log(`- **Agents:** ${catalog.agents.count}`);
|
||||
console.log(`- **Commands:** ${catalog.commands.count}`);
|
||||
console.log(`- **Skills:** ${catalog.skills.count}\n`);
|
||||
console.log('## Agents\n');
|
||||
catalog.agents.list.forEach(a => { console.log(`- ${a}`); });
|
||||
console.log('\n## Commands\n');
|
||||
catalog.commands.list.forEach(c => { console.log(`- ${c}`); });
|
||||
console.log('\n## Skills\n');
|
||||
catalog.skills.list.forEach(s => { console.log(`- ${s}`); });
|
||||
} else {
|
||||
console.log(JSON.stringify(catalog, null, 2));
|
||||
}
|
||||
}
|
||||
|
||||
run();
|
||||
@@ -1,14 +1,45 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Validate hooks.json schema
|
||||
* Validate hooks.json schema and hook entry rules.
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const vm = require('vm');
|
||||
const Ajv = require('ajv');
|
||||
|
||||
const HOOKS_FILE = path.join(__dirname, '../../hooks/hooks.json');
|
||||
const VALID_EVENTS = ['PreToolUse', 'PostToolUse', 'PreCompact', 'SessionStart', 'SessionEnd', 'Stop', 'Notification', 'SubagentStop'];
|
||||
const HOOKS_SCHEMA_PATH = path.join(__dirname, '../../schemas/hooks.schema.json');
|
||||
const VALID_EVENTS = [
|
||||
'SessionStart',
|
||||
'UserPromptSubmit',
|
||||
'PreToolUse',
|
||||
'PermissionRequest',
|
||||
'PostToolUse',
|
||||
'PostToolUseFailure',
|
||||
'Notification',
|
||||
'SubagentStart',
|
||||
'Stop',
|
||||
'SubagentStop',
|
||||
'PreCompact',
|
||||
'InstructionsLoaded',
|
||||
'TeammateIdle',
|
||||
'TaskCompleted',
|
||||
'ConfigChange',
|
||||
'WorktreeCreate',
|
||||
'WorktreeRemove',
|
||||
'SessionEnd',
|
||||
];
|
||||
const VALID_HOOK_TYPES = ['command', 'http', 'prompt', 'agent'];
|
||||
const EVENTS_WITHOUT_MATCHER = new Set(['UserPromptSubmit', 'Notification', 'Stop', 'SubagentStop']);
|
||||
|
||||
function isNonEmptyString(value) {
|
||||
return typeof value === 'string' && value.trim().length > 0;
|
||||
}
|
||||
|
||||
function isNonEmptyStringArray(value) {
|
||||
return Array.isArray(value) && value.length > 0 && value.every(item => isNonEmptyString(item));
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate a single hook entry has required fields and valid inline JS
|
||||
@@ -22,32 +53,72 @@ function validateHookEntry(hook, label) {
|
||||
if (!hook.type || typeof hook.type !== 'string') {
|
||||
console.error(`ERROR: ${label} missing or invalid 'type' field`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
// Validate optional async and timeout fields
|
||||
if ('async' in hook && typeof hook.async !== 'boolean') {
|
||||
console.error(`ERROR: ${label} 'async' must be a boolean`);
|
||||
} else if (!VALID_HOOK_TYPES.includes(hook.type)) {
|
||||
console.error(`ERROR: ${label} has unsupported hook type '${hook.type}'`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if ('timeout' in hook && (typeof hook.timeout !== 'number' || hook.timeout < 0)) {
|
||||
console.error(`ERROR: ${label} 'timeout' must be a non-negative number`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if (!hook.command || (typeof hook.command !== 'string' && !Array.isArray(hook.command)) || (typeof hook.command === 'string' && !hook.command.trim()) || (Array.isArray(hook.command) && (hook.command.length === 0 || !hook.command.every(s => typeof s === 'string' && s.length > 0)))) {
|
||||
console.error(`ERROR: ${label} missing or invalid 'command' field`);
|
||||
hasErrors = true;
|
||||
} else if (typeof hook.command === 'string') {
|
||||
// Validate inline JS syntax in node -e commands
|
||||
const nodeEMatch = hook.command.match(/^node -e "(.*)"$/s);
|
||||
if (nodeEMatch) {
|
||||
try {
|
||||
new vm.Script(nodeEMatch[1].replace(/\\\\/g, '\\').replace(/\\"/g, '"').replace(/\\n/g, '\n').replace(/\\t/g, '\t'));
|
||||
} catch (syntaxErr) {
|
||||
console.error(`ERROR: ${label} has invalid inline JS: ${syntaxErr.message}`);
|
||||
hasErrors = true;
|
||||
if (hook.type === 'command') {
|
||||
if ('async' in hook && typeof hook.async !== 'boolean') {
|
||||
console.error(`ERROR: ${label} 'async' must be a boolean`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if (!isNonEmptyString(hook.command) && !isNonEmptyStringArray(hook.command)) {
|
||||
console.error(`ERROR: ${label} missing or invalid 'command' field`);
|
||||
hasErrors = true;
|
||||
} else if (typeof hook.command === 'string') {
|
||||
const nodeEMatch = hook.command.match(/^node -e "(.*)"$/s);
|
||||
if (nodeEMatch) {
|
||||
try {
|
||||
new vm.Script(nodeEMatch[1].replace(/\\\\/g, '\\').replace(/\\"/g, '"').replace(/\\n/g, '\n').replace(/\\t/g, '\t'));
|
||||
} catch (syntaxErr) {
|
||||
console.error(`ERROR: ${label} has invalid inline JS: ${syntaxErr.message}`);
|
||||
hasErrors = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return hasErrors;
|
||||
}
|
||||
|
||||
if ('async' in hook) {
|
||||
console.error(`ERROR: ${label} 'async' is only supported for command hooks`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if (hook.type === 'http') {
|
||||
if (!isNonEmptyString(hook.url)) {
|
||||
console.error(`ERROR: ${label} missing or invalid 'url' field`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if ('headers' in hook && (typeof hook.headers !== 'object' || hook.headers === null || Array.isArray(hook.headers) || !Object.values(hook.headers).every(value => typeof value === 'string'))) {
|
||||
console.error(`ERROR: ${label} 'headers' must be an object with string values`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if ('allowedEnvVars' in hook && (!Array.isArray(hook.allowedEnvVars) || !hook.allowedEnvVars.every(value => isNonEmptyString(value)))) {
|
||||
console.error(`ERROR: ${label} 'allowedEnvVars' must be an array of strings`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
return hasErrors;
|
||||
}
|
||||
|
||||
if (!isNonEmptyString(hook.prompt)) {
|
||||
console.error(`ERROR: ${label} missing or invalid 'prompt' field`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
if ('model' in hook && !isNonEmptyString(hook.model)) {
|
||||
console.error(`ERROR: ${label} 'model' must be a non-empty string`);
|
||||
hasErrors = true;
|
||||
}
|
||||
|
||||
return hasErrors;
|
||||
@@ -67,6 +138,20 @@ function validateHooks() {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Validate against JSON schema
|
||||
if (fs.existsSync(HOOKS_SCHEMA_PATH)) {
|
||||
const schema = JSON.parse(fs.readFileSync(HOOKS_SCHEMA_PATH, 'utf-8'));
|
||||
const ajv = new Ajv({ allErrors: true });
|
||||
const validate = ajv.compile(schema);
|
||||
const valid = validate(data);
|
||||
if (!valid) {
|
||||
for (const err of validate.errors) {
|
||||
console.error(`ERROR: hooks.json schema: ${err.instancePath || '/'} ${err.message}`);
|
||||
}
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Support both object format { hooks: {...} } and array format
|
||||
const hooks = data.hooks || data;
|
||||
let hasErrors = false;
|
||||
@@ -94,9 +179,12 @@ function validateHooks() {
|
||||
hasErrors = true;
|
||||
continue;
|
||||
}
|
||||
if (!matcher.matcher) {
|
||||
if (!('matcher' in matcher) && !EVENTS_WITHOUT_MATCHER.has(eventType)) {
|
||||
console.error(`ERROR: ${eventType}[${i}] missing 'matcher' field`);
|
||||
hasErrors = true;
|
||||
} else if ('matcher' in matcher && typeof matcher.matcher !== 'string' && (typeof matcher.matcher !== 'object' || matcher.matcher === null)) {
|
||||
console.error(`ERROR: ${eventType}[${i}] has invalid 'matcher' field`);
|
||||
hasErrors = true;
|
||||
}
|
||||
if (!matcher.hooks || !Array.isArray(matcher.hooks)) {
|
||||
console.error(`ERROR: ${eventType}[${i}] missing 'hooks' array`);
|
||||
@@ -116,9 +204,12 @@ function validateHooks() {
|
||||
// Array format (legacy)
|
||||
for (let i = 0; i < hooks.length; i++) {
|
||||
const hook = hooks[i];
|
||||
if (!hook.matcher) {
|
||||
if (!('matcher' in hook)) {
|
||||
console.error(`ERROR: Hook ${i} missing 'matcher' field`);
|
||||
hasErrors = true;
|
||||
} else if (typeof hook.matcher !== 'string' && (typeof hook.matcher !== 'object' || hook.matcher === null)) {
|
||||
console.error(`ERROR: Hook ${i} has invalid 'matcher' field`);
|
||||
hasErrors = true;
|
||||
}
|
||||
if (!hook.hooks || !Array.isArray(hook.hooks)) {
|
||||
console.error(`ERROR: Hook ${i} missing 'hooks' array`);
|
||||
|
||||
@@ -7,83 +7,70 @@
|
||||
* Runs after Edit tool use. If the edited file is a JS/TS file,
|
||||
* auto-detects the project formatter (Biome or Prettier) by looking
|
||||
* for config files, then formats accordingly.
|
||||
*
|
||||
* For Biome, uses `check --write` (format + lint in one pass) to
|
||||
* avoid a redundant second invocation from quality-gate.js.
|
||||
*
|
||||
* Prefers the local node_modules/.bin binary over npx to skip
|
||||
* package-resolution overhead (~200-500ms savings per invocation).
|
||||
*
|
||||
* Fails silently if no formatter is found or installed.
|
||||
*/
|
||||
|
||||
const { execFileSync } = require('child_process');
|
||||
const fs = require('fs');
|
||||
const { execFileSync, spawnSync } = require('child_process');
|
||||
const path = require('path');
|
||||
|
||||
// Shell metacharacters that cmd.exe interprets as command separators/operators
|
||||
const UNSAFE_PATH_CHARS = /[&|<>^%!]/;
|
||||
|
||||
const { findProjectRoot, detectFormatter, resolveFormatterBin } = require('../lib/resolve-formatter');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024; // 1MB limit
|
||||
let data = '';
|
||||
process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
function findProjectRoot(startDir) {
|
||||
let dir = startDir;
|
||||
while (dir !== path.dirname(dir)) {
|
||||
if (fs.existsSync(path.join(dir, 'package.json'))) return dir;
|
||||
dir = path.dirname(dir);
|
||||
}
|
||||
return startDir;
|
||||
}
|
||||
|
||||
function detectFormatter(projectRoot) {
|
||||
const biomeConfigs = ['biome.json', 'biome.jsonc'];
|
||||
for (const cfg of biomeConfigs) {
|
||||
if (fs.existsSync(path.join(projectRoot, cfg))) return 'biome';
|
||||
}
|
||||
|
||||
const prettierConfigs = [
|
||||
'.prettierrc',
|
||||
'.prettierrc.json',
|
||||
'.prettierrc.js',
|
||||
'.prettierrc.cjs',
|
||||
'.prettierrc.mjs',
|
||||
'.prettierrc.yml',
|
||||
'.prettierrc.yaml',
|
||||
'.prettierrc.toml',
|
||||
'prettier.config.js',
|
||||
'prettier.config.cjs',
|
||||
'prettier.config.mjs',
|
||||
];
|
||||
for (const cfg of prettierConfigs) {
|
||||
if (fs.existsSync(path.join(projectRoot, cfg))) return 'prettier';
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function getFormatterCommand(formatter, filePath) {
|
||||
const npxBin = process.platform === 'win32' ? 'npx.cmd' : 'npx';
|
||||
if (formatter === 'biome') {
|
||||
return { bin: npxBin, args: ['@biomejs/biome', 'format', '--write', filePath] };
|
||||
}
|
||||
if (formatter === 'prettier') {
|
||||
return { bin: npxBin, args: ['prettier', '--write', filePath] };
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
/**
|
||||
* Core logic — exported so run-with-flags.js can call directly
|
||||
* without spawning a child process.
|
||||
*
|
||||
* @param {string} rawInput - Raw JSON string from stdin
|
||||
* @returns {string} The original input (pass-through)
|
||||
*/
|
||||
function run(rawInput) {
|
||||
try {
|
||||
const input = JSON.parse(data);
|
||||
const input = JSON.parse(rawInput);
|
||||
const filePath = input.tool_input?.file_path;
|
||||
|
||||
if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) {
|
||||
try {
|
||||
const projectRoot = findProjectRoot(path.dirname(path.resolve(filePath)));
|
||||
const resolvedFilePath = path.resolve(filePath);
|
||||
const projectRoot = findProjectRoot(path.dirname(resolvedFilePath));
|
||||
const formatter = detectFormatter(projectRoot);
|
||||
const cmd = getFormatterCommand(formatter, filePath);
|
||||
if (!formatter) return rawInput;
|
||||
|
||||
if (cmd) {
|
||||
execFileSync(cmd.bin, cmd.args, {
|
||||
const resolved = resolveFormatterBin(projectRoot, formatter);
|
||||
if (!resolved) return rawInput;
|
||||
|
||||
// Biome: `check --write` = format + lint in one pass
|
||||
// Prettier: `--write` = format only
|
||||
const args = formatter === 'biome' ? [...resolved.prefix, 'check', '--write', resolvedFilePath] : [...resolved.prefix, '--write', resolvedFilePath];
|
||||
|
||||
if (process.platform === 'win32' && resolved.bin.endsWith('.cmd')) {
|
||||
// Windows: .cmd files require shell to execute. Guard against
|
||||
// command injection by rejecting paths with shell metacharacters.
|
||||
if (UNSAFE_PATH_CHARS.test(resolvedFilePath)) {
|
||||
throw new Error('File path contains unsafe shell characters');
|
||||
}
|
||||
const result = spawnSync(resolved.bin, args, {
|
||||
cwd: projectRoot,
|
||||
shell: true,
|
||||
stdio: 'pipe',
|
||||
timeout: 15000
|
||||
});
|
||||
if (result.error) throw result.error;
|
||||
if (typeof result.status === 'number' && result.status !== 0) {
|
||||
throw new Error(result.stderr?.toString() || `Formatter exited with status ${result.status}`);
|
||||
}
|
||||
} else {
|
||||
execFileSync(resolved.bin, args, {
|
||||
cwd: projectRoot,
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 15000
|
||||
@@ -97,6 +84,26 @@ process.stdin.on('end', () => {
|
||||
// Invalid input — pass through
|
||||
}
|
||||
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
});
|
||||
return rawInput;
|
||||
}
|
||||
|
||||
// ── stdin entry point (backwards-compatible) ────────────────────
|
||||
if (require.main === module) {
|
||||
let data = '';
|
||||
process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
data = run(data);
|
||||
process.stdout.write(data);
|
||||
process.exit(0);
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = { run };
|
||||
|
||||
@@ -2,8 +2,149 @@
|
||||
'use strict';
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
const path = require('path');
|
||||
const { splitShellSegments } = require('../lib/shell-split');
|
||||
|
||||
const DEV_COMMAND_WORDS = new Set([
|
||||
'npm',
|
||||
'pnpm',
|
||||
'yarn',
|
||||
'bun',
|
||||
'npx',
|
||||
'tmux'
|
||||
]);
|
||||
const SKIPPABLE_PREFIX_WORDS = new Set(['env', 'command', 'builtin', 'exec', 'noglob', 'sudo', 'nohup']);
|
||||
const PREFIX_OPTION_VALUE_WORDS = {
|
||||
env: new Set(['-u', '-C', '-S', '--unset', '--chdir', '--split-string']),
|
||||
sudo: new Set([
|
||||
'-u',
|
||||
'-g',
|
||||
'-h',
|
||||
'-p',
|
||||
'-r',
|
||||
'-t',
|
||||
'-C',
|
||||
'--user',
|
||||
'--group',
|
||||
'--host',
|
||||
'--prompt',
|
||||
'--role',
|
||||
'--type',
|
||||
'--close-from'
|
||||
])
|
||||
};
|
||||
|
||||
function readToken(input, startIndex) {
|
||||
let index = startIndex;
|
||||
while (index < input.length && /\s/.test(input[index])) index += 1;
|
||||
if (index >= input.length) return null;
|
||||
|
||||
let token = '';
|
||||
let quote = null;
|
||||
|
||||
while (index < input.length) {
|
||||
const ch = input[index];
|
||||
|
||||
if (quote) {
|
||||
if (ch === quote) {
|
||||
quote = null;
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '\\' && quote === '"' && index + 1 < input.length) {
|
||||
token += input[index + 1];
|
||||
index += 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
token += ch;
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '"' || ch === "'") {
|
||||
quote = ch;
|
||||
index += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (/\s/.test(ch)) break;
|
||||
|
||||
if (ch === '\\' && index + 1 < input.length) {
|
||||
token += input[index + 1];
|
||||
index += 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
token += ch;
|
||||
index += 1;
|
||||
}
|
||||
|
||||
return { token, end: index };
|
||||
}
|
||||
|
||||
function shouldSkipOptionValue(wrapper, optionToken) {
|
||||
if (!wrapper || !optionToken || optionToken.includes('=')) return false;
|
||||
const optionSet = PREFIX_OPTION_VALUE_WORDS[wrapper];
|
||||
return Boolean(optionSet && optionSet.has(optionToken));
|
||||
}
|
||||
|
||||
function isOptionToken(token) {
|
||||
return token.startsWith('-') && token.length > 1;
|
||||
}
|
||||
|
||||
function normalizeCommandWord(token) {
|
||||
if (!token) return '';
|
||||
const base = path.basename(token).toLowerCase();
|
||||
return base.replace(/\.(cmd|exe|bat)$/i, '');
|
||||
}
|
||||
|
||||
function getLeadingCommandWord(segment) {
|
||||
let index = 0;
|
||||
let activeWrapper = null;
|
||||
let skipNextValue = false;
|
||||
|
||||
while (index < segment.length) {
|
||||
const parsed = readToken(segment, index);
|
||||
if (!parsed) return null;
|
||||
index = parsed.end;
|
||||
|
||||
const token = parsed.token;
|
||||
if (!token) continue;
|
||||
|
||||
if (skipNextValue) {
|
||||
skipNextValue = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (token === '--') {
|
||||
activeWrapper = null;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (/^[A-Za-z_][A-Za-z0-9_]*=.*/.test(token)) continue;
|
||||
|
||||
const normalizedToken = normalizeCommandWord(token);
|
||||
|
||||
if (SKIPPABLE_PREFIX_WORDS.has(normalizedToken)) {
|
||||
activeWrapper = normalizedToken;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (activeWrapper && isOptionToken(token)) {
|
||||
if (shouldSkipOptionValue(activeWrapper, token)) {
|
||||
skipNextValue = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
return normalizedToken;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
let raw = '';
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
@@ -23,7 +164,13 @@ process.stdin.on('end', () => {
|
||||
const tmuxLauncher = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
|
||||
const devPattern = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn\s+dev|bun\s+run\s+dev)\b/;
|
||||
|
||||
const hasBlockedDev = segments.some(segment => devPattern.test(segment) && !tmuxLauncher.test(segment));
|
||||
const hasBlockedDev = segments.some(segment => {
|
||||
const commandWord = getLeadingCommandWord(segment);
|
||||
if (!commandWord || !DEV_COMMAND_WORDS.has(commandWord)) {
|
||||
return false;
|
||||
}
|
||||
return devPattern.test(segment) && !tmuxLauncher.test(segment);
|
||||
});
|
||||
|
||||
if (hasBlockedDev) {
|
||||
console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');
|
||||
|
||||
@@ -5,6 +5,11 @@
|
||||
* Runs lightweight quality checks after file edits.
|
||||
* - Targets one file when file_path is provided
|
||||
* - Falls back to no-op when language/tooling is unavailable
|
||||
*
|
||||
* For JS/TS files with Biome, this hook is skipped because
|
||||
* post-edit-format.js already runs `biome check --write`.
|
||||
* This hook still handles .json/.md files for Biome, and all
|
||||
* Prettier / Go / Python checks.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
@@ -13,56 +18,105 @@ const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let raw = '';
|
||||
const { findProjectRoot, detectFormatter, resolveFormatterBin } = require('../lib/resolve-formatter');
|
||||
|
||||
function run(command, args, cwd = process.cwd()) {
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
|
||||
/**
|
||||
* Execute a command synchronously, returning the spawnSync result.
|
||||
*
|
||||
* @param {string} command - Executable path or name
|
||||
* @param {string[]} args - Arguments to pass
|
||||
* @param {string} [cwd] - Working directory (defaults to process.cwd())
|
||||
* @returns {import('child_process').SpawnSyncReturns<string>}
|
||||
*/
|
||||
function exec(command, args, cwd = process.cwd()) {
|
||||
return spawnSync(command, args, {
|
||||
cwd,
|
||||
encoding: 'utf8',
|
||||
env: process.env,
|
||||
timeout: 15000
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a message to stderr for logging.
|
||||
*
|
||||
* @param {string} msg - Message to log
|
||||
*/
|
||||
function log(msg) {
|
||||
process.stderr.write(`${msg}\n`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Run quality-gate checks for a single file based on its extension.
|
||||
* Skips JS/TS files when Biome is configured (handled by post-edit-format).
|
||||
*
|
||||
* @param {string} filePath - Path to the edited file
|
||||
*/
|
||||
function maybeRunQualityGate(filePath) {
|
||||
if (!filePath || !fs.existsSync(filePath)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Resolve to absolute path so projectRoot-relative comparisons work
|
||||
filePath = path.resolve(filePath);
|
||||
|
||||
const ext = path.extname(filePath).toLowerCase();
|
||||
const fix = String(process.env.ECC_QUALITY_GATE_FIX || '').toLowerCase() === 'true';
|
||||
const strict = String(process.env.ECC_QUALITY_GATE_STRICT || '').toLowerCase() === 'true';
|
||||
|
||||
if (['.ts', '.tsx', '.js', '.jsx', '.json', '.md'].includes(ext)) {
|
||||
// Prefer biome if present
|
||||
if (fs.existsSync(path.join(process.cwd(), 'biome.json')) || fs.existsSync(path.join(process.cwd(), 'biome.jsonc'))) {
|
||||
const args = ['biome', 'check', filePath];
|
||||
const projectRoot = findProjectRoot(path.dirname(filePath));
|
||||
const formatter = detectFormatter(projectRoot);
|
||||
|
||||
if (formatter === 'biome') {
|
||||
// JS/TS already handled by post-edit-format via `biome check --write`
|
||||
if (['.ts', '.tsx', '.js', '.jsx'].includes(ext)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// .json / .md — still need quality gate
|
||||
const resolved = resolveFormatterBin(projectRoot, 'biome');
|
||||
if (!resolved) return;
|
||||
const args = [...resolved.prefix, 'check', filePath];
|
||||
if (fix) args.push('--write');
|
||||
const result = run('npx', args);
|
||||
const result = exec(resolved.bin, args, projectRoot);
|
||||
if (result.status !== 0 && strict) {
|
||||
log(`[QualityGate] Biome check failed for ${filePath}`);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Fallback to prettier when installed
|
||||
const prettierArgs = ['prettier', '--check', filePath];
|
||||
if (fix) {
|
||||
prettierArgs[1] = '--write';
|
||||
}
|
||||
const prettier = run('npx', prettierArgs);
|
||||
if (prettier.status !== 0 && strict) {
|
||||
log(`[QualityGate] Prettier check failed for ${filePath}`);
|
||||
if (formatter === 'prettier') {
|
||||
const resolved = resolveFormatterBin(projectRoot, 'prettier');
|
||||
if (!resolved) return;
|
||||
const args = [...resolved.prefix, fix ? '--write' : '--check', filePath];
|
||||
const result = exec(resolved.bin, args, projectRoot);
|
||||
if (result.status !== 0 && strict) {
|
||||
log(`[QualityGate] Prettier check failed for ${filePath}`);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// No formatter configured — skip
|
||||
return;
|
||||
}
|
||||
|
||||
if (ext === '.go' && fix) {
|
||||
run('gofmt', ['-w', filePath]);
|
||||
if (ext === '.go') {
|
||||
if (fix) {
|
||||
const r = exec('gofmt', ['-w', filePath]);
|
||||
if (r.status !== 0 && strict) {
|
||||
log(`[QualityGate] gofmt failed for ${filePath}`);
|
||||
}
|
||||
} else if (strict) {
|
||||
const r = exec('gofmt', ['-l', filePath]);
|
||||
if (r.status !== 0) {
|
||||
log(`[QualityGate] gofmt failed for ${filePath}`);
|
||||
} else if (r.stdout && r.stdout.trim()) {
|
||||
log(`[QualityGate] gofmt check failed for ${filePath}`);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -70,29 +124,45 @@ function maybeRunQualityGate(filePath) {
|
||||
const args = ['format'];
|
||||
if (!fix) args.push('--check');
|
||||
args.push(filePath);
|
||||
const r = run('ruff', args);
|
||||
const r = exec('ruff', args);
|
||||
if (r.status !== 0 && strict) {
|
||||
log(`[QualityGate] Ruff check failed for ${filePath}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - raw.length;
|
||||
raw += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
/**
|
||||
* Core logic — exported so run-with-flags.js can call directly.
|
||||
*
|
||||
* @param {string} rawInput - Raw JSON string from stdin
|
||||
* @returns {string} The original input (pass-through)
|
||||
*/
|
||||
function run(rawInput) {
|
||||
try {
|
||||
const input = JSON.parse(raw);
|
||||
const input = JSON.parse(rawInput);
|
||||
const filePath = String(input.tool_input?.file_path || '');
|
||||
maybeRunQualityGate(filePath);
|
||||
} catch {
|
||||
// Ignore parse errors.
|
||||
}
|
||||
return rawInput;
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
});
|
||||
// ── stdin entry point (backwards-compatible) ────────────────────
|
||||
if (require.main === module) {
|
||||
let raw = '';
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - raw.length;
|
||||
raw += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
const result = run(raw);
|
||||
process.stdout.write(result);
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = { run };
|
||||
|
||||
@@ -4,6 +4,8 @@ set -euo pipefail
|
||||
HOOK_ID="${1:-}"
|
||||
REL_SCRIPT_PATH="${2:-}"
|
||||
PROFILES_CSV="${3:-standard,strict}"
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(cd "${SCRIPT_DIR}/../.." && pwd)}"
|
||||
|
||||
# Preserve stdin for passthrough or script execution
|
||||
INPUT="$(cat)"
|
||||
@@ -14,13 +16,13 @@ if [[ -z "$HOOK_ID" || -z "$REL_SCRIPT_PATH" ]]; then
|
||||
fi
|
||||
|
||||
# Ask Node helper if this hook is enabled
|
||||
ENABLED="$(node "${CLAUDE_PLUGIN_ROOT}/scripts/hooks/check-hook-enabled.js" "$HOOK_ID" "$PROFILES_CSV" 2>/dev/null || echo yes)"
|
||||
ENABLED="$(node "${PLUGIN_ROOT}/scripts/hooks/check-hook-enabled.js" "$HOOK_ID" "$PROFILES_CSV" 2>/dev/null || echo yes)"
|
||||
if [[ "$ENABLED" != "yes" ]]; then
|
||||
printf '%s' "$INPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
SCRIPT_PATH="${CLAUDE_PLUGIN_ROOT}/${REL_SCRIPT_PATH}"
|
||||
SCRIPT_PATH="${PLUGIN_ROOT}/${REL_SCRIPT_PATH}"
|
||||
if [[ ! -f "$SCRIPT_PATH" ]]; then
|
||||
echo "[Hook] Script not found for ${HOOK_ID}: ${SCRIPT_PATH}" >&2
|
||||
printf '%s' "$INPUT"
|
||||
|
||||
@@ -52,7 +52,15 @@ async function main() {
|
||||
}
|
||||
|
||||
const pluginRoot = getPluginRoot();
|
||||
const scriptPath = path.join(pluginRoot, relScriptPath);
|
||||
const resolvedRoot = path.resolve(pluginRoot);
|
||||
const scriptPath = path.resolve(pluginRoot, relScriptPath);
|
||||
|
||||
// Prevent path traversal outside the plugin root
|
||||
if (!scriptPath.startsWith(resolvedRoot + path.sep)) {
|
||||
process.stderr.write(`[Hook] Path traversal rejected for ${hookId}: ${scriptPath}\n`);
|
||||
process.stdout.write(raw);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
if (!fs.existsSync(scriptPath)) {
|
||||
process.stderr.write(`[Hook] Script not found for ${hookId}: ${scriptPath}\n`);
|
||||
@@ -60,11 +68,43 @@ async function main() {
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// Prefer direct require() when the hook exports a run(rawInput) function.
|
||||
// This eliminates one Node.js process spawn (~50-100ms savings per hook).
|
||||
//
|
||||
// SAFETY: Only require() hooks that export run(). Legacy hooks execute
|
||||
// side effects at module scope (stdin listeners, process.exit, main() calls)
|
||||
// which would interfere with the parent process or cause double execution.
|
||||
let hookModule;
|
||||
const src = fs.readFileSync(scriptPath, 'utf8');
|
||||
const hasRunExport = /\bmodule\.exports\b/.test(src) && /\brun\b/.test(src);
|
||||
|
||||
if (hasRunExport) {
|
||||
try {
|
||||
hookModule = require(scriptPath);
|
||||
} catch (requireErr) {
|
||||
process.stderr.write(`[Hook] require() failed for ${hookId}: ${requireErr.message}\n`);
|
||||
// Fall through to legacy spawnSync path
|
||||
}
|
||||
}
|
||||
|
||||
if (hookModule && typeof hookModule.run === 'function') {
|
||||
try {
|
||||
const output = hookModule.run(raw);
|
||||
if (output !== null && output !== undefined) process.stdout.write(output);
|
||||
} catch (runErr) {
|
||||
process.stderr.write(`[Hook] run() error for ${hookId}: ${runErr.message}\n`);
|
||||
process.stdout.write(raw);
|
||||
}
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// Legacy path: spawn a child Node process for hooks without run() export
|
||||
const result = spawnSync('node', [scriptPath], {
|
||||
input: raw,
|
||||
encoding: 'utf8',
|
||||
env: process.env,
|
||||
cwd: process.cwd(),
|
||||
timeout: 30000
|
||||
});
|
||||
|
||||
if (result.stdout) process.stdout.write(result.stdout);
|
||||
|
||||
185
scripts/lib/resolve-formatter.js
Normal file
185
scripts/lib/resolve-formatter.js
Normal file
@@ -0,0 +1,185 @@
|
||||
/**
|
||||
* Shared formatter resolution utilities with caching.
|
||||
*
|
||||
* Extracts project-root discovery, formatter detection, and binary
|
||||
* resolution into a single module so that post-edit-format.js and
|
||||
* quality-gate.js avoid duplicating work and filesystem lookups.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
// ── Caches (per-process, cleared on next hook invocation) ───────────
|
||||
const projectRootCache = new Map();
|
||||
const formatterCache = new Map();
|
||||
const binCache = new Map();
|
||||
|
||||
// ── Config file lists (single source of truth) ─────────────────────
|
||||
|
||||
const BIOME_CONFIGS = ['biome.json', 'biome.jsonc'];
|
||||
|
||||
const PRETTIER_CONFIGS = [
|
||||
'.prettierrc',
|
||||
'.prettierrc.json',
|
||||
'.prettierrc.js',
|
||||
'.prettierrc.cjs',
|
||||
'.prettierrc.mjs',
|
||||
'.prettierrc.yml',
|
||||
'.prettierrc.yaml',
|
||||
'.prettierrc.toml',
|
||||
'prettier.config.js',
|
||||
'prettier.config.cjs',
|
||||
'prettier.config.mjs'
|
||||
];
|
||||
|
||||
const PROJECT_ROOT_MARKERS = ['package.json', ...BIOME_CONFIGS, ...PRETTIER_CONFIGS];
|
||||
|
||||
// ── Windows .cmd shim mapping ───────────────────────────────────────
|
||||
const WIN_CMD_SHIMS = { npx: 'npx.cmd', pnpm: 'pnpm.cmd', yarn: 'yarn.cmd', bunx: 'bunx.cmd' };
|
||||
|
||||
// ── Formatter → package name mapping ────────────────────────────────
|
||||
const FORMATTER_PACKAGES = {
|
||||
biome: { binName: 'biome', pkgName: '@biomejs/biome' },
|
||||
prettier: { binName: 'prettier', pkgName: 'prettier' }
|
||||
};
|
||||
|
||||
// ── Public helpers ──────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Walk up from `startDir` until a directory containing a known project
|
||||
* root marker (package.json or formatter config) is found.
|
||||
* Returns `startDir` as fallback when no marker exists above it.
|
||||
*
|
||||
* @param {string} startDir - Absolute directory path to start from
|
||||
* @returns {string} Absolute path to the project root
|
||||
*/
|
||||
function findProjectRoot(startDir) {
|
||||
if (projectRootCache.has(startDir)) return projectRootCache.get(startDir);
|
||||
|
||||
let dir = startDir;
|
||||
while (dir !== path.dirname(dir)) {
|
||||
for (const marker of PROJECT_ROOT_MARKERS) {
|
||||
if (fs.existsSync(path.join(dir, marker))) {
|
||||
projectRootCache.set(startDir, dir);
|
||||
return dir;
|
||||
}
|
||||
}
|
||||
dir = path.dirname(dir);
|
||||
}
|
||||
|
||||
projectRootCache.set(startDir, startDir);
|
||||
return startDir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect the formatter configured in the project.
|
||||
* Biome takes priority over Prettier.
|
||||
*
|
||||
* @param {string} projectRoot - Absolute path to the project root
|
||||
* @returns {'biome' | 'prettier' | null}
|
||||
*/
|
||||
function detectFormatter(projectRoot) {
|
||||
if (formatterCache.has(projectRoot)) return formatterCache.get(projectRoot);
|
||||
|
||||
for (const cfg of BIOME_CONFIGS) {
|
||||
if (fs.existsSync(path.join(projectRoot, cfg))) {
|
||||
formatterCache.set(projectRoot, 'biome');
|
||||
return 'biome';
|
||||
}
|
||||
}
|
||||
|
||||
// Check package.json "prettier" key before config files
|
||||
try {
|
||||
const pkgPath = path.join(projectRoot, 'package.json');
|
||||
if (fs.existsSync(pkgPath)) {
|
||||
const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
|
||||
if ('prettier' in pkg) {
|
||||
formatterCache.set(projectRoot, 'prettier');
|
||||
return 'prettier';
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
// Malformed package.json — continue to file-based detection
|
||||
}
|
||||
|
||||
for (const cfg of PRETTIER_CONFIGS) {
|
||||
if (fs.existsSync(path.join(projectRoot, cfg))) {
|
||||
formatterCache.set(projectRoot, 'prettier');
|
||||
return 'prettier';
|
||||
}
|
||||
}
|
||||
|
||||
formatterCache.set(projectRoot, null);
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve the runner binary and prefix args for the configured package
|
||||
* manager (respects CLAUDE_PACKAGE_MANAGER env and project config).
|
||||
*
|
||||
* @param {string} projectRoot - Absolute path to the project root
|
||||
* @returns {{ bin: string, prefix: string[] }}
|
||||
*/
|
||||
function getRunnerFromPackageManager(projectRoot) {
|
||||
const isWin = process.platform === 'win32';
|
||||
const { getPackageManager } = require('./package-manager');
|
||||
const pm = getPackageManager({ projectDir: projectRoot });
|
||||
const execCmd = pm?.config?.execCmd || 'npx';
|
||||
const [rawBin = 'npx', ...prefix] = execCmd.split(/\s+/).filter(Boolean);
|
||||
const bin = isWin ? WIN_CMD_SHIMS[rawBin] || rawBin : rawBin;
|
||||
return { bin, prefix };
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve the formatter binary, preferring the local node_modules/.bin
|
||||
* installation over the package manager exec command to avoid
|
||||
* package-resolution overhead.
|
||||
*
|
||||
* @param {string} projectRoot - Absolute path to the project root
|
||||
* @param {'biome' | 'prettier'} formatter - Detected formatter name
|
||||
* @returns {{ bin: string, prefix: string[] } | null}
|
||||
* `bin` – executable path (absolute local path or runner binary)
|
||||
* `prefix` – extra args to prepend (e.g. ['@biomejs/biome'] when using npx)
|
||||
*/
|
||||
function resolveFormatterBin(projectRoot, formatter) {
|
||||
const cacheKey = `${projectRoot}:${formatter}`;
|
||||
if (binCache.has(cacheKey)) return binCache.get(cacheKey);
|
||||
|
||||
const pkg = FORMATTER_PACKAGES[formatter];
|
||||
if (!pkg) {
|
||||
binCache.set(cacheKey, null);
|
||||
return null;
|
||||
}
|
||||
|
||||
const isWin = process.platform === 'win32';
|
||||
const localBin = path.join(projectRoot, 'node_modules', '.bin', isWin ? `${pkg.binName}.cmd` : pkg.binName);
|
||||
|
||||
if (fs.existsSync(localBin)) {
|
||||
const result = { bin: localBin, prefix: [] };
|
||||
binCache.set(cacheKey, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
const runner = getRunnerFromPackageManager(projectRoot);
|
||||
const result = { bin: runner.bin, prefix: [...runner.prefix, pkg.pkgName] };
|
||||
binCache.set(cacheKey, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear all caches. Useful for testing.
|
||||
*/
|
||||
function clearCaches() {
|
||||
projectRootCache.clear();
|
||||
formatterCache.clear();
|
||||
binCache.clear();
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
findProjectRoot,
|
||||
detectFormatter,
|
||||
resolveFormatterBin,
|
||||
clearCaches
|
||||
};
|
||||
105
skills/blueprint/SKILL.md
Normal file
105
skills/blueprint/SKILL.md
Normal file
@@ -0,0 +1,105 @@
|
||||
---
|
||||
name: blueprint
|
||||
description: >-
|
||||
Turn a one-line objective into a step-by-step construction plan for
|
||||
multi-session, multi-agent engineering projects. Each step has a
|
||||
self-contained context brief so a fresh agent can execute it cold.
|
||||
Includes adversarial review gate, dependency graph, parallel step
|
||||
detection, anti-pattern catalog, and plan mutation protocol.
|
||||
TRIGGER when: user requests a plan, blueprint, or roadmap for a
|
||||
complex multi-PR task, or describes work that needs multiple sessions.
|
||||
DO NOT TRIGGER when: task is completable in a single PR or fewer
|
||||
than 3 tool calls, or user says "just do it".
|
||||
origin: community
|
||||
---
|
||||
|
||||
# Blueprint — Construction Plan Generator
|
||||
|
||||
Turn a one-line objective into a step-by-step construction plan that any coding agent can execute cold.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Breaking a large feature into multiple PRs with clear dependency order
|
||||
- Planning a refactor or migration that spans multiple sessions
|
||||
- Coordinating parallel workstreams across sub-agents
|
||||
- Any task where context loss between sessions would cause rework
|
||||
|
||||
**Do not use** for tasks completable in a single PR, fewer than 3 tool calls, or when the user says "just do it."
|
||||
|
||||
## How It Works
|
||||
|
||||
Blueprint runs a 5-phase pipeline:
|
||||
|
||||
1. **Research** — Pre-flight checks (git, gh auth, remote, default branch), then reads project structure, existing plans, and memory files to gather context.
|
||||
2. **Design** — Breaks the objective into one-PR-sized steps (3–12 typical). Assigns dependency edges, parallel/serial ordering, model tier (strongest vs default), and rollback strategy per step.
|
||||
3. **Draft** — Writes a self-contained Markdown plan file to `plans/`. Every step includes a context brief, task list, verification commands, and exit criteria — so a fresh agent can execute any step without reading prior steps.
|
||||
4. **Review** — Delegates adversarial review to a strongest-model sub-agent (e.g., Opus) against a checklist and anti-pattern catalog. Fixes all critical findings before finalizing.
|
||||
5. **Register** — Saves the plan, updates memory index, and presents the step count and parallelism summary to the user.
|
||||
|
||||
Blueprint detects git/gh availability automatically. With git + GitHub CLI, it generates full branch/PR/CI workflow plans. Without them, it switches to direct mode (edit-in-place, no branches).
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic usage
|
||||
|
||||
```
|
||||
/blueprint myapp "migrate database to PostgreSQL"
|
||||
```
|
||||
|
||||
Produces `plans/myapp-migrate-database-to-postgresql.md` with steps like:
|
||||
- Step 1: Add PostgreSQL driver and connection config
|
||||
- Step 2: Create migration scripts for each table
|
||||
- Step 3: Update repository layer to use new driver
|
||||
- Step 4: Add integration tests against PostgreSQL
|
||||
- Step 5: Remove old database code and config
|
||||
|
||||
### Multi-agent project
|
||||
|
||||
```
|
||||
/blueprint chatbot "extract LLM providers into a plugin system"
|
||||
```
|
||||
|
||||
Produces a plan with parallel steps where possible (e.g., "implement Anthropic plugin" and "implement OpenAI plugin" run in parallel after the plugin interface step is done), model tier assignments (strongest for the interface design step, default for implementation), and invariants verified after every step (e.g., "all existing tests pass", "no provider imports in core").
|
||||
|
||||
## Key Features
|
||||
|
||||
- **Cold-start execution** — Every step includes a self-contained context brief. No prior context needed.
|
||||
- **Adversarial review gate** — Every plan is reviewed by a strongest-model sub-agent against a checklist covering completeness, dependency correctness, and anti-pattern detection.
|
||||
- **Branch/PR/CI workflow** — Built into every step. Degrades gracefully to direct mode when git/gh is absent.
|
||||
- **Parallel step detection** — Dependency graph identifies steps with no shared files or output dependencies.
|
||||
- **Plan mutation protocol** — Steps can be split, inserted, skipped, reordered, or abandoned with formal protocols and audit trail.
|
||||
- **Zero runtime risk** — Pure Markdown skill. The entire repository contains only `.md` files — no hooks, no shell scripts, no executable code, no `package.json`, no build step. Nothing runs on install or invocation beyond Claude Code's native Markdown skill loader.
|
||||
|
||||
## Installation
|
||||
|
||||
This skill ships with Everything Claude Code. No separate installation is needed when ECC is installed.
|
||||
|
||||
### Full ECC install
|
||||
|
||||
If you are working from the ECC repository checkout, verify the skill is present with:
|
||||
|
||||
```bash
|
||||
test -f skills/blueprint/SKILL.md
|
||||
```
|
||||
|
||||
To update later, review the ECC diff before updating:
|
||||
|
||||
```bash
|
||||
cd /path/to/everything-claude-code
|
||||
git fetch origin main
|
||||
git log --oneline HEAD..origin/main # review new commits before updating
|
||||
git checkout <reviewed-full-sha> # pin to a specific reviewed commit
|
||||
```
|
||||
|
||||
### Vendored standalone install
|
||||
|
||||
If you are vendoring only this skill outside the full ECC install, copy the reviewed file from the ECC repository into `~/.claude/skills/blueprint/SKILL.md`. Vendored copies do not have a git remote, so update them by re-copying the file from a reviewed ECC commit rather than running `git pull`.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Claude Code (for `/blueprint` slash command)
|
||||
- Git + GitHub CLI (optional — enables full branch/PR/CI workflow; Blueprint detects absence and auto-switches to direct mode)
|
||||
|
||||
## Source
|
||||
|
||||
Inspired by antbotlab/blueprint — upstream project and reference design.
|
||||
@@ -28,6 +28,7 @@ OBSERVER_LOOP_SCRIPT="${SCRIPT_DIR}/observer-loop.sh"
|
||||
# Source shared project detection helper
|
||||
# This sets: PROJECT_ID, PROJECT_NAME, PROJECT_ROOT, PROJECT_DIR
|
||||
source "${SKILL_ROOT}/scripts/detect-project.sh"
|
||||
PYTHON_CMD="${CLV2_PYTHON_CMD:-}"
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Configuration
|
||||
@@ -46,7 +47,10 @@ OBSERVER_INTERVAL_MINUTES=5
|
||||
MIN_OBSERVATIONS=20
|
||||
OBSERVER_ENABLED=false
|
||||
if [ -f "$CONFIG_FILE" ]; then
|
||||
_config=$(CLV2_CONFIG="$CONFIG_FILE" python3 -c "
|
||||
if [ -z "$PYTHON_CMD" ]; then
|
||||
echo "No python interpreter found; using built-in observer defaults." >&2
|
||||
else
|
||||
_config=$(CLV2_CONFIG="$CONFIG_FILE" "$PYTHON_CMD" -c "
|
||||
import json, os
|
||||
with open(os.environ['CLV2_CONFIG']) as f:
|
||||
cfg = json.load(f)
|
||||
@@ -57,17 +61,18 @@ print(str(obs.get('enabled', False)).lower())
|
||||
" 2>/dev/null || echo "5
|
||||
20
|
||||
false")
|
||||
_interval=$(echo "$_config" | sed -n '1p')
|
||||
_min_obs=$(echo "$_config" | sed -n '2p')
|
||||
_enabled=$(echo "$_config" | sed -n '3p')
|
||||
if [ "$_interval" -gt 0 ] 2>/dev/null; then
|
||||
OBSERVER_INTERVAL_MINUTES="$_interval"
|
||||
fi
|
||||
if [ "$_min_obs" -gt 0 ] 2>/dev/null; then
|
||||
MIN_OBSERVATIONS="$_min_obs"
|
||||
fi
|
||||
if [ "$_enabled" = "true" ]; then
|
||||
OBSERVER_ENABLED=true
|
||||
_interval=$(echo "$_config" | sed -n '1p')
|
||||
_min_obs=$(echo "$_config" | sed -n '2p')
|
||||
_enabled=$(echo "$_config" | sed -n '3p')
|
||||
if [ "$_interval" -gt 0 ] 2>/dev/null; then
|
||||
OBSERVER_INTERVAL_MINUTES="$_interval"
|
||||
fi
|
||||
if [ "$_min_obs" -gt 0 ] 2>/dev/null; then
|
||||
MIN_OBSERVATIONS="$_min_obs"
|
||||
fi
|
||||
if [ "$_enabled" = "true" ]; then
|
||||
OBSERVER_ENABLED=true
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
OBSERVER_INTERVAL_SECONDS=$((OBSERVER_INTERVAL_MINUTES * 60))
|
||||
|
||||
@@ -27,13 +27,38 @@ if [ -z "$INPUT_JSON" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
resolve_python_cmd() {
|
||||
if [ -n "${CLV2_PYTHON_CMD:-}" ] && command -v "$CLV2_PYTHON_CMD" >/dev/null 2>&1; then
|
||||
printf '%s\n' "$CLV2_PYTHON_CMD"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if command -v python3 >/dev/null 2>&1; then
|
||||
printf '%s\n' python3
|
||||
return 0
|
||||
fi
|
||||
|
||||
if command -v python >/dev/null 2>&1; then
|
||||
printf '%s\n' python
|
||||
return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
PYTHON_CMD="$(resolve_python_cmd 2>/dev/null || true)"
|
||||
if [ -z "$PYTHON_CMD" ]; then
|
||||
echo "[observe] No python interpreter found, skipping observation" >&2
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Extract cwd from stdin for project detection
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
# Extract cwd from the hook JSON to use for project detection.
|
||||
# This avoids spawning a separate git subprocess when cwd is available.
|
||||
STDIN_CWD=$(echo "$INPUT_JSON" | python3 -c '
|
||||
STDIN_CWD=$(echo "$INPUT_JSON" | "$PYTHON_CMD" -c '
|
||||
import json, sys
|
||||
try:
|
||||
data = json.load(sys.stdin)
|
||||
@@ -58,6 +83,7 @@ SKILL_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
# Source shared project detection helper
|
||||
# This sets: PROJECT_ID, PROJECT_NAME, PROJECT_ROOT, PROJECT_DIR
|
||||
source "${SKILL_ROOT}/scripts/detect-project.sh"
|
||||
PYTHON_CMD="${CLV2_PYTHON_CMD:-$PYTHON_CMD}"
|
||||
|
||||
# ─────────────────────────────────────────────
|
||||
# Configuration
|
||||
@@ -79,9 +105,9 @@ if [ ! -f "$PURGE_MARKER" ] || [ "$(find "$PURGE_MARKER" -mtime +1 2>/dev/null)"
|
||||
touch "$PURGE_MARKER" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
# Parse using python via stdin pipe (safe for all JSON payloads)
|
||||
# Parse using Python via stdin pipe (safe for all JSON payloads)
|
||||
# Pass HOOK_PHASE via env var since Claude Code does not include hook type in stdin JSON
|
||||
PARSED=$(echo "$INPUT_JSON" | HOOK_PHASE="$HOOK_PHASE" python3 -c '
|
||||
PARSED=$(echo "$INPUT_JSON" | HOOK_PHASE="$HOOK_PHASE" "$PYTHON_CMD" -c '
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
@@ -98,7 +124,9 @@ try:
|
||||
# Extract fields - Claude Code hook format
|
||||
tool_name = data.get("tool_name", data.get("tool", "unknown"))
|
||||
tool_input = data.get("tool_input", data.get("input", {}))
|
||||
tool_output = data.get("tool_output", data.get("output", ""))
|
||||
tool_output = data.get("tool_response")
|
||||
if tool_output is None:
|
||||
tool_output = data.get("tool_output", data.get("output", ""))
|
||||
session_id = data.get("session_id", "unknown")
|
||||
tool_use_id = data.get("tool_use_id", "")
|
||||
cwd = data.get("cwd", "")
|
||||
@@ -129,13 +157,13 @@ except Exception as e:
|
||||
')
|
||||
|
||||
# Check if parsing succeeded
|
||||
PARSED_OK=$(echo "$PARSED" | python3 -c "import json,sys; print(json.load(sys.stdin).get('parsed', False))" 2>/dev/null || echo "False")
|
||||
PARSED_OK=$(echo "$PARSED" | "$PYTHON_CMD" -c "import json,sys; print(json.load(sys.stdin).get('parsed', False))" 2>/dev/null || echo "False")
|
||||
|
||||
if [ "$PARSED_OK" != "True" ]; then
|
||||
# Fallback: log raw input for debugging (scrub secrets before persisting)
|
||||
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
export TIMESTAMP="$timestamp"
|
||||
echo "$INPUT_JSON" | python3 -c '
|
||||
echo "$INPUT_JSON" | "$PYTHON_CMD" -c '
|
||||
import json, sys, os, re
|
||||
|
||||
_SECRET_RE = re.compile(
|
||||
@@ -170,7 +198,7 @@ export PROJECT_ID_ENV="$PROJECT_ID"
|
||||
export PROJECT_NAME_ENV="$PROJECT_NAME"
|
||||
export TIMESTAMP="$timestamp"
|
||||
|
||||
echo "$PARSED" | python3 -c '
|
||||
echo "$PARSED" | "$PYTHON_CMD" -c '
|
||||
import json, sys, os, re
|
||||
|
||||
parsed = json.load(sys.stdin)
|
||||
|
||||
@@ -23,6 +23,29 @@ _CLV2_HOMUNCULUS_DIR="${HOME}/.claude/homunculus"
|
||||
_CLV2_PROJECTS_DIR="${_CLV2_HOMUNCULUS_DIR}/projects"
|
||||
_CLV2_REGISTRY_FILE="${_CLV2_HOMUNCULUS_DIR}/projects.json"
|
||||
|
||||
_clv2_resolve_python_cmd() {
|
||||
if [ -n "${CLV2_PYTHON_CMD:-}" ] && command -v "$CLV2_PYTHON_CMD" >/dev/null 2>&1; then
|
||||
printf '%s\n' "$CLV2_PYTHON_CMD"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if command -v python3 >/dev/null 2>&1; then
|
||||
printf '%s\n' python3
|
||||
return 0
|
||||
fi
|
||||
|
||||
if command -v python >/dev/null 2>&1; then
|
||||
printf '%s\n' python
|
||||
return 0
|
||||
fi
|
||||
|
||||
return 1
|
||||
}
|
||||
|
||||
_CLV2_PYTHON_CMD="$(_clv2_resolve_python_cmd 2>/dev/null || true)"
|
||||
CLV2_PYTHON_CMD="$_CLV2_PYTHON_CMD"
|
||||
export CLV2_PYTHON_CMD
|
||||
|
||||
_clv2_detect_project() {
|
||||
local project_root=""
|
||||
local project_name=""
|
||||
@@ -73,10 +96,12 @@ _clv2_detect_project() {
|
||||
fi
|
||||
|
||||
local hash_input="${remote_url:-$project_root}"
|
||||
# Use SHA256 via python3 (portable across macOS/Linux, no shasum/sha256sum divergence)
|
||||
project_id=$(printf '%s' "$hash_input" | python3 -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null)
|
||||
# Prefer Python for consistent SHA256 behavior across shells/platforms.
|
||||
if [ -n "$_CLV2_PYTHON_CMD" ]; then
|
||||
project_id=$(printf '%s' "$hash_input" | "$_CLV2_PYTHON_CMD" -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null)
|
||||
fi
|
||||
|
||||
# Fallback if python3 failed
|
||||
# Fallback if Python is unavailable or hash generation failed.
|
||||
if [ -z "$project_id" ]; then
|
||||
project_id=$(printf '%s' "$hash_input" | shasum -a 256 2>/dev/null | cut -c1-12 || \
|
||||
printf '%s' "$hash_input" | sha256sum 2>/dev/null | cut -c1-12 || \
|
||||
@@ -85,9 +110,9 @@ _clv2_detect_project() {
|
||||
|
||||
# Backward compatibility: if credentials were stripped and the hash changed,
|
||||
# check if a project dir exists under the legacy hash and reuse it
|
||||
if [ "$legacy_hash_input" != "$hash_input" ]; then
|
||||
local legacy_id
|
||||
legacy_id=$(printf '%s' "$legacy_hash_input" | python3 -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null)
|
||||
if [ "$legacy_hash_input" != "$hash_input" ] && [ -n "$_CLV2_PYTHON_CMD" ]; then
|
||||
local legacy_id=""
|
||||
legacy_id=$(printf '%s' "$legacy_hash_input" | "$_CLV2_PYTHON_CMD" -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null)
|
||||
if [ -n "$legacy_id" ] && [ -d "${_CLV2_PROJECTS_DIR}/${legacy_id}" ] && [ ! -d "${_CLV2_PROJECTS_DIR}/${project_id}" ]; then
|
||||
# Migrate legacy directory to new hash
|
||||
mv "${_CLV2_PROJECTS_DIR}/${legacy_id}" "${_CLV2_PROJECTS_DIR}/${project_id}" 2>/dev/null || project_id="$legacy_id"
|
||||
@@ -120,14 +145,18 @@ _clv2_update_project_registry() {
|
||||
|
||||
mkdir -p "$(dirname "$_CLV2_REGISTRY_FILE")"
|
||||
|
||||
if [ -z "$_CLV2_PYTHON_CMD" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Pass values via env vars to avoid shell→python injection.
|
||||
# python3 reads them with os.environ, which is safe for any string content.
|
||||
# Python reads them with os.environ, which is safe for any string content.
|
||||
_CLV2_REG_PID="$pid" \
|
||||
_CLV2_REG_PNAME="$pname" \
|
||||
_CLV2_REG_PROOT="$proot" \
|
||||
_CLV2_REG_PREMOTE="$premote" \
|
||||
_CLV2_REG_FILE="$_CLV2_REGISTRY_FILE" \
|
||||
python3 -c '
|
||||
"$_CLV2_PYTHON_CMD" -c '
|
||||
import json, os
|
||||
from datetime import datetime, timezone
|
||||
|
||||
|
||||
@@ -208,4 +208,4 @@ When retrieving context for this task:
|
||||
|
||||
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Subagent orchestration section
|
||||
- `continuous-learning` skill - For patterns that improve over time
|
||||
- Agent definitions in `~/.claude/agents/`
|
||||
- Agent definitions bundled with ECC (manual install path: `agents/`)
|
||||
|
||||
504
skills/perl-patterns/SKILL.md
Normal file
504
skills/perl-patterns/SKILL.md
Normal file
@@ -0,0 +1,504 @@
|
||||
---
|
||||
name: perl-patterns
|
||||
description: Modern Perl 5.36+ idioms, best practices, and conventions for building robust, maintainable Perl applications.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Modern Perl Development Patterns
|
||||
|
||||
Idiomatic Perl 5.36+ patterns and best practices for building robust, maintainable applications.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing new Perl code or modules
|
||||
- Reviewing Perl code for idiom compliance
|
||||
- Refactoring legacy Perl to modern standards
|
||||
- Designing Perl module architecture
|
||||
- Migrating pre-5.36 code to modern Perl
|
||||
|
||||
## How It Works
|
||||
|
||||
Apply these patterns as a bias toward modern Perl 5.36+ defaults: signatures, explicit modules, focused error handling, and testable boundaries. The examples below are meant to be copied as starting points, then tightened for the actual app, dependency stack, and deployment model in front of you.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### 1. Use `v5.36` Pragma
|
||||
|
||||
A single `use v5.36` replaces the old boilerplate and enables strict, warnings, and subroutine signatures.
|
||||
|
||||
```perl
|
||||
# Good: Modern preamble
|
||||
use v5.36;
|
||||
|
||||
sub greet($name) {
|
||||
say "Hello, $name!";
|
||||
}
|
||||
|
||||
# Bad: Legacy boilerplate
|
||||
use strict;
|
||||
use warnings;
|
||||
use feature 'say', 'signatures';
|
||||
no warnings 'experimental::signatures';
|
||||
|
||||
sub greet {
|
||||
my ($name) = @_;
|
||||
say "Hello, $name!";
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Subroutine Signatures
|
||||
|
||||
Use signatures for clarity and automatic arity checking.
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Signatures with defaults
|
||||
sub connect_db($host, $port = 5432, $timeout = 30) {
|
||||
# $host is required, others have defaults
|
||||
return DBI->connect("dbi:Pg:host=$host;port=$port", undef, undef, {
|
||||
RaiseError => 1,
|
||||
PrintError => 0,
|
||||
});
|
||||
}
|
||||
|
||||
# Good: Slurpy parameter for variable args
|
||||
sub log_message($level, @details) {
|
||||
say "[$level] " . join(' ', @details);
|
||||
}
|
||||
|
||||
# Bad: Manual argument unpacking
|
||||
sub connect_db {
|
||||
my ($host, $port, $timeout) = @_;
|
||||
$port //= 5432;
|
||||
$timeout //= 30;
|
||||
# ...
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Context Sensitivity
|
||||
|
||||
Understand scalar vs list context — a core Perl concept.
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
my @items = (1, 2, 3, 4, 5);
|
||||
|
||||
my @copy = @items; # List context: all elements
|
||||
my $count = @items; # Scalar context: count (5)
|
||||
say "Items: " . scalar @items; # Force scalar context
|
||||
```
|
||||
|
||||
### 4. Postfix Dereferencing
|
||||
|
||||
Use postfix dereference syntax for readability with nested structures.
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
my $data = {
|
||||
users => [
|
||||
{ name => 'Alice', roles => ['admin', 'user'] },
|
||||
{ name => 'Bob', roles => ['user'] },
|
||||
],
|
||||
};
|
||||
|
||||
# Good: Postfix dereferencing
|
||||
my @users = $data->{users}->@*;
|
||||
my @roles = $data->{users}[0]{roles}->@*;
|
||||
my %first = $data->{users}[0]->%*;
|
||||
|
||||
# Bad: Circumfix dereferencing (harder to read in chains)
|
||||
my @users = @{ $data->{users} };
|
||||
my @roles = @{ $data->{users}[0]{roles} };
|
||||
```
|
||||
|
||||
### 5. The `isa` Operator (5.32+)
|
||||
|
||||
Infix type-check — replaces `blessed($o) && $o->isa('X')`.
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
if ($obj isa 'My::Class') { $obj->do_something }
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### eval/die Pattern
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
sub parse_config($path) {
|
||||
my $content = eval { path($path)->slurp_utf8 };
|
||||
die "Config error: $@" if $@;
|
||||
return decode_json($content);
|
||||
}
|
||||
```
|
||||
|
||||
### Try::Tiny (Reliable Exception Handling)
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Try::Tiny;
|
||||
|
||||
sub fetch_user($id) {
|
||||
my $user = try {
|
||||
$db->resultset('User')->find($id)
|
||||
// die "User $id not found\n";
|
||||
}
|
||||
catch {
|
||||
warn "Failed to fetch user $id: $_";
|
||||
undef;
|
||||
};
|
||||
return $user;
|
||||
}
|
||||
```
|
||||
|
||||
### Native try/catch (5.40+)
|
||||
|
||||
```perl
|
||||
use v5.40;
|
||||
|
||||
sub divide($x, $y) {
|
||||
try {
|
||||
die "Division by zero" if $y == 0;
|
||||
return $x / $y;
|
||||
}
|
||||
catch ($e) {
|
||||
warn "Error: $e";
|
||||
return;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Modern OO with Moo
|
||||
|
||||
Prefer Moo for lightweight, modern OO. Use Moose only when its metaprotocol is needed.
|
||||
|
||||
```perl
|
||||
# Good: Moo class
|
||||
package User;
|
||||
use Moo;
|
||||
use Types::Standard qw(Str Int ArrayRef);
|
||||
use namespace::autoclean;
|
||||
|
||||
has name => (is => 'ro', isa => Str, required => 1);
|
||||
has email => (is => 'ro', isa => Str, required => 1);
|
||||
has age => (is => 'ro', isa => Int, default => sub { 0 });
|
||||
has roles => (is => 'ro', isa => ArrayRef[Str], default => sub { [] });
|
||||
|
||||
sub is_admin($self) {
|
||||
return grep { $_ eq 'admin' } $self->roles->@*;
|
||||
}
|
||||
|
||||
sub greet($self) {
|
||||
return "Hello, I'm " . $self->name;
|
||||
}
|
||||
|
||||
1;
|
||||
|
||||
# Usage
|
||||
my $user = User->new(
|
||||
name => 'Alice',
|
||||
email => 'alice@example.com',
|
||||
roles => ['admin', 'user'],
|
||||
);
|
||||
|
||||
# Bad: Blessed hashref (no validation, no accessors)
|
||||
package User;
|
||||
sub new {
|
||||
my ($class, %args) = @_;
|
||||
return bless \%args, $class;
|
||||
}
|
||||
sub name { return $_[0]->{name} }
|
||||
1;
|
||||
```
|
||||
|
||||
### Moo Roles
|
||||
|
||||
```perl
|
||||
package Role::Serializable;
|
||||
use Moo::Role;
|
||||
use JSON::MaybeXS qw(encode_json);
|
||||
requires 'TO_HASH';
|
||||
sub to_json($self) { encode_json($self->TO_HASH) }
|
||||
1;
|
||||
|
||||
package User;
|
||||
use Moo;
|
||||
with 'Role::Serializable';
|
||||
has name => (is => 'ro', required => 1);
|
||||
has email => (is => 'ro', required => 1);
|
||||
sub TO_HASH($self) { { name => $self->name, email => $self->email } }
|
||||
1;
|
||||
```
|
||||
|
||||
### Native `class` Keyword (5.38+, Corinna)
|
||||
|
||||
```perl
|
||||
use v5.38;
|
||||
use feature 'class';
|
||||
no warnings 'experimental::class';
|
||||
|
||||
class Point {
|
||||
field $x :param;
|
||||
field $y :param;
|
||||
method magnitude() { sqrt($x**2 + $y**2) }
|
||||
}
|
||||
|
||||
my $p = Point->new(x => 3, y => 4);
|
||||
say $p->magnitude; # 5
|
||||
```
|
||||
|
||||
## Regular Expressions
|
||||
|
||||
### Named Captures and `/x` Flag
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Named captures with /x for readability
|
||||
my $log_re = qr{
|
||||
^ (?<timestamp> \d{4}-\d{2}-\d{2} \s \d{2}:\d{2}:\d{2} )
|
||||
\s+ \[ (?<level> \w+ ) \]
|
||||
\s+ (?<message> .+ ) $
|
||||
}x;
|
||||
|
||||
if ($line =~ $log_re) {
|
||||
say "Time: $+{timestamp}, Level: $+{level}";
|
||||
say "Message: $+{message}";
|
||||
}
|
||||
|
||||
# Bad: Positional captures (hard to maintain)
|
||||
if ($line =~ /^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\s+\[(\w+)\]\s+(.+)$/) {
|
||||
say "Time: $1, Level: $2";
|
||||
}
|
||||
```
|
||||
|
||||
### Precompiled Patterns
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Compile once, use many
|
||||
my $email_re = qr/^[A-Za-z0-9._%+-]+\@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$/;
|
||||
|
||||
sub validate_emails(@emails) {
|
||||
return grep { $_ =~ $email_re } @emails;
|
||||
}
|
||||
```
|
||||
|
||||
## Data Structures
|
||||
|
||||
### References and Safe Deep Access
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Hash and array references
|
||||
my $config = {
|
||||
database => {
|
||||
host => 'localhost',
|
||||
port => 5432,
|
||||
options => ['utf8', 'sslmode=require'],
|
||||
},
|
||||
};
|
||||
|
||||
# Safe deep access (returns undef if any level missing)
|
||||
my $port = $config->{database}{port}; # 5432
|
||||
my $missing = $config->{cache}{host}; # undef, no error
|
||||
|
||||
# Hash slices
|
||||
my %subset;
|
||||
@subset{qw(host port)} = @{$config->{database}}{qw(host port)};
|
||||
|
||||
# Array slices
|
||||
my @first_two = $config->{database}{options}->@[0, 1];
|
||||
|
||||
# Multi-variable for loop (experimental in 5.36, stable in 5.40)
|
||||
use feature 'for_list';
|
||||
no warnings 'experimental::for_list';
|
||||
for my ($key, $val) (%$config) {
|
||||
say "$key => $val";
|
||||
}
|
||||
```
|
||||
|
||||
## File I/O
|
||||
|
||||
### Three-Argument Open
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Three-arg open with autodie (core module, eliminates 'or die')
|
||||
use autodie;
|
||||
|
||||
sub read_file($path) {
|
||||
open my $fh, '<:encoding(UTF-8)', $path;
|
||||
local $/;
|
||||
my $content = <$fh>;
|
||||
close $fh;
|
||||
return $content;
|
||||
}
|
||||
|
||||
# Bad: Two-arg open (shell injection risk, see perl-security)
|
||||
open FH, $path; # NEVER do this
|
||||
open FH, "< $path"; # Still bad — user data in mode string
|
||||
```
|
||||
|
||||
### Path::Tiny for File Operations
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Path::Tiny;
|
||||
|
||||
my $file = path('config', 'app.json');
|
||||
my $content = $file->slurp_utf8;
|
||||
$file->spew_utf8($new_content);
|
||||
|
||||
# Iterate directory
|
||||
for my $child (path('src')->children(qr/\.pl$/)) {
|
||||
say $child->basename;
|
||||
}
|
||||
```
|
||||
|
||||
## Module Organization
|
||||
|
||||
### Standard Project Layout
|
||||
|
||||
```text
|
||||
MyApp/
|
||||
├── lib/
|
||||
│ └── MyApp/
|
||||
│ ├── App.pm # Main module
|
||||
│ ├── Config.pm # Configuration
|
||||
│ ├── DB.pm # Database layer
|
||||
│ └── Util.pm # Utilities
|
||||
├── bin/
|
||||
│ └── myapp # Entry-point script
|
||||
├── t/
|
||||
│ ├── 00-load.t # Compilation tests
|
||||
│ ├── unit/ # Unit tests
|
||||
│ └── integration/ # Integration tests
|
||||
├── cpanfile # Dependencies
|
||||
├── Makefile.PL # Build system
|
||||
└── .perlcriticrc # Linting config
|
||||
```
|
||||
|
||||
### Exporter Patterns
|
||||
|
||||
```perl
|
||||
package MyApp::Util;
|
||||
use v5.36;
|
||||
use Exporter 'import';
|
||||
|
||||
our @EXPORT_OK = qw(trim);
|
||||
our %EXPORT_TAGS = (all => \@EXPORT_OK);
|
||||
|
||||
sub trim($str) { $str =~ s/^\s+|\s+$//gr }
|
||||
|
||||
1;
|
||||
```
|
||||
|
||||
## Tooling
|
||||
|
||||
### perltidy Configuration (.perltidyrc)
|
||||
|
||||
```text
|
||||
-i=4 # 4-space indent
|
||||
-l=100 # 100-char line length
|
||||
-ci=4 # continuation indent
|
||||
-ce # cuddled else
|
||||
-bar # opening brace on same line
|
||||
-nolq # don't outdent long quoted strings
|
||||
```
|
||||
|
||||
### perlcritic Configuration (.perlcriticrc)
|
||||
|
||||
```ini
|
||||
severity = 3
|
||||
theme = core + pbp + security
|
||||
|
||||
[InputOutput::RequireCheckedSyscalls]
|
||||
functions = :builtins
|
||||
exclude_functions = say print
|
||||
|
||||
[Subroutines::ProhibitExplicitReturnUndef]
|
||||
severity = 4
|
||||
|
||||
[ValuesAndExpressions::ProhibitMagicNumbers]
|
||||
allowed_values = 0 1 2 -1
|
||||
```
|
||||
|
||||
### Dependency Management (cpanfile + carton)
|
||||
|
||||
```bash
|
||||
cpanm App::cpanminus Carton # Install tools
|
||||
carton install # Install deps from cpanfile
|
||||
carton exec -- perl bin/myapp # Run with local deps
|
||||
```
|
||||
|
||||
```perl
|
||||
# cpanfile
|
||||
requires 'Moo', '>= 2.005';
|
||||
requires 'Path::Tiny';
|
||||
requires 'JSON::MaybeXS';
|
||||
requires 'Try::Tiny';
|
||||
|
||||
on test => sub {
|
||||
requires 'Test2::V0';
|
||||
requires 'Test::MockModule';
|
||||
};
|
||||
```
|
||||
|
||||
## Quick Reference: Modern Perl Idioms
|
||||
|
||||
| Legacy Pattern | Modern Replacement |
|
||||
|---|---|
|
||||
| `use strict; use warnings;` | `use v5.36;` |
|
||||
| `my ($x, $y) = @_;` | `sub foo($x, $y) { ... }` |
|
||||
| `@{ $ref }` | `$ref->@*` |
|
||||
| `%{ $ref }` | `$ref->%*` |
|
||||
| `open FH, "< $file"` | `open my $fh, '<:encoding(UTF-8)', $file` |
|
||||
| `blessed hashref` | `Moo` class with types |
|
||||
| `$1, $2, $3` | `$+{name}` (named captures) |
|
||||
| `eval { }; if ($@)` | `Try::Tiny` or native `try/catch` (5.40+) |
|
||||
| `BEGIN { require Exporter; }` | `use Exporter 'import';` |
|
||||
| Manual file ops | `Path::Tiny` |
|
||||
| `blessed($o) && $o->isa('X')` | `$o isa 'X'` (5.32+) |
|
||||
| `1` / `0` as booleans | `use builtin 'true', 'false';` (5.36+, experimental) |
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
```perl
|
||||
# 1. Two-arg open (security risk)
|
||||
open FH, $filename; # NEVER
|
||||
|
||||
# 2. Indirect object syntax (ambiguous parsing)
|
||||
my $obj = new Foo(bar => 1); # Bad
|
||||
my $obj = Foo->new(bar => 1); # Good
|
||||
|
||||
# 3. Excessive reliance on $_
|
||||
map { process($_) } grep { validate($_) } @items; # Hard to follow
|
||||
my @valid = grep { validate($_) } @items; # Better: break it up
|
||||
my @results = map { process($_) } @valid;
|
||||
|
||||
# 4. Disabling strict refs
|
||||
no strict 'refs'; # Almost always wrong
|
||||
${"My::Package::$var"} = $value; # Use a hash instead
|
||||
|
||||
# 5. Global variables as configuration
|
||||
our $TIMEOUT = 30; # Bad: mutable global
|
||||
use constant TIMEOUT => 30; # Better: constant
|
||||
# Best: Moo attribute with default
|
||||
|
||||
# 6. String eval for module loading
|
||||
eval "require $module"; # Bad: code injection risk
|
||||
eval "use $module"; # Bad
|
||||
use Module::Runtime 'require_module'; # Good: safe module loading
|
||||
require_module($module);
|
||||
```
|
||||
|
||||
**Remember**: Modern Perl is clean, readable, and safe. Let `use v5.36` handle the boilerplate, use Moo for objects, and prefer CPAN's battle-tested modules over hand-rolled solutions.
|
||||
503
skills/perl-security/SKILL.md
Normal file
503
skills/perl-security/SKILL.md
Normal file
@@ -0,0 +1,503 @@
|
||||
---
|
||||
name: perl-security
|
||||
description: Comprehensive Perl security covering taint mode, input validation, safe process execution, DBI parameterized queries, web security (XSS/SQLi/CSRF), and perlcritic security policies.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Perl Security Patterns
|
||||
|
||||
Comprehensive security guidelines for Perl applications covering input validation, injection prevention, and secure coding practices.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Handling user input in Perl applications
|
||||
- Building Perl web applications (CGI, Mojolicious, Dancer2, Catalyst)
|
||||
- Reviewing Perl code for security vulnerabilities
|
||||
- Performing file operations with user-supplied paths
|
||||
- Executing system commands from Perl
|
||||
- Writing DBI database queries
|
||||
|
||||
## How It Works
|
||||
|
||||
Start with taint-aware input boundaries, then move outward: validate and untaint inputs, keep filesystem and process execution constrained, and use parameterized DBI queries everywhere. The examples below show the safe defaults this skill expects you to apply before shipping Perl code that touches user input, the shell, or the network.
|
||||
|
||||
## Taint Mode
|
||||
|
||||
Perl's taint mode (`-T`) tracks data from external sources and prevents it from being used in unsafe operations without explicit validation.
|
||||
|
||||
### Enabling Taint Mode
|
||||
|
||||
```perl
|
||||
#!/usr/bin/perl -T
|
||||
use v5.36;
|
||||
|
||||
# Tainted: anything from outside the program
|
||||
my $input = $ARGV[0]; # Tainted
|
||||
my $env_path = $ENV{PATH}; # Tainted
|
||||
my $form = <STDIN>; # Tainted
|
||||
my $query = $ENV{QUERY_STRING}; # Tainted
|
||||
|
||||
# Sanitize PATH early (required in taint mode)
|
||||
$ENV{PATH} = '/usr/local/bin:/usr/bin:/bin';
|
||||
delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};
|
||||
```
|
||||
|
||||
### Untainting Pattern
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Validate and untaint with a specific regex
|
||||
sub untaint_username($input) {
|
||||
if ($input =~ /^([a-zA-Z0-9_]{3,30})$/) {
|
||||
return $1; # $1 is untainted
|
||||
}
|
||||
die "Invalid username: must be 3-30 alphanumeric characters\n";
|
||||
}
|
||||
|
||||
# Good: Validate and untaint a file path
|
||||
sub untaint_filename($input) {
|
||||
if ($input =~ m{^([a-zA-Z0-9._-]+)$}) {
|
||||
return $1;
|
||||
}
|
||||
die "Invalid filename: contains unsafe characters\n";
|
||||
}
|
||||
|
||||
# Bad: Overly permissive untainting (defeats the purpose)
|
||||
sub bad_untaint($input) {
|
||||
$input =~ /^(.*)$/s;
|
||||
return $1; # Accepts ANYTHING — pointless
|
||||
}
|
||||
```
|
||||
|
||||
## Input Validation
|
||||
|
||||
### Allowlist Over Blocklist
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Allowlist — define exactly what's permitted
|
||||
sub validate_sort_field($field) {
|
||||
my %allowed = map { $_ => 1 } qw(name email created_at updated_at);
|
||||
die "Invalid sort field: $field\n" unless $allowed{$field};
|
||||
return $field;
|
||||
}
|
||||
|
||||
# Good: Validate with specific patterns
|
||||
sub validate_email($email) {
|
||||
if ($email =~ /^([a-zA-Z0-9._%+-]+\@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})$/) {
|
||||
return $1;
|
||||
}
|
||||
die "Invalid email address\n";
|
||||
}
|
||||
|
||||
sub validate_integer($input) {
|
||||
if ($input =~ /^(-?\d{1,10})$/) {
|
||||
return $1 + 0; # Coerce to number
|
||||
}
|
||||
die "Invalid integer\n";
|
||||
}
|
||||
|
||||
# Bad: Blocklist — always incomplete
|
||||
sub bad_validate($input) {
|
||||
die "Invalid" if $input =~ /[<>"';&|]/; # Misses encoded attacks
|
||||
return $input;
|
||||
}
|
||||
```
|
||||
|
||||
### Length Constraints
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
sub validate_comment($text) {
|
||||
die "Comment is required\n" unless length($text) > 0;
|
||||
die "Comment exceeds 10000 chars\n" if length($text) > 10_000;
|
||||
return $text;
|
||||
}
|
||||
```
|
||||
|
||||
## Safe Regular Expressions
|
||||
|
||||
### ReDoS Prevention
|
||||
|
||||
Catastrophic backtracking occurs with nested quantifiers on overlapping patterns.
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Bad: Vulnerable to ReDoS (exponential backtracking)
|
||||
my $bad_re = qr/^(a+)+$/; # Nested quantifiers
|
||||
my $bad_re2 = qr/^([a-zA-Z]+)*$/; # Nested quantifiers on class
|
||||
my $bad_re3 = qr/^(.*?,){10,}$/; # Repeated greedy/lazy combo
|
||||
|
||||
# Good: Rewrite without nesting
|
||||
my $good_re = qr/^a+$/; # Single quantifier
|
||||
my $good_re2 = qr/^[a-zA-Z]+$/; # Single quantifier on class
|
||||
|
||||
# Good: Use possessive quantifiers or atomic groups to prevent backtracking
|
||||
my $safe_re = qr/^[a-zA-Z]++$/; # Possessive (5.10+)
|
||||
my $safe_re2 = qr/^(?>a+)$/; # Atomic group
|
||||
|
||||
# Timeout guard for untrusted patterns (caveat: Perl's deferred "safe signals"
# may not interrupt a match already in progress — prefer fixing the pattern itself)
|
||||
use POSIX qw(alarm);
|
||||
sub safe_match($string, $pattern, $timeout = 2) {
|
||||
my $matched;
|
||||
eval {
|
||||
local $SIG{ALRM} = sub { die "Regex timeout\n" };
|
||||
alarm($timeout);
|
||||
$matched = $string =~ $pattern;
|
||||
alarm(0);
|
||||
};
|
||||
alarm(0);
|
||||
die $@ if $@;
|
||||
return $matched;
|
||||
}
|
||||
```
|
||||
|
||||
## Safe File Operations
|
||||
|
||||
### Three-Argument Open
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Three-arg open, lexical filehandle, check return
|
||||
sub read_file($path) {
|
||||
open my $fh, '<:encoding(UTF-8)', $path
|
||||
or die "Cannot open '$path': $!\n";
|
||||
local $/;
|
||||
my $content = <$fh>;
|
||||
close $fh;
|
||||
return $content;
|
||||
}
|
||||
|
||||
# Bad: Two-arg open with user data (command injection)
|
||||
sub bad_read($path) {
|
||||
open my $fh, $path; # If $path = "|rm -rf /", runs command!
|
||||
open my $fh, "< $path"; # Shell metacharacter injection
|
||||
}
|
||||
```
|
||||
|
||||
### TOCTOU Prevention and Path Traversal
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Fcntl qw(:DEFAULT :flock);
|
||||
use File::Spec;
|
||||
use Cwd qw(realpath);
|
||||
|
||||
# Atomic file creation
|
||||
sub create_file_safe($path) {
|
||||
sysopen(my $fh, $path, O_WRONLY | O_CREAT | O_EXCL, 0600)
|
||||
or die "Cannot create '$path': $!\n";
|
||||
return $fh;
|
||||
}
|
||||
|
||||
# Validate path stays within allowed directory
|
||||
sub safe_path($base_dir, $user_path) {
|
||||
my $real = realpath(File::Spec->catfile($base_dir, $user_path))
|
||||
// die "Path does not exist\n";
|
||||
my $base_real = realpath($base_dir)
|
||||
// die "Base dir does not exist\n";
|
||||
die "Path traversal blocked\n" unless $real =~ /^\Q$base_real\E(?:\/|\z)/;
|
||||
return $real;
|
||||
}
|
||||
```
|
||||
|
||||
Use `File::Temp` for temporary files (`tempfile(UNLINK => 1)`) and `flock(LOCK_EX)` to prevent race conditions.
|
||||
|
||||
## Safe Process Execution
|
||||
|
||||
### List-Form system and exec
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: List form — no shell interpolation
|
||||
sub run_command(@cmd) {
|
||||
system(@cmd) == 0
|
||||
or die "Command failed: @cmd\n";
|
||||
}
|
||||
|
||||
run_command('grep', '-r', $user_pattern, '/var/log/app/');
|
||||
|
||||
# Good: Capture output safely with IPC::Run3
|
||||
use IPC::Run3;
|
||||
sub capture_output(@cmd) {
|
||||
my ($stdout, $stderr);
|
||||
run3(\@cmd, \undef, \$stdout, \$stderr);
|
||||
if ($?) {
|
||||
die "Command failed (exit $?): $stderr\n";
|
||||
}
|
||||
return $stdout;
|
||||
}
|
||||
|
||||
# Bad: String form — shell injection!
|
||||
sub bad_search($pattern) {
|
||||
system("grep -r '$pattern' /var/log/app/"); # If $pattern = "'; rm -rf / #"
|
||||
}
|
||||
|
||||
# Bad: Backticks with interpolation
|
||||
my $output = `ls $user_dir`; # Shell injection risk
|
||||
```
|
||||
|
||||
Also use `Capture::Tiny` for capturing stdout/stderr from external commands safely.
|
||||
|
||||
## SQL Injection Prevention
|
||||
|
||||
### DBI Placeholders
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use DBI;
|
||||
|
||||
my $dbh = DBI->connect($dsn, $user, $pass, {
|
||||
RaiseError => 1,
|
||||
PrintError => 0,
|
||||
AutoCommit => 1,
|
||||
});
|
||||
|
||||
# Good: Parameterized queries — always use placeholders
|
||||
sub find_user($dbh, $email) {
|
||||
my $sth = $dbh->prepare('SELECT * FROM users WHERE email = ?');
|
||||
$sth->execute($email);
|
||||
return $sth->fetchrow_hashref;
|
||||
}
|
||||
|
||||
sub search_users($dbh, $name, $status) {
|
||||
my $sth = $dbh->prepare(
|
||||
'SELECT * FROM users WHERE name LIKE ? AND status = ? ORDER BY name'
|
||||
);
|
||||
$sth->execute("%$name%", $status);
|
||||
return $sth->fetchall_arrayref({});
|
||||
}
|
||||
|
||||
# Bad: String interpolation in SQL (SQLi vulnerability!)
|
||||
sub bad_find($dbh, $email) {
|
||||
my $sth = $dbh->prepare("SELECT * FROM users WHERE email = '$email'");
|
||||
# If $email = "' OR 1=1 --", returns all users
|
||||
$sth->execute;
|
||||
return $sth->fetchrow_hashref;
|
||||
}
|
||||
```
|
||||
|
||||
### Dynamic Column Allowlists
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Good: Validate column names against an allowlist
|
||||
sub order_by($dbh, $column, $direction) {
|
||||
my %allowed_cols = map { $_ => 1 } qw(name email created_at);
|
||||
my %allowed_dirs = map { $_ => 1 } qw(ASC DESC);
|
||||
|
||||
die "Invalid column: $column\n" unless $allowed_cols{$column};
|
||||
die "Invalid direction: $direction\n" unless $allowed_dirs{uc $direction};
|
||||
|
||||
my $sth = $dbh->prepare("SELECT * FROM users ORDER BY $column $direction");
|
||||
$sth->execute;
|
||||
return $sth->fetchall_arrayref({});
|
||||
}
|
||||
|
||||
# Bad: Directly interpolating user-chosen column
|
||||
sub bad_order($dbh, $column) {
|
||||
$dbh->prepare("SELECT * FROM users ORDER BY $column"); # SQLi!
|
||||
}
|
||||
```
|
||||
|
||||
### DBIx::Class (ORM Safety)
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# DBIx::Class generates safe parameterized queries
|
||||
my @users = $schema->resultset('User')->search({
|
||||
status => 'active',
|
||||
email => { -like => '%@example.com' },
|
||||
}, {
|
||||
order_by => { -asc => 'name' },
|
||||
rows => 50,
|
||||
});
|
||||
```
|
||||
|
||||
## Web Security
|
||||
|
||||
### XSS Prevention
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use HTML::Entities qw(encode_entities);
|
||||
use URI::Escape qw(uri_escape_utf8);
|
||||
|
||||
# Good: Encode output for HTML context
|
||||
sub safe_html($user_input) {
|
||||
return encode_entities($user_input);
|
||||
}
|
||||
|
||||
# Good: Encode for URL context
|
||||
sub safe_url_param($value) {
|
||||
return uri_escape_utf8($value);
|
||||
}
|
||||
|
||||
# Good: Encode for JSON context
|
||||
use JSON::MaybeXS qw(encode_json);
|
||||
sub safe_json($data) {
|
||||
return encode_json($data); # Handles escaping
|
||||
}
|
||||
|
||||
# Template auto-escaping (Mojolicious)
|
||||
# <%= $user_input %> — auto-escaped (safe)
|
||||
# <%== $raw_html %> — raw output (dangerous, use only for trusted content)
|
||||
|
||||
# Template auto-escaping (Template Toolkit)
|
||||
# [% user_input | html %] — explicit HTML encoding
|
||||
|
||||
# Bad: Raw output in HTML
|
||||
sub bad_html($input) {
|
||||
print "<div>$input</div>"; # XSS if $input contains <script>
|
||||
}
|
||||
```
|
||||
|
||||
### CSRF Protection
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Crypt::URandom qw(urandom);
|
||||
use MIME::Base64 qw(encode_base64url);
|
||||
|
||||
sub generate_csrf_token() {
|
||||
return encode_base64url(urandom(32));
|
||||
}
|
||||
```
|
||||
|
||||
Use constant-time comparison when verifying tokens. Most web frameworks (Mojolicious, Dancer2, Catalyst) provide built-in CSRF protection — prefer those over hand-rolled solutions.
|
||||
|
||||
### Session and Header Security
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
|
||||
# Mojolicious session + headers
|
||||
$app->secrets(['long-random-secret-rotated-regularly']);
|
||||
$app->sessions->secure(1); # HTTPS only
|
||||
$app->sessions->samesite('Lax');
|
||||
|
||||
$app->hook(after_dispatch => sub ($c) {
|
||||
$c->res->headers->header('X-Content-Type-Options' => 'nosniff');
|
||||
$c->res->headers->header('X-Frame-Options' => 'DENY');
|
||||
$c->res->headers->header('Content-Security-Policy' => "default-src 'self'");
|
||||
$c->res->headers->header('Strict-Transport-Security' => 'max-age=31536000; includeSubDomains');
|
||||
});
|
||||
```
|
||||
|
||||
## Output Encoding
|
||||
|
||||
Always encode output for its context: `HTML::Entities::encode_entities()` for HTML, `URI::Escape::uri_escape_utf8()` for URLs, `JSON::MaybeXS::encode_json()` for JSON.
|
||||
|
||||
## CPAN Module Security
|
||||
|
||||
- **Pin versions** in cpanfile: `requires 'DBI', '== 1.643';`
|
||||
- **Prefer maintained modules**: Check MetaCPAN for recent releases
|
||||
- **Minimize dependencies**: Each dependency is an attack surface
|
||||
|
||||
## Security Tooling
|
||||
|
||||
### perlcritic Security Policies
|
||||
|
||||
```ini
|
||||
# .perlcriticrc — security-focused configuration
|
||||
severity = 3
|
||||
theme = security + core
|
||||
|
||||
# Require three-arg open
|
||||
[InputOutput::RequireThreeArgOpen]
|
||||
severity = 5
|
||||
|
||||
# Require checked system calls
|
||||
[InputOutput::RequireCheckedSyscalls]
|
||||
functions = :builtins
|
||||
severity = 4
|
||||
|
||||
# Prohibit string eval
|
||||
[BuiltinFunctions::ProhibitStringyEval]
|
||||
severity = 5
|
||||
|
||||
# Prohibit backtick operators
|
||||
[InputOutput::ProhibitBacktickOperators]
|
||||
severity = 4
|
||||
|
||||
# Require taint checking in CGI
|
||||
[Modules::RequireTaintChecking]
|
||||
severity = 5
|
||||
|
||||
# Prohibit two-arg open
|
||||
[InputOutput::ProhibitTwoArgOpen]
|
||||
severity = 5
|
||||
|
||||
# Prohibit bare-word filehandles
|
||||
[InputOutput::ProhibitBarewordFileHandles]
|
||||
severity = 5
|
||||
```
|
||||
|
||||
### Running perlcritic
|
||||
|
||||
```bash
|
||||
# Check a file
|
||||
perlcritic --severity 3 --theme security lib/MyApp/Handler.pm
|
||||
|
||||
# Check entire project
|
||||
perlcritic --severity 3 --theme security lib/
|
||||
|
||||
# CI integration
|
||||
perlcritic --severity 4 --theme security --quiet lib/ || exit 1
|
||||
```
|
||||
|
||||
## Quick Security Checklist
|
||||
|
||||
| Check | What to Verify |
|
||||
|---|---|
|
||||
| Taint mode | `-T` flag on CGI/web scripts |
|
||||
| Input validation | Allowlist patterns, length limits |
|
||||
| File operations | Three-arg open, path traversal checks |
|
||||
| Process execution | List-form system, no shell interpolation |
|
||||
| SQL queries | DBI placeholders, never interpolate |
|
||||
| HTML output | `encode_entities()`, template auto-escape |
|
||||
| CSRF tokens | Generated, verified on state-changing requests |
|
||||
| Session config | Secure, HttpOnly, SameSite cookies |
|
||||
| HTTP headers | CSP, X-Frame-Options, HSTS |
|
||||
| Dependencies | Pinned versions, audited modules |
|
||||
| Regex safety | No nested quantifiers, anchored patterns |
|
||||
| Error messages | No stack traces or paths leaked to users |
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
```perl
|
||||
# 1. Two-arg open with user data (command injection)
|
||||
open my $fh, $user_input; # CRITICAL vulnerability
|
||||
|
||||
# 2. String-form system (shell injection)
|
||||
system("convert $user_file output.png"); # CRITICAL vulnerability
|
||||
|
||||
# 3. SQL string interpolation
|
||||
$dbh->do("DELETE FROM users WHERE id = $id"); # SQLi
|
||||
|
||||
# 4. eval with user input (code injection)
|
||||
eval $user_code; # Remote code execution
|
||||
|
||||
# 5. Trusting $ENV without sanitizing
|
||||
my $path = $ENV{UPLOAD_DIR}; # Could be manipulated
|
||||
system("ls $path"); # Double vulnerability
|
||||
|
||||
# 6. Disabling taint without validation
|
||||
($input) = $input =~ /(.*)/s; # Lazy untaint — defeats purpose
|
||||
|
||||
# 7. Raw user data in HTML
|
||||
print "<div>Welcome, $username!</div>"; # XSS
|
||||
|
||||
# 8. Unvalidated redirects
|
||||
print $cgi->redirect($user_url); # Open redirect
|
||||
```
|
||||
|
||||
**Remember**: Perl's flexibility is powerful but requires discipline. Use taint mode for web-facing code, validate all input with allowlists, use DBI placeholders for every query, and encode all output for its context. Defense in depth — never rely on a single layer.
|
||||
475
skills/perl-testing/SKILL.md
Normal file
475
skills/perl-testing/SKILL.md
Normal file
@@ -0,0 +1,475 @@
|
||||
---
|
||||
name: perl-testing
|
||||
description: Perl testing patterns using Test2::V0, Test::More, prove runner, mocking, coverage with Devel::Cover, and TDD methodology.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Perl Testing Patterns
|
||||
|
||||
Comprehensive testing strategies for Perl applications using Test2::V0, Test::More, prove, and TDD methodology.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing new Perl code (follow TDD: red, green, refactor)
|
||||
- Designing test suites for Perl modules or applications
|
||||
- Reviewing Perl test coverage
|
||||
- Setting up Perl testing infrastructure
|
||||
- Migrating tests from Test::More to Test2::V0
|
||||
- Debugging failing Perl tests
|
||||
|
||||
## TDD Workflow
|
||||
|
||||
Always follow the RED-GREEN-REFACTOR cycle.
|
||||
|
||||
```perl
|
||||
# Step 1: RED — Write a failing test
|
||||
# t/unit/calculator.t
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
|
||||
use lib 'lib';
|
||||
use Calculator;
|
||||
|
||||
subtest 'addition' => sub {
|
||||
my $calc = Calculator->new;
|
||||
is($calc->add(2, 3), 5, 'adds two numbers');
|
||||
is($calc->add(-1, 1), 0, 'handles negatives');
|
||||
};
|
||||
|
||||
done_testing;
|
||||
|
||||
# Step 2: GREEN — Write minimal implementation
|
||||
# lib/Calculator.pm
|
||||
package Calculator;
|
||||
use v5.36;
|
||||
use Moo;
|
||||
|
||||
sub add($self, $x, $y) {
    return $x + $y;
}
|
||||
|
||||
1;
|
||||
|
||||
# Step 3: REFACTOR — Improve while tests stay green
|
||||
# Run: prove -lv t/unit/calculator.t
|
||||
```
|
||||
|
||||
## Test::More Fundamentals
|
||||
|
||||
The standard Perl testing module — widely used, ships with core.
|
||||
|
||||
### Basic Assertions
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test::More;
|
||||
|
||||
# Plan upfront or use done_testing
|
||||
# plan tests => 5; # Fixed plan (optional)
|
||||
|
||||
# Equality
|
||||
is($result, 42, 'returns correct value');
|
||||
isnt($result, 0, 'not zero');
|
||||
|
||||
# Boolean
|
||||
ok($user->is_active, 'user is active');
|
||||
ok(!$user->is_banned, 'user is not banned');
|
||||
|
||||
# Deep comparison
|
||||
is_deeply(
|
||||
$got,
|
||||
{ name => 'Alice', roles => ['admin'] },
|
||||
'returns expected structure'
|
||||
);
|
||||
|
||||
# Pattern matching
|
||||
like($error, qr/not found/i, 'error mentions not found');
|
||||
unlike($output, qr/password/, 'output hides password');
|
||||
|
||||
# Type check
|
||||
isa_ok($obj, 'MyApp::User');
|
||||
can_ok($obj, 'save', 'delete');
|
||||
|
||||
done_testing;
|
||||
```
|
||||
|
||||
### SKIP and TODO
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test::More;
|
||||
|
||||
# Skip tests conditionally
|
||||
SKIP: {
|
||||
skip 'No database configured', 2 unless $ENV{TEST_DB};
|
||||
|
||||
my $db = connect_db();
|
||||
ok($db->ping, 'database is reachable');
|
||||
is($db->version, '15', 'correct PostgreSQL version');
|
||||
}
|
||||
|
||||
# Mark expected failures
|
||||
TODO: {
|
||||
local $TODO = 'Caching not yet implemented';
|
||||
is($cache->get('key'), 'value', 'cache returns value');
|
||||
}
|
||||
|
||||
done_testing;
|
||||
```
|
||||
|
||||
## Test2::V0 Modern Framework
|
||||
|
||||
Test2::V0 is the modern replacement for Test::More — richer assertions, better diagnostics, and extensible.
|
||||
|
||||
### Why Test2?
|
||||
|
||||
- Superior deep comparison with hash/array builders
|
||||
- Better diagnostic output on failures
|
||||
- Subtests with cleaner scoping
|
||||
- Extensible via Test2::Tools::* plugins
|
||||
- Backward-compatible with Test::More tests
|
||||
|
||||
### Deep Comparison with Builders
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
|
||||
# Hash builder — check partial structure
|
||||
is(
|
||||
$user->to_hash,
|
||||
hash {
|
||||
field name => 'Alice';
|
||||
field email => match(qr/\@example\.com$/);
|
||||
field age => validator(sub { $_ >= 18 });
|
||||
# Ignore other fields
|
||||
etc();
|
||||
},
|
||||
'user has expected fields'
|
||||
);
|
||||
|
||||
# Array builder
|
||||
is(
|
||||
$result,
|
||||
array {
|
||||
item 'first';
|
||||
item match(qr/^second/);
|
||||
item DNE(); # Does Not Exist — verify no extra items
|
||||
},
|
||||
'result matches expected list'
|
||||
);
|
||||
|
||||
# Bag — order-independent comparison
|
||||
is(
|
||||
$tags,
|
||||
bag {
|
||||
item 'perl';
|
||||
item 'testing';
|
||||
item 'tdd';
|
||||
},
|
||||
'has all required tags regardless of order'
|
||||
);
|
||||
```
|
||||
|
||||
### Subtests
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
|
||||
subtest 'User creation' => sub {
|
||||
my $user = User->new(name => 'Alice', email => 'alice@example.com');
|
||||
ok($user, 'user object created');
|
||||
is($user->name, 'Alice', 'name is set');
|
||||
is($user->email, 'alice@example.com', 'email is set');
|
||||
};
|
||||
|
||||
subtest 'User validation' => sub {
|
||||
my $warnings = warns {
|
||||
User->new(name => '', email => 'bad');
|
||||
};
|
||||
ok($warnings, 'warns on invalid data');
|
||||
};
|
||||
|
||||
done_testing;
|
||||
```
|
||||
|
||||
### Exception Testing with Test2
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
|
||||
# Test that code dies
|
||||
like(
|
||||
dies { divide(10, 0) },
|
||||
qr/Division by zero/,
|
||||
'dies on division by zero'
|
||||
);
|
||||
|
||||
# Test that code lives
|
||||
ok(lives { divide(10, 2) }, 'division succeeds') or note($@);
|
||||
|
||||
# Combined pattern
|
||||
subtest 'error handling' => sub {
|
||||
ok(lives { parse_config('valid.json') }, 'valid config parses');
|
||||
like(
|
||||
dies { parse_config('missing.json') },
|
||||
qr/Cannot open/,
|
||||
'missing file dies with message'
|
||||
);
|
||||
};
|
||||
|
||||
done_testing;
|
||||
```
|
||||
|
||||
## Test Organization and prove
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```text
|
||||
t/
|
||||
├── 00-load.t # Verify modules compile
|
||||
├── 01-basic.t # Core functionality
|
||||
├── unit/
|
||||
│ ├── config.t # Unit tests by module
|
||||
│ ├── user.t
|
||||
│ └── util.t
|
||||
├── integration/
|
||||
│ ├── database.t
|
||||
│ └── api.t
|
||||
├── lib/
|
||||
│ └── TestHelper.pm # Shared test utilities
|
||||
└── fixtures/
|
||||
├── config.json # Test data files
|
||||
└── users.csv
|
||||
```
|
||||
|
||||
### prove Commands
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
prove -l t/
|
||||
|
||||
# Verbose output
|
||||
prove -lv t/
|
||||
|
||||
# Run specific test
|
||||
prove -lv t/unit/user.t
|
||||
|
||||
# Recursive search
|
||||
prove -lr t/
|
||||
|
||||
# Parallel execution (8 jobs)
|
||||
prove -lr -j8 t/
|
||||
|
||||
# Run only failing tests from last run
|
||||
prove -l --state=failed t/
|
||||
|
||||
# Colored output with timer
|
||||
prove -l --color --timer t/
|
||||
|
||||
# TAP output for CI
|
||||
prove -l --formatter TAP::Formatter::JUnit t/ > results.xml
|
||||
```
|
||||
|
||||
### .proverc Configuration
|
||||
|
||||
```text
|
||||
-l
|
||||
--color
|
||||
--timer
|
||||
-r
|
||||
-j4
|
||||
--state=save
|
||||
```
|
||||
|
||||
## Fixtures and Setup/Teardown
|
||||
|
||||
### Subtest Isolation
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
use File::Temp qw(tempdir);
|
||||
use Path::Tiny;
|
||||
|
||||
subtest 'file processing' => sub {
|
||||
# Setup
|
||||
my $dir = tempdir(CLEANUP => 1);
|
||||
my $file = path($dir, 'input.txt');
|
||||
$file->spew_utf8("line1\nline2\nline3\n");
|
||||
|
||||
# Test
|
||||
my $result = process_file("$file");
|
||||
is($result->{line_count}, 3, 'counts lines');
|
||||
|
||||
# Teardown happens automatically (CLEANUP => 1)
|
||||
};
|
||||
```
|
||||
|
||||
### Shared Test Helpers
|
||||
|
||||
Place reusable helpers in `t/lib/TestHelper.pm` and load with `use lib 't/lib'`. Export factory functions like `create_test_db()`, `create_temp_dir()`, and `fixture_path()` via `Exporter`.
|
||||
|
||||
## Mocking
|
||||
|
||||
### Test::MockModule
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
use Test::MockModule;
|
||||
|
||||
subtest 'mock external API' => sub {
|
||||
my $mock = Test::MockModule->new('MyApp::API');
|
||||
|
||||
# Good: Mock returns controlled data
|
||||
$mock->mock(fetch_user => sub ($self, $id) {
|
||||
return { id => $id, name => 'Mock User', email => 'mock@test.com' };
|
||||
});
|
||||
|
||||
my $api = MyApp::API->new;
|
||||
my $user = $api->fetch_user(42);
|
||||
is($user->{name}, 'Mock User', 'returns mocked user');
|
||||
|
||||
# Verify call count
|
||||
my $call_count = 0;
|
||||
$mock->mock(fetch_user => sub { $call_count++; return {} });
|
||||
$api->fetch_user(1);
|
||||
$api->fetch_user(2);
|
||||
is($call_count, 2, 'fetch_user called twice');
|
||||
|
||||
# Mock is automatically restored when $mock goes out of scope
|
||||
};
|
||||
|
||||
# Bad: Monkey-patching without restoration
|
||||
# *MyApp::API::fetch_user = sub { ... }; # NEVER — leaks across tests
|
||||
```
|
||||
|
||||
For lightweight mock objects, use `Test::MockObject` to create injectable test doubles with `->mock()` and verify calls with `->called_ok()`.
|
||||
|
||||
## Coverage with Devel::Cover
|
||||
|
||||
### Running Coverage
|
||||
|
||||
```bash
|
||||
# Basic coverage report
|
||||
cover -test
|
||||
|
||||
# Or step by step
|
||||
perl -MDevel::Cover -Ilib t/unit/user.t
|
||||
cover
|
||||
|
||||
# HTML report
|
||||
cover -report html
|
||||
open cover_db/coverage.html
|
||||
|
||||
# Specific thresholds
|
||||
cover -test -report text | grep 'Total'
|
||||
|
||||
# CI-friendly: fail under threshold
|
||||
cover -test && cover -report text -select '^lib/' \
|
||||
| perl -ne 'if (/Total.*?(\d+\.\d+)/) { exit 1 if $1 < 80 }'
|
||||
```
|
||||
|
||||
### Integration Testing
|
||||
|
||||
Use in-memory SQLite for database tests, mock HTTP::Tiny for API tests.
|
||||
|
||||
```perl
|
||||
use v5.36;
|
||||
use Test2::V0;
|
||||
use DBI;
|
||||
|
||||
subtest 'database integration' => sub {
|
||||
my $dbh = DBI->connect('dbi:SQLite:dbname=:memory:', '', '', {
|
||||
RaiseError => 1,
|
||||
});
|
||||
$dbh->do('CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)');
|
||||
|
||||
$dbh->prepare('INSERT INTO users (name) VALUES (?)')->execute('Alice');
|
||||
my $row = $dbh->selectrow_hashref('SELECT * FROM users WHERE name = ?', undef, 'Alice');
|
||||
is($row->{name}, 'Alice', 'inserted and retrieved user');
|
||||
};
|
||||
|
||||
done_testing;
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### DO
|
||||
|
||||
- **Follow TDD**: Write tests before implementation (red-green-refactor)
|
||||
- **Use Test2::V0**: Modern assertions, better diagnostics
|
||||
- **Use subtests**: Group related assertions, isolate state
|
||||
- **Mock external dependencies**: Network, database, file system
|
||||
- **Use `prove -l`**: Always include lib/ in `@INC`
|
||||
- **Name tests clearly**: `'user login with invalid password fails'`
|
||||
- **Test edge cases**: Empty strings, undef, zero, boundary values
|
||||
- **Aim for 80%+ coverage**: Focus on business logic paths
|
||||
- **Keep tests fast**: Mock I/O, use in-memory databases
|
||||
|
||||
### DON'T
|
||||
|
||||
- **Don't test implementation**: Test behavior and output, not internals
|
||||
- **Don't share state between subtests**: Each subtest should be independent
|
||||
- **Don't skip `done_testing`**: Ensures all planned tests ran
|
||||
- **Don't over-mock**: Mock boundaries only, not the code under test
|
||||
- **Don't use `Test::More` for new projects**: Prefer Test2::V0
|
||||
- **Don't ignore test failures**: All tests must pass before merge
|
||||
- **Don't test CPAN modules**: Trust libraries to work correctly
|
||||
- **Don't write brittle tests**: Avoid over-specific string matching
|
||||
|
||||
## Quick Reference
|
||||
|
||||
| Task | Command / Pattern |
|
||||
|---|---|
|
||||
| Run all tests | `prove -lr t/` |
|
||||
| Run one test verbose | `prove -lv t/unit/user.t` |
|
||||
| Parallel test run | `prove -lr -j8 t/` |
|
||||
| Coverage report | `cover -test && cover -report html` |
|
||||
| Test equality | `is($got, $expected, 'label')` |
|
||||
| Deep comparison | `is($got, hash { field k => 'v'; etc() }, 'label')` |
|
||||
| Test exception | `like(dies { ... }, qr/msg/, 'label')` |
|
||||
| Test no exception | `ok(lives { ... }, 'label')` |
|
||||
| Mock a method | `Test::MockModule->new('Pkg')->mock(m => sub { ... })` |
|
||||
| Skip tests | `SKIP: { skip 'reason', $count unless $cond; ... }` |
|
||||
| TODO tests | `TODO: { local $TODO = 'reason'; ... }` |
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
### Forgetting `done_testing`
|
||||
|
||||
```perl
|
||||
# Bad: Test file runs but doesn't verify all tests executed
|
||||
use Test2::V0;
|
||||
is(1, 1, 'works');
|
||||
# Missing done_testing — silent bugs if test code is skipped
|
||||
|
||||
# Good: Always end with done_testing
|
||||
use Test2::V0;
|
||||
is(1, 1, 'works');
|
||||
done_testing;
|
||||
```
|
||||
|
||||
### Missing `-l` Flag
|
||||
|
||||
```bash
|
||||
# Bad: Modules in lib/ not found
|
||||
prove t/unit/user.t
|
||||
# Can't locate MyApp/User.pm in @INC
|
||||
|
||||
# Good: Include lib/ in @INC
|
||||
prove -l t/unit/user.t
|
||||
```
|
||||
|
||||
### Over-Mocking
|
||||
|
||||
Mock the *dependency*, not the code under test. If your test only verifies that a mock returns what you told it to, it tests nothing.
|
||||
|
||||
### Test Pollution
|
||||
|
||||
Use `my` variables inside subtests — never `our` — to prevent state leaking between tests.
|
||||
|
||||
**Remember**: Tests are your safety net. Keep them fast, focused, and independent. Use Test2::V0 for new projects, prove for running, and Devel::Cover for accountability.
|
||||
@@ -74,7 +74,24 @@ Scanning:
|
||||
|
||||
### Phase 2 — Quality Evaluation
|
||||
|
||||
Launch a Task tool subagent (**Explore agent, model: opus**) with the full inventory and checklist.
|
||||
Launch an Agent tool subagent (**general-purpose agent**) with the full inventory and checklist:
|
||||
|
||||
```text
|
||||
Agent(
|
||||
subagent_type="general-purpose",
|
||||
prompt="
|
||||
Evaluate the following skill inventory against the checklist.
|
||||
|
||||
[INVENTORY]
|
||||
|
||||
[CHECKLIST]
|
||||
|
||||
Return JSON for each skill:
|
||||
{ \"verdict\": \"Keep\"|\"Improve\"|\"Update\"|\"Retire\"|\"Merge into [X]\", \"reason\": \"...\" }
|
||||
"
|
||||
)
|
||||
```
|
||||
|
||||
The subagent reads each skill, applies the checklist, and returns per-skill JSON:
|
||||
|
||||
`{ "verdict": "Keep"|"Improve"|"Update"|"Retire"|"Merge into [X]", "reason": "..." }`
|
||||
|
||||
@@ -1927,7 +1927,7 @@ function runTests() {
|
||||
PreToolUse: [{
|
||||
matcher: 'Write',
|
||||
hooks: [{
|
||||
type: 'intercept',
|
||||
type: 'command',
|
||||
command: 'echo test',
|
||||
async: 'yes' // Should be boolean, not string
|
||||
}]
|
||||
@@ -1947,7 +1947,7 @@ function runTests() {
|
||||
PostToolUse: [{
|
||||
matcher: 'Edit',
|
||||
hooks: [{
|
||||
type: 'intercept',
|
||||
type: 'command',
|
||||
command: 'echo test',
|
||||
timeout: -5 // Must be non-negative
|
||||
}]
|
||||
@@ -2105,6 +2105,31 @@ function runTests() {
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nRound 82b: validate-hooks (current official events and hook types):');
|
||||
|
||||
if (test('accepts UserPromptSubmit with omitted matcher and prompt/http/agent hooks', () => {
|
||||
const testDir = createTestDir();
|
||||
const hooksJson = JSON.stringify({
|
||||
hooks: {
|
||||
UserPromptSubmit: [
|
||||
{
|
||||
hooks: [
|
||||
{ type: 'prompt', prompt: 'Summarize the request.' },
|
||||
{ type: 'agent', prompt: 'Review for security issues.', model: 'gpt-5.4' },
|
||||
{ type: 'http', url: 'https://example.com/hooks', headers: { Authorization: 'Bearer token' } }
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
});
|
||||
const hooksFile = path.join(testDir, 'hooks.json');
|
||||
fs.writeFileSync(hooksFile, hooksJson);
|
||||
|
||||
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
|
||||
assert.strictEqual(result.code, 0, 'Should accept current official hook event/type combinations');
|
||||
cleanupTestDir(testDir);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 83: validate-agents whitespace-only field, validate-skills empty SKILL.md ──
|
||||
|
||||
console.log('\nRound 83: validate-agents (whitespace-only frontmatter field value):');
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
246
tests/lib/resolve-formatter.test.js
Normal file
246
tests/lib/resolve-formatter.test.js
Normal file
@@ -0,0 +1,246 @@
|
||||
/**
|
||||
* Tests for scripts/lib/resolve-formatter.js
|
||||
*
|
||||
* Run with: node tests/lib/resolve-formatter.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
|
||||
const { findProjectRoot, detectFormatter, resolveFormatterBin, clearCaches } = require('../../scripts/lib/resolve-formatter');
|
||||
|
||||
/**
 * Execute one test case and report its outcome on stdout.
 *
 * @param {string} name - Test description
 * @param {() => void} fn - Test body (throws on failure)
 * @returns {boolean} Whether the test passed
 */
function test(name, fn) {
  let failure = null;
  try {
    fn();
  } catch (err) {
    failure = err;
  }
  if (failure === null) {
    console.log(`  ✓ ${name}`);
    return true;
  }
  console.log(`  ✗ ${name}`);
  console.log(`    Error: ${failure.message}`);
  return false;
}
|
||||
|
||||
/** Registry of every temp directory created during this run (for cleanup). */
const tmpDirs = [];

/**
 * Create a temporary directory and register it for later cleanup.
 *
 * @returns {string} Absolute path to the new temp directory
 */
function makeTmpDir() {
  const created = fs.mkdtempSync(path.join(os.tmpdir(), 'resolve-fmt-'));
  tmpDirs.push(created);
  return created;
}

/**
 * Remove every registered temporary directory (best-effort) and
 * empty the registry.
 */
function cleanupTmpDirs() {
  while (tmpDirs.length > 0) {
    const dir = tmpDirs.pop();
    try {
      fs.rmSync(dir, { recursive: true, force: true });
    } catch {
      // Best-effort cleanup — ignore removal failures.
    }
  }
}
|
||||
|
||||
/**
 * Run the full resolve-formatter test suite and exit the process.
 *
 * Covers findProjectRoot, detectFormatter, resolveFormatterBin, and
 * clearCaches from scripts/lib/resolve-formatter.js. Each case runs in a
 * fresh temp directory; caches are cleared before every case via run().
 * Exits with status 1 if any test failed, 0 otherwise.
 */
function runTests() {
  console.log('\n=== Testing resolve-formatter.js ===\n');

  let passed = 0;
  let failed = 0;

  // Wrapper: clear module caches before each case so earlier tests
  // cannot leak cached roots/formatters/binaries into later ones.
  function run(name, fn) {
    clearCaches();
    if (test(name, fn)) passed++;
    else failed++;
  }

  // ── findProjectRoot ───────────────────────────────────────────

  run('findProjectRoot: finds package.json in parent dir', () => {
    const root = makeTmpDir();
    const sub = path.join(root, 'src', 'lib');
    fs.mkdirSync(sub, { recursive: true });
    fs.writeFileSync(path.join(root, 'package.json'), '{}');

    assert.strictEqual(findProjectRoot(sub), root);
  });

  run('findProjectRoot: returns startDir when no package.json', () => {
    const root = makeTmpDir();
    const sub = path.join(root, 'deep');
    fs.mkdirSync(sub, { recursive: true });

    // No package.json anywhere in tmp → falls back to startDir
    assert.strictEqual(findProjectRoot(sub), sub);
  });

  run('findProjectRoot: caches result for same startDir', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'package.json'), '{}');

    const first = findProjectRoot(root);
    // Remove package.json — cache should still return the old result
    fs.unlinkSync(path.join(root, 'package.json'));
    const second = findProjectRoot(root);

    assert.strictEqual(first, second);
  });

  // ── detectFormatter ───────────────────────────────────────────

  run('detectFormatter: detects biome.json', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'biome.json'), '{}');
    assert.strictEqual(detectFormatter(root), 'biome');
  });

  run('detectFormatter: detects biome.jsonc', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'biome.jsonc'), '{}');
    assert.strictEqual(detectFormatter(root), 'biome');
  });

  run('detectFormatter: detects .prettierrc', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, '.prettierrc'), '{}');
    assert.strictEqual(detectFormatter(root), 'prettier');
  });

  run('detectFormatter: detects prettier.config.js', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'prettier.config.js'), 'module.exports = {}');
    assert.strictEqual(detectFormatter(root), 'prettier');
  });

  run('detectFormatter: detects prettier key in package.json', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'package.json'), JSON.stringify({ name: 'test', prettier: { singleQuote: true } }));
    assert.strictEqual(detectFormatter(root), 'prettier');
  });

  run('detectFormatter: ignores package.json without prettier key', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'package.json'), JSON.stringify({ name: 'test' }));
    assert.strictEqual(detectFormatter(root), null);
  });

  run('detectFormatter: biome takes priority over prettier', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'biome.json'), '{}');
    fs.writeFileSync(path.join(root, '.prettierrc'), '{}');
    assert.strictEqual(detectFormatter(root), 'biome');
  });

  run('detectFormatter: returns null when no config found', () => {
    const root = makeTmpDir();
    assert.strictEqual(detectFormatter(root), null);
  });

  // ── resolveFormatterBin ───────────────────────────────────────

  run('resolveFormatterBin: uses local biome binary when available', () => {
    const root = makeTmpDir();
    const binDir = path.join(root, 'node_modules', '.bin');
    fs.mkdirSync(binDir, { recursive: true });
    // Windows npm shims end in .cmd; match the platform convention.
    const binName = process.platform === 'win32' ? 'biome.cmd' : 'biome';
    fs.writeFileSync(path.join(binDir, binName), '');

    const result = resolveFormatterBin(root, 'biome');
    assert.strictEqual(result.bin, path.join(binDir, binName));
    assert.deepStrictEqual(result.prefix, []);
  });

  run('resolveFormatterBin: falls back to npx for biome', () => {
    const root = makeTmpDir();
    const result = resolveFormatterBin(root, 'biome');
    const expectedBin = process.platform === 'win32' ? 'npx.cmd' : 'npx';
    assert.strictEqual(result.bin, expectedBin);
    assert.deepStrictEqual(result.prefix, ['@biomejs/biome']);
  });

  run('resolveFormatterBin: uses local prettier binary when available', () => {
    const root = makeTmpDir();
    const binDir = path.join(root, 'node_modules', '.bin');
    fs.mkdirSync(binDir, { recursive: true });
    const binName = process.platform === 'win32' ? 'prettier.cmd' : 'prettier';
    fs.writeFileSync(path.join(binDir, binName), '');

    const result = resolveFormatterBin(root, 'prettier');
    assert.strictEqual(result.bin, path.join(binDir, binName));
    assert.deepStrictEqual(result.prefix, []);
  });

  run('resolveFormatterBin: falls back to npx for prettier', () => {
    const root = makeTmpDir();
    const result = resolveFormatterBin(root, 'prettier');
    const expectedBin = process.platform === 'win32' ? 'npx.cmd' : 'npx';
    assert.strictEqual(result.bin, expectedBin);
    assert.deepStrictEqual(result.prefix, ['prettier']);
  });

  run('resolveFormatterBin: returns null for unknown formatter', () => {
    const root = makeTmpDir();
    const result = resolveFormatterBin(root, 'unknown');
    assert.strictEqual(result, null);
  });

  run('resolveFormatterBin: caches resolved binary', () => {
    const root = makeTmpDir();
    const binDir = path.join(root, 'node_modules', '.bin');
    fs.mkdirSync(binDir, { recursive: true });
    const binName = process.platform === 'win32' ? 'biome.cmd' : 'biome';
    fs.writeFileSync(path.join(binDir, binName), '');

    const first = resolveFormatterBin(root, 'biome');
    // Delete the binary — the cached resolution should still be returned.
    fs.unlinkSync(path.join(binDir, binName));
    const second = resolveFormatterBin(root, 'biome');

    assert.strictEqual(first.bin, second.bin);
  });

  // ── clearCaches ───────────────────────────────────────────────

  run('clearCaches: clears all cached values', () => {
    const root = makeTmpDir();
    fs.writeFileSync(path.join(root, 'package.json'), '{}');
    fs.writeFileSync(path.join(root, 'biome.json'), '{}');

    // Populate all three caches.
    findProjectRoot(root);
    detectFormatter(root);
    resolveFormatterBin(root, 'biome');

    clearCaches();

    // After clearing, removing config should change detection
    fs.unlinkSync(path.join(root, 'biome.json'));
    assert.strictEqual(detectFormatter(root), null);
  });

  // ── Summary & Cleanup ─────────────────────────────────────────

  cleanupTmpDirs();

  console.log('\n=== Test Results ===');
  console.log(`Passed: ${passed}`);
  console.log(`Failed: ${failed}`);
  console.log(`Total: ${passed + failed}`);
  // Non-zero exit signals failure to the parent test runner / CI.
  process.exit(failed > 0 ? 1 : 0);
}

// Entry point: execute the suite immediately when this file is run.
runTests();
|
||||
@@ -10,24 +10,28 @@ const path = require('path');
|
||||
const fs = require('fs');
|
||||
|
||||
const testsDir = __dirname;
|
||||
const testFiles = [
|
||||
'lib/utils.test.js',
|
||||
'lib/package-manager.test.js',
|
||||
'lib/session-manager.test.js',
|
||||
'lib/session-aliases.test.js',
|
||||
'lib/project-detect.test.js',
|
||||
'hooks/hooks.test.js',
|
||||
'hooks/evaluate-session.test.js',
|
||||
'hooks/suggest-compact.test.js',
|
||||
'integration/hooks.test.js',
|
||||
'ci/validators.test.js',
|
||||
'scripts/claw.test.js',
|
||||
'scripts/setup-package-manager.test.js',
|
||||
'scripts/skill-create-output.test.js'
|
||||
];
|
||||
|
||||
/**
 * Discover all *.test.js files under testsDir (relative paths for stable output order).
 *
 * @param {string} dir - Directory to scan
 * @param {string} [baseDir=dir] - Root against which relative paths are computed
 * @param {string[]} [acc=[]] - Accumulator shared across recursive calls
 * @returns {string[]} Sorted relative paths of every *.test.js file found
 */
function discoverTestFiles(dir, baseDir = dir, acc = []) {
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const absolute = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      discoverTestFiles(absolute, baseDir, acc);
      continue;
    }
    if (entry.isFile() && entry.name.endsWith('.test.js')) {
      acc.push(path.relative(baseDir, absolute));
    }
  }
  return acc.sort();
}
|
||||
|
||||
const testFiles = discoverTestFiles(testsDir);
|
||||
|
||||
const BOX_W = 58; // inner width between ║ delimiters
|
||||
const boxLine = (s) => `║${s.padEnd(BOX_W)}║`;
|
||||
const boxLine = s => `║${s.padEnd(BOX_W)}║`;
|
||||
|
||||
console.log('╔' + '═'.repeat(BOX_W) + '╗');
|
||||
console.log(boxLine(' Everything Claude Code - Test Suite'));
|
||||
@@ -67,6 +71,17 @@ for (const testFile of testFiles) {
|
||||
|
||||
if (passedMatch) totalPassed += parseInt(passedMatch[1], 10);
|
||||
if (failedMatch) totalFailed += parseInt(failedMatch[1], 10);
|
||||
|
||||
if (result.error) {
|
||||
console.log(`✗ ${testFile} failed to start: ${result.error.message}`);
|
||||
totalFailed += failedMatch ? 0 : 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (result.status !== 0) {
|
||||
console.log(`✗ ${testFile} exited with status ${result.status}`);
|
||||
totalFailed += failedMatch ? 0 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
totalTests = totalPassed + totalFailed;
|
||||
|
||||
Reference in New Issue
Block a user