Merge branch 'main' into feature/trae-integration

This commit is contained in:
Affaan Mustafa
2026-03-28 20:45:51 -04:00
committed by GitHub
86 changed files with 3925 additions and 408 deletions

View File

@@ -0,0 +1,20 @@
{
"name": "everything-claude-code",
"interface": {
"displayName": "Everything Claude Code"
},
"plugins": [
{
"name": "everything-claude-code",
"source": {
"source": "local",
"path": "../.."
},
"policy": {
"installation": "AVAILABLE",
"authentication": "ON_INSTALL"
},
"category": "Productivity"
}
]
}

View File

@@ -21,5 +21,37 @@
"workflow", "workflow",
"automation", "automation",
"best-practices" "best-practices"
] ],
"agents": [
"./agents/architect.md",
"./agents/build-error-resolver.md",
"./agents/chief-of-staff.md",
"./agents/code-reviewer.md",
"./agents/cpp-build-resolver.md",
"./agents/cpp-reviewer.md",
"./agents/database-reviewer.md",
"./agents/doc-updater.md",
"./agents/docs-lookup.md",
"./agents/e2e-runner.md",
"./agents/flutter-reviewer.md",
"./agents/go-build-resolver.md",
"./agents/go-reviewer.md",
"./agents/harness-optimizer.md",
"./agents/java-build-resolver.md",
"./agents/java-reviewer.md",
"./agents/kotlin-build-resolver.md",
"./agents/kotlin-reviewer.md",
"./agents/loop-operator.md",
"./agents/planner.md",
"./agents/python-reviewer.md",
"./agents/pytorch-build-resolver.md",
"./agents/refactor-cleaner.md",
"./agents/rust-build-resolver.md",
"./agents/rust-reviewer.md",
"./agents/security-reviewer.md",
"./agents/tdd-guide.md",
"./agents/typescript-reviewer.md"
],
"skills": ["./skills/"],
"commands": ["./commands/"]
} }

47
.claude/rules/node.md Normal file
View File

@@ -0,0 +1,47 @@
# Node.js Rules for everything-claude-code
> Project-specific rules for the ECC codebase. Extends common rules.
## Stack
- **Runtime**: Node.js >=18 (no transpilation, plain CommonJS)
- **Test runner**: `node tests/run-all.js` — individual files via `node tests/**/*.test.js`
- **Linter**: ESLint (`@eslint/js`, flat config)
- **Coverage**: c8
- **Lint**: markdownlint-cli for `.md` files
## File Conventions
- `scripts/` — Node.js utilities, hooks. CommonJS (`require`/`module.exports`)
- `agents/`, `commands/`, `skills/`, `rules/` — Markdown with YAML frontmatter
- `tests/` — Mirror the `scripts/` structure. Test files named `*.test.js`
- File naming: **lowercase with hyphens** (e.g. `session-start.js`, `post-edit-format.js`)
## Code Style
- CommonJS only — no ESM (`import`/`export`) unless file ends in `.mjs`
- No TypeScript — plain `.js` throughout
- Prefer `const` over `let`; never `var`
- Keep hook scripts under 200 lines — extract helpers to `scripts/lib/`
- All hooks must `exit 0` on non-critical errors (never block tool execution unexpectedly)
## Hook Development
- Hook scripts normally receive JSON on stdin, but hooks routed through `scripts/hooks/run-with-flags.js` can export `run(rawInput)` and let the wrapper handle parsing/gating
- Async hooks: mark `"async": true` in `settings.json` with a timeout ≤30s
- Blocking hooks (PreToolUse, stop): keep fast (<200ms) — no network calls
- Use `run-with-flags.js` wrapper for all hooks so `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` runtime gating works
- Always exit 0 on parse errors; log to stderr with `[HookName]` prefix
## Testing Requirements
- Run `node tests/run-all.js` before committing
- New scripts in `scripts/lib/` require a matching test in `tests/lib/`
- New hooks require at least one integration test in `tests/hooks/`
## Markdown / Agent Files
- Agents: YAML frontmatter with `name`, `description`, `tools`, `model`
- Skills: sections — When to Use, How It Works, Examples
- Commands: `description:` frontmatter line required
- Run `npx markdownlint-cli '**/*.md' --ignore node_modules` before committing

49
.codex-plugin/README.md Normal file
View File

@@ -0,0 +1,49 @@
# .codex-plugin — Codex Native Plugin for ECC
This directory contains the **Codex plugin manifest** for Everything Claude Code.
## Structure
```
.codex-plugin/
└── plugin.json — Codex plugin manifest (name, version, skills ref, MCP ref)
.mcp.json — MCP server configurations at plugin root (NOT inside .codex-plugin/)
```
## What This Provides
- **125 skills** from `./skills/` — reusable Codex workflows for TDD, security,
code review, architecture, and more
- **6 MCP servers** — GitHub, Context7, Exa, Memory, Playwright, Sequential Thinking
## Installation
Codex plugin support is currently in preview. Once generally available:
```bash
# Install from Codex CLI
codex plugin install affaan-m/everything-claude-code
# Or reference locally during development
codex plugin install ./
# Run this from the repository root so `./` points to the repo root and `.mcp.json` resolves correctly.
```
## MCP Servers Included
| Server | Purpose |
|---|---|
| `github` | GitHub API access |
| `context7` | Live documentation lookup |
| `exa` | Neural web search |
| `memory` | Persistent memory across sessions |
| `playwright` | Browser automation & E2E testing |
| `sequential-thinking` | Step-by-step reasoning |
## Notes
- The `skills/` directory at the repo root is shared between Claude Code (`.claude-plugin/`)
and Codex (`.codex-plugin/`) — same source of truth, no duplication
- MCP server credentials are inherited from the launching environment (env vars)
- This manifest does **not** override `~/.codex/config.toml` settings

30
.codex-plugin/plugin.json Normal file
View File

@@ -0,0 +1,30 @@
{
"name": "everything-claude-code",
"version": "1.9.0",
"description": "Battle-tested Codex workflows — 125 skills, production-ready MCP configs, and agent definitions for TDD, security scanning, code review, and autonomous development.",
"author": {
"name": "Affaan Mustafa",
"email": "me@affaanmustafa.com",
"url": "https://x.com/affaanmustafa"
},
"homepage": "https://github.com/affaan-m/everything-claude-code",
"repository": "https://github.com/affaan-m/everything-claude-code",
"license": "MIT",
"keywords": ["codex", "agents", "skills", "tdd", "code-review", "security", "workflow", "automation"],
"skills": "./skills/",
"mcpServers": "./.mcp.json",
"interface": {
"displayName": "Everything Claude Code",
"shortDescription": "125 battle-tested skills for TDD, security, code review, and autonomous development.",
"longDescription": "Everything Claude Code (ECC) is a community-maintained collection of Codex skills and MCP configs evolved over 10+ months of intensive daily use. It covers TDD workflows, security scanning, code review, architecture decisions, and more — all in one installable plugin.",
"developerName": "Affaan Mustafa",
"category": "Productivity",
"capabilities": ["Read", "Write"],
"websiteURL": "https://github.com/affaan-m/everything-claude-code",
"defaultPrompt": [
"Use the tdd-workflow skill to write tests before implementation.",
"Use the security-review skill to scan for OWASP Top 10 vulnerabilities.",
"Use the code-review skill to review this PR for correctness and security."
]
}
}

View File

@@ -46,12 +46,15 @@ Available skills:
Treat the project-local `.codex/config.toml` as the default Codex baseline for ECC. The current ECC baseline enables GitHub, Context7, Exa, Memory, Playwright, and Sequential Thinking; add heavier extras in `~/.codex/config.toml` only when a task actually needs them. Treat the project-local `.codex/config.toml` as the default Codex baseline for ECC. The current ECC baseline enables GitHub, Context7, Exa, Memory, Playwright, and Sequential Thinking; add heavier extras in `~/.codex/config.toml` only when a task actually needs them.
ECC's canonical Codex section name is `[mcp_servers.context7]`. The launcher package remains `@upstash/context7-mcp`; only the TOML section name is normalized for consistency with `codex mcp list` and the reference config.
### Automatic config.toml merging ### Automatic config.toml merging
The sync script (`scripts/sync-ecc-to-codex.sh`) uses a Node-based TOML parser to safely merge ECC MCP servers into `~/.codex/config.toml`: The sync script (`scripts/sync-ecc-to-codex.sh`) uses a Node-based TOML parser to safely merge ECC MCP servers into `~/.codex/config.toml`:
- **Add-only by default** — missing ECC servers are appended; existing servers are never modified or removed. - **Add-only by default** — missing ECC servers are appended; existing servers are never modified or removed.
- **7 managed servers** — Supabase, Playwright, Context7, Exa, GitHub, Memory, Sequential Thinking. - **7 managed servers** — Supabase, Playwright, Context7, Exa, GitHub, Memory, Sequential Thinking.
- **Canonical naming** — ECC manages Context7 as `[mcp_servers.context7]`; legacy `[mcp_servers.context7-mcp]` entries are treated as aliases during updates.
- **Package-manager aware** — uses the project's configured package manager (npm/pnpm/yarn/bun) instead of hardcoding `pnpm`. - **Package-manager aware** — uses the project's configured package manager (npm/pnpm/yarn/bun) instead of hardcoding `pnpm`.
- **Drift warnings** — if an existing server's config differs from the ECC recommendation, the script logs a warning. - **Drift warnings** — if an existing server's config differs from the ECC recommendation, the script logs a warning.
- **`--update-mcp`** — explicitly replaces all ECC-managed servers with the latest recommended config (safely removes subtables like `[mcp_servers.supabase.env]`). - **`--update-mcp`** — explicitly replaces all ECC-managed servers with the latest recommended config (safely removes subtables like `[mcp_servers.supabase.env]`).

View File

@@ -27,7 +27,10 @@ notify = [
"-sound", "default", "-sound", "default",
] ]
# Prefer AGENTS.md and project-local .codex/AGENTS.md for instructions. # Persistent instructions are appended to every prompt (additive, unlike
# model_instructions_file which replaces AGENTS.md).
persistent_instructions = "Follow project AGENTS.md guidelines. Use available MCP servers when they can help."
# model_instructions_file replaces built-in instructions instead of AGENTS.md, # model_instructions_file replaces built-in instructions instead of AGENTS.md,
# so leave it unset unless you intentionally want a single override file. # so leave it unset unless you intentionally want a single override file.
# model_instructions_file = "/absolute/path/to/instructions.md" # model_instructions_file = "/absolute/path/to/instructions.md"
@@ -38,10 +41,14 @@ notify = [
[mcp_servers.github] [mcp_servers.github]
command = "npx" command = "npx"
args = ["-y", "@modelcontextprotocol/server-github"] args = ["-y", "@modelcontextprotocol/server-github"]
startup_timeout_sec = 30
[mcp_servers.context7] [mcp_servers.context7]
command = "npx" command = "npx"
# Canonical Codex section name is `context7`; the package itself remains
# `@upstash/context7-mcp`.
args = ["-y", "@upstash/context7-mcp@latest"] args = ["-y", "@upstash/context7-mcp@latest"]
startup_timeout_sec = 30
[mcp_servers.exa] [mcp_servers.exa]
url = "https://mcp.exa.ai/mcp" url = "https://mcp.exa.ai/mcp"
@@ -49,14 +56,17 @@ url = "https://mcp.exa.ai/mcp"
[mcp_servers.memory] [mcp_servers.memory]
command = "npx" command = "npx"
args = ["-y", "@modelcontextprotocol/server-memory"] args = ["-y", "@modelcontextprotocol/server-memory"]
startup_timeout_sec = 30
[mcp_servers.playwright] [mcp_servers.playwright]
command = "npx" command = "npx"
args = ["-y", "@playwright/mcp@latest", "--extension"] args = ["-y", "@playwright/mcp@latest", "--extension"]
startup_timeout_sec = 30
[mcp_servers.sequential-thinking] [mcp_servers.sequential-thinking]
command = "npx" command = "npx"
args = ["-y", "@modelcontextprotocol/server-sequential-thinking"] args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]
startup_timeout_sec = 30
# Additional MCP servers (uncomment as needed): # Additional MCP servers (uncomment as needed):
# [mcp_servers.supabase] # [mcp_servers.supabase]

View File

@@ -44,7 +44,7 @@ jobs:
# Package manager setup # Package manager setup
- name: Setup pnpm - name: Setup pnpm
if: matrix.pm == 'pnpm' if: matrix.pm == 'pnpm'
uses: pnpm/action-setup@v4 uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
with: with:
version: latest version: latest
@@ -57,7 +57,7 @@ jobs:
- name: Setup Bun - name: Setup Bun
if: matrix.pm == 'bun' if: matrix.pm == 'bun'
uses: oven-sh/setup-bun@v2 uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
# Cache configuration # Cache configuration
- name: Get npm cache directory - name: Get npm cache directory

View File

@@ -20,11 +20,13 @@ jobs:
- name: Validate version tag - name: Validate version tag
run: | run: |
if ! [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then if ! [[ "${REF_NAME}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
echo "Invalid version tag format. Expected vX.Y.Z" echo "Invalid version tag format. Expected vX.Y.Z"
exit 1 exit 1
fi fi
env:
REF_NAME: ${{ github.ref_name }}
- name: Verify plugin.json version matches tag - name: Verify plugin.json version matches tag
env: env:
TAG_NAME: ${{ github.ref_name }} TAG_NAME: ${{ github.ref_name }}
@@ -61,7 +63,7 @@ jobs:
EOF EOF
- name: Create GitHub Release - name: Create GitHub Release
uses: softprops/action-gh-release@v2 uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
with: with:
body_path: release_body.md body_path: release_body.md
generate_release_notes: true generate_release_notes: true

View File

@@ -49,7 +49,7 @@ jobs:
EOF EOF
- name: Create GitHub Release - name: Create GitHub Release
uses: softprops/action-gh-release@v2 uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
with: with:
tag_name: ${{ inputs.tag }} tag_name: ${{ inputs.tag }}
body_path: release_body.md body_path: release_body.md

View File

@@ -36,7 +36,7 @@ jobs:
- name: Setup pnpm - name: Setup pnpm
if: inputs.package-manager == 'pnpm' if: inputs.package-manager == 'pnpm'
uses: pnpm/action-setup@v4 uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
with: with:
version: latest version: latest
@@ -49,7 +49,7 @@ jobs:
- name: Setup Bun - name: Setup Bun
if: inputs.package-manager == 'bun' if: inputs.package-manager == 'bun'
uses: oven-sh/setup-bun@v2 uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
- name: Get npm cache directory - name: Get npm cache directory
if: inputs.package-manager == 'npm' if: inputs.package-manager == 'npm'

3
.gitignore vendored
View File

@@ -83,6 +83,9 @@ temp/
*.bak *.bak
*.backup *.backup
# Observer temp files (continuous-learning-v2)
.observer-tmp/
# Rust build artifacts # Rust build artifacts
ecc2/target/ ecc2/target/

27
.mcp.json Normal file
View File

@@ -0,0 +1,27 @@
{
"mcpServers": {
"github": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-github"]
},
"context7": {
"command": "npx",
"args": ["-y", "@upstash/context7-mcp@2.1.4"]
},
"exa": {
"url": "https://mcp.exa.ai/mcp"
},
"memory": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-memory"]
},
"playwright": {
"command": "npx",
"args": ["-y", "@playwright/mcp@0.0.68", "--extension"]
},
"sequential-thinking": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
}
}
}

View File

@@ -1,6 +1,6 @@
# Everything Claude Code (ECC) — Agent Instructions # Everything Claude Code (ECC) — Agent Instructions
This is a **production-ready AI coding plugin** providing 28 specialized agents, 125 skills, 60 commands, and automated hook workflows for software development. This is a **production-ready AI coding plugin** providing 28 specialized agents, 126 skills, 60 commands, and automated hook workflows for software development.
**Version:** 1.9.0 **Version:** 1.9.0
@@ -142,7 +142,7 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
``` ```
agents/ — 28 specialized subagents agents/ — 28 specialized subagents
skills/ — 125 workflow skills and domain knowledge skills/ — 126 workflow skills and domain knowledge
commands/ — 60 slash commands commands/ — 60 slash commands
hooks/ — Trigger-based automations hooks/ — Trigger-based automations
rules/ — Always-follow guidelines (common + per-language) rules/ — Always-follow guidelines (common + per-language)

159
COMMANDS-QUICK-REF.md Normal file
View File

@@ -0,0 +1,159 @@
# Commands Quick Reference
> 59 slash commands installed globally. Type `/` in any Claude Code session to invoke.
---
## Core Workflow
| Command | What it does |
|---------|-------------|
| `/plan` | Restate requirements, assess risks, write step-by-step implementation plan — **waits for your confirm before touching code** |
| `/tdd` | Enforce test-driven development: scaffold interface → write failing test → implement → verify 80%+ coverage |
| `/code-review` | Full code quality, security, and maintainability review of changed files |
| `/build-fix` | Detect and fix build errors — delegates to the right build-resolver agent automatically |
| `/verify` | Run the full verification loop: build → lint → test → type-check |
| `/quality-gate` | Quality gate check against project standards |
---
## Testing
| Command | What it does |
|---------|-------------|
| `/tdd` | Universal TDD workflow (any language) |
| `/e2e` | Generate + run Playwright end-to-end tests, capture screenshots/videos/traces |
| `/test-coverage` | Report test coverage, identify gaps |
| `/go-test` | TDD workflow for Go (table-driven, 80%+ coverage with `go test -cover`) |
| `/kotlin-test` | TDD for Kotlin (Kotest + Kover) |
| `/rust-test` | TDD for Rust (cargo test, integration tests) |
| `/cpp-test` | TDD for C++ (GoogleTest + gcov/lcov) |
---
## Code Review
| Command | What it does |
|---------|-------------|
| `/code-review` | Universal code review |
| `/python-review` | Python — PEP 8, type hints, security, idiomatic patterns |
| `/go-review` | Go — idiomatic patterns, concurrency safety, error handling |
| `/kotlin-review` | Kotlin — null safety, coroutine safety, clean architecture |
| `/rust-review` | Rust — ownership, lifetimes, unsafe usage |
| `/cpp-review` | C++ — memory safety, modern idioms, concurrency |
---
## Build Fixers
| Command | What it does |
|---------|-------------|
| `/build-fix` | Auto-detect language and fix build errors |
| `/go-build` | Fix Go build errors and `go vet` warnings |
| `/kotlin-build` | Fix Kotlin/Gradle compiler errors |
| `/rust-build` | Fix Rust build + borrow checker issues |
| `/cpp-build` | Fix C++ CMake and linker problems |
| `/gradle-build` | Fix Gradle errors for Android / KMP |
---
## Planning & Architecture
| Command | What it does |
|---------|-------------|
| `/plan` | Implementation plan with risk assessment |
| `/multi-plan` | Multi-model collaborative planning |
| `/multi-workflow` | Multi-model collaborative development |
| `/multi-backend` | Backend-focused multi-model development |
| `/multi-frontend` | Frontend-focused multi-model development |
| `/multi-execute` | Multi-model collaborative execution |
| `/orchestrate` | Guide for tmux/worktree multi-agent orchestration |
| `/devfleet` | Orchestrate parallel Claude Code agents via DevFleet |
---
## Session Management
| Command | What it does |
|---------|-------------|
| `/save-session` | Save current session state to `~/.claude/session-data/` |
| `/resume-session` | Load the most recent saved session from the canonical session store and resume from where you left off |
| `/sessions` | Browse, search, and manage session history with aliases from `~/.claude/session-data/` (with legacy reads from `~/.claude/sessions/`) |
| `/checkpoint` | Mark a checkpoint in the current session |
| `/aside` | Answer a quick side question without losing current task context |
| `/context-budget` | Analyse context window usage — find token overhead, optimise |
---
## Learning & Improvement
| Command | What it does |
|---------|-------------|
| `/learn` | Extract reusable patterns from the current session |
| `/learn-eval` | Extract patterns + self-evaluate quality before saving |
| `/evolve` | Analyse learned instincts, suggest evolved skill structures |
| `/promote` | Promote project-scoped instincts to global scope |
| `/instinct-status` | Show all learned instincts (project + global) with confidence scores |
| `/instinct-export` | Export instincts to a file |
| `/instinct-import` | Import instincts from a file or URL |
| `/skill-create` | Analyse local git history → generate a reusable skill |
| `/skill-health` | Skill portfolio health dashboard with analytics |
| `/rules-distill` | Scan skills, extract cross-cutting principles, distill into rules |
---
## Refactoring & Cleanup
| Command | What it does |
|---------|-------------|
| `/refactor-clean` | Remove dead code, consolidate duplicates, clean up structure |
| `/prompt-optimize` | Analyse a draft prompt and output an optimised ECC-enriched version |
---
## Docs & Research
| Command | What it does |
|---------|-------------|
| `/docs` | Look up current library/API documentation via Context7 |
| `/update-docs` | Update project documentation |
| `/update-codemaps` | Regenerate codemaps for the codebase |
---
## Loops & Automation
| Command | What it does |
|---------|-------------|
| `/loop-start` | Start a recurring agent loop on an interval |
| `/loop-status` | Check status of running loops |
| `/claw` | Start NanoClaw v2 — persistent REPL with model routing, skill hot-load, branching, and metrics |
---
## Project & Infrastructure
| Command | What it does |
|---------|-------------|
| `/projects` | List known projects and their instinct statistics |
| `/harness-audit` | Audit the agent harness configuration for reliability and cost |
| `/eval` | Run the evaluation harness |
| `/model-route` | Route a task to the right model (Haiku / Sonnet / Opus) |
| `/pm2` | PM2 process manager initialisation |
| `/setup-pm` | Configure package manager (npm / pnpm / yarn / bun) |
---
## Quick Decision Guide
```
Starting a new feature? → /plan first, then /tdd
Code just written? → /code-review
Build broken? → /build-fix
Need live docs? → /docs <library>
Session about to end? → /save-session or /learn-eval
Resuming next day? → /resume-session
Context getting heavy? → /context-budget then /checkpoint
Want to extract what you learned? → /learn-eval then /evolve
Running repeated tasks? → /loop-start
```

View File

@@ -73,6 +73,13 @@ git add . && git commit -m "feat: add my-skill" && git push -u origin feat/my-co
Skills are knowledge modules that Claude Code loads based on context. Skills are knowledge modules that Claude Code loads based on context.
> **📚 Comprehensive Guide:** For detailed guidance on creating effective skills, see [Skill Development Guide](docs/SKILL-DEVELOPMENT-GUIDE.md). It covers:
> - Skill architecture and categories
> - Writing effective content with examples
> - Best practices and common patterns
> - Testing and validation
> - Complete examples gallery
### Directory Structure ### Directory Structure
``` ```
@@ -86,7 +93,7 @@ skills/
```markdown ```markdown
--- ---
name: your-skill-name name: your-skill-name
description: Brief description shown in skill list description: Brief description shown in skill list and used for auto-activation
origin: ECC origin: ECC
--- ---
@@ -94,6 +101,10 @@ origin: ECC
Brief overview of what this skill covers. Brief overview of what this skill covers.
## When to Activate
Describe scenarios where Claude should use this skill. This is critical for auto-activation.
## Core Concepts ## Core Concepts
Explain key patterns and guidelines. Explain key patterns and guidelines.
@@ -107,33 +118,54 @@ function example() {
} }
\`\`\` \`\`\`
## Anti-Patterns
Show what NOT to do with examples.
## Best Practices ## Best Practices
- Actionable guidelines - Actionable guidelines
- Do's and don'ts - Do's and don'ts
- Common pitfalls to avoid - Common pitfalls to avoid
## When to Use ## Related Skills
Describe scenarios where this skill applies. Link to complementary skills (e.g., `related-skill-1`, `related-skill-2`).
``` ```
### Skill Categories
| Category | Purpose | Examples |
|----------|---------|----------|
| **Language Standards** | Idioms, conventions, best practices | `python-patterns`, `golang-patterns` |
| **Framework Patterns** | Framework-specific guidance | `django-patterns`, `nextjs-patterns` |
| **Workflow** | Step-by-step processes | `tdd-workflow`, `refactoring-workflow` |
| **Domain Knowledge** | Specialized domains | `security-review`, `api-design` |
| **Tool Integration** | Tool/library usage | `docker-patterns`, `supabase-patterns` |
| **Template** | Project-specific skill templates | `project-guidelines-example` |
### Skill Checklist ### Skill Checklist
- [ ] Focused on one domain/technology - [ ] Focused on one domain/technology (not too broad)
- [ ] Includes practical code examples - [ ] Includes "When to Activate" section for auto-activation
- [ ] Under 500 lines - [ ] Includes practical, copy-pasteable code examples
- [ ] Shows anti-patterns (what NOT to do)
- [ ] Under 500 lines (800 max)
- [ ] Uses clear section headers - [ ] Uses clear section headers
- [ ] Tested with Claude Code - [ ] Tested with Claude Code
- [ ] Links to related skills
- [ ] No sensitive data (API keys, tokens, paths)
### Example Skills ### Example Skills
| Skill | Purpose | | Skill | Category | Purpose |
|-------|---------| |-------|----------|---------|
| `coding-standards/` | TypeScript/JavaScript patterns | | `coding-standards/` | Language Standards | TypeScript/JavaScript patterns |
| `frontend-patterns/` | React and Next.js best practices | | `frontend-patterns/` | Framework Patterns | React and Next.js best practices |
| `backend-patterns/` | API and database patterns | | `backend-patterns/` | Framework Patterns | API and database patterns |
| `security-review/` | Security checklist | | `security-review/` | Domain Knowledge | Security checklist |
| `tdd-workflow/` | Workflow | Test-driven development process |
| `project-guidelines-example/` | Template | Project-specific skill template |
--- ---

122
EVALUATION.md Normal file
View File

@@ -0,0 +1,122 @@
# Repo Evaluation vs Current Setup
**Date:** 2026-03-21
**Branch:** `claude/evaluate-repo-comparison-ASZ9Y`
---
## Current Setup (`~/.claude/`)
The active Claude Code installation is near-minimal:
| Component | Current |
|-----------|---------|
| Agents | 0 |
| Skills | 0 installed |
| Commands | 0 |
| Hooks | 1 (Stop: git check) |
| Rules | 0 |
| MCP configs | 0 |
**Installed hooks:**
- `Stop` → `stop-hook-git-check.sh` — blocks session end if there are uncommitted changes or unpushed commits
**Installed permissions:**
- `Skill` — allows skill invocations
**Plugins:** Only `blocklist.json` (no active plugins installed)
---
## This Repo (`everything-claude-code` v1.9.0)
| Component | Repo |
|-----------|------|
| Agents | 28 |
| Skills | 116 |
| Commands | 59 |
| Rules sets | 12 languages + common (60+ rule files) |
| Hooks | Comprehensive system (PreToolUse, PostToolUse, SessionStart, Stop) |
| MCP configs | 1 (Context7 + others) |
| Schemas | 9 JSON validators |
| Scripts/CLI | 46+ Node.js modules + multiple CLIs |
| Tests | 58 test files |
| Install profiles | core, developer, security, research, full |
| Supported harnesses | Claude Code, Codex, Cursor, OpenCode |
---
## Gap Analysis
### Hooks
- **Current:** 1 Stop hook (git hygiene check)
- **Repo:** Full hook matrix covering:
- Dangerous command blocking (`rm -rf`, force pushes)
- Auto-formatting on file edits
- Dev server tmux enforcement
- Cost tracking
- Session evaluation and governance capture
- MCP health monitoring
### Agents (28 missing)
The repo provides specialized agents for every major workflow:
- Language reviewers: TypeScript, Python, Go, Java, Kotlin, Rust, C++, Flutter
- Build resolvers: Go, Java, Kotlin, Rust, C++, PyTorch
- Workflow agents: planner, tdd-guide, code-reviewer, security-reviewer, architect
- Automation: loop-operator, doc-updater, refactor-cleaner, harness-optimizer
### Skills (116 missing)
Domain knowledge modules covering:
- Language patterns (Python, Go, Kotlin, Rust, C++, Java, Swift, Perl, Laravel, Django)
- Testing strategies (TDD, E2E, coverage)
- Architecture patterns (backend, frontend, API design, database migrations)
- AI/ML workflows (Claude API, eval harness, agent loops, cost-aware pipelines)
- Business workflows (investor materials, market research, content engine)
### Commands (59 missing)
- `/tdd`, `/plan`, `/e2e`, `/code-review` — core dev workflows
- `/sessions`, `/save-session`, `/resume-session` — session persistence
- `/orchestrate`, `/multi-plan`, `/multi-execute` — multi-agent coordination
- `/learn`, `/skill-create`, `/evolve` — continuous improvement
- `/build-fix`, `/verify`, `/quality-gate` — build/quality automation
### Rules (60+ files missing)
Language-specific coding style, patterns, testing, and security guidelines for:
TypeScript, Python, Go, Java, Kotlin, Rust, C++, C#, Swift, Perl, PHP, and common/cross-language rules.
---
## Recommendations
### Immediate value (core install)
Run `ecc install --profile core` to get:
- Core agents (code-reviewer, planner, tdd-guide, security-reviewer)
- Essential skills (tdd-workflow, coding-standards, security-review)
- Key commands (/tdd, /plan, /code-review, /build-fix)
### Full install
Run `ecc install --profile full` to get all 28 agents, 116 skills, and 59 commands.
### Hooks upgrade
The current Stop hook is solid. The repo's `hooks.json` adds:
- Dangerous command blocking (safety)
- Auto-formatting (quality)
- Cost tracking (observability)
- Session evaluation (learning)
### Rules
Adding language rules (e.g., TypeScript, Python) provides always-on coding guidelines without relying on per-session prompts.
---
## What the Current Setup Does Well
- The `stop-hook-git-check.sh` Stop hook is production-quality and already enforces good git hygiene
- The `Skill` permission is correctly configured
- The setup is clean with no conflicts or cruft
---
## Summary
The current setup is essentially a blank slate with one well-implemented git hygiene hook. This repo provides a complete, production-tested enhancement layer covering agents, skills, commands, hooks, and rules — with a selective install system so you can add exactly what you need without bloating the configuration.

View File

@@ -207,7 +207,7 @@ npm install # or: pnpm install | yarn install | bun install
npx ecc-install typescript npx ecc-install typescript
``` ```
For manual install instructions see the README in the `rules/` folder. For manual install instructions see the README in the `rules/` folder. When copying rules manually, copy the whole language directory (for example `rules/common` or `rules/golang`), not the files inside it, so relative references keep working and filenames do not collide.
### Step 3: Start Using ### Step 3: Start Using
@@ -222,7 +222,7 @@ For manual install instructions see the README in the `rules/` folder.
/plugin list everything-claude-code@everything-claude-code /plugin list everything-claude-code@everything-claude-code
``` ```
**That's it!** You now have access to 28 agents, 125 skills, and 60 commands. **That's it!** You now have access to 28 agents, 126 skills, and 60 commands.
### Multi-model commands require additional setup ### Multi-model commands require additional setup
@@ -638,16 +638,16 @@ This gives you instant access to all commands, agents, skills, and hooks.
> >
> # Option A: User-level rules (applies to all projects) > # Option A: User-level rules (applies to all projects)
> mkdir -p ~/.claude/rules > mkdir -p ~/.claude/rules
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/ > cp -r everything-claude-code/rules/common ~/.claude/rules/
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack > cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/ > cp -r everything-claude-code/rules/python ~/.claude/rules/
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ > cp -r everything-claude-code/rules/golang ~/.claude/rules/
> cp -r everything-claude-code/rules/php/* ~/.claude/rules/ > cp -r everything-claude-code/rules/php ~/.claude/rules/
> >
> # Option B: Project-level rules (applies to current project only) > # Option B: Project-level rules (applies to current project only)
> mkdir -p .claude/rules > mkdir -p .claude/rules
> cp -r everything-claude-code/rules/common/* .claude/rules/ > cp -r everything-claude-code/rules/common .claude/rules/
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # pick your stack > cp -r everything-claude-code/rules/typescript .claude/rules/ # pick your stack
> ``` > ```
--- ---
@@ -663,12 +663,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
# Copy agents to your Claude config # Copy agents to your Claude config
cp everything-claude-code/agents/*.md ~/.claude/agents/ cp everything-claude-code/agents/*.md ~/.claude/agents/
# Copy rules (common + language-specific) # Copy rules directories (common + language-specific)
cp -r everything-claude-code/rules/common/* ~/.claude/rules/ mkdir -p ~/.claude/rules
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack cp -r everything-claude-code/rules/common ~/.claude/rules/
cp -r everything-claude-code/rules/python/* ~/.claude/rules/ cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ cp -r everything-claude-code/rules/python ~/.claude/rules/
cp -r everything-claude-code/rules/php/* ~/.claude/rules/ cp -r everything-claude-code/rules/golang ~/.claude/rules/
cp -r everything-claude-code/rules/php ~/.claude/rules/
# Copy commands # Copy commands
cp everything-claude-code/commands/*.md ~/.claude/commands/ cp everything-claude-code/commands/*.md ~/.claude/commands/
@@ -874,7 +875,8 @@ Yes. Use Option 2 (manual installation) and copy only what you need:
cp everything-claude-code/agents/*.md ~/.claude/agents/ cp everything-claude-code/agents/*.md ~/.claude/agents/
# Just rules # Just rules
cp -r everything-claude-code/rules/common/* ~/.claude/rules/ mkdir -p ~/.claude/rules/
cp -r everything-claude-code/rules/common ~/.claude/rules/
``` ```
Each component is fully independent. Each component is fully independent.
@@ -1023,6 +1025,8 @@ cp .codex/config.toml ~/.codex/config.toml
The sync script safely merges ECC MCP servers into your existing `~/.codex/config.toml` using an **add-only** strategy — it never removes or modifies your existing servers. Run with `--dry-run` to preview changes, or `--update-mcp` to force-refresh ECC servers to the latest recommended config. The sync script safely merges ECC MCP servers into your existing `~/.codex/config.toml` using an **add-only** strategy — it never removes or modifies your existing servers. Run with `--dry-run` to preview changes, or `--update-mcp` to force-refresh ECC servers to the latest recommended config.
For Context7, ECC uses the canonical Codex section name `[mcp_servers.context7]` while still launching the `@upstash/context7-mcp` package. If you already have a legacy `[mcp_servers.context7-mcp]` entry, `--update-mcp` migrates it to the canonical section name.
Codex macOS app: Codex macOS app:
- Open this repository as your workspace. - Open this repository as your workspace.
- The root `AGENTS.md` is auto-detected. - The root `AGENTS.md` is auto-detected.
@@ -1109,7 +1113,7 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|---------|-------------|----------|--------| |---------|-------------|----------|--------|
| Agents | ✅ 28 agents | ✅ 12 agents | **Claude Code leads** | | Agents | ✅ 28 agents | ✅ 12 agents | **Claude Code leads** |
| Commands | ✅ 60 commands | ✅ 31 commands | **Claude Code leads** | | Commands | ✅ 60 commands | ✅ 31 commands | **Claude Code leads** |
| Skills | ✅ 125 skills | ✅ 37 skills | **Claude Code leads** | | Skills | ✅ 126 skills | ✅ 37 skills | **Claude Code leads** |
| Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** | | Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** |
| Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** | | Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** |
| MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** | | MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** |

View File

@@ -82,14 +82,17 @@
# 首先克隆仓库 # 首先克隆仓库
git clone https://github.com/affaan-m/everything-claude-code.git git clone https://github.com/affaan-m/everything-claude-code.git
# 复制规则(通用 + 语言特定) # 复制规则目录(通用 + 语言特定)
cp -r everything-claude-code/rules/common/* ~/.claude/rules/ mkdir -p ~/.claude/rules
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈 cp -r everything-claude-code/rules/common ~/.claude/rules/
cp -r everything-claude-code/rules/python/* ~/.claude/rules/ cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ cp -r everything-claude-code/rules/python ~/.claude/rules/
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/ cp -r everything-claude-code/rules/golang ~/.claude/rules/
cp -r everything-claude-code/rules/perl ~/.claude/rules/
``` ```
复制规则时,请复制整个目录(例如 `rules/common``rules/golang`),而不是复制目录内的文件;这样可以保留相对引用,并避免不同规则集中的同名文件互相覆盖。
### 第三步:开始使用 ### 第三步:开始使用
```bash ```bash
@@ -366,11 +369,20 @@ everything-claude-code/
> git clone https://github.com/affaan-m/everything-claude-code.git > git clone https://github.com/affaan-m/everything-claude-code.git
> >
> # 选项 A用户级规则应用于所有项目 > # 选项 A用户级规则应用于所有项目
> cp -r everything-claude-code/rules/* ~/.claude/rules/ > mkdir -p ~/.claude/rules
> cp -r everything-claude-code/rules/common ~/.claude/rules/
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/
> cp -r everything-claude-code/rules/python ~/.claude/rules/
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
> cp -r everything-claude-code/rules/perl ~/.claude/rules/
> >
> # 选项 B项目级规则仅应用于当前项目 > # 选项 B项目级规则仅应用于当前项目
> mkdir -p .claude/rules > mkdir -p .claude/rules
> cp -r everything-claude-code/rules/* .claude/rules/ > cp -r everything-claude-code/rules/common .claude/rules/
> cp -r everything-claude-code/rules/typescript .claude/rules/
> cp -r everything-claude-code/rules/python .claude/rules/
> cp -r everything-claude-code/rules/golang .claude/rules/
> cp -r everything-claude-code/rules/perl .claude/rules/
> ``` > ```
--- ---
@@ -386,12 +398,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
# 将代理复制到你的 Claude 配置 # 将代理复制到你的 Claude 配置
cp everything-claude-code/agents/*.md ~/.claude/agents/ cp everything-claude-code/agents/*.md ~/.claude/agents/
# 复制规则(通用 + 语言特定) # 复制规则目录(通用 + 语言特定)
cp -r everything-claude-code/rules/common/* ~/.claude/rules/ mkdir -p ~/.claude/rules
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈 cp -r everything-claude-code/rules/common ~/.claude/rules/
cp -r everything-claude-code/rules/python/* ~/.claude/rules/ cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ cp -r everything-claude-code/rules/python ~/.claude/rules/
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/ cp -r everything-claude-code/rules/golang ~/.claude/rules/
cp -r everything-claude-code/rules/perl ~/.claude/rules/
# 复制命令 # 复制命令
cp everything-claude-code/commands/*.md ~/.claude/commands/ cp everything-claude-code/commands/*.md ~/.claude/commands/

196
REPO-ASSESSMENT.md Normal file
View File

@@ -0,0 +1,196 @@
# Repo & Fork Assessment + Setup Recommendations
**Date:** 2026-03-21
---
## What's Available
### Repo: `Infiniteyieldai/everything-claude-code`
This is a **fork of `affaan-m/everything-claude-code`** (the upstream project with 50K+ stars, 6K+ forks).
| Attribute | Value |
|-----------|-------|
| Version | 1.9.0 (current) |
| Status | Clean fork — 1 commit ahead of upstream `main` (the EVALUATION.md doc added in this session) |
| Remote branches | `main`, `claude/evaluate-repo-comparison-ASZ9Y` |
| Upstream sync | Fully synced — last upstream commit merged was the zh-CN docs PR (#728) |
| License | MIT |
**This is the right repo to work from.** It's the latest upstream version with no divergence or merge conflicts.
---
### Current `~/.claude/` Installation
| Component | Installed | Available in Repo |
|-----------|-----------|-------------------|
| Agents | 0 | 28 |
| Skills | 0 | 116 |
| Commands | 0 | 59 |
| Rules | 0 | 60+ files (12 languages) |
| Hooks | 1 (git Stop check) | Full PreToolUse/PostToolUse matrix |
| MCP configs | 0 | 1 (Context7) |
The existing Stop hook (`stop-hook-git-check.sh`) is solid — blocks session end on uncommitted/unpushed work. Keep it.
---
## Install Profile Recommendations
The repo ships 5 install profiles. Choose based on your primary use case:
### Profile: `core` (Minimum viable setup)
> Fastest to install. Gets you commands, core agents, hooks runtime, and quality workflow.
**Best for:** Trying ECC out, minimal footprint, or a constrained environment.
```bash
node scripts/install-plan.js --profile core
node scripts/install-apply.js
```
**Installs:** rules-core, agents-core, commands-core, hooks-runtime, platform-configs, workflow-quality
---
### Profile: `developer` (Recommended for daily dev work)
> The default engineering profile for most ECC users.
**Best for:** General software development across app codebases.
```bash
node scripts/install-plan.js --profile developer
node scripts/install-apply.js
```
**Adds over core:** framework-language skills, database patterns, orchestration commands
---
### Profile: `security`
> Baseline runtime + security-specific agents and rules.
**Best for:** Security-focused workflows, code audits, vulnerability reviews.
---
### Profile: `research`
> Investigation, synthesis, and publishing workflows.
**Best for:** Content creation, investor materials, market research, cross-posting.
---
### Profile: `full`
> Everything — all 18 modules.
**Best for:** Power users who want the complete toolkit.
```bash
node scripts/install-plan.js --profile full
node scripts/install-apply.js
```
---
## Priority Additions (High Value, Low Risk)
Regardless of profile, these components add immediate value:
### 1. Core Agents (highest ROI)
| Agent | Why it matters |
|-------|----------------|
| `planner.md` | Breaks complex tasks into implementation plans |
| `code-reviewer.md` | Quality and maintainability review |
| `tdd-guide.md` | TDD workflow (RED→GREEN→IMPROVE) |
| `security-reviewer.md` | Vulnerability detection |
| `architect.md` | System design & scalability decisions |
### 2. Key Commands
| Command | Why it matters |
|---------|----------------|
| `/plan` | Implementation planning before coding |
| `/tdd` | Test-driven workflow |
| `/code-review` | On-demand review |
| `/build-fix` | Automated build error resolution |
| `/learn` | Extract patterns from current session |
### 3. Hook Upgrades (from `hooks/hooks.json`)
The repo's hook system adds these over the current single Stop hook:
| Hook | Trigger | Value |
|------|---------|-------|
| `block-no-verify` | PreToolUse: Bash | Blocks `--no-verify` git flag abuse |
| `pre-bash-git-push-reminder` | PreToolUse: Bash | Pre-push review reminder |
| `doc-file-warning` | PreToolUse: Write | Warns on non-standard doc files |
| `suggest-compact` | PreToolUse: Edit/Write | Suggests compaction at logical intervals |
| Continuous learning observer | PreToolUse: * | Captures tool use patterns for skill improvement |
### 4. Rules (Always-on guidelines)
The `rules/common/` directory provides baseline guidelines that fire on every session:
- `security.md` — Security guardrails
- `testing.md` — 80%+ coverage requirement
- `git-workflow.md` — Conventional commits, branch strategy
- `coding-style.md` — Cross-language style standards
---
## What to Do With the Fork
### Option A: Use as upstream tracker (current state)
Keep the fork synced with `affaan-m/everything-claude-code` upstream. Periodically merge upstream changes:
```bash
git fetch upstream
git merge upstream/main
```
Install from the local clone. This is clean and maintainable.
### Option B: Customize the fork
Add personal skills, agents, or commands to the fork. Good for:
- Business-specific domain skills (your vertical)
- Team-specific coding conventions
- Custom hooks for your stack
The fork already has the EVALUATION.md and REPO-ASSESSMENT.md docs — that's fine for a working fork.
### Option C: Install from npm (simplest for fresh machines)
```bash
npx ecc-universal install --profile developer
```
No need to clone the repo. This is the recommended install method for most users.
---
## Recommended Setup Steps
1. **Keep the existing Stop hook** — it's doing its job
2. **Run the developer profile install** from the local fork:
```bash
cd /path/to/everything-claude-code
node scripts/install-plan.js --profile developer
node scripts/install-apply.js
```
3. **Add language rules** for your primary stack (TypeScript, Python, Go, etc.):
```bash
node scripts/install-plan.js --add rules/typescript
node scripts/install-apply.js
```
4. **Enable MCP Context7** for live documentation lookup:
- Copy `mcp-configs/mcp-servers.json` into your project's `.claude/` dir
5. **Review hooks** — enable the `hooks/hooks.json` additions selectively, starting with `block-no-verify` and `pre-bash-git-push-reminder`
---
## Summary
| Question | Answer |
|----------|--------|
| Is the fork healthy? | Yes — fully synced with upstream v1.9.0 |
| Other forks to consider? | None visible in this environment; upstream `affaan-m/everything-claude-code` is the source of truth |
| Best install profile? | `developer` for day-to-day dev work |
| Biggest gap in current setup? | 0 agents installed — add at minimum: planner, code-reviewer, tdd-guide, security-reviewer |
| Quickest win? | Run `node scripts/install-plan.js --profile core && node scripts/install-apply.js` |

View File

@@ -0,0 +1,919 @@
# Skill Development Guide
A comprehensive guide to creating effective skills for Everything Claude Code (ECC).
## Table of Contents
- [What Are Skills?](#what-are-skills)
- [Skill Architecture](#skill-architecture)
- [Creating Your First Skill](#creating-your-first-skill)
- [Skill Categories](#skill-categories)
- [Writing Effective Skill Content](#writing-effective-skill-content)
- [Best Practices](#best-practices)
- [Common Patterns](#common-patterns)
- [Testing Your Skill](#testing-your-skill)
- [Submitting Your Skill](#submitting-your-skill)
- [Examples Gallery](#examples-gallery)
---
## What Are Skills?
Skills are **knowledge modules** that Claude Code loads based on context. They provide:
- **Domain expertise**: Framework patterns, language idioms, best practices
- **Workflow definitions**: Step-by-step processes for common tasks
- **Reference material**: Code snippets, checklists, decision trees
- **Context injection**: Activate when specific conditions are met
Unlike **agents** (specialized sub-assistants) or **commands** (user-triggered actions), skills are passive knowledge that Claude Code references when relevant.
### When Skills Activate
Skills activate when:
- The user's task matches the skill's domain
- Claude Code detects relevant context
- A command references a skill
- An agent needs domain knowledge
### Skill vs Agent vs Command
| Component | Purpose | Activation |
|-----------|---------|------------|
| **Skill** | Knowledge repository | Context-based (automatic) |
| **Agent** | Task executor | Explicit delegation |
| **Command** | User action | User-invoked (`/command`) |
| **Hook** | Automation | Event-triggered |
| **Rule** | Always-on guidelines | Always active |
---
## Skill Architecture
### File Structure
```
skills/
└── your-skill-name/
├── SKILL.md # Required: Main skill definition
├── examples/ # Optional: Code examples
│ ├── basic.ts
│ └── advanced.ts
└── references/ # Optional: External references
└── links.md
```
### SKILL.md Format
```markdown
---
name: skill-name
description: Brief description shown in skill list and used for auto-activation
origin: ECC
---
# Skill Title
Brief overview of what this skill covers.
## When to Activate
Describe scenarios where Claude should use this skill.
## Core Concepts
Main patterns and guidelines.
## Code Examples
\`\`\`typescript
// Practical, tested examples
\`\`\`
## Anti-Patterns
Show what NOT to do with concrete examples.
## Best Practices
- Actionable guidelines
- Do's and don'ts
## Related Skills
Link to complementary skills.
```
### YAML Frontmatter Fields
| Field | Required | Description |
|-------|----------|-------------|
| `name` | Yes | Lowercase, hyphenated identifier (e.g., `react-patterns`) |
| `description` | Yes | One-line description for skill list and auto-activation |
| `origin` | No | Source identifier (e.g., `ECC`, `community`, project name) |
| `tags` | No | Array of tags for categorization |
| `version` | No | Skill version for tracking updates |
---
## Creating Your First Skill
### Step 1: Choose a Focus
Good skills are **focused and actionable**:
| ✅ Good Focus | ❌ Too Broad |
|---------------|--------------|
| `react-hook-patterns` | `react` |
| `postgresql-indexing` | `databases` |
| `pytest-fixtures` | `python-testing` |
| `nextjs-app-router` | `nextjs` |
### Step 2: Create the Directory
```bash
mkdir -p skills/your-skill-name
```
### Step 3: Write SKILL.md
Here's a minimal template:
```markdown
---
name: your-skill-name
description: Brief description of when to use this skill
---
# Your Skill Title
Brief overview (1-2 sentences).
## When to Activate
- Scenario 1
- Scenario 2
- Scenario 3
## Core Concepts
### Concept 1
Explanation with examples.
### Concept 2
Another pattern with code.
## Code Examples
\`\`\`typescript
// Practical example
\`\`\`
## Best Practices
- Do this
- Avoid that
## Related Skills
- `related-skill-1`
- `related-skill-2`
```
### Step 4: Add Content
Write content that Claude can **immediately use**:
- ✅ Copy-pasteable code examples
- ✅ Clear decision trees
- ✅ Checklists for verification
- ❌ Vague explanations without examples
- ❌ Long prose without actionable guidance
---
## Skill Categories
### Language Standards
Focus on idiomatic code, naming conventions, and language-specific patterns.
**Examples:** `python-patterns`, `golang-patterns`, `typescript-standards`
```markdown
---
name: python-patterns
description: Python idioms, best practices, and patterns for clean, idiomatic code.
---
# Python Patterns
## When to Activate
- Writing Python code
- Refactoring Python modules
- Python code review
## Core Concepts
### Context Managers
\`\`\`python
# Always use context managers for resources
with open('file.txt') as f:
content = f.read()
\`\`\`
```
### Framework Patterns
Focus on framework-specific conventions, common patterns, and anti-patterns.
**Examples:** `django-patterns`, `nextjs-patterns`, `springboot-patterns`
```markdown
---
name: django-patterns
description: Django best practices for models, views, URLs, and templates.
---
# Django Patterns
## When to Activate
- Building Django applications
- Creating models and views
- Django URL configuration
```
### Workflow Skills
Define step-by-step processes for common development tasks.
**Examples:** `tdd-workflow`, `code-review-workflow`, `deployment-checklist`
```markdown
---
name: code-review-workflow
description: Systematic code review process for quality and security.
---
# Code Review Workflow
## Steps
1. **Understand Context** - Read PR description and linked issues
2. **Check Tests** - Verify test coverage and quality
3. **Review Logic** - Analyze implementation for correctness
4. **Check Security** - Look for vulnerabilities
5. **Verify Style** - Ensure code follows conventions
```
### Domain Knowledge
Specialized knowledge for specific domains (security, performance, etc.).
**Examples:** `security-review`, `performance-optimization`, `api-design`
```markdown
---
name: api-design
description: REST and GraphQL API design patterns, versioning, and best practices.
---
# API Design Patterns
## RESTful Conventions
| Method | Endpoint | Purpose |
|--------|----------|---------|
| GET | /resources | List all |
| GET | /resources/:id | Get one |
| POST | /resources | Create |
```
### Tool Integration
Guidance for using specific tools, libraries, or services.
**Examples:** `supabase-patterns`, `docker-patterns`, `mcp-server-patterns`
---
## Writing Effective Skill Content
### 1. Start with "When to Activate"
This section is **critical** for auto-activation. Be specific:
```markdown
## When to Activate
- Creating new React components
- Refactoring existing components
- Debugging React state issues
- Reviewing React code for best practices
```
### 2. Use "Show, Don't Tell"
Bad:
```markdown
## Error Handling
Always handle errors properly in async functions.
```
Good:
```markdown
## Error Handling
\`\`\`typescript
async function fetchData(url: string) {
try {
const response = await fetch(url)
if (!response.ok) {
throw new Error(\`HTTP \${response.status}: \${response.statusText}\`)
}
return await response.json()
} catch (error) {
console.error('Fetch failed:', error)
throw new Error('Failed to fetch data')
}
}
\`\`\`
### Key Points
- Check \`response.ok\` before parsing
- Log errors for debugging
- Re-throw with user-friendly message
```
### 3. Include Anti-Patterns
Show what NOT to do:
```markdown
## Anti-Patterns
### ❌ Direct State Mutation
\`\`\`typescript
// NEVER do this
user.name = 'New Name'
items.push(newItem)
\`\`\`
### ✅ Immutable Updates
\`\`\`typescript
// ALWAYS do this
const updatedUser = { ...user, name: 'New Name' }
const updatedItems = [...items, newItem]
\`\`\`
```
### 4. Provide Checklists
Checklists are actionable and easy to follow:
```markdown
## Pre-Deployment Checklist
- [ ] All tests passing
- [ ] No console.log in production code
- [ ] Environment variables documented
- [ ] Secrets not hardcoded
- [ ] Error handling complete
- [ ] Input validation in place
```
### 5. Use Decision Trees
For complex decisions:
```markdown
## Choosing the Right Approach
\`\`\`
Need to fetch data?
├── Single request → use fetch directly
├── Multiple independent → Promise.all()
├── Multiple dependent → await sequentially
└── With caching → use SWR or React Query
\`\`\`
```
---
## Best Practices
### DO
| Practice | Example |
|----------|---------|
| **Be specific** | "Use \`useCallback\` for event handlers passed to child components" |
| **Show examples** | Include copy-pasteable code |
| **Explain WHY** | "Immutability prevents unexpected side effects in React state" |
| **Link related skills** | "See also: \`react-performance\`" |
| **Keep focused** | One skill = one domain/concept |
| **Use sections** | Clear headers for easy scanning |
### DON'T
| Practice | Why It's Bad |
|----------|--------------|
| **Be vague** | "Write good code" - not actionable |
| **Long prose** | Hard to parse, better as code |
| **Cover too much** | "Python, Django, and Flask patterns" - too broad |
| **Skip examples** | Theory without practice is less useful |
| **Ignore anti-patterns** | Learning what NOT to do is valuable |
### Content Guidelines
1. **Length**: 200-500 lines typical, 800 lines maximum
2. **Code blocks**: Include language identifier
3. **Headers**: Use `##` and `###` hierarchy
4. **Lists**: Use `-` for unordered, `1.` for ordered
5. **Tables**: For comparisons and references
---
## Common Patterns
### Pattern 1: Standards Skill
```markdown
---
name: language-standards
description: Coding standards and best practices for [language].
---
# [Language] Coding Standards
## When to Activate
- Writing [language] code
- Code review
- Setting up linting
## Naming Conventions
| Element | Convention | Example |
|---------|------------|---------|
| Variables | camelCase | userName |
| Constants | SCREAMING_SNAKE | MAX_RETRY |
| Functions | camelCase | fetchUser |
| Classes | PascalCase | UserService |
## Code Examples
[Include practical examples]
## Linting Setup
[Include configuration]
## Related Skills
- `language-testing`
- `language-security`
```
### Pattern 2: Workflow Skill
```markdown
---
name: task-workflow
description: Step-by-step workflow for [task].
---
# [Task] Workflow
## When to Activate
- [Trigger 1]
- [Trigger 2]
## Prerequisites
- [Requirement 1]
- [Requirement 2]
## Steps
### Step 1: [Name]
[Description]
\`\`\`bash
[Commands]
\`\`\`
### Step 2: [Name]
[Description]
## Verification
- [ ] [Check 1]
- [ ] [Check 2]
## Troubleshooting
| Problem | Solution |
|---------|----------|
| [Issue] | [Fix] |
```
### Pattern 3: Reference Skill
```markdown
---
name: api-reference
description: Quick reference for [API/Library].
---
# [API/Library] Reference
## When to Activate
- Using [API/Library]
- Looking up [API/Library] syntax
## Common Operations
### Operation 1
\`\`\`typescript
// Basic usage
\`\`\`
### Operation 2
\`\`\`typescript
// Advanced usage
\`\`\`
## Configuration
[Include config examples]
## Error Handling
[Include error patterns]
```
---
## Testing Your Skill
### Local Testing
1. **Copy to Claude Code skills directory**:
```bash
cp -r skills/your-skill-name ~/.claude/skills/
```
2. **Test with Claude Code**:
```
You: "I need to [task that should trigger your skill]"
Claude should reference your skill's patterns.
```
3. **Verify activation**:
- Ask Claude to explain a concept from your skill
- Check if it uses your examples and patterns
- Ensure it follows your guidelines
### Validation Checklist
- [ ] **YAML frontmatter valid** - No syntax errors
- [ ] **Name follows convention** - lowercase-with-hyphens
- [ ] **Description is clear** - Tells when to use
- [ ] **Examples work** - Code compiles and runs
- [ ] **Links valid** - Related skills exist
- [ ] **No sensitive data** - No API keys, tokens, paths
### Code Example Testing
Test all code examples:
```bash
# From the repo root
npx tsc --noEmit skills/your-skill-name/examples/*.ts
# Or from inside the skill directory
npx tsc --noEmit examples/*.ts
# From the repo root
python -m py_compile skills/your-skill-name/examples/*.py
# Or from inside the skill directory
python -m py_compile examples/*.py
# From the repo root
go build ./skills/your-skill-name/examples/...
# Or from inside the skill directory
go build ./examples/...
```
---
## Submitting Your Skill
### 1. Fork and Clone
```bash
gh repo fork affaan-m/everything-claude-code --clone
cd everything-claude-code
```
### 2. Create Branch
```bash
git checkout -b feat/skill-your-skill-name
```
### 3. Add Your Skill
```bash
mkdir -p skills/your-skill-name
# Create SKILL.md
```
### 4. Validate
```bash
# Check YAML frontmatter
head -10 skills/your-skill-name/SKILL.md
# Verify structure
ls -la skills/your-skill-name/
# Run tests if available
npm test
```
### 5. Commit and Push
```bash
git add skills/your-skill-name/
git commit -m "feat(skills): add your-skill-name skill"
git push -u origin feat/skill-your-skill-name
```
### 6. Create Pull Request
Use this PR template:
```markdown
## Summary
Brief description of the skill and why it's valuable.
## Skill Type
- [ ] Language standards
- [ ] Framework patterns
- [ ] Workflow
- [ ] Domain knowledge
- [ ] Tool integration
## Testing
How I tested this skill locally.
## Checklist
- [ ] YAML frontmatter valid
- [ ] Code examples tested
- [ ] Follows skill guidelines
- [ ] No sensitive data
- [ ] Clear activation triggers
```
---
## Examples Gallery
### Example 1: Language Standards
**File:** `skills/rust-patterns/SKILL.md`
```markdown
---
name: rust-patterns
description: Rust idioms, ownership patterns, and best practices for safe, idiomatic code.
origin: ECC
---
# Rust Patterns
## When to Activate
- Writing Rust code
- Handling ownership and borrowing
- Error handling with Result/Option
- Implementing traits
## Ownership Patterns
### Borrowing Rules
\`\`\`rust
// ✅ CORRECT: Borrow when you don't need ownership
fn process_data(data: &str) -> usize {
data.len()
}
// ✅ CORRECT: Take ownership when you need to modify or consume
fn consume_data(data: Vec<u8>) -> String {
String::from_utf8(data).unwrap()
}
\`\`\`
## Error Handling
### Result Pattern
\`\`\`rust
use thiserror::Error;
#[derive(Error, Debug)]
pub enum AppError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Parse error: {0}")]
Parse(#[from] std::num::ParseIntError),
}
pub type AppResult<T> = Result<T, AppError>;
\`\`\`
## Related Skills
- `rust-testing`
- `rust-security`
```
### Example 2: Framework Patterns
**File:** `skills/fastapi-patterns/SKILL.md`
```markdown
---
name: fastapi-patterns
description: FastAPI patterns for routing, dependency injection, validation, and async operations.
origin: ECC
---
# FastAPI Patterns
## When to Activate
- Building FastAPI applications
- Creating API endpoints
- Implementing dependency injection
- Handling async database operations
## Project Structure
\`\`\`
app/
├── main.py # FastAPI app entry point
├── routers/ # Route handlers
│ ├── users.py
│ └── items.py
├── models/ # Pydantic models
│ ├── user.py
│ └── item.py
├── services/ # Business logic
│ └── user_service.py
└── dependencies.py # Shared dependencies
\`\`\`
## Dependency Injection
\`\`\`python
from fastapi import Depends
from sqlalchemy.ext.asyncio import AsyncSession
async def get_db() -> AsyncSession:
async with AsyncSessionLocal() as session:
yield session
@router.get("/users/{user_id}")
async def get_user(
user_id: int,
db: AsyncSession = Depends(get_db)
):
# Use db session
pass
\`\`\`
## Related Skills
- `python-patterns`
- `pydantic-validation`
```
### Example 3: Workflow Skill
**File:** `skills/refactoring-workflow/SKILL.md`
```markdown
---
name: refactoring-workflow
description: Systematic refactoring workflow for improving code quality without changing behavior.
origin: ECC
---
# Refactoring Workflow
## When to Activate
- Improving code structure
- Reducing technical debt
- Simplifying complex code
- Extracting reusable components
## Prerequisites
- All tests passing
- Git working directory clean
- Feature branch created
## Workflow Steps
### Step 1: Identify Refactoring Target
- Look for code smells (long methods, duplicate code, large classes)
- Check test coverage for target area
- Document current behavior
### Step 2: Ensure Tests Exist
\`\`\`bash
# Run tests to verify current behavior
npm test
# Check coverage for target files
npm run test:coverage
\`\`\`
### Step 3: Make Small Changes
- One refactoring at a time
- Run tests after each change
- Commit frequently
### Step 4: Verify Behavior Unchanged
\`\`\`bash
# Run full test suite
npm test
# Run E2E tests
npm run test:e2e
\`\`\`
## Common Refactorings
| Smell | Refactoring |
|-------|-------------|
| Long method | Extract method |
| Duplicate code | Extract to shared function |
| Large class | Extract class |
| Long parameter list | Introduce parameter object |
## Checklist
- [ ] Tests exist for target code
- [ ] Made small, focused changes
- [ ] Tests pass after each change
- [ ] Behavior unchanged
- [ ] Committed with clear message
```
---
## Additional Resources
- [CONTRIBUTING.md](../CONTRIBUTING.md) - General contribution guidelines
- [project-guidelines-example](../skills/project-guidelines-example/SKILL.md) - Project-specific skill template
- [coding-standards](../skills/coding-standards/SKILL.md) - Example of standards skill
- [tdd-workflow](../skills/tdd-workflow/SKILL.md) - Example of workflow skill
- [security-review](../skills/security-review/SKILL.md) - Example of domain knowledge skill
---
**Remember**: A good skill is focused, actionable, and immediately useful. Write skills you'd want to use yourself.

View File

@@ -409,7 +409,7 @@ claude --version
Claude Code v2.1+は、インストール済みプラグインの`hooks/hooks.json`(規約)を自動読み込みします。`plugin.json`で明示的に宣言するとエラーが発生します: Claude Code v2.1+は、インストール済みプラグインの`hooks/hooks.json`(規約)を自動読み込みします。`plugin.json`で明示的に宣言するとエラーが発生します:
``` ```
Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded file Duplicate hook file detected: ./hooks/hooks.json is already resolved to a loaded file
``` ```
**背景:** これは本リポジトリで複数の修正/リバート循環を引き起こしました([#29](https://github.com/affaan-m/everything-claude-code/issues/29), [#52](https://github.com/affaan-m/everything-claude-code/issues/52), [#103](https://github.com/affaan-m/everything-claude-code/issues/103)。Claude Codeバージョン間で動作が変わったため混乱がありました。今後を防ぐため回帰テストがあります。 **背景:** これは本リポジトリで複数の修正/リバート循環を引き起こしました([#29](https://github.com/affaan-m/everything-claude-code/issues/29), [#52](https://github.com/affaan-m/everything-claude-code/issues/52), [#103](https://github.com/affaan-m/everything-claude-code/issues/103)。Claude Codeバージョン間で動作が変わったため混乱がありました。今後を防ぐため回帰テストがあります。

View File

@@ -77,9 +77,9 @@ model: opus
各問題について: 各問題について:
``` ```
[CRITICAL] ハードコードされたAPIキー [CRITICAL] ハードコードされたAPIキー
File: src/api/client.ts:42 ファイル: src/api/client.ts:42
Issue: APIキーがソースコードに公開されている 問題: APIキーがソースコードに公開されている
Fix: 環境変数に移動 修正: 環境変数に移動
const apiKey = "sk-abc123"; // ❌ Bad const apiKey = "sk-abc123"; // ❌ Bad
const apiKey = process.env.API_KEY; // ✓ Good const apiKey = process.env.API_KEY; // ✓ Good

View File

@@ -341,20 +341,20 @@ x = x // 無意味な代入を削除
各修正試行後: 各修正試行後:
```text ```text
[FIXED] internal/handler/user.go:42 [修正済] internal/handler/user.go:42
Error: undefined: UserService エラー: undefined: UserService
Fix: Added import "project/internal/service" 修正: import を追加 "project/internal/service"
Remaining errors: 3 残りのエラー: 3
``` ```
最終サマリー: 最終サマリー:
```text ```text
Build Status: SUCCESS/FAILED ビルドステータス: SUCCESS/FAILED
Errors Fixed: N 修正済みエラー: N
Vet Warnings Fixed: N Vet 警告修正済み: N
Files Modified: list 変更ファイル: list
Remaining Issues: list (if any) 残りの問題: list (ある場合)
``` ```
## 重要な注意事項 ## 重要な注意事項

View File

@@ -228,9 +228,9 @@ model: opus
各問題について: 各問題について:
```text ```text
[CRITICAL] SQLインジェクション脆弱性 [CRITICAL] SQLインジェクション脆弱性
File: internal/repository/user.go:42 ファイル: internal/repository/user.go:42
Issue: ユーザー入力がSQLクエリに直接連結されている 問題: ユーザー入力がSQLクエリに直接連結されている
Fix: パラメータ化クエリを使用 修正: パラメータ化クエリを使用
query := "SELECT * FROM users WHERE id = " + userID // Bad query := "SELECT * FROM users WHERE id = " + userID // Bad
query := "SELECT * FROM users WHERE id = $1" // Good query := "SELECT * FROM users WHERE id = $1" // Good

View File

@@ -399,9 +399,9 @@ model: opus
各問題について: 各問題について:
```text ```text
[CRITICAL] SQLインジェクション脆弱性 [CRITICAL] SQLインジェクション脆弱性
File: app/routes/user.py:42 ファイル: app/routes/user.py:42
Issue: ユーザー入力がSQLクエリに直接補間されている 問題: ユーザー入力がSQLクエリに直接補間されている
Fix: パラメータ化クエリを使用 修正: パラメータ化クエリを使用
query = f"SELECT * FROM users WHERE id = {user_id}" # Bad query = f"SELECT * FROM users WHERE id = {user_id}" # Bad
query = "SELECT * FROM users WHERE id = %s" # Good query = "SELECT * FROM users WHERE id = %s" # Good

View File

@@ -35,12 +35,12 @@ echo "$(date +%Y-%m-%d-%H:%M) | $CHECKPOINT_NAME | $(git rev-parse --short HEAD)
3. レポート: 3. レポート:
``` ```
CHECKPOINT COMPARISON: $NAME チェックポイント比較: $NAME
============================ ============================
Files changed: X 変更されたファイル: X
Tests: +Y passed / -Z failed テスト: +Y 合格 / -Z 失敗
Coverage: +X% / -Y% カバレッジ: +X% / -Y%
Build: [PASS/FAIL] ビルド: [PASS/FAIL]
``` ```
## チェックポイント一覧表示 ## チェックポイント一覧表示
@@ -57,13 +57,13 @@ Build: [PASS/FAIL]
一般的なチェックポイント流: 一般的なチェックポイント流:
``` ```
[Start] --> /checkpoint create "feature-start" [開始] --> /checkpoint create "feature-start"
| |
[Implement] --> /checkpoint create "core-done" [実装] --> /checkpoint create "core-done"
| |
[Test] --> /checkpoint verify "core-done" [テスト] --> /checkpoint verify "core-done"
| |
[Refactor] --> /checkpoint create "refactor-done" [リファクタリング] --> /checkpoint create "refactor-done"
| |
[PR] --> /checkpoint verify "feature-start" [PR] --> /checkpoint verify "feature-start"
``` ```

View File

@@ -38,24 +38,24 @@ e2e-runner エージェントは:
## 使用します示例 ## 使用します示例
```` ````
User: /e2e Test the market search and view flow User: /e2e マーケット検索と表示フローをテスト
Agent (e2e-runner): Agent (e2e-runner):
# E2E Test Generation: Market Search and View Flow # E2Eテスト生成: マーケット検索と表示フロー
## Test Scenario Identified ## 特定されたテストシナリオ
**User Journey:** Market Search → View Results → Click Market → View Details **ユーザージャーニー:** マーケット検索 → 結果表示 → マーケットクリック → 詳細表示
**Test Coverage:** **テストカバレッジ:**
1. Navigate to markets page 1. マーケットページに遷移
2. Perform semantic search 2. セマンティック検索を実行
3. Verify search results 3. 検索結果を検証
4. Click on first result 4. 最初の結果をクリック
5. Verify market details page loads 5. マーケット詳細ページの読み込みを検証
6. Verify chart renders 6. チャートの描画を検証
## Generated Test Code ## 生成されたテストコード
```typescript ```typescript
// tests/e2e/markets/search-and-view.spec.ts // tests/e2e/markets/search-and-view.spec.ts
@@ -160,7 +160,7 @@ test.describe('Market Search and View Flow', () => {
## テスト実行 ## テスト実行
```bash ```bash
# Run the generated test # 生成されたテストを実行
npx playwright test tests/e2e/markets/search-and-view.spec.ts npx playwright test tests/e2e/markets/search-and-view.spec.ts
Running 3 tests using 3 workers Running 3 tests using 3 workers
@@ -181,54 +181,54 @@ Artifacts generated:
``` ```
╔══════════════════════════════════════════════════════════════╗ ╔══════════════════════════════════════════════════════════════╗
║ E2E Test Results ║ E2Eテスト結果
╠══════════════════════════════════════════════════════════════╣ ╠══════════════════════════════════════════════════════════════╣
Status: ✅ ALL TESTS PASSED ステータス: ✅ 全テスト合格
Total: 3 tests 合計: 3テスト
Passed: 3 (100%) ║ 合格: 3 (100%) ║
Failed: 0 ║ 失敗: 0 ║
Flaky: 0 ║ 不安定: 0 ║
Duration: 9.1s ║ 所要時間: 9.1s ║
╚══════════════════════════════════════════════════════════════╝ ╚══════════════════════════════════════════════════════════════╝
Artifacts: アーティファクト:
📸 Screenshots: 2 files 📸 スクリーンショット: 2ファイル
📹 Videos: 0 files (only on failure) 📹 ビデオ: 0ファイル (失敗時のみ)
🔍 Traces: 0 files (only on failure) 🔍 トレース: 0ファイル (失敗時のみ)
📊 HTML Report: playwright-report/index.html 📊 HTMLレポート: playwright-report/index.html
View report: npx playwright show-report レポート表示: npx playwright show-report
``` ```
✅ E2E テストスイートは CI/CD 統合の準備ができました! ✅ E2E テストスイートは CI/CD 統合の準備ができました!
```` ````
## Test Artifacts ## テストアーティファクト
When tests run, the following artifacts are captured: テスト実行時、以下のアーティファクトがキャプチャされます:
**On All Tests:** **全テスト共通:**
- HTML Report with timeline and results - タイムラインと結果を含むHTMLレポート
- JUnit XML for CI integration - CI統合用のJUnit XML
**On Failure Only:** **失敗時のみ:**
- Screenshot of the failing state - 失敗状態のスクリーンショット
- Video recording of the test - テストのビデオ録画
- Trace file for debugging (step-by-step replay) - デバッグ用トレースファイル (ステップバイステップ再生)
- Network logs - ネットワークログ
- Console logs - コンソールログ
## Viewing Artifacts ## アーティファクトの確認
```bash ```bash
# View HTML report in browser # ブラウザでHTMLレポートを表示
npx playwright show-report npx playwright show-report
# View specific trace file # 特定のトレースファイルを表示
npx playwright show-trace artifacts/trace-abc123.zip npx playwright show-trace artifacts/trace-abc123.zip
# Screenshots are saved in artifacts/ directory # スクリーンショットはartifacts/ディレクトリに保存
open artifacts/search-results.png open artifacts/search-results.png
```` ````
@@ -239,18 +239,18 @@ open artifacts/search-results.png
``` ```
⚠️ FLAKY TEST DETECTED: tests/e2e/markets/trade.spec.ts ⚠️ FLAKY TEST DETECTED: tests/e2e/markets/trade.spec.ts
Test passed 7/10 runs (70% pass rate) テストは10回中7回合格 (合格率70%)
Common failure: よくある失敗:
"Timeout waiting for element '[data-testid="confirm-btn"]'" "Timeout waiting for element '[data-testid="confirm-btn"]'"
Recommended fixes: 推奨修正:
1. Add explicit wait: await page.waitForSelector('[data-testid="confirm-btn"]') 1. 明示的な待機を追加: await page.waitForSelector('[data-testid="confirm-btn"]')
2. Increase timeout: { timeout: 10000 } 2. タイムアウトを増加: { timeout: 10000 }
3. Check for race conditions in component 3. コンポーネントの競合状態を確認
4. Verify element is not hidden by animation 4. 要素がアニメーションで隠れていないか確認
Quarantine recommendation: Mark as test.fixme() until fixed 隔離推奨: 修正されるまでtest.fixme()としてマーク
``` ```
## ブラウザ設定 ## ブラウザ設定
@@ -350,21 +350,21 @@ PMX の場合、以下の E2E テストを優先:
## 快速命令 ## 快速命令
```bash ```bash
# Run all E2E tests # 全E2Eテストを実行
npx playwright test npx playwright test
# Run specific test file # 特定のテストファイルを実行
npx playwright test tests/e2e/markets/search.spec.ts npx playwright test tests/e2e/markets/search.spec.ts
# Run in headed mode (see browser) # ヘッドモードで実行 (ブラウザ表示)
npx playwright test --headed npx playwright test --headed
# Debug test # テストをデバッグ
npx playwright test --debug npx playwright test --debug
# Generate test code # テストコードを生成
npx playwright codegen http://localhost:3000 npx playwright codegen http://localhost:3000
# View report # レポートを表示
npx playwright show-report npx playwright show-report
``` ```

View File

@@ -92,36 +92,36 @@ instinctsが分離の恩恵を受ける複雑な複数ステップのプロセ
## 出力フォーマット ## 出力フォーマット
``` ```
🧬 Evolve Analysis 🧬 進化分析
================== ==================
進化の準備ができた3つのクラスターを発見: 進化の準備ができた3つのクラスターを発見:
## クラスター1: データベースマイグレーションワークフロー ## クラスター1: データベースマイグレーションワークフロー
Instincts: new-table-migration, update-schema, regenerate-types Instincts: new-table-migration, update-schema, regenerate-types
Type: Command タイプ: Command
Confidence: 85%(12件の観測に基づく) 信頼度: 85%(12件の観測に基づく)
作成: /new-tableコマンド 作成: /new-tableコマンド
Files: ファイル:
- ~/.claude/homunculus/evolved/commands/new-table.md - ~/.claude/homunculus/evolved/commands/new-table.md
## クラスター2: 関数型コードスタイル ## クラスター2: 関数型コードスタイル
Instincts: prefer-functional, use-immutable, avoid-classes, pure-functions Instincts: prefer-functional, use-immutable, avoid-classes, pure-functions
Type: Skill タイプ: Skill
Confidence: 78%(8件の観測に基づく) 信頼度: 78%(8件の観測に基づく)
作成: functional-patternsスキル 作成: functional-patternsスキル
Files: ファイル:
- ~/.claude/homunculus/evolved/skills/functional-patterns.md - ~/.claude/homunculus/evolved/skills/functional-patterns.md
## クラスター3: デバッグプロセス ## クラスター3: デバッグプロセス
Instincts: debug-check-logs, debug-isolate, debug-reproduce, debug-verify Instincts: debug-check-logs, debug-isolate, debug-reproduce, debug-verify
Type: Agent タイプ: Agent
Confidence: 72%(6件の観測に基づく) 信頼度: 72%(6件の観測に基づく)
作成: debuggerエージェント 作成: debuggerエージェント
Files: ファイル:
- ~/.claude/homunculus/evolved/agents/debugger.md - ~/.claude/homunculus/evolved/agents/debugger.md
--- ---

View File

@@ -62,9 +62,9 @@ internal/handler/api.go:58:2: missing return at end of function
## 修正1: 未定義の識別子 ## 修正1: 未定義の識別子
File: internal/service/user.go:25 ファイル: internal/service/user.go:25
Error: undefined: UserRepository エラー: undefined: UserRepository
Cause: インポート欠落 原因: インポート欠落
```go ```go
// インポートを追加 // インポートを追加
@@ -83,8 +83,8 @@ $ go build ./...
## 修正2: 型の不一致 ## 修正2: 型の不一致
File: internal/handler/api.go:42 ファイル: internal/handler/api.go:42
Error: cannot use x (type string) as type int エラー: cannot use x (type string) as type int
```go ```go
// 変更前 // 変更前
@@ -101,8 +101,8 @@ $ go build ./...
## 修正3: 戻り値の欠落 ## 修正3: 戻り値の欠落
File: internal/handler/api.go:58 ファイル: internal/handler/api.go:58
Error: missing return at end of function エラー: missing return at end of function
```go ```go
func GetUser(id string) (*User, error) { func GetUser(id string) (*User, error) {

View File

@@ -85,8 +85,8 @@ Agent:
## 発見された問題 ## 発見された問題
[CRITICAL] 競合状態 [CRITICAL] 競合状態
File: internal/service/auth.go:45 ファイル: internal/service/auth.go:45
Issue: 同期化なしで共有マップにアクセス 問題: 同期化なしで共有マップにアクセス
```go ```go
var cache = map[string]*Session{} // 並行アクセス! var cache = map[string]*Session{} // 並行アクセス!
@@ -94,7 +94,7 @@ func GetSession(id string) *Session {
return cache[id] // 競合状態 return cache[id] // 競合状態
} }
``` ```
Fix: sync.RWMutexまたはsync.Mapを使用 修正: sync.RWMutexまたはsync.Mapを使用
```go ```go
var ( var (
cache = map[string]*Session{} cache = map[string]*Session{}
@@ -109,12 +109,12 @@ func GetSession(id string) *Session {
``` ```
[HIGH] エラーコンテキストの欠落 [HIGH] エラーコンテキストの欠落
File: internal/handler/user.go:28 ファイル: internal/handler/user.go:28
Issue: コンテキストなしでエラーを返す 問題: コンテキストなしでエラーを返す
```go ```go
return err // コンテキストなし return err // コンテキストなし
``` ```
Fix: コンテキストでラップ 修正: コンテキストでラップ
```go ```go
return fmt.Errorf("get user %s: %w", userID, err) return fmt.Errorf("get user %s: %w", userID, err)
``` ```

View File

@@ -45,40 +45,40 @@ python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py import <
## インポートプロセス ## インポートプロセス
``` ```
📥 Importing instincts from: team-instincts.yaml 📥 instinctsをインポート中: team-instincts.yaml
================================================ ================================================
Found 12 instincts to import. 12件のinstinctsが見つかりました。
Analyzing conflicts... 競合を分析中...
## New Instincts (8) ## 新規instincts (8)
These will be added: 以下が追加されます:
✓ use-zod-validation (confidence: 0.7) ✓ use-zod-validation (confidence: 0.7)
✓ prefer-named-exports (confidence: 0.65) ✓ prefer-named-exports (confidence: 0.65)
✓ test-async-functions (confidence: 0.8) ✓ test-async-functions (confidence: 0.8)
... ...
## Duplicate Instincts (3) ## 重複instincts (3)
Already have similar instincts: 類似のinstinctsが既に存在:
⚠️ prefer-functional-style ⚠️ prefer-functional-style
Local: 0.8 confidence, 12 observations ローカル: 信頼度0.8, 12回の観測
Import: 0.7 confidence インポート: 信頼度0.7
Keep local (higher confidence) ローカルを保持 (信頼度が高い)
⚠️ test-first-workflow ⚠️ test-first-workflow
Local: 0.75 confidence ローカル: 信頼度0.75
Import: 0.9 confidence インポート: 信頼度0.9
Update to import (higher confidence) インポートに更新 (信頼度が高い)
## Conflicting Instincts (1) ## 競合instincts (1)
These contradict local instincts: ローカルのinstinctsと矛盾:
❌ use-classes-for-services ❌ use-classes-for-services
Conflicts with: avoid-classes 競合: avoid-classes
Skip (requires manual resolution) スキップ (手動解決が必要)
--- ---
Import 8 new, update 1, skip 3? 8件を新規追加、1件を更新、3件をスキップしますか?
``` ```
## マージ戦略 ## マージ戦略
@@ -130,13 +130,13 @@ Skill Creatorからインポートする場合:
インポート後: インポート後:
``` ```
Import complete! インポート完了!
Added: 8 instincts 追加: 8件のinstincts
Updated: 1 instinct 更新: 1件のinstinct
Skipped: 3 instincts (2 duplicates, 1 conflict) スキップ: 3件のinstincts (2件の重複, 1件の競合)
New instincts saved to: ~/.claude/homunculus/instincts/inherited/ 新規instinctsの保存先: ~/.claude/homunculus/instincts/inherited/
Run /instinct-status to see all instincts. /instinct-statusを実行してすべてのinstinctsを確認できます。
``` ```

View File

@@ -39,42 +39,42 @@ python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py status
## 出力形式 ## 出力形式
``` ```
📊 Instinct Status 📊 instinctステータス
================== ==================
## Code Style (4 instincts) ## コードスタイル (4 instincts)
### prefer-functional-style ### prefer-functional-style
Trigger: when writing new functions トリガー: 新しい関数を書くとき
Action: Use functional patterns over classes アクション: クラスより関数型パターンを使用
Confidence: ████████░░ 80% 信頼度: ████████░░ 80%
Source: session-observation | Last updated: 2025-01-22 ソース: session-observation | 最終更新: 2025-01-22
### use-path-aliases ### use-path-aliases
Trigger: when importing modules トリガー: モジュールをインポートするとき
Action: Use @/ path aliases instead of relative imports アクション: 相対インポートの代わりに@/パスエイリアスを使用
Confidence: ██████░░░░ 60% 信頼度: ██████░░░░ 60%
Source: repo-analysis (github.com/acme/webapp) ソース: repo-analysis (github.com/acme/webapp)
## Testing (2 instincts) ## テスト (2 instincts)
### test-first-workflow ### test-first-workflow
Trigger: when adding new functionality トリガー: 新しい機能を追加するとき
Action: Write test first, then implementation アクション: テストを先に書き、次に実装
Confidence: █████████░ 90% 信頼度: █████████░ 90%
Source: session-observation ソース: session-observation
## Workflow (3 instincts) ## ワークフロー (3 instincts)
### grep-before-edit ### grep-before-edit
Trigger: when modifying code トリガー: コードを変更するとき
Action: Search with Grep, confirm with Read, then Edit アクション: Grepで検索、Readで確認、次にEdit
Confidence: ███████░░░ 70% 信頼度: ███████░░░ 70%
Source: session-observation ソース: session-observation
--- ---
Total: 9 instincts (4 personal, 5 inherited) 合計: 9 instincts (4個人, 5継承)
Observer: Running (last analysis: 5 min ago) オブザーバー: 実行中 (最終分析: 5分前)
``` ```
## フラグ ## フラグ

View File

@@ -99,36 +99,36 @@ security-reviewer -> code-reviewer -> architect
## 最終レポート形式 ## 最終レポート形式
``` ```
ORCHESTRATION REPORT オーケストレーションレポート
==================== ====================
Workflow: feature ワークフロー: feature
Task: Add user authentication タスク: ユーザー認証の追加
Agents: planner -> tdd-guide -> code-reviewer -> security-reviewer エージェント: planner -> tdd-guide -> code-reviewer -> security-reviewer
SUMMARY サマリー
------- -------
[1段落の要約] [1段落の要約]
AGENT OUTPUTS エージェント出力
------------- -------------
Planner: [要約] Planner: [要約]
TDD Guide: [要約] TDD Guide: [要約]
Code Reviewer: [要約] Code Reviewer: [要約]
Security Reviewer: [要約] Security Reviewer: [要約]
FILES CHANGED 変更ファイル
------------- -------------
[変更されたすべてのファイルをリスト] [変更されたすべてのファイルをリスト]
TEST RESULTS テスト結果
------------ ------------
[テスト合格/不合格の要約] [テスト合格/不合格の要約]
SECURITY STATUS セキュリティステータス
--------------- ---------------
[セキュリティの発見事項] [セキュリティの発見事項]
RECOMMENDATION 推奨事項
-------------- --------------
[リリース可 / 要修正 / ブロック中] [リリース可 / 要修正 / ブロック中]
``` ```

View File

@@ -95,26 +95,26 @@ Agent:
## 発見された問題 ## 発見された問題
[CRITICAL] SQLインジェクション脆弱性 [CRITICAL] SQLインジェクション脆弱性
File: app/routes/user.py:42 ファイル: app/routes/user.py:42
Issue: ユーザー入力が直接SQLクエリに挿入されている 問題: ユーザー入力が直接SQLクエリに挿入されている
```python ```python
query = f"SELECT * FROM users WHERE id = {user_id}" # 悪い query = f"SELECT * FROM users WHERE id = {user_id}" # 悪い
``` ```
Fix: パラメータ化クエリを使用 修正: パラメータ化クエリを使用
```python ```python
query = "SELECT * FROM users WHERE id = %s" # 良い query = "SELECT * FROM users WHERE id = %s" # 良い
cursor.execute(query, (user_id,)) cursor.execute(query, (user_id,))
``` ```
[HIGH] 可変デフォルト引数 [HIGH] 可変デフォルト引数
File: app/services/auth.py:18 ファイル: app/services/auth.py:18
Issue: 可変デフォルト引数が共有状態を引き起こす 問題: 可変デフォルト引数が共有状態を引き起こす
```python ```python
def process_items(items=[]): # 悪い def process_items(items=[]): # 悪い
items.append("new") items.append("new")
return items return items
``` ```
Fix: デフォルトにNoneを使用 修正: デフォルトにNoneを使用
```python ```python
def process_items(items=None): # 良い def process_items(items=None): # 良い
if items is None: if items is None:
@@ -124,27 +124,27 @@ def process_items(items=None): # 良い
``` ```
[MEDIUM] 型ヒントの欠落 [MEDIUM] 型ヒントの欠落
File: app/services/auth.py:25 ファイル: app/services/auth.py:25
Issue: 型アノテーションのない公開関数 問題: 型アノテーションのない公開関数
```python ```python
def get_user(user_id): # 悪い def get_user(user_id): # 悪い
return db.find(user_id) return db.find(user_id)
``` ```
Fix: 型ヒントを追加 修正: 型ヒントを追加
```python ```python
def get_user(user_id: str) -> Optional[User]: # 良い def get_user(user_id: str) -> Optional[User]: # 良い
return db.find(user_id) return db.find(user_id)
``` ```
[MEDIUM] コンテキストマネージャーを使用していない [MEDIUM] コンテキストマネージャーを使用していない
File: app/routes/user.py:55 ファイル: app/routes/user.py:55
Issue: 例外時にファイルがクローズされない 問題: 例外時にファイルがクローズされない
```python ```python
f = open("config.json") # 悪い f = open("config.json") # 悪い
data = f.read() data = f.read()
f.close() f.close()
``` ```
Fix: コンテキストマネージャーを使用 修正: コンテキストマネージャーを使用
```python ```python
with open("config.json") as f: # 良い with open("config.json") as f: # 良い
data = f.read() data = f.read()

View File

@@ -36,16 +36,16 @@
簡潔な検証レポートを生成します: 簡潔な検証レポートを生成します:
``` ```
VERIFICATION: [PASS/FAIL] 検証結果: [PASS/FAIL]
Build: [OK/FAIL] ビルド: [OK/FAIL]
Types: [OK/X errors] 型: [OK/Xエラー]
Lint: [OK/X issues] Lint: [OK/X件の問題]
Tests: [X/Y passed, Z% coverage] テスト: [X/Y合格, Z%カバレッジ]
Secrets: [OK/X found] シークレット: [OK/X件発見]
Logs: [OK/X console.logs] ログ: [OK/X件のconsole.log]
Ready for PR: [YES/NO] PR準備完了: [YES/NO]
``` ```
重大な問題がある場合は、修正案とともにリストアップします。 重大な問題がある場合は、修正案とともにリストアップします。

View File

@@ -43,7 +43,7 @@
``` ```
src/ src/
|-- app/ # Next.js app router |-- app/ # Next.js App Router
|-- components/ # 再利用可能なUIコンポーネント |-- components/ # 再利用可能なUIコンポーネント
|-- hooks/ # カスタムReactフック |-- hooks/ # カスタムReactフック
|-- lib/ # ユーティリティライブラリ |-- lib/ # ユーティリティライブラリ

View File

@@ -234,14 +234,14 @@ setCount(count + 1) // Can be stale in async scenarios
### REST API規約 ### REST API規約
``` ```
GET /api/markets # List all markets GET /api/markets # すべてのマーケットを一覧
GET /api/markets/:id # Get specific market GET /api/markets/:id # 特定のマーケットを取得
POST /api/markets # Create new market POST /api/markets # 新しいマーケットを作成
PUT /api/markets/:id # Update market (full) PUT /api/markets/:id # マーケットを更新(全体)
PATCH /api/markets/:id # Update market (partial) PATCH /api/markets/:id # マーケットを更新(部分)
DELETE /api/markets/:id # Delete market DELETE /api/markets/:id # マーケットを削除
# Query parameters for filtering # フィルタリング用クエリパラメータ
GET /api/markets?status=active&limit=10&offset=0 GET /api/markets?status=active&limit=10&offset=0
``` ```
@@ -312,29 +312,29 @@ export async function POST(request: Request) {
``` ```
src/ src/
├── app/ # Next.js App Router ├── app/ # Next.js App Router
│ ├── api/ # API routes │ ├── api/ # API ルート
│ ├── markets/ # Market pages │ ├── markets/ # マーケットページ
│ └── (auth)/ # Auth pages (route groups) │ └── (auth)/ # 認証ページ(ルートグループ)
├── components/ # React components ├── components/ # React コンポーネント
│ ├── ui/ # Generic UI components │ ├── ui/ # 汎用 UI コンポーネント
│ ├── forms/ # Form components │ ├── forms/ # フォームコンポーネント
│ └── layouts/ # Layout components │ └── layouts/ # レイアウトコンポーネント
├── hooks/ # Custom React hooks ├── hooks/ # カスタム React フック
├── lib/ # Utilities and configs ├── lib/ # ユーティリティと設定
│ ├── api/ # API clients │ ├── api/ # API クライアント
│ ├── utils/ # Helper functions │ ├── utils/ # ヘルパー関数
│ └── constants/ # Constants │ └── constants/ # 定数
├── types/ # TypeScript types ├── types/ # TypeScript 型定義
└── styles/ # Global styles └── styles/ # グローバルスタイル
``` ```
### ファイル命名 ### ファイル命名
``` ```
components/Button.tsx # PascalCase for components components/Button.tsx # コンポーネントは PascalCase
hooks/useAuth.ts # camelCase with 'use' prefix hooks/useAuth.ts # フックは 'use' プレフィックス付き camelCase
lib/formatDate.ts # camelCase for utilities lib/formatDate.ts # ユーティリティは camelCase
types/market.types.ts # camelCase with .types suffix types/market.types.ts # 型定義は .types サフィックス付き camelCase
``` ```
## コメントとドキュメント ## コメントとドキュメント

View File

@@ -51,13 +51,13 @@ source: "session-observation"
## 仕組み ## 仕組み
``` ```
Session Activity セッションアクティビティ
│ フックがプロンプト + ツール使用をキャプチャ100%信頼性) │ フックがプロンプト + ツール使用をキャプチャ100%信頼性)
┌─────────────────────────────────────────┐ ┌─────────────────────────────────────────┐
│ observations.jsonl │ │ observations.jsonl │
(prompts, tool calls, outcomes) (プロンプト、ツール呼び出し、結果)
└─────────────────────────────────────────┘ └─────────────────────────────────────────┘
│ Observerエージェントが読み取りバックグラウンド、Haiku │ Observerエージェントが読み取りバックグラウンド、Haiku

View File

@@ -22,24 +22,24 @@ Claude Codeセッションの正式な評価フレームワークで、評価駆
Claudeが以前できなかったことができるようになったかをテスト Claudeが以前できなかったことができるようになったかをテスト
```markdown ```markdown
[CAPABILITY EVAL: feature-name] [CAPABILITY EVAL: feature-name]
Task: Claudeが達成すべきことの説明 タスク: Claudeが達成すべきことの説明
Success Criteria: 成功基準:
- [ ] 基準1 - [ ] 基準1
- [ ] 基準2 - [ ] 基準2
- [ ] 基準3 - [ ] 基準3
Expected Output: 期待される結果の説明 期待される出力: 期待される結果の説明
``` ```
### リグレッション評価 ### リグレッション評価
変更が既存の機能を破壊しないことを確認: 変更が既存の機能を破壊しないことを確認:
```markdown ```markdown
[REGRESSION EVAL: feature-name] [REGRESSION EVAL: feature-name]
Baseline: SHAまたはチェックポイント名 ベースライン: SHAまたはチェックポイント名
Tests: テスト:
- existing-test-1: PASS/FAIL - existing-test-1: PASS/FAIL
- existing-test-2: PASS/FAIL - existing-test-2: PASS/FAIL
- existing-test-3: PASS/FAIL - existing-test-3: PASS/FAIL
Result: X/Y passed (previously Y/Y) 結果: X/Y 成功(以前は Y/Y
``` ```
## 評価者タイプ ## 評価者タイプ
@@ -67,17 +67,17 @@ Claudeを使用して自由形式の出力を評価
3. エッジケースは処理されていますか? 3. エッジケースは処理されていますか?
4. エラー処理は適切ですか? 4. エラー処理は適切ですか?
Score: 1-5 (1=poor, 5=excellent) スコア: 1-51=不良、5=優秀)
Reasoning: [説明] 理由: [説明]
``` ```
### 3. 人間評価者 ### 3. 人間評価者
手動レビューのためにフラグを立てる: 手動レビューのためにフラグを立てる:
```markdown ```markdown
[HUMAN REVIEW REQUIRED] [HUMAN REVIEW REQUIRED]
Change: 何が変更されたかの説明 変更内容: 何が変更されたかの説明
Reason: 人間のレビューが必要な理由 理由: 人間のレビューが必要な理由
Risk Level: LOW/MEDIUM/HIGH リスクレベル: LOW/MEDIUM/HIGH
``` ```
## メトリクス ## メトリクス
@@ -98,21 +98,21 @@ Risk Level: LOW/MEDIUM/HIGH
### 1. 定義(コーディング前) ### 1. 定義(コーディング前)
```markdown ```markdown
## EVAL DEFINITION: feature-xyz ## 評価定義: feature-xyz
### Capability Evals ### 能力評価
1. 新しいユーザーアカウントを作成できる 1. 新しいユーザーアカウントを作成できる
2. メール形式を検証できる 2. メール形式を検証できる
3. パスワードを安全にハッシュ化できる 3. パスワードを安全にハッシュ化できる
### Regression Evals ### リグレッション評価
1. 既存のログインが引き続き機能する 1. 既存のログインが引き続き機能する
2. セッション管理が変更されていない 2. セッション管理が変更されていない
3. ログアウトフローが維持されている 3. ログアウトフローが維持されている
### Success Metrics ### 成功メトリクス
- pass@3 > 90% for capability evals - 能力評価で pass@3 > 90%
- pass^3 = 100% for regression evals - リグレッション評価で pass^3 = 100%
``` ```
### 2. 実装 ### 2. 実装
@@ -131,26 +131,26 @@ npm test -- --testPathPattern="existing"
### 4. レポート ### 4. レポート
```markdown ```markdown
EVAL REPORT: feature-xyz 評価レポート: feature-xyz
======================== ========================
Capability Evals: 能力評価:
create-user: PASS (pass@1) create-user: PASS (pass@1)
validate-email: PASS (pass@2) validate-email: PASS (pass@2)
hash-password: PASS (pass@1) hash-password: PASS (pass@1)
Overall: 3/3 passed 全体: 3/3 成功
Regression Evals: リグレッション評価:
login-flow: PASS login-flow: PASS
session-mgmt: PASS session-mgmt: PASS
logout-flow: PASS logout-flow: PASS
Overall: 3/3 passed 全体: 3/3 成功
Metrics: メトリクス:
pass@1: 67% (2/3) pass@1: 67% (2/3)
pass@3: 100% (3/3) pass@3: 100% (3/3)
Status: READY FOR REVIEW ステータス: レビュー準備完了
``` ```
## 統合パターン ## 統合パターン
@@ -199,29 +199,29 @@ Status: READY FOR REVIEW
```markdown ```markdown
## EVAL: add-authentication ## EVAL: add-authentication
### Phase 1: Define (10 min) ### フェーズ 1: 定義10分
Capability Evals: 能力評価:
- [ ] ユーザーはメール/パスワードで登録できる - [ ] ユーザーはメール/パスワードで登録できる
- [ ] ユーザーは有効な資格情報でログインできる - [ ] ユーザーは有効な資格情報でログインできる
- [ ] 無効な資格情報は適切なエラーで拒否される - [ ] 無効な資格情報は適切なエラーで拒否される
- [ ] セッションはページリロード後も持続する - [ ] セッションはページリロード後も持続する
- [ ] ログアウトはセッションをクリアする - [ ] ログアウトはセッションをクリアする
Regression Evals: リグレッション評価:
- [ ] 公開ルートは引き続きアクセス可能 - [ ] 公開ルートは引き続きアクセス可能
- [ ] APIレスポンスは変更されていない - [ ] APIレスポンスは変更されていない
- [ ] データベーススキーマは互換性がある - [ ] データベーススキーマは互換性がある
### Phase 2: Implement (varies) ### フェーズ 2: 実装(可変)
[コードを書く] [コードを書く]
### Phase 3: Evaluate ### フェーズ 3: 評価
Run: /eval check add-authentication Run: /eval check add-authentication
### Phase 4: Report ### フェーズ 4: レポート
EVAL REPORT: add-authentication 評価レポート: add-authentication
============================== ==============================
Capability: 5/5 passed (pass@3: 100%) 能力: 5/5 成功(pass@3: 100%
Regression: 3/3 passed (pass^3: 100%) リグレッション: 3/3 成功(pass^3: 100%
Status: SHIP IT ステータス: 出荷可能
``` ```

View File

@@ -368,17 +368,17 @@ func WriteAndFlush(w io.Writer, data []byte) error {
myproject/ myproject/
├── cmd/ ├── cmd/
│ └── myapp/ │ └── myapp/
│ └── main.go # Entry point │ └── main.go # エントリポイント
├── internal/ ├── internal/
│ ├── handler/ # HTTP handlers │ ├── handler/ # HTTP ハンドラー
│ ├── service/ # Business logic │ ├── service/ # ビジネスロジック
│ ├── repository/ # Data access │ ├── repository/ # データアクセス
│ └── config/ # Configuration │ └── config/ # 設定
├── pkg/ ├── pkg/
│ └── client/ # Public API client │ └── client/ # 公開 API クライアント
├── api/ ├── api/
│ └── v1/ # API definitions (proto, OpenAPI) │ └── v1/ # API 定義(protoOpenAPI
├── testdata/ # Test fixtures ├── testdata/ # テストフィクスチャ
├── go.mod ├── go.mod
├── go.sum ├── go.sum
└── Makefile └── Makefile

View File

@@ -113,7 +113,7 @@ mypackage/
├── testdata/ # テストフィクスチャ ├── testdata/ # テストフィクスチャ
│ ├── valid_user.json │ ├── valid_user.json
│ └── invalid_user.json │ └── invalid_user.json
└── export_test.go # 内部テストのための非公開エクスポート └── export_test.go # 内部テストの非公開エクスポート
``` ```
### テストパッケージ ### テストパッケージ

View File

@@ -594,18 +594,18 @@ def test_with_tmpdir(tmpdir):
``` ```
tests/ tests/
├── conftest.py # Shared fixtures ├── conftest.py # 共有フィクスチャ
├── __init__.py ├── __init__.py
├── unit/ # Unit tests ├── unit/ # ユニットテスト
│ ├── __init__.py │ ├── __init__.py
│ ├── test_models.py │ ├── test_models.py
│ ├── test_utils.py │ ├── test_utils.py
│ └── test_services.py │ └── test_services.py
├── integration/ # Integration tests ├── integration/ # 統合テスト
│ ├── __init__.py │ ├── __init__.py
│ ├── test_api.py │ ├── test_api.py
│ └── test_database.py │ └── test_database.py
└── e2e/ # End-to-end tests └── e2e/ # エンドツーエンドテスト
├── __init__.py ├── __init__.py
└── test_user_flow.py └── test_user_flow.py
``` ```

View File

@@ -1,6 +1,7 @@
--- ---
name: prompt-optimizer name: prompt-optimizer
description: 分析原始提示识别意图和差距匹配ECC组件技能/命令/代理/钩子并输出一个可直接粘贴的优化提示。仅提供咨询角色——绝不自行执行任务。触发时机当用户说“优化提示”、“改进我的提示”、“如何编写提示”、“帮我优化这个指令”或明确要求提高提示质量时。中文等效表达同样触发“优化prompt”、“改进prompt”、“怎么写prompt”、“帮我优化这个指令”。不触发时机当用户希望直接执行任务或说“直接做”时。不触发时机当用户说“优化代码”、“优化性能”、“optimize performance”、“optimize this code”时——这些是重构/性能优化任务,而非提示优化。origin: community description: 分析原始提示识别意图和差距匹配ECC组件技能/命令/代理/钩子并输出一个可直接粘贴的优化提示。仅提供咨询角色——绝不自行执行任务。触发时机当用户说“优化提示”、“改进我的提示”、“如何编写提示”、“帮我优化这个指令”或明确要求提高提示质量时。中文等效表达同样触发“优化prompt”、“改进prompt”、“怎么写prompt”、“帮我优化这个指令”。不触发时机当用户希望直接执行任务或说“直接做”时。不触发时机当用户说“优化代码”、“优化性能”、“optimize performance”、“optimize this code”时——这些是重构/性能优化任务,而非提示优化。
origin: community
metadata: metadata:
author: YannJY02 author: YannJY02
version: "1.0.0" version: "1.0.0"

View File

@@ -34,5 +34,20 @@ while ($true) {
$scriptDir = Split-Path -Parent $scriptPath $scriptDir = Split-Path -Parent $scriptPath
$installerScript = Join-Path -Path (Join-Path -Path $scriptDir -ChildPath 'scripts') -ChildPath 'install-apply.js' $installerScript = Join-Path -Path (Join-Path -Path $scriptDir -ChildPath 'scripts') -ChildPath 'install-apply.js'
# Auto-install Node dependencies when running from a git clone
$nodeModules = Join-Path -Path $scriptDir -ChildPath 'node_modules'
if (-not (Test-Path -LiteralPath $nodeModules)) {
Write-Host '[ECC] Installing dependencies...'
Push-Location $scriptDir
try {
& npm install --no-audit --no-fund --loglevel=error
if ($LASTEXITCODE -ne 0) {
Write-Error "npm install failed with exit code $LASTEXITCODE"
exit $LASTEXITCODE
}
}
finally { Pop-Location }
}
& node $installerScript @args & node $installerScript @args
exit $LASTEXITCODE exit $LASTEXITCODE

View File

@@ -14,4 +14,10 @@ while [ -L "$SCRIPT_PATH" ]; do
done done
SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)"
# Auto-install Node dependencies when running from a git clone
if [ ! -d "$SCRIPT_DIR/node_modules" ]; then
echo "[ECC] Installing dependencies..."
(cd "$SCRIPT_DIR" && npm install --no-audit --no-fund --loglevel=error)
fi
exec node "$SCRIPT_DIR/scripts/install-apply.js" "$@" exec node "$SCRIPT_DIR/scripts/install-apply.js" "$@"

View File

@@ -125,6 +125,7 @@
"skills/kotlin-ktor-patterns", "skills/kotlin-ktor-patterns",
"skills/kotlin-patterns", "skills/kotlin-patterns",
"skills/kotlin-testing", "skills/kotlin-testing",
"skills/laravel-plugin-discovery",
"skills/laravel-patterns", "skills/laravel-patterns",
"skills/laravel-tdd", "skills/laravel-tdd",
"skills/laravel-verification", "skills/laravel-verification",

View File

@@ -133,6 +133,11 @@
"args": ["-y", "token-optimizer-mcp"], "args": ["-y", "token-optimizer-mcp"],
"description": "Token optimization for 95%+ context reduction via content deduplication and compression" "description": "Token optimization for 95%+ context reduction via content deduplication and compression"
}, },
"laraplugins": {
"type": "http",
"url": "https://laraplugins.io/mcp/plugins",
"description": "Laravel plugin discovery — search packages by keyword, health score, Laravel/PHP version compatibility. Use with laravel-plugin-discovery skill."
},
"confluence": { "confluence": {
"command": "npx", "command": "npx",
"args": ["-y", "confluence-mcp-server"], "args": ["-y", "confluence-mcp-server"],

7
package-lock.json generated
View File

@@ -11,6 +11,7 @@
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@iarna/toml": "^2.2.5", "@iarna/toml": "^2.2.5",
"ajv": "^8.18.0",
"sql.js": "^1.14.1" "sql.js": "^1.14.1"
}, },
"bin": { "bin": {
@@ -19,7 +20,6 @@
}, },
"devDependencies": { "devDependencies": {
"@eslint/js": "^9.39.2", "@eslint/js": "^9.39.2",
"ajv": "^8.18.0",
"c8": "^10.1.2", "c8": "^10.1.2",
"eslint": "^9.39.2", "eslint": "^9.39.2",
"globals": "^17.1.0", "globals": "^17.1.0",
@@ -449,7 +449,6 @@
"version": "8.18.0", "version": "8.18.0",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz", "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
"integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==", "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"fast-deep-equal": "^3.1.3", "fast-deep-equal": "^3.1.3",
@@ -1043,7 +1042,6 @@
"version": "3.1.3", "version": "3.1.3",
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
"dev": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/fast-json-stable-stringify": { "node_modules/fast-json-stable-stringify": {
@@ -1064,7 +1062,6 @@
"version": "3.1.0", "version": "3.1.0",
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
"integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==",
"dev": true,
"funding": [ "funding": [
{ {
"type": "github", "type": "github",
@@ -1491,7 +1488,6 @@
"version": "1.0.0", "version": "1.0.0",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
"dev": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/json-stable-stringify-without-jsonify": { "node_modules/json-stable-stringify-without-jsonify": {
@@ -2512,7 +2508,6 @@
"version": "2.0.2", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">=0.10.0" "node": ">=0.10.0"

View File

@@ -89,6 +89,9 @@
"AGENTS.md", "AGENTS.md",
".claude-plugin/plugin.json", ".claude-plugin/plugin.json",
".claude-plugin/README.md", ".claude-plugin/README.md",
".codex-plugin/plugin.json",
".codex-plugin/README.md",
".mcp.json",
"install.sh", "install.sh",
"install.ps1", "install.ps1",
"llms.txt" "llms.txt"

View File

@@ -0,0 +1,172 @@
# ECC2 Codebase Research Report
**Date:** 2026-03-26
**Subject:** `ecc-tui` v0.1.0 — Agentic IDE Control Plane
**Total Lines:** 4,417 across 15 `.rs` files
## 1. Architecture Overview
ECC2 is a Rust TUI application that orchestrates AI coding agent sessions. It uses:
- **ratatui 0.29** + **crossterm 0.28** for terminal UI
- **rusqlite 0.32** (bundled) for local state persistence
- **tokio 1** (full) for async runtime
- **clap 4** (derive) for CLI
### Module Breakdown
| Module | Lines | Purpose |
|--------|------:|---------|
| `session/` | 1,974 | Session lifecycle, persistence, runtime, output |
| `tui/` | 1,613 | Dashboard, app loop, custom widgets |
| `observability/` | 409 | Tool call risk scoring and logging |
| `config/` | 144 | Configuration (TOML file) |
| `main.rs` | 142 | CLI entry point |
| `worktree/` | 99 | Git worktree management |
| `comms/` | 36 | Inter-agent messaging (send only) |
### Key Architectural Patterns
- **DbWriter thread** in `session/runtime.rs` — dedicated OS thread for SQLite writes from async context via `mpsc::unbounded_channel` with oneshot acknowledgements. Clean solution to the "SQLite from async" problem.
- **Session state machine** with enforced transitions: `Pending → {Running, Failed, Stopped}`, `Running → {Idle, Completed, Failed, Stopped}`, etc.
- **Ring buffer** for session output — `OUTPUT_BUFFER_LIMIT = 1000` lines per session with automatic eviction.
- **Risk scoring** on tool calls — 4-axis analysis (base tool risk, file sensitivity, blast radius, irreversibility) producing composite 0.0–1.0 scores with suggested actions (Allow/Review/RequireConfirmation/Block).
## 2. Code Quality Metrics
| Metric | Value |
|--------|-------|
| Total lines | 4,417 |
| Test functions | 34 |
| `unwrap()` calls | 3 |
| `unsafe` blocks | 0 |
| TODO/FIXME comments | 0 |
| Max file size | 1,273 lines (`dashboard.rs`) |
**Assessment:** The codebase is clean. Only 3 `unwrap()` calls (2 in tests, 1 in config `default()`), zero `unsafe`, and all modules use proper `anyhow::Result` error propagation. The `dashboard.rs` file at 1,273 lines exceeds the repo's 800-line max-file guideline, but it is still manageable at the current scope.
## 3. Identified Gaps
### 3.1 Comms Module — Send Without Receive
`comms/mod.rs` (36 lines) has `send()` but no `receive()`, `poll()`, `inbox()`, or `subscribe()`. The `messages` table exists in SQLite, but nothing reads from it. The inter-agent messaging story is half-built.
**Impact:** Agents cannot coordinate. The `TaskHandoff`, `Query`, `Response`, and `Conflict` message types are defined but unusable.
### 3.2 New Session Dialog — Stub
`dashboard.rs:495``new_session()` logs `"New session dialog requested"` but does nothing. Users must use the CLI (`ecc start --task "..."`) to create sessions; the TUI dashboard cannot.
### 3.3 Single Agent Support
`session/manager.rs``agent_program()` only supports `"claude"`. The CLI accepts `--agent` but anything other than `"claude"` fails. No codex, opencode, or custom agent support.
### 3.4 Config — File-Only
`Config::load()` reads `~/.claude/ecc2.toml` only. The implementation lacks environment variable overrides (e.g., `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`) and CLI flags for configuration.
### 3.5 Legacy Dependency Candidate: `git2`
`git2 = "0.20"` is still declared in `Cargo.toml`, but the `worktree` module shells out to the `git` CLI instead. That makes `git2` a strong removal candidate rather than an already-completed cleanup.
### 3.6 No Metrics Aggregation
`SessionMetrics` tracks tokens, cost, duration, tool_calls, files_changed per session. But there's no aggregate view: total cost across sessions, average duration, top tools by usage, etc. The Metrics pane in the dashboard shows per-session detail only.
### 3.7 Daemon — No Health Reporting
`session/daemon.rs` runs an infinite loop checking session timeouts. No health endpoint, no log rotation, no PID file, no signal handling for graceful shutdown. `Ctrl+C` during daemon mode kills the process uncleanly.
## 4. Test Coverage Analysis
34 test functions across 10 source modules:
| Module | Tests | Coverage Focus |
|--------|------:|----------------|
| `main.rs` | 1 | CLI parsing |
| `config/mod.rs` | 5 | Defaults, deserialization, legacy fallback |
| `observability/mod.rs` | 5 | Risk scoring, persistence, pagination |
| `session/daemon.rs` | 2 | Crash recovery / liveness handling |
| `session/manager.rs` | 4 | Session lifecycle, resume, stop, latest status |
| `session/output.rs` | 2 | Ring buffer, broadcast |
| `session/runtime.rs` | 1 | Output capture persistence/events |
| `session/store.rs` | 3 | Buffer window, migration, state transitions |
| `tui/dashboard.rs` | 8 | Rendering, selection, pane navigation, scrolling |
| `tui/widgets.rs` | 3 | Token meter rendering and thresholds |
**Direct coverage gaps:**
- `comms/mod.rs` — 0 tests
- `worktree/mod.rs` — 0 tests
The core I/O-heavy paths are no longer completely untested: `manager.rs`, `runtime.rs`, and `daemon.rs` each have targeted tests. The remaining gap is breadth rather than total absence, especially around `comms/`, `worktree/`, and more adversarial process/worktree failure cases.
## 5. Security Observations
- **No secrets in code.** Config reads from TOML file, no hardcoded credentials.
- **Process spawning** uses `tokio::process::Command` with explicit `Stdio::piped()` — no shell injection vectors.
- **Risk scoring** is a strong feature — catches `rm -rf`, `git push --force origin main`, file access to `.env`/secrets.
- **No input sanitization on session task strings.** The task string is passed directly to `claude --print`. If the task contains shell metacharacters, it could be exploited depending on how `Command` handles argument quoting. Currently safe (arguments are not shell-interpreted), but worth auditing.
## 6. Dependency Health
| Crate | Version | Latest | Notes |
|-------|---------|--------|-------|
| ratatui | 0.29 | **0.30.0** | Update available |
| crossterm | 0.28 | **0.29.0** | Update available |
| rusqlite | 0.32 | **0.39.0** | Update available |
| tokio | 1 | **1.50.0** | Update available |
| serde | 1 | **1.0.228** | Update available |
| clap | 4 | **4.6.0** | Update available |
| chrono | 0.4 | **0.4.44** | Update available |
| uuid | 1 | **1.22.0** | Update available |
`git2` is still present in `Cargo.toml` even though the `worktree` module shells out to the `git` CLI. Several other dependencies are outdated; either remove `git2` or start using it before the next release.
## 7. Recommendations (Prioritized)
### P0 — Quick Wins
1. **Add environment variable support to `Config::load()`**`ECC_DB_PATH`, `ECC_WORKTREE_ROOT`, `ECC_DEFAULT_AGENT`. Standard practice for CLI tools.
### P1 — Feature Completions
2. **Implement `comms::receive()` / `comms::poll()`** — read unread messages from the `messages` table, optionally with a `broadcast` channel for real-time delivery. Wire it into the dashboard.
3. **Build the new-session dialog in the TUI** — modal form with task input, agent selector, worktree toggle. Should call `session::manager::create_session()`.
4. **Add aggregate metrics** — total cost, average session duration, tool call frequency, cost per session. Show in the Metrics pane.
### P2 — Robustness
5. **Expand integration coverage for `manager.rs`, `runtime.rs`, and `daemon.rs`** — the repo now has baseline tests here, but it still needs failure-path coverage around process crashes, timeouts, and cleanup edge cases.
6. **Add first-party tests for `worktree/mod.rs` and `comms/mod.rs`** — these are still uncovered and back important orchestration features.
7. **Add daemon health reporting** — PID file, structured logging, graceful shutdown via signal handler.
8. **Task string security audit** — The session task uses `claude --print` via `tokio::process::Command`. Verify arguments are never shell-interpreted. Checklist: confirm `Command` arg usage, threat-model metacharacter injection, input validation/escaping strategy, logging of raw inputs, and automated tests. Re-audit if invocation code changes.
9. **Break up `dashboard.rs`** — extract SessionsPane, OutputPane, MetricsPane, LogPane into separate files under `tui/panes/`.
### P3 — Extensibility
10. **Multi-agent support** — make `agent_program()` pluggable. Add `codex`, `opencode`, `custom` agent types.
11. **Config validation** — validate risk thresholds sum correctly, budget values are positive, paths exist.
## 8. Comparison with Ratatui 0.29 Best Practices
The codebase follows ratatui conventions well:
- Uses `TableState` for stateful selection (correct pattern)
- Custom `Widget` trait implementation for `TokenMeter` (idiomatic)
- `tick()` method for periodic state sync (standard)
- `broadcast::channel` for real-time output events (appropriate)
**Minor deviations:**
- The `Dashboard` struct directly holds `StateStore` (SQLite connection). Ratatui best practice is to keep the state store behind an `Arc<Mutex<>>` to allow background updates. Currently the TUI owns the DB exclusively, which blocks adding a background metrics refresh task.
- No `Clear` widget usage when rendering the help overlay — could cause rendering artifacts on some terminals.
## 9. Risk Assessment
| Risk | Likelihood | Impact | Mitigation |
|------|-----------|--------|------------|
| Dashboard file exceeds 1500 lines (projected) | High | Medium | At 1,273 lines currently (Section 2); extract panes into modules before it grows further |
| SQLite lock contention | Low | High | DbWriter pattern already handles this |
| No agent diversity | Medium | Medium | Pluggable agent support |
| Task-string handling assumptions drift over time | Medium | Medium | Keep `Command` argument handling shell-free, document the threat model, and add regression tests for metacharacter-heavy task input |
---
**Bottom line:** ECC2 is a well-structured Rust project with clean error handling, good separation of concerns, and strong security features (risk scoring). The main gaps are incomplete features (comms, new-session dialog, single agent) rather than architectural problems. The codebase is ready for feature work on top of the solid foundation.

124
rules/common/code-review.md Normal file
View File

@@ -0,0 +1,124 @@
# Code Review Standards
## Purpose
Code review ensures quality, security, and maintainability before code is merged. This rule defines when and how to conduct code reviews.
## When to Review
**MANDATORY review triggers:**
- After writing or modifying code
- Before any commit to shared branches
- When security-sensitive code is changed (auth, payments, user data)
- When architectural changes are made
- Before merging pull requests
**Pre-Review Requirements:**
Before requesting review, ensure:
- All automated checks (CI/CD) are passing
- Merge conflicts are resolved
- Branch is up to date with target branch
## Review Checklist
Before marking code complete:
- [ ] Code is readable and well-named
- [ ] Functions are focused (<50 lines)
- [ ] Files are cohesive (<800 lines)
- [ ] No deep nesting (>4 levels)
- [ ] Errors are handled explicitly
- [ ] No hardcoded secrets or credentials
- [ ] No console.log or debug statements
- [ ] Tests exist for new functionality
- [ ] Test coverage meets 80% minimum
## Security Review Triggers
**STOP and use security-reviewer agent when:**
- Authentication or authorization code
- User input handling
- Database queries
- File system operations
- External API calls
- Cryptographic operations
- Payment or financial code
## Review Severity Levels
| Level | Meaning | Action |
|-------|---------|--------|
| CRITICAL | Security vulnerability or data loss risk | **BLOCK** - Must fix before merge |
| HIGH | Bug or significant quality issue | **WARN** - Should fix before merge |
| MEDIUM | Maintainability concern | **INFO** - Consider fixing |
| LOW | Style or minor suggestion | **NOTE** - Optional |
## Agent Usage
Use these agents for code review:
| Agent | Purpose |
|-------|---------|
| **code-reviewer** | General code quality, patterns, best practices |
| **security-reviewer** | Security vulnerabilities, OWASP Top 10 |
| **typescript-reviewer** | TypeScript/JavaScript specific issues |
| **python-reviewer** | Python specific issues |
| **go-reviewer** | Go specific issues |
| **rust-reviewer** | Rust specific issues |
## Review Workflow
```
1. Run git diff to understand changes
2. Check security checklist first
3. Review code quality checklist
4. Run relevant tests
5. Verify coverage >= 80%
6. Use appropriate agent for detailed review
```
## Common Issues to Catch
### Security
- Hardcoded credentials (API keys, passwords, tokens)
- SQL injection (string concatenation in queries)
- XSS vulnerabilities (unescaped user input)
- Path traversal (unsanitized file paths)
- CSRF protection missing
- Authentication bypasses
### Code Quality
- Large functions (>50 lines) - split into smaller
- Large files (>800 lines) - extract modules
- Deep nesting (>4 levels) - use early returns
- Missing error handling - handle explicitly
- Mutation patterns - prefer immutable operations
- Missing tests - add test coverage
### Performance
- N+1 queries - use JOINs or batching
- Missing pagination - add LIMIT to queries
- Unbounded queries - add constraints
- Missing caching - cache expensive operations
## Approval Criteria
- **Approve**: No CRITICAL or HIGH issues
- **Warning**: Only HIGH issues (merge with caution)
- **Block**: CRITICAL issues found
## Integration with Other Rules
This rule works with:
- [testing.md](testing.md) - Test coverage requirements
- [security.md](security.md) - Security checklist
- [git-workflow.md](git-workflow.md) - Commit standards
- [agents.md](agents.md) - Agent delegation

View File

@@ -36,3 +36,9 @@ The Feature Implementation Workflow describes the development pipeline: research
- Detailed commit messages - Detailed commit messages
- Follow conventional commits format - Follow conventional commits format
- See [git-workflow.md](./git-workflow.md) for commit message format and PR process - See [git-workflow.md](./git-workflow.md) for commit message format and PR process
5. **Pre-Review Checks**
- Verify all automated checks (CI/CD) are passing
- Resolve any merge conflicts
- Ensure branch is up to date with target branch
- Only request review after these checks pass

108
rules/zh/README.md Normal file
View File

@@ -0,0 +1,108 @@
# 规则
## 结构
规则按**通用**层和**语言特定**目录组织:
```
rules/
├── common/ # 语言无关的原则(始终安装)
│ ├── coding-style.md
│ ├── git-workflow.md
│ ├── testing.md
│ ├── performance.md
│ ├── patterns.md
│ ├── hooks.md
│ ├── agents.md
│ ├── security.md
│ ├── code-review.md
│ └── development-workflow.md
├── zh/ # 中文翻译版本
│ ├── coding-style.md
│ ├── git-workflow.md
│ ├── testing.md
│ ├── performance.md
│ ├── patterns.md
│ ├── hooks.md
│ ├── agents.md
│ ├── security.md
│ ├── code-review.md
│ └── development-workflow.md
├── typescript/ # TypeScript/JavaScript 特定
├── python/ # Python 特定
├── golang/ # Go 特定
├── swift/ # Swift 特定
└── php/ # PHP 特定
```
- **common/** 包含通用原则 — 无语言特定的代码示例。
- **zh/** 包含 common 目录的中文翻译版本。
- **语言目录** 扩展通用规则,包含框架特定的模式、工具和代码示例。每个文件引用其对应的通用版本。
## 安装
### 选项 1安装脚本推荐
```bash
# 安装通用 + 一个或多个语言特定的规则集
./install.sh typescript
./install.sh python
./install.sh golang
./install.sh swift
./install.sh php
# 同时安装多种语言
./install.sh typescript python
```
### 选项 2手动安装
> **重要提示:** 复制整个目录 — 不要使用 `/*` 展开。
> 通用和语言特定目录包含同名文件。
> 将它们展开到一个目录会导致语言特定文件覆盖通用规则,
> 并破坏语言特定文件使用的 `../common/` 相对引用。
```bash
# 创建目标目录
mkdir -p ~/.claude/rules
# 安装通用规则(所有项目必需)
cp -r rules/common ~/.claude/rules/common
# 安装中文翻译版本(可选)
cp -r rules/zh ~/.claude/rules/zh
# 根据项目技术栈安装语言特定规则
cp -r rules/typescript ~/.claude/rules/typescript
cp -r rules/python ~/.claude/rules/python
cp -r rules/golang ~/.claude/rules/golang
cp -r rules/swift ~/.claude/rules/swift
cp -r rules/php ~/.claude/rules/php
```
## 规则 vs 技能
- **规则** 定义广泛适用的标准、约定和检查清单(如"80% 测试覆盖率"、"禁止硬编码密钥")。
- **技能**`skills/` 目录)为特定任务提供深入、可操作的参考材料(如 `python-patterns``golang-testing`)。
语言特定的规则文件在适当的地方引用相关技能。规则告诉你*做什么*;技能告诉你*怎么做*。
## 规则优先级
当语言特定规则与通用规则冲突时,**语言特定规则优先**(特定覆盖通用)。这遵循标准的分层配置模式(类似于 CSS 特异性或 `.gitignore` 优先级)。
- `rules/common/` 定义适用于所有项目的通用默认值。
- `rules/golang/``rules/python/``rules/swift/``rules/php/``rules/typescript/` 等在语言习惯不同时覆盖这些默认值。
- `rules/zh/` 是通用规则的中文翻译,与英文版本内容一致。
### 示例
`common/coding-style.md` 推荐不可变性作为默认原则。语言特定的 `golang/coding-style.md` 可以覆盖这一点:
> 惯用的 Go 使用指针接收器进行结构体变更 — 参见 [common/coding-style.md](../common/coding-style.md) 了解通用原则,但这里首选符合 Go 习惯的变更方式。
### 带覆盖说明的通用规则
`rules/common/` 中可能被语言特定文件覆盖的规则会被标记:
> **语言说明**:此规则可能会被语言特定规则覆盖;对于某些语言,该模式可能并不符合惯用写法。

50
rules/zh/agents.md Normal file
View File

@@ -0,0 +1,50 @@
# 代理编排
## 可用代理
位于 `~/.claude/agents/`
| 代理 | 用途 | 何时使用 |
|-------|---------|------------|
| planner | 实现规划 | 复杂功能、重构 |
| architect | 系统设计 | 架构决策 |
| tdd-guide | 测试驱动开发 | 新功能、bug 修复 |
| code-reviewer | 代码审查 | 编写代码后 |
| security-reviewer | 安全分析 | 提交前 |
| build-error-resolver | 修复构建错误 | 构建失败时 |
| e2e-runner | E2E 测试 | 关键用户流程 |
| refactor-cleaner | 死代码清理 | 代码维护 |
| doc-updater | 文档 | 更新文档 |
| rust-reviewer | Rust 代码审查 | Rust 项目 |
## 立即使用代理
无需用户提示:
1. 复杂功能请求 - 使用 **planner** 代理
2. 刚编写/修改的代码 - 使用 **code-reviewer** 代理
3. Bug 修复或新功能 - 使用 **tdd-guide** 代理
4. 架构决策 - 使用 **architect** 代理
## 并行任务执行
对独立操作始终使用并行 Task 执行:
```markdown
# 好:并行执行
同时启动 3 个代理:
1. 代理 1认证模块安全分析
2. 代理 2缓存系统性能审查
3. 代理 3工具类型检查
# 坏:不必要的顺序
先代理 1然后代理 2然后代理 3
```
## 多视角分析
对于复杂问题,使用分角色子代理:
- 事实审查者
- 高级工程师
- 安全专家
- 一致性审查者
- 冗余检查者

124
rules/zh/code-review.md Normal file
View File

@@ -0,0 +1,124 @@
# 代码审查标准
## 目的
代码审查确保代码合并前的质量、安全性和可维护性。此规则定义何时以及如何进行代码审查。
## 何时审查
**强制审查触发条件:**
- 编写或修改代码后
- 提交到共享分支之前
- 更改安全敏感代码时(认证、支付、用户数据)
- 进行架构更改时
- 合并 pull request 之前
**审查前要求:**
在请求审查之前,确保:
- 所有自动化检查CI/CD已通过
- 合并冲突已解决
- 分支已与目标分支同步
## 审查检查清单
在标记代码完成之前:
- [ ] 代码可读且命名良好
- [ ] 函数聚焦(<50 行)
- [ ] 文件内聚(<800 行)
- [ ] 无深层嵌套(>4 层)
- [ ] 错误显式处理
- [ ] 无硬编码密钥或凭据
- [ ] 无 console.log 或调试语句
- [ ] 新功能有测试
- [ ] 测试覆盖率满足 80% 最低要求
## 安全审查触发条件
**停止并使用 security-reviewer 代理当:**
- 认证或授权代码
- 用户输入处理
- 数据库查询
- 文件系统操作
- 外部 API 调用
- 加密操作
- 支付或金融代码
## 审查严重级别
| 级别 | 含义 | 行动 |
|-------|---------|--------|
| CRITICAL关键 | 安全漏洞或数据丢失风险 | **阻止** - 合并前必须修复 |
| HIGH | Bug 或重大质量问题 | **警告** - 合并前应修复 |
| MEDIUM | 可维护性问题 | **信息** - 考虑修复 |
| LOW | 风格或次要建议 | **注意** - 可选 |
## 代理使用
使用这些代理进行代码审查:
| 代理 | 用途 |
|-------|--------|
| **code-reviewer** | 通用代码质量、模式、最佳实践 |
| **security-reviewer** | 安全漏洞、OWASP Top 10 |
| **typescript-reviewer** | TypeScript/JavaScript 特定问题 |
| **python-reviewer** | Python 特定问题 |
| **go-reviewer** | Go 特定问题 |
| **rust-reviewer** | Rust 特定问题 |
## 审查工作流
```
1. 运行 git diff 了解更改
2. 先检查安全检查清单
3. 审查代码质量检查清单
4. 运行相关测试
5. 验证覆盖率 >= 80%
6. 使用适当的代理进行详细审查
```
## 常见问题捕获
### 安全
- 硬编码凭据API 密钥、密码、令牌)
- SQL 注入(查询中的字符串拼接)
- XSS 漏洞(未转义的用户输入)
- 路径遍历(未净化的文件路径)
- CSRF 保护缺失
- 认证绕过
### 代码质量
- 大函数(>50 行)- 拆分为更小的
- 大文件(>800 行)- 提取模块
- 深层嵌套(>4 层)- 使用提前返回
- 缺少错误处理 - 显式处理
- 变更模式 - 优先使用不可变操作
- 缺少测试 - 添加测试覆盖
### 性能
- N+1 查询 - 使用 JOIN 或批处理
- 缺少分页 - 给查询添加 LIMIT
- 无界查询 - 添加约束
- 缺少缓存 - 缓存昂贵操作
## 批准标准
- **批准**:无关键或高优先级问题
- **警告**:仅有高优先级问题(谨慎合并)
- **阻止**:发现关键问题
## 与其他规则的集成
此规则与以下规则配合:
- [testing.md](testing.md) - 测试覆盖率要求
- [security.md](security.md) - 安全检查清单
- [git-workflow.md](git-workflow.md) - 提交标准
- [agents.md](agents.md) - 代理委托

48
rules/zh/coding-style.md Normal file
View File

@@ -0,0 +1,48 @@
# 编码风格
## 不可变性(关键)
始终创建新对象,永远不要修改现有对象:
```
// 伪代码
错误: modify(original, field, value) → 就地修改 original
正确: update(original, field, value) → 返回带有更改的新副本
```
原理:不可变数据防止隐藏的副作用,使调试更容易,并启用安全的并发。
## 文件组织
多个小文件 > 少量大文件:
- 高内聚,低耦合
- 典型 200-400 行,最多 800 行
- 从大模块中提取工具函数
- 按功能/领域组织,而非按类型
## 错误处理
始终全面处理错误:
- 在每一层显式处理错误
- 在面向 UI 的代码中提供用户友好的错误消息
- 在服务器端记录详细的错误上下文
- 永远不要静默吞掉错误
## 输入验证
始终在系统边界验证:
- 处理前验证所有用户输入
- 在可用的情况下使用基于模式的验证
- 快速失败并给出清晰的错误消息
- 永远不要信任外部数据API 响应、用户输入、文件内容)
## 代码质量检查清单
在标记工作完成前:
- [ ] 代码可读且命名良好
- [ ] 函数很小(<50 行)
- [ ] 文件聚焦(<800 行)
- [ ] 没有深层嵌套(>4 层)
- [ ] 正确的错误处理
- [ ] 没有硬编码值(使用常量或配置)
- [ ] 没有变更(使用不可变模式)

View File

@@ -0,0 +1,44 @@
# 开发工作流
> 此文件扩展 [common/git-workflow.md](./git-workflow.md),包含 git 操作之前的完整功能开发流程。
功能实现工作流描述了开发管道研究、规划、TDD、代码审查然后提交到 git。
## 功能实现工作流
0. **研究与重用** _(任何新实现前必需)_
- **GitHub 代码搜索优先:** 在编写任何新代码之前,运行 `gh search repos``gh search code` 查找现有实现、模板和模式。
- **库文档其次:** 使用 Context7 或主要供应商文档确认 API 行为、包使用和版本特定细节。
- **仅当前两者不足时使用 Exa** 在 GitHub 搜索和主要文档之后,使用 Exa 进行更广泛的网络研究或发现。
- **检查包注册表:** 在编写工具代码之前搜索 npm、PyPI、crates.io 和其他注册表。首选久经考验的库而非手工编写的解决方案。
- **搜索可适配的实现:** 寻找解决问题 80%+ 且可以分支、移植或包装的开源项目。
- 当满足需求时,优先采用或移植经验证的方法而非从头编写新代码。
1. **先规划**
- 使用 **planner** 代理创建实现计划
- 编码前生成规划文档PRD、架构、系统设计、技术文档、任务列表
- 识别依赖和风险
- 分解为阶段
2. **TDD 方法**
- 使用 **tdd-guide** 代理
- 先写测试RED
- 实现以通过测试GREEN
- 重构IMPROVE
- 验证 80%+ 覆盖率
3. **代码审查**
- 编写代码后立即使用 **code-reviewer** 代理
- 解决关键和高优先级问题
- 尽可能修复中优先级问题
4. **提交与推送**
- 详细的提交消息
- 遵循约定式提交格式
- 参见 [git-workflow.md](./git-workflow.md) 了解提交消息格式和 PR 流程
5. **审查前检查**
- 验证所有自动化检查CI/CD已通过
- 解决任何合并冲突
- 确保分支已与目标分支同步
- 仅在这些检查通过后请求审查

24
rules/zh/git-workflow.md Normal file
View File

@@ -0,0 +1,24 @@
# Git 工作流
## 提交消息格式
```
<类型>: <描述>
<可选正文>
```
类型feat, fix, refactor, docs, test, chore, perf, ci
注意:通过 ~/.claude/settings.json 全局禁用归属。
## Pull Request 工作流
创建 PR 时:
1. 分析完整提交历史(不仅是最新提交)
2. 使用 `git diff [base-branch]...HEAD` 查看所有更改
3. 起草全面的 PR 摘要
4. 包含带有 TODO 的测试计划
5. 如果是新分支,使用 `-u` 标志推送
> 对于 git 操作之前的完整开发流程规划、TDD、代码审查
> 参见 [development-workflow.md](./development-workflow.md)。

30
rules/zh/hooks.md Normal file
View File

@@ -0,0 +1,30 @@
# 钩子系统
## 钩子类型
- **PreToolUse**:工具执行前(验证、参数修改)
- **PostToolUse**:工具执行后(自动格式化、检查)
- **Stop**:会话结束时(最终验证)
## 自动接受权限
谨慎使用:
- 为可信、定义明确的计划启用
- 探索性工作时禁用
- 永远不要使用 dangerously-skip-permissions 标志
- 改为在 `~/.claude.json` 中配置 `allowedTools`
## TodoWrite 最佳实践
使用 TodoWrite 工具:
- 跟踪多步骤任务的进度
- 验证对指令的理解
- 启用实时引导
- 显示细粒度的实现步骤
待办列表揭示:
- 顺序错误的步骤
- 缺失的项目
- 多余的不必要项目
- 错误的粒度
- 误解的需求

31
rules/zh/patterns.md Normal file
View File

@@ -0,0 +1,31 @@
# 常用模式
## 骨架项目
实现新功能时:
1. 搜索久经考验的骨架项目
2. 使用并行代理评估选项:
- 安全性评估
- 可扩展性分析
- 相关性评分
- 实现规划
3. 克隆最佳匹配作为基础
4. 在经验证的结构内迭代
## 设计模式
### 仓储模式
将数据访问封装在一致的接口后面:
- 定义标准操作findAll、findById、create、update、delete
- 具体实现处理存储细节数据库、API、文件等
- 业务逻辑依赖抽象接口,而非存储机制
- 便于轻松切换数据源,并简化使用模拟的测试
### API 响应格式
对所有 API 响应使用一致的信封:
- 包含成功/状态指示器
- 包含数据负载(错误时可为空)
- 包含错误消息字段(成功时可为空)
- 包含分页响应的元数据total、page、limit

55
rules/zh/performance.md Normal file
View File

@@ -0,0 +1,55 @@
# 性能优化
## 模型选择策略
**Haiku 4.5**Sonnet 90% 的能力3 倍成本节省):
- 频繁调用的轻量级代理
- 结对编程和代码生成
- 多代理系统中的工作者代理
**Sonnet 4.6**(最佳编码模型):
- 主要开发工作
- 编排多代理工作流
- 复杂编码任务
**Opus 4.5**(最深度推理):
- 复杂架构决策
- 最大推理需求
- 研究和分析任务
## 上下文窗口管理
避免在上下文窗口的最后 20% 进行以下操作:
- 大规模重构
- 跨多个文件的功能实现
- 调试复杂交互
上下文敏感度较低的任务:
- 单文件编辑
- 独立工具创建
- 文档更新
- 简单 bug 修复
## 扩展思考 + 规划模式
扩展思考默认启用,为内部推理保留最多 31,999 个 token。
通过以下方式控制扩展思考:
- **切换**Option+TmacOS/ Alt+TWindows/Linux
- **配置**:在 `~/.claude/settings.json` 中设置 `alwaysThinkingEnabled`
- **预算上限**`export MAX_THINKING_TOKENS=10000`
- **详细模式**Ctrl+O 查看思考输出
对于需要深度推理的复杂任务:
1. 确保扩展思考已启用(默认开启)
2. 启用**规划模式**进行结构化方法
3. 使用多轮审查进行彻底分析
4. 使用分角色子代理获得多样化视角
## 构建排查
如果构建失败:
1. 使用 **build-error-resolver** 代理
2. 分析错误消息
3. 增量修复
4. 每次修复后验证

29
rules/zh/security.md Normal file
View File

@@ -0,0 +1,29 @@
# 安全指南
## 强制安全检查
在任何提交之前:
- [ ] 无硬编码密钥API 密钥、密码、令牌)
- [ ] 所有用户输入已验证
- [ ] SQL 注入防护(参数化查询)
- [ ] XSS 防护(净化 HTML
- [ ] CSRF 保护已启用
- [ ] 认证/授权已验证
- [ ] 所有端点启用速率限制
- [ ] 错误消息不泄露敏感数据
## 密钥管理
- 永远不要在源代码中硬编码密钥
- 始终使用环境变量或密钥管理器
- 启动时验证所需的密钥是否存在
- 轮换任何可能已暴露的密钥
## 安全响应协议
如果发现安全问题:
1. 立即停止
2. 使用 **security-reviewer** 代理
3. 在继续之前修复关键问题
4. 轮换任何已暴露的密钥
5. 审查整个代码库中的类似问题

29
rules/zh/testing.md Normal file
View File

@@ -0,0 +1,29 @@
# 测试要求
## 最低测试覆盖率80%
测试类型(全部必需):
1. **单元测试** - 单个函数、工具、组件
2. **集成测试** - API 端点、数据库操作
3. **E2E 测试** - 关键用户流程(框架根据语言选择)
## 测试驱动开发
强制工作流:
1. 先写测试RED
2. 运行测试 - 应该失败
3. 编写最小实现GREEN
4. 运行测试 - 应该通过
5. 重构IMPROVE
6. 验证覆盖率80%+
## 测试失败排查
1. 使用 **tdd-guide** 代理
2. 检查测试隔离
3. 验证模拟是否正确
4. 修复实现,而非测试(除非测试有误)
## 代理支持
- **tdd-guide** - 主动用于新功能,强制先写测试

View File

@@ -11,7 +11,7 @@ CODEX_HOME="${CODEX_HOME:-$HOME/.codex}"
CONFIG_FILE="$CODEX_HOME/config.toml" CONFIG_FILE="$CODEX_HOME/config.toml"
AGENTS_FILE="$CODEX_HOME/AGENTS.md" AGENTS_FILE="$CODEX_HOME/AGENTS.md"
PROMPTS_DIR="$CODEX_HOME/prompts" PROMPTS_DIR="$CODEX_HOME/prompts"
SKILLS_DIR="$CODEX_HOME/skills" SKILLS_DIR="${AGENTS_HOME:-$HOME/.agents}/skills"
HOOKS_DIR_EXPECT="${ECC_GLOBAL_HOOKS_DIR:-$CODEX_HOME/git-hooks}" HOOKS_DIR_EXPECT="${ECC_GLOBAL_HOOKS_DIR:-$CODEX_HOME/git-hooks}"
failures=0 failures=0
@@ -89,7 +89,13 @@ fi
if [[ -f "$CONFIG_FILE" ]]; then if [[ -f "$CONFIG_FILE" ]]; then
check_config_pattern '^multi_agent\s*=\s*true' "multi_agent is enabled" check_config_pattern '^multi_agent\s*=\s*true' "multi_agent is enabled"
check_config_absent '^\s*collab\s*=' "deprecated collab flag is absent" check_config_absent '^\s*collab\s*=' "deprecated collab flag is absent"
check_config_pattern '^persistent_instructions\s*=' "persistent_instructions is configured" # persistent_instructions is recommended but optional; warn instead of fail
# so users who rely on AGENTS.md alone are not blocked (#967).
if rg -n '^[[:space:]]*persistent_instructions\s*=' "$CONFIG_FILE" >/dev/null 2>&1; then
ok "persistent_instructions is configured"
else
warn "persistent_instructions is not set (recommended but optional)"
fi
check_config_pattern '^\[profiles\.strict\]' "profiles.strict exists" check_config_pattern '^\[profiles\.strict\]' "profiles.strict exists"
check_config_pattern '^\[profiles\.yolo\]' "profiles.yolo exists" check_config_pattern '^\[profiles\.yolo\]' "profiles.yolo exists"
@@ -97,7 +103,7 @@ if [[ -f "$CONFIG_FILE" ]]; then
'mcp_servers.github' \ 'mcp_servers.github' \
'mcp_servers.memory' \ 'mcp_servers.memory' \
'mcp_servers.sequential-thinking' \ 'mcp_servers.sequential-thinking' \
'mcp_servers.context7-mcp' 'mcp_servers.context7'
do do
if rg -n "^\[$section\]" "$CONFIG_FILE" >/dev/null 2>&1; then if rg -n "^\[$section\]" "$CONFIG_FILE" >/dev/null 2>&1; then
ok "MCP section [$section] exists" ok "MCP section [$section] exists"
@@ -106,10 +112,10 @@ if [[ -f "$CONFIG_FILE" ]]; then
fi fi
done done
if rg -n '^\[mcp_servers\.context7\]' "$CONFIG_FILE" >/dev/null 2>&1; then if rg -n '^\[mcp_servers\.context7-mcp\]' "$CONFIG_FILE" >/dev/null 2>&1; then
warn "Duplicate [mcp_servers.context7] exists (context7-mcp is preferred)" warn "Legacy [mcp_servers.context7-mcp] exists (context7 is preferred)"
else else
ok "No duplicate [mcp_servers.context7] section" ok "No legacy [mcp_servers.context7-mcp] section"
fi fi
fi fi
@@ -144,12 +150,12 @@ if [[ -d "$SKILLS_DIR" ]]; then
done done
if [[ "$missing_skills" -eq 0 ]]; then if [[ "$missing_skills" -eq 0 ]]; then
ok "All 16 ECC Codex skills are present" ok "All 16 ECC skills are present in $SKILLS_DIR"
else else
fail "$missing_skills required skills are missing" warn "$missing_skills ECC skills missing from $SKILLS_DIR (install via ECC installer or npx skills)"
fi fi
else else
fail "Skills directory missing ($SKILLS_DIR)" warn "Skills directory missing ($SKILLS_DIR) — install via ECC installer or npx skills"
fi fi
if [[ -f "$PROMPTS_DIR/ecc-prompts-manifest.txt" ]]; then if [[ -f "$PROMPTS_DIR/ecc-prompts-manifest.txt" ]]; then

View File

@@ -83,20 +83,23 @@ function dlxServer(name, pkg, extraFields, extraToml) {
} }
/** Each entry: key = section name under mcp_servers, value = { toml, fields } */ /** Each entry: key = section name under mcp_servers, value = { toml, fields } */
const DEFAULT_MCP_STARTUP_TIMEOUT_SEC = 30;
const DEFAULT_MCP_STARTUP_TIMEOUT_TOML = `startup_timeout_sec = ${DEFAULT_MCP_STARTUP_TIMEOUT_SEC}`;
const ECC_SERVERS = { const ECC_SERVERS = {
supabase: dlxServer('supabase', '@supabase/mcp-server-supabase@latest', { startup_timeout_sec: 20.0, tool_timeout_sec: 120.0 }, 'startup_timeout_sec = 20.0\ntool_timeout_sec = 120.0'), supabase: dlxServer('supabase', '@supabase/mcp-server-supabase@latest', { startup_timeout_sec: 20.0, tool_timeout_sec: 120.0 }, 'startup_timeout_sec = 20.0\ntool_timeout_sec = 120.0'),
playwright: dlxServer('playwright', '@playwright/mcp@latest'), playwright: dlxServer('playwright', '@playwright/mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
'context7-mcp': dlxServer('context7-mcp', '@upstash/context7-mcp'), context7: dlxServer('context7', '@upstash/context7-mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
exa: { exa: {
fields: { url: 'https://mcp.exa.ai/mcp' }, fields: { url: 'https://mcp.exa.ai/mcp' },
toml: `[mcp_servers.exa]\nurl = "https://mcp.exa.ai/mcp"` toml: `[mcp_servers.exa]\nurl = "https://mcp.exa.ai/mcp"`
}, },
github: { github: {
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP] }, fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP], startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC },
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]` toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]\n${DEFAULT_MCP_STARTUP_TIMEOUT_TOML}`
}, },
memory: dlxServer('memory', '@modelcontextprotocol/server-memory'), memory: dlxServer('memory', '@modelcontextprotocol/server-memory', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking') 'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML)
}; };
// Append --features arg for supabase after dlxServer builds the base // Append --features arg for supabase after dlxServer builds the base
@@ -104,9 +107,9 @@ ECC_SERVERS.supabase.fields.args.push('--features=account,docs,database,debuggin
ECC_SERVERS.supabase.toml = ECC_SERVERS.supabase.toml.replace(/^(args = \[.*)\]$/m, '$1, "--features=account,docs,database,debugging,development,functions,storage,branching"]'); ECC_SERVERS.supabase.toml = ECC_SERVERS.supabase.toml.replace(/^(args = \[.*)\]$/m, '$1, "--features=account,docs,database,debugging,development,functions,storage,branching"]');
// Legacy section names that should be treated as an existing ECC server. // Legacy section names that should be treated as an existing ECC server.
// e.g. old configs shipped [mcp_servers.context7] instead of [mcp_servers.context7-mcp]. // e.g. older configs shipped [mcp_servers.context7-mcp] instead of [mcp_servers.context7].
const LEGACY_ALIASES = { const LEGACY_ALIASES = {
'context7-mcp': ['context7'] context7: ['context7-mcp']
}; };
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@@ -254,6 +257,10 @@ function main() {
if (resolvedLabel !== name) { if (resolvedLabel !== name) {
raw = removeServerFromText(raw, name, existing); raw = removeServerFromText(raw, name, existing);
} }
if (legacyName && hasCanonical) {
toRemoveLog.push(`mcp_servers.${legacyName}`);
raw = removeServerFromText(raw, legacyName, existing);
}
toAppend.push(spec.toml); toAppend.push(spec.toml);
} else { } else {
// Add-only mode: skip, but warn about drift // Add-only mode: skip, but warn about drift

0
scripts/ecc.js Normal file → Executable file
View File

0
scripts/install-apply.js Normal file → Executable file
View File

View File

@@ -1,15 +1,109 @@
'use strict'; 'use strict';
const fs = require('fs'); const fs = require('fs');
const path = require('path');
const { writeInstallState } = require('../install-state'); const { writeInstallState } = require('../install-state');
function readJsonObject(filePath, label) {
let parsed;
try {
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
} catch (error) {
throw new Error(`Failed to parse ${label} at ${filePath}: ${error.message}`);
}
if (!parsed || typeof parsed !== 'object' || Array.isArray(parsed)) {
throw new Error(`Invalid ${label} at ${filePath}: expected a JSON object`);
}
return parsed;
}
function mergeHookEntries(existingEntries, incomingEntries) {
const mergedEntries = [];
const seenEntries = new Set();
for (const entry of [...existingEntries, ...incomingEntries]) {
const entryKey = JSON.stringify(entry);
if (seenEntries.has(entryKey)) {
continue;
}
seenEntries.add(entryKey);
mergedEntries.push(entry);
}
return mergedEntries;
}
function findHooksSourcePath(plan, hooksDestinationPath) {
const operation = plan.operations.find(item => item.destinationPath === hooksDestinationPath);
return operation ? operation.sourcePath : null;
}
function buildMergedSettings(plan) {
if (!plan.adapter || plan.adapter.target !== 'claude') {
return null;
}
const hooksDestinationPath = path.join(plan.targetRoot, 'hooks', 'hooks.json');
const hooksSourcePath = findHooksSourcePath(plan, hooksDestinationPath) || hooksDestinationPath;
if (!fs.existsSync(hooksSourcePath)) {
return null;
}
const hooksConfig = readJsonObject(hooksSourcePath, 'hooks config');
const incomingHooks = hooksConfig.hooks;
if (!incomingHooks || typeof incomingHooks !== 'object' || Array.isArray(incomingHooks)) {
throw new Error(`Invalid hooks config at ${hooksSourcePath}: expected "hooks" to be a JSON object`);
}
const settingsPath = path.join(plan.targetRoot, 'settings.json');
let settings = {};
if (fs.existsSync(settingsPath)) {
settings = readJsonObject(settingsPath, 'existing settings');
}
const existingHooks = settings.hooks && typeof settings.hooks === 'object' && !Array.isArray(settings.hooks)
? settings.hooks
: {};
const mergedHooks = { ...existingHooks };
for (const [eventName, incomingEntries] of Object.entries(incomingHooks)) {
const currentEntries = Array.isArray(existingHooks[eventName]) ? existingHooks[eventName] : [];
const nextEntries = Array.isArray(incomingEntries) ? incomingEntries : [];
mergedHooks[eventName] = mergeHookEntries(currentEntries, nextEntries);
}
const mergedSettings = {
...settings,
hooks: mergedHooks,
};
return {
settingsPath,
mergedSettings,
};
}
function applyInstallPlan(plan) { function applyInstallPlan(plan) {
const mergedSettingsPlan = buildMergedSettings(plan);
for (const operation of plan.operations) { for (const operation of plan.operations) {
fs.mkdirSync(require('path').dirname(operation.destinationPath), { recursive: true }); fs.mkdirSync(path.dirname(operation.destinationPath), { recursive: true });
fs.copyFileSync(operation.sourcePath, operation.destinationPath); fs.copyFileSync(operation.sourcePath, operation.destinationPath);
} }
if (mergedSettingsPlan) {
fs.mkdirSync(path.dirname(mergedSettingsPlan.settingsPath), { recursive: true });
fs.writeFileSync(
mergedSettingsPlan.settingsPath,
JSON.stringify(mergedSettingsPlan.mergedSettings, null, 2) + '\n',
'utf8'
);
}
writeInstallState(plan.installStatePath, plan.statePreview); writeInstallState(plan.installStatePath, plan.statePreview);
return { return {

View File

@@ -4,7 +4,6 @@ set -euo pipefail
# Sync Everything Claude Code (ECC) assets into a local Codex CLI setup. # Sync Everything Claude Code (ECC) assets into a local Codex CLI setup.
# - Backs up ~/.codex config and AGENTS.md # - Backs up ~/.codex config and AGENTS.md
# - Merges ECC AGENTS.md into existing AGENTS.md (marker-based, preserves user content) # - Merges ECC AGENTS.md into existing AGENTS.md (marker-based, preserves user content)
# - Syncs Codex-ready skills from .agents/skills
# - Generates prompt files from commands/*.md # - Generates prompt files from commands/*.md
# - Generates Codex QA wrappers and optional language rule-pack prompts # - Generates Codex QA wrappers and optional language rule-pack prompts
# - Installs global git safety hooks (pre-commit and pre-push) # - Installs global git safety hooks (pre-commit and pre-push)
@@ -28,8 +27,6 @@ CONFIG_FILE="$CODEX_HOME/config.toml"
AGENTS_FILE="$CODEX_HOME/AGENTS.md" AGENTS_FILE="$CODEX_HOME/AGENTS.md"
AGENTS_ROOT_SRC="$REPO_ROOT/AGENTS.md" AGENTS_ROOT_SRC="$REPO_ROOT/AGENTS.md"
AGENTS_CODEX_SUPP_SRC="$REPO_ROOT/.codex/AGENTS.md" AGENTS_CODEX_SUPP_SRC="$REPO_ROOT/.codex/AGENTS.md"
SKILLS_SRC="$REPO_ROOT/.agents/skills"
SKILLS_DEST="$CODEX_HOME/skills"
PROMPTS_SRC="$REPO_ROOT/commands" PROMPTS_SRC="$REPO_ROOT/commands"
PROMPTS_DEST="$CODEX_HOME/prompts" PROMPTS_DEST="$CODEX_HOME/prompts"
HOOKS_INSTALLER="$REPO_ROOT/scripts/codex/install-global-git-hooks.sh" HOOKS_INSTALLER="$REPO_ROOT/scripts/codex/install-global-git-hooks.sh"
@@ -133,7 +130,6 @@ MCP_MERGE_SCRIPT="$REPO_ROOT/scripts/codex/merge-mcp-config.js"
require_path "$REPO_ROOT/AGENTS.md" "ECC AGENTS.md" require_path "$REPO_ROOT/AGENTS.md" "ECC AGENTS.md"
require_path "$AGENTS_CODEX_SUPP_SRC" "ECC Codex AGENTS supplement" require_path "$AGENTS_CODEX_SUPP_SRC" "ECC Codex AGENTS supplement"
require_path "$SKILLS_SRC" "ECC skills directory"
require_path "$PROMPTS_SRC" "ECC commands directory" require_path "$PROMPTS_SRC" "ECC commands directory"
require_path "$HOOKS_INSTALLER" "ECC global git hooks installer" require_path "$HOOKS_INSTALLER" "ECC global git hooks installer"
require_path "$SANITY_CHECKER" "ECC global sanity checker" require_path "$SANITY_CHECKER" "ECC global sanity checker"
@@ -235,17 +231,9 @@ else
fi fi
fi fi
log "Syncing ECC Codex skills" # Skills are NOT synced here — Codex CLI reads directly from
run_or_echo mkdir -p "$SKILLS_DEST" # ~/.agents/skills/ (installed by ECC installer / npx skills).
skills_count=0 # Copying into ~/.codex/skills/ was unnecessary.
for skill_dir in "$SKILLS_SRC"/*; do
[[ -d "$skill_dir" ]] || continue
skill_name="$(basename "$skill_dir")"
dest="$SKILLS_DEST/$skill_name"
run_or_echo rm -rf "$dest"
run_or_echo cp -R "$skill_dir" "$dest"
skills_count=$((skills_count + 1))
done
log "Generating prompt files from ECC commands" log "Generating prompt files from ECC commands"
run_or_echo mkdir -p "$PROMPTS_DEST" run_or_echo mkdir -p "$PROMPTS_DEST"
@@ -486,7 +474,6 @@ fi
log "Sync complete" log "Sync complete"
log "Backup saved at: $BACKUP_DIR" log "Backup saved at: $BACKUP_DIR"
log "Skills synced: $skills_count"
log "Prompts generated: $((prompt_count + extension_count)) (commands: $prompt_count, extensions: $extension_count)" log "Prompts generated: $((prompt_count + extension_count)) (commands: $prompt_count, extensions: $extension_count)"
if [[ "$MODE" == "apply" ]]; then if [[ "$MODE" == "apply" ]]; then

View File

@@ -1,3 +1,9 @@
---
name: benchmark
description: Use this skill to measure performance baselines, detect regressions before/after PRs, and compare stack alternatives.
origin: ECC
---
# Benchmark — Performance Baseline & Regression Detection # Benchmark — Performance Baseline & Regression Detection
## When to Use ## When to Use

View File

@@ -1,3 +1,9 @@
---
name: browser-qa
description: Use this skill to automate visual testing and UI interaction verification using browser automation after deploying features.
origin: ECC
---
# Browser QA — Automated Visual Testing & Interaction # Browser QA — Automated Visual Testing & Interaction
## When to Use ## When to Use

View File

@@ -1,3 +1,9 @@
---
name: canary-watch
description: Use this skill to monitor a deployed URL for regressions after deploys, merges, or dependency upgrades.
origin: ECC
---
# Canary Watch — Post-Deploy Monitoring # Canary Watch — Post-Deploy Monitoring
## When to Use ## When to Use

View File

@@ -62,9 +62,16 @@ analyze_observations() {
analysis_count=$(wc -l < "$analysis_file" 2>/dev/null || echo 0) analysis_count=$(wc -l < "$analysis_file" 2>/dev/null || echo 0)
echo "[$(date)] Using last $analysis_count of $obs_count observations for analysis" >> "$LOG_FILE" echo "[$(date)] Using last $analysis_count of $obs_count observations for analysis" >> "$LOG_FILE"
# Use relative path from PROJECT_DIR for cross-platform compatibility (#842).
# On Windows (Git Bash/MSYS2), absolute paths from mktemp may use MSYS-style
# prefixes (e.g. /c/Users/...) that the Claude subprocess cannot resolve.
analysis_relpath=".observer-tmp/$(basename "$analysis_file")"
prompt_file="$(mktemp "${observer_tmp_dir}/ecc-observer-prompt.XXXXXX")" prompt_file="$(mktemp "${observer_tmp_dir}/ecc-observer-prompt.XXXXXX")"
cat > "$prompt_file" <<PROMPT cat > "$prompt_file" <<PROMPT
Read ${analysis_file} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences). IMPORTANT: You are running in non-interactive --print mode. You MUST use the Write tool directly to create files. Do NOT ask for permission, do NOT ask for confirmation, do NOT output summaries instead of writing. Just read, analyze, and write.
Read ${analysis_relpath} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
If you find 3+ occurrences of the same pattern, you MUST write an instinct file directly to ${INSTINCTS_DIR}/<id>.md using the Write tool. If you find 3+ occurrences of the same pattern, you MUST write an instinct file directly to ${INSTINCTS_DIR}/<id>.md using the Write tool.
Do NOT ask for permission to write files, do NOT describe what you would write, and do NOT stop at analysis when a qualifying pattern exists. Do NOT ask for permission to write files, do NOT describe what you would write, and do NOT stop at analysis when a qualifying pattern exists.
@@ -117,11 +124,19 @@ PROMPT
max_turns=10 max_turns=10
fi fi
# Prevent observe.sh from recording this automated Haiku session as observations # Ensure CWD is PROJECT_DIR so the relative analysis_relpath resolves correctly
# on all platforms, not just when the observer happens to be launched from the project root.
cd "$PROJECT_DIR" || { echo "[$(date)] Failed to cd to PROJECT_DIR ($PROJECT_DIR), skipping analysis" >> "$LOG_FILE"; rm -f "$prompt_file" "$analysis_file"; return; }
# Prevent observe.sh from recording this automated Haiku session as observations.
# Pass prompt via -p flag instead of stdin redirect for Windows compatibility (#842).
ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print \ ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print \
--allowedTools "Read,Write" \ --allowedTools "Read,Write" \
< "$prompt_file" >> "$LOG_FILE" 2>&1 & -p "$(cat "$prompt_file")" >> "$LOG_FILE" 2>&1 &
claude_pid=$! claude_pid=$!
# prompt_file content was already expanded by the shell; remove early to avoid
# leaving stale temp files during the (potentially long) analysis window.
rm -f "$prompt_file"
( (
sleep "$timeout_seconds" sleep "$timeout_seconds"
@@ -135,7 +150,7 @@ PROMPT
wait "$claude_pid" wait "$claude_pid"
exit_code=$? exit_code=$?
kill "$watchdog_pid" 2>/dev/null || true kill "$watchdog_pid" 2>/dev/null || true
rm -f "$prompt_file" "$analysis_file" rm -f "$analysis_file"
if [ "$exit_code" -ne 0 ]; then if [ "$exit_code" -ne 0 ]; then
echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE" echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE"

View File

@@ -1,3 +1,9 @@
---
name: design-system
description: Use this skill to generate or audit design systems, check visual consistency, and review PRs that touch styling.
origin: ECC
---
# Design System — Generate & Audit Visual Systems # Design System — Generate & Audit Visual Systems
## When to Use ## When to Use

View File

@@ -0,0 +1,229 @@
---
name: laravel-plugin-discovery
description: Discover and evaluate Laravel packages via LaraPlugins.io MCP. Use when the user wants to find plugins, check package health, or assess Laravel/PHP compatibility.
origin: ECC
---
# Laravel Plugin Discovery
Find, evaluate, and choose healthy Laravel packages using the LaraPlugins.io MCP server.
## When to Use
- User wants to find Laravel packages for a specific feature (e.g. "auth", "permissions", "admin panel")
- User asks "what package should I use for..." or "is there a Laravel package for..."
- User wants to check if a package is actively maintained
- User needs to verify Laravel version compatibility
- User wants to assess package health before adding to a project
## MCP Requirement
LaraPlugins MCP server must be configured. Add to your `~/.claude.json` mcpServers:
```json
"laraplugins": {
"type": "http",
"url": "https://laraplugins.io/mcp/plugins"
}
```
No API key required — the server is free for the Laravel community.
## MCP Tools
The LaraPlugins MCP provides two primary tools:
### SearchPluginTool
Search packages by keyword, health score, vendor, and version compatibility.
**Parameters:**
- `text_search` (string, optional): Keyword to search (e.g. "permission", "admin", "api")
- `health_score` (string, optional): Filter by health band — `Healthy`, `Medium`, `Unhealthy`, or `Unrated`
- `laravel_compatibility` (string, optional): Filter by Laravel version — `"5"`, `"6"`, `"7"`, `"8"`, `"9"`, `"10"`, `"11"`, `"12"`, `"13"`
- `php_compatibility` (string, optional): Filter by PHP version — `"7.4"`, `"8.0"`, `"8.1"`, `"8.2"`, `"8.3"`, `"8.4"`, `"8.5"`
- `vendor_filter` (string, optional): Filter by vendor name (e.g. "spatie", "laravel")
- `page` (number, optional): Page number for pagination
### GetPluginDetailsTool
Fetch detailed metrics, readme content, and version history for a specific package.
**Parameters:**
- `package` (string, required): Full Composer package name (e.g. "spatie/laravel-permission")
- `include_versions` (boolean, optional): Include version history in response
---
## How It Works
### Finding Packages
When the user wants to discover packages for a feature:
1. Use `SearchPluginTool` with relevant keywords
2. Apply filters for health score, Laravel version, or PHP version
3. Review the results with package names, descriptions, and health indicators
### Evaluating Packages
When the user wants to assess a specific package:
1. Use `GetPluginDetailsTool` with the package name
2. Review health score, last updated date, Laravel version support
3. Check vendor reputation and risk indicators
### Checking Compatibility
When the user needs Laravel or PHP version compatibility:
1. Search with `laravel_compatibility` filter set to their version
2. Or get details on a specific package to see its supported versions
---
## Examples
### Example: Find Authentication Packages
```
SearchPluginTool({
text_search: "authentication",
health_score: "Healthy"
})
```
Returns packages matching "authentication" with healthy status:
- spatie/laravel-permission
- laravel/breeze
- laravel/passport
- etc.
### Example: Find Laravel 12 Compatible Packages
```
SearchPluginTool({
text_search: "admin panel",
laravel_compatibility: "12"
})
```
Returns packages compatible with Laravel 12.
### Example: Get Package Details
```
GetPluginDetailsTool({
package: "spatie/laravel-permission",
include_versions: true
})
```
Returns:
- Health score and last activity
- Laravel/PHP version support
- Vendor reputation (risk score)
- Version history
- Brief description
### Example: Find Packages by Vendor
```
SearchPluginTool({
vendor_filter: "spatie",
health_score: "Healthy"
})
```
Returns all healthy packages from vendor "spatie".
---
## Filtering Best Practices
### By Health Score
| Health Band | Meaning |
|-------------|---------|
| `Healthy` | Active maintenance, recent updates |
| `Medium` | Occasional updates, may need attention |
| `Unhealthy` | Abandoned or infrequently maintained |
| `Unrated` | Not yet assessed |
**Recommendation**: Prefer `Healthy` packages for production applications.
### By Laravel Version
| Version | Notes |
|---------|-------|
| `13` | Latest Laravel |
| `12` | Current stable |
| `11` | Still widely used |
| `10` | Legacy but common |
| `5`-`9` | End-of-life — avoid for new projects |
**Recommendation**: Match the target project's Laravel version.
### Combining Filters
```typescript
// Find healthy, Laravel 12 compatible packages for permissions
SearchPluginTool({
text_search: "permission",
health_score: "Healthy",
laravel_compatibility: "12"
})
```
---
## Response Interpretation
### Search Results
Each result includes:
- Package name (e.g. `spatie/laravel-permission`)
- Brief description
- Health status indicator
- Laravel version support badges
### Package Details
The detailed response includes:
- **Health Score**: Numeric or band indicator
- **Last Activity**: When the package was last updated
- **Laravel Support**: Version compatibility matrix
- **PHP Support**: PHP version compatibility
- **Risk Score**: Vendor trust indicators
- **Version History**: Recent release timeline
---
## Common Use Cases
| Scenario | Recommended Approach |
|----------|---------------------|
| "What package for auth?" | Search "auth" with healthy filter |
| "Is spatie/package still maintained?" | Get details, check health score |
| "Need Laravel 12 packages" | Search with laravel_compatibility: "12" |
| "Find admin panel packages" | Search "admin panel", review results |
| "Check vendor reputation" | Search by vendor, check details |
---
## Best Practices
1. **Always filter by health** — Use `health_score: "Healthy"` for production projects
2. **Match Laravel version** — Always check `laravel_compatibility` matches the target project
3. **Check vendor reputation** — Prefer packages from known vendors (spatie, laravel, etc.)
4. **Review before recommending** — Use GetPluginDetailsTool for a comprehensive assessment
5. **No API key needed** — The MCP is free, no authentication required
---
## Related Skills
- `laravel-patterns` — Laravel architecture and patterns
- `laravel-tdd` — Test-driven development for Laravel
- `laravel-security` — Laravel security best practices
- `documentation-lookup` — General library documentation lookup (Context7)

View File

@@ -1,6 +1,6 @@
--- ---
name: laravel-verification name: laravel-verification
description: Verification loop for Laravel projects: env checks, linting, static analysis, tests with coverage, security scans, and deployment readiness. description: "Verification loop for Laravel projects: env checks, linting, static analysis, tests with coverage, security scans, and deployment readiness."
origin: ECC origin: ECC
--- ---

View File

@@ -1,3 +1,9 @@
---
name: product-lens
description: Use this skill to validate the "why" before building, run product diagnostics, and convert vague ideas into specs.
origin: ECC
---
# Product Lens — Think Before You Build # Product Lens — Think Before You Build
## When to Use ## When to Use

78
skills/repo-scan/SKILL.md Normal file
View File

@@ -0,0 +1,78 @@
---
name: repo-scan
description: Cross-stack source code asset audit — classifies every file, detects embedded third-party libraries, and delivers actionable four-level verdicts per module with interactive HTML reports.
origin: community
---
# repo-scan
> Every ecosystem has its own dependency manager, but no tool looks across C++, Android, iOS, and Web to tell you: how much code is actually yours, what's third-party, and what's dead weight.
## When to Use
- Taking over a large legacy codebase and need a structural overview
- Before major refactoring — identify what's core, what's duplicate, what's dead
- Auditing third-party dependencies embedded directly in source (not declared in package managers)
- Preparing architecture decision records for monorepo reorganization
## Installation
```bash
# Fetch only the pinned commit for reproducibility.
# Note: fetching by commit generally requires the full 40-character SHA;
# abbreviated hashes are rejected by most Git servers.
mkdir -p ~/.claude/skills/repo-scan
git init repo-scan
cd repo-scan
git remote add origin https://github.com/haibindev/repo-scan.git
git fetch --depth 1 origin 2742664
git checkout --detach FETCH_HEAD
cp -R . ~/.claude/skills/repo-scan
rm -rf ~/.claude/skills/repo-scan/.git  # don't ship the git metadata
```
> Review the source before installing any agent skill.
## Core Capabilities
| Capability | Description |
|---|---|
| **Cross-stack scanning** | C/C++, Java/Android, iOS (OC/Swift), Web (TS/JS/Vue) in one pass |
| **File classification** | Every file tagged as project code, third-party, or build artifact |
| **Library detection** | 50+ known libraries (FFmpeg, Boost, OpenSSL…) with version extraction |
| **Four-level verdicts** | Core Asset / Extract & Merge / Rebuild / Deprecate |
| **HTML reports** | Interactive dark-theme pages with drill-down navigation |
| **Monorepo support** | Hierarchical scanning with summary + sub-project reports |
## Analysis Depth Levels
| Level | Files Read | Use Case |
|---|---|---|
| `fast` | 1-2 per module | Quick inventory of huge directories |
| `standard` | 2-5 per module | Default audit with full dependency + architecture checks |
| `deep` | 5-10 per module | Adds thread safety, memory management, API consistency |
| `full` | All files | Pre-merge comprehensive review |
## How It Works
1. **Classify the repo surface**: enumerate files, then tag each as project code, embedded third-party code, or build artifact.
2. **Detect embedded libraries**: inspect directory names, headers, license files, and version markers to identify bundled dependencies and likely versions.
3. **Score each module**: group files by module or subsystem, then assign one of the four verdicts based on ownership, duplication, and maintenance cost.
4. **Highlight structural risks**: call out dead-weight artifacts, duplicated wrappers, outdated vendored code, and modules that should be extracted, rebuilt, or deprecated.
5. **Produce the report**: return a concise summary plus the interactive HTML output with per-module drill-down so the audit can be reviewed asynchronously.
## Examples
On a 50,000-file C++ monorepo:
- Found FFmpeg 2.x (2015 vintage) still in production
- Discovered the same SDK wrapper duplicated 3 times
- Identified 636 MB of committed Debug/ipch/obj build artifacts
- Classified: 3 MB project code vs 596 MB third-party
## Best Practices
- Start with `standard` depth for first-time audits
- Use `fast` for monorepos with 100+ modules to get a quick inventory
- Run `deep` incrementally on modules flagged for refactoring
- Review the cross-module analysis for duplicate detection across sub-projects
## Links
- [GitHub Repository](https://github.com/haibindev/repo-scan)

View File

@@ -1,3 +1,9 @@
---
name: safety-guard
description: Use this skill to prevent destructive operations when working on production systems or running agents autonomously.
origin: ECC
---
# Safety Guard — Prevent Destructive Operations # Safety Guard — Prevent Destructive Operations
## When to Use ## When to Use

View File

@@ -47,6 +47,19 @@ ALWAYS write tests first, then implement code to make tests pass.
- Browser automation - Browser automation
- UI interactions - UI interactions
### 4. Git Checkpoints
- If the repository is under Git, create a checkpoint commit after each TDD stage
- Do not squash or rewrite these checkpoint commits until the workflow is complete
- Each checkpoint commit message must describe the stage and the exact evidence captured
- Count only commits created on the current active branch for the current task
- Do not treat commits from other branches, earlier unrelated work, or distant branch history as valid checkpoint evidence
- Before treating a checkpoint as satisfied, verify that the commit is reachable from the current `HEAD` on the active branch and belongs to the current task sequence
- The preferred compact workflow is:
- one commit for failing test added and RED validated
- one commit for minimal fix applied and GREEN validated
- one optional commit for refactor complete
- Separate evidence-only commits are not required if the test commit clearly corresponds to RED and the fix commit clearly corresponds to GREEN
## TDD Workflow Steps ## TDD Workflow Steps
### Step 1: Write User Journeys ### Step 1: Write User Journeys
@@ -87,6 +100,29 @@ npm test
# Tests should fail - we haven't implemented yet # Tests should fail - we haven't implemented yet
``` ```
This step is mandatory and is the RED gate for all production changes.
Before modifying business logic or other production code, you must verify a valid RED state via one of these paths:
- Runtime RED:
- The relevant test target compiles successfully
- The new or changed test is actually executed
- The result is RED
- Compile-time RED:
- The new test newly instantiates, references, or exercises the buggy code path
- The compile failure is itself the intended RED signal
- In either case, the failure is caused by the intended business-logic bug, undefined behavior, or missing implementation
- The failure is not caused only by unrelated syntax errors, broken test setup, missing dependencies, or unrelated regressions
A test that was only written but not compiled and executed does not count as RED.
Do not edit production code until this RED state is confirmed.
If the repository is under Git, create a checkpoint commit immediately after this stage is validated.
Recommended commit message format:
- `test: add reproducer for <feature or bug>`
- This commit may also serve as the RED validation checkpoint if the reproducer was compiled and executed and failed for the intended reason
- Verify that this checkpoint commit is on the current active branch before continuing
### Step 4: Implement Code ### Step 4: Implement Code
Write minimal code to make tests pass: Write minimal code to make tests pass:
@@ -97,12 +133,24 @@ export async function searchMarkets(query: string) {
} }
``` ```
If the repository is under Git, stage the minimal fix now but defer the checkpoint commit until GREEN is validated in Step 5.
### Step 5: Run Tests Again ### Step 5: Run Tests Again
```bash ```bash
npm test npm test
# Tests should now pass # Tests should now pass
``` ```
Rerun the same relevant test target after the fix and confirm the previously failing test is now GREEN.
Only after a valid GREEN result may you proceed to refactor.
If the repository is under Git, create a checkpoint commit immediately after GREEN is validated.
Recommended commit message format:
- `fix: <feature or bug>`
- The fix commit may also serve as the GREEN validation checkpoint if the same relevant test target was rerun and passed
- Verify that this checkpoint commit is on the current active branch before continuing
### Step 6: Refactor ### Step 6: Refactor
Improve code quality while keeping tests green: Improve code quality while keeping tests green:
- Remove duplication - Remove duplication
@@ -110,6 +158,11 @@ Improve code quality while keeping tests green:
- Optimize performance - Optimize performance
- Enhance readability - Enhance readability
If the repository is under Git, create a checkpoint commit immediately after refactoring is complete and tests remain green.
Recommended commit message format:
- `refactor: clean up after <feature or bug> implementation`
- Verify that this checkpoint commit is on the current active branch before considering the TDD cycle complete
### Step 7: Verify Coverage ### Step 7: Verify Coverage
```bash ```bash
npm run test:coverage npm run test:coverage

View File

@@ -175,7 +175,7 @@ test('prompt references analysis_file not full OBSERVATIONS_FILE', () => {
assert.ok(heredocStart > 0, 'Should find prompt heredoc start'); assert.ok(heredocStart > 0, 'Should find prompt heredoc start');
assert.ok(heredocEnd > heredocStart, 'Should find prompt heredoc end'); assert.ok(heredocEnd > heredocStart, 'Should find prompt heredoc end');
const promptSection = content.substring(heredocStart, heredocEnd); const promptSection = content.substring(heredocStart, heredocEnd);
assert.ok(promptSection.includes('${analysis_file}'), 'Prompt should point Claude at the sampled analysis file, not the full observations file'); assert.ok(promptSection.includes('${analysis_relpath}'), 'Prompt should point Claude at the sampled analysis file (via relative path), not the full observations file');
}); });
// ────────────────────────────────────────────────────── // ──────────────────────────────────────────────────────

View File

@@ -0,0 +1,277 @@
/**
* Tests for plugin manifests:
* - .claude-plugin/plugin.json (Claude Code plugin)
* - .codex-plugin/plugin.json (Codex native plugin)
* - .mcp.json (MCP server config at plugin root)
* - .agents/plugins/marketplace.json (Codex marketplace discovery)
*
* Enforces rules from:
* - .claude-plugin/PLUGIN_SCHEMA_NOTES.md (Claude Code validator rules)
* - https://platform.openai.com/docs/codex/plugins (Codex official docs)
*
* Run with: node tests/run-all.js
*/
'use strict';
const assert = require('assert');
const fs = require('fs');
const path = require('path');
const repoRoot = path.resolve(__dirname, '..');
const repoRootWithSep = `${repoRoot}${path.sep}`;
let passed = 0;
let failed = 0;
function test(name, fn) {
try {
fn();
console.log(`${name}`);
passed++;
} catch (err) {
console.log(`${name}`);
console.log(` Error: ${err.message}`);
failed++;
}
}
function loadJsonObject(filePath, label) {
assert.ok(fs.existsSync(filePath), `Expected ${label} to exist`);
let parsed;
try {
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
} catch (error) {
assert.fail(`Expected ${label} to contain valid JSON: ${error.message}`);
}
assert.ok(
parsed && typeof parsed === 'object' && !Array.isArray(parsed),
`Expected ${label} to contain a JSON object`,
);
return parsed;
}
function assertSafeRepoRelativePath(relativePath, label) {
const normalized = path.posix.normalize(relativePath.replace(/\\/g, '/'));
assert.ok(!path.isAbsolute(relativePath), `${label} must not be absolute: ${relativePath}`);
assert.ok(
!normalized.startsWith('../') && !normalized.includes('/../'),
`${label} must not traverse directories: ${relativePath}`,
);
}
// ── Claude plugin manifest ────────────────────────────────────────────────────
// Validates .claude-plugin/plugin.json against the Claude Code validator rules
// (see .claude-plugin/PLUGIN_SCHEMA_NOTES.md): agents must be explicit .md file
// paths, skills/commands must be arrays, and hooks must not be declared.
console.log('\n=== .claude-plugin/plugin.json ===\n');
const claudePluginPath = path.join(repoRoot, '.claude-plugin', 'plugin.json');
test('claude plugin.json exists', () => {
  assert.ok(fs.existsSync(claudePluginPath), 'Expected .claude-plugin/plugin.json to exist');
});
// Loaded once at module level; a missing/invalid manifest throws here and
// aborts the remaining checks, which all depend on the parsed object.
const claudePlugin = loadJsonObject(claudePluginPath, '.claude-plugin/plugin.json');
test('claude plugin.json has version field', () => {
  assert.ok(claudePlugin.version, 'Expected version field');
});
test('claude plugin.json agents is an array', () => {
  assert.ok(Array.isArray(claudePlugin.agents), 'Expected agents to be an array (not a string/directory)');
});
test('claude plugin.json agents uses explicit file paths (not directories)', () => {
  for (const agentPath of claudePlugin.agents) {
    assertSafeRepoRelativePath(agentPath, 'Agent path');
    assert.ok(
      agentPath.endsWith('.md'),
      `Expected explicit .md file path, got: ${agentPath}`,
    );
    assert.ok(
      !agentPath.endsWith('/'),
      `Expected explicit file path, not directory, got: ${agentPath}`,
    );
  }
});
test('claude plugin.json all agent files exist', () => {
  for (const agentRelPath of claudePlugin.agents) {
    assertSafeRepoRelativePath(agentRelPath, 'Agent path');
    // Belt-and-braces: even after the relative-path check, verify the
    // resolved absolute path stays under the repo root before stat-ing it.
    const absolute = path.resolve(repoRoot, agentRelPath);
    assert.ok(
      absolute === repoRoot || absolute.startsWith(repoRootWithSep),
      `Agent path resolves outside repo root: ${agentRelPath}`,
    );
    assert.ok(
      fs.existsSync(absolute),
      `Agent file missing: ${agentRelPath}`,
    );
  }
});
test('claude plugin.json skills is an array', () => {
  assert.ok(Array.isArray(claudePlugin.skills), 'Expected skills to be an array');
});
test('claude plugin.json commands is an array', () => {
  assert.ok(Array.isArray(claudePlugin.commands), 'Expected commands to be an array');
});
test('claude plugin.json does NOT have explicit hooks declaration', () => {
  assert.ok(
    !('hooks' in claudePlugin),
    'hooks field must NOT be declared — Claude Code v2.1+ auto-loads hooks/hooks.json by convention',
  );
});
// ── Codex plugin manifest ─────────────────────────────────────────────────────
// Per official docs: https://platform.openai.com/docs/codex/plugins
// - .codex-plugin/plugin.json is the required manifest
// - skills, mcpServers, apps are STRING paths relative to plugin root (not arrays)
// - .mcp.json must be at plugin root (NOT inside .codex-plugin/)
console.log('\n=== .codex-plugin/plugin.json ===\n');
const codexPluginPath = path.join(repoRoot, '.codex-plugin', 'plugin.json');
test('codex plugin.json exists', () => {
  assert.ok(fs.existsSync(codexPluginPath), 'Expected .codex-plugin/plugin.json to exist');
});
const codexPlugin = loadJsonObject(codexPluginPath, '.codex-plugin/plugin.json');
test('codex plugin.json has name field', () => {
  assert.ok(codexPlugin.name, 'Expected name field');
});
test('codex plugin.json has version field', () => {
  assert.ok(codexPlugin.version, 'Expected version field');
});
// Unlike the Claude manifest, Codex declares skills as a single string path.
test('codex plugin.json skills is a string (not array) per official spec', () => {
  const skillsType = typeof codexPlugin.skills;
  assert.strictEqual(skillsType, 'string', 'skills must be a string path per Codex official docs, not an array');
});
test('codex plugin.json mcpServers is a string path (not array) per official spec', () => {
  const mcpServersType = typeof codexPlugin.mcpServers;
  assert.strictEqual(mcpServersType, 'string', 'mcpServers must be a string path per Codex official docs');
});
test('codex plugin.json mcpServers exactly matches "./.mcp.json"', () => {
  assert.strictEqual(codexPlugin.mcpServers, './.mcp.json', 'mcpServers must point exactly to "./.mcp.json" per official docs');
  // The declared path must also resolve to a real file at the plugin root.
  const mcpPath = path.join(repoRoot, codexPlugin.mcpServers.replace(/^\.\//, ''));
  assert.ok(fs.existsSync(mcpPath), `mcpServers file missing at plugin root: ${codexPlugin.mcpServers}`);
});
test('codex plugin.json has interface.displayName', () => {
  const displayName = codexPlugin.interface && codexPlugin.interface.displayName;
  assert.ok(displayName, 'Expected interface.displayName for plugin directory presentation');
});
// ── .mcp.json at plugin root ──────────────────────────────────────────────────
// Per official docs: keep .mcp.json at plugin root, NOT inside .codex-plugin/
console.log('\n=== .mcp.json (plugin root) ===\n');
const mcpJsonPath = path.join(repoRoot, '.mcp.json');
test('.mcp.json exists at plugin root (not inside .codex-plugin/)', () => {
  assert.ok(fs.existsSync(mcpJsonPath), 'Expected .mcp.json at repo root (plugin root)');
  const nestedMcpPath = path.join(repoRoot, '.codex-plugin', '.mcp.json');
  assert.ok(!fs.existsSync(nestedMcpPath), '.mcp.json must NOT be inside .codex-plugin/ — only plugin.json belongs there');
});
const mcpConfig = loadJsonObject(mcpJsonPath, '.mcp.json');
test('.mcp.json has mcpServers object', () => {
  const hasServersObject = mcpConfig.mcpServers && typeof mcpConfig.mcpServers === 'object';
  assert.ok(hasServersObject, 'Expected mcpServers object');
});
test('.mcp.json includes at least github, context7, and exa servers', () => {
  const servers = Object.keys(mcpConfig.mcpServers);
  for (const required of ['github', 'context7', 'exa']) {
    assert.ok(servers.includes(required), `Expected ${required} MCP server`);
  }
});
// ── Codex marketplace file ────────────────────────────────────────────────────
// Per official docs: repo marketplace lives at $REPO_ROOT/.agents/plugins/marketplace.json
console.log('\n=== .agents/plugins/marketplace.json ===\n');
const marketplacePath = path.join(repoRoot, '.agents', 'plugins', 'marketplace.json');
test('marketplace.json exists at .agents/plugins/', () => {
  assert.ok(fs.existsSync(marketplacePath), 'Expected .agents/plugins/marketplace.json for Codex repo marketplace discovery');
});
const marketplace = loadJsonObject(marketplacePath, '.agents/plugins/marketplace.json');
test('marketplace.json has name field', () => {
  assert.ok(marketplace.name, 'Expected name field');
});
test('marketplace.json has plugins array with at least one entry', () => {
  assert.ok(Array.isArray(marketplace.plugins) && marketplace.plugins.length > 0, 'Expected plugins array');
});
test('marketplace.json plugin entries have required fields', () => {
  marketplace.plugins.forEach((plugin) => {
    assert.ok(plugin.name, `Plugin entry missing name`);
    assert.ok(plugin.source && plugin.source.source, `Plugin "${plugin.name}" missing source.source`);
    assert.ok(plugin.policy && plugin.policy.installation, `Plugin "${plugin.name}" missing policy.installation`);
    assert.ok(plugin.category, `Plugin "${plugin.name}" missing category`);
  });
});
// Local-source entries must point at the repo root, where both the Codex
// manifest (.codex-plugin/plugin.json) and the root .mcp.json live.
test('marketplace local plugin path resolves to the repo-root Codex bundle', () => {
  const localPlugins = marketplace.plugins.filter(
    (plugin) => plugin.source && plugin.source.source === 'local',
  );
  for (const plugin of localPlugins) {
    const resolvedRoot = path.resolve(path.dirname(marketplacePath), plugin.source.path);
    assert.strictEqual(resolvedRoot, repoRoot, `Expected local marketplace path to resolve to repo root, got: ${plugin.source.path}`);
    const manifestPath = path.join(resolvedRoot, '.codex-plugin', 'plugin.json');
    assert.ok(fs.existsSync(manifestPath), `Codex plugin manifest missing under resolved marketplace root: ${plugin.source.path}`);
    const rootMcpPath = path.join(resolvedRoot, '.mcp.json');
    assert.ok(fs.existsSync(rootMcpPath), `Root MCP config missing under resolved marketplace root: ${plugin.source.path}`);
  }
});
// ── Summary ───────────────────────────────────────────────────────────────────
// Report totals and exit non-zero when any test failed (run-all.js relies on this).
const exitCode = failed > 0 ? 1 : 0;
console.log(`\nPassed: ${passed}`);
console.log(`Failed: ${failed}`);
process.exit(exitCode);

View File

@@ -22,6 +22,8 @@ function readJson(filePath) {
return JSON.parse(fs.readFileSync(filePath, 'utf8')); return JSON.parse(fs.readFileSync(filePath, 'utf8'));
} }
const REPO_ROOT = path.join(__dirname, '..', '..');
function run(args = [], options = {}) { function run(args = [], options = {}) {
const env = { const env = {
...process.env, ...process.env,
@@ -326,6 +328,170 @@ function runTests() {
assert.ok(result.stderr.includes('Unknown install module: ghost-module')); assert.ok(result.stderr.includes('Unknown install module: ghost-module'));
})) passed++; else failed++; })) passed++; else failed++;
if (test('merges hooks into settings.json for claude target install', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
try {
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(result.code, 0, result.stderr);
const claudeRoot = path.join(homeDir, '.claude');
assert.ok(fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should be copied');
const settingsPath = path.join(claudeRoot, 'settings.json');
assert.ok(fs.existsSync(settingsPath), 'settings.json should exist after install');
const settings = readJson(settingsPath);
assert.ok(settings.hooks, 'settings.json should contain hooks key');
assert.ok(settings.hooks.PreToolUse, 'hooks should include PreToolUse');
assert.ok(Array.isArray(settings.hooks.PreToolUse), 'PreToolUse should be an array');
assert.ok(settings.hooks.PreToolUse.length > 0, 'PreToolUse should have entries');
} finally {
cleanup(homeDir);
cleanup(projectDir);
}
})) passed++; else failed++;
if (test('preserves existing settings fields and hook entries when merging hooks', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
try {
const claudeRoot = path.join(homeDir, '.claude');
fs.mkdirSync(claudeRoot, { recursive: true });
fs.writeFileSync(
path.join(claudeRoot, 'settings.json'),
JSON.stringify({
effortLevel: 'high',
env: { MY_VAR: '1' },
hooks: {
PreToolUse: [{ matcher: 'Write', hooks: [{ type: 'command', command: 'echo custom-pretool' }] }],
UserPromptSubmit: [{ matcher: '*', hooks: [{ type: 'command', command: 'echo custom-submit' }] }],
},
}, null, 2)
);
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(result.code, 0, result.stderr);
const settings = readJson(path.join(claudeRoot, 'settings.json'));
assert.strictEqual(settings.effortLevel, 'high', 'existing effortLevel should be preserved');
assert.deepStrictEqual(settings.env, { MY_VAR: '1' }, 'existing env should be preserved');
assert.ok(settings.hooks, 'hooks should be merged in');
assert.ok(settings.hooks.PreToolUse, 'PreToolUse hooks should exist');
assert.ok(
settings.hooks.PreToolUse.some(entry => JSON.stringify(entry).includes('echo custom-pretool')),
'existing PreToolUse entries should be preserved'
);
assert.ok(settings.hooks.PreToolUse.length > 1, 'ECC PreToolUse hooks should be appended');
assert.deepStrictEqual(
settings.hooks.UserPromptSubmit,
[{ matcher: '*', hooks: [{ type: 'command', command: 'echo custom-submit' }] }],
'user-defined hook event types should be preserved'
);
} finally {
cleanup(homeDir);
cleanup(projectDir);
}
})) passed++; else failed++;
if (test('reinstall does not duplicate managed hook entries', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
try {
const firstInstall = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(firstInstall.code, 0, firstInstall.stderr);
const settingsPath = path.join(homeDir, '.claude', 'settings.json');
const afterFirstInstall = readJson(settingsPath);
const preToolUseLength = afterFirstInstall.hooks.PreToolUse.length;
const secondInstall = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(secondInstall.code, 0, secondInstall.stderr);
const afterSecondInstall = readJson(settingsPath);
assert.strictEqual(
afterSecondInstall.hooks.PreToolUse.length,
preToolUseLength,
'managed hook entries should not duplicate on reinstall'
);
} finally {
cleanup(homeDir);
cleanup(projectDir);
}
})) passed++; else failed++;
if (test('fails when existing settings.json is malformed', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
try {
const claudeRoot = path.join(homeDir, '.claude');
fs.mkdirSync(claudeRoot, { recursive: true });
const settingsPath = path.join(claudeRoot, 'settings.json');
fs.writeFileSync(settingsPath, '{ invalid json\n');
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(result.code, 1);
assert.ok(result.stderr.includes('Failed to parse existing settings at'));
assert.strictEqual(fs.readFileSync(settingsPath, 'utf8'), '{ invalid json\n');
assert.ok(!fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should not be copied on validation failure');
assert.ok(!fs.existsSync(path.join(claudeRoot, 'ecc', 'install-state.json')), 'install state should not be written on validation failure');
} finally {
cleanup(homeDir);
cleanup(projectDir);
}
})) passed++; else failed++;
if (test('fails when existing settings.json root is not an object', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
try {
const claudeRoot = path.join(homeDir, '.claude');
fs.mkdirSync(claudeRoot, { recursive: true });
const settingsPath = path.join(claudeRoot, 'settings.json');
fs.writeFileSync(settingsPath, '[]\n');
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(result.code, 1);
assert.ok(result.stderr.includes('Invalid existing settings at'));
assert.ok(result.stderr.includes('expected a JSON object'));
assert.strictEqual(fs.readFileSync(settingsPath, 'utf8'), '[]\n');
assert.ok(!fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should not be copied on validation failure');
assert.ok(!fs.existsSync(path.join(claudeRoot, 'ecc', 'install-state.json')), 'install state should not be written on validation failure');
} finally {
cleanup(homeDir);
cleanup(projectDir);
}
})) passed++; else failed++;
if (test('fails when source hooks.json root is not an object before copying files', () => {
const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-');
const sourceHooksPath = path.join(REPO_ROOT, 'hooks', 'hooks.json');
const originalHooks = fs.readFileSync(sourceHooksPath, 'utf8');
try {
fs.writeFileSync(sourceHooksPath, '[]\n');
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
assert.strictEqual(result.code, 1);
assert.ok(result.stderr.includes('Invalid hooks config at'));
assert.ok(result.stderr.includes('expected a JSON object'));
const claudeRoot = path.join(homeDir, '.claude');
assert.ok(!fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should not be copied when source hooks are invalid');
assert.ok(!fs.existsSync(path.join(claudeRoot, 'ecc', 'install-state.json')), 'install state should not be written when source hooks are invalid');
} finally {
fs.writeFileSync(sourceHooksPath, originalHooks);
cleanup(homeDir);
cleanup(projectDir);
}
})) passed++; else failed++;
if (test('installs from ecc-install.json and persists component selections', () => { if (test('installs from ecc-install.json and persists component selections', () => {
const homeDir = createTempDir('install-apply-home-'); const homeDir = createTempDir('install-apply-home-');
const projectDir = createTempDir('install-apply-project-'); const projectDir = createTempDir('install-apply-project-');

View File

@@ -69,8 +69,9 @@ function runTests() {
if (test('filesystem-changing calls use argv-form run_or_echo invocations', () => { if (test('filesystem-changing calls use argv-form run_or_echo invocations', () => {
assert.ok(source.includes('run_or_echo mkdir -p "$BACKUP_DIR"'), 'mkdir should use argv form'); assert.ok(source.includes('run_or_echo mkdir -p "$BACKUP_DIR"'), 'mkdir should use argv form');
assert.ok(source.includes('run_or_echo rm -rf "$dest"'), 'rm should use argv form'); // Skills sync rm/cp calls were removed — Codex reads from ~/.agents/skills/ natively
assert.ok(source.includes('run_or_echo cp -R "$skill_dir" "$dest"'), 'recursive copy should use argv form'); assert.ok(!source.includes('run_or_echo rm -rf "$dest"'), 'skill sync rm should be removed');
assert.ok(!source.includes('run_or_echo cp -R "$skill_dir" "$dest"'), 'skill sync cp should be removed');
})) passed++; else failed++; })) passed++; else failed++;
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);

View File

@@ -30,14 +30,14 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"@eslint/config-array@npm:^0.21.2": "@eslint/config-array@npm:^0.21.1":
version: 0.21.2 version: 0.21.1
resolution: "@eslint/config-array@npm:0.21.2" resolution: "@eslint/config-array@npm:0.21.1"
dependencies: dependencies:
"@eslint/object-schema": "npm:^2.1.7" "@eslint/object-schema": "npm:^2.1.7"
debug: "npm:^4.3.1" debug: "npm:^4.3.1"
minimatch: "npm:^3.1.5" minimatch: "npm:^3.1.2"
checksum: 10c0/89dfe815d18456177c0a1f238daf4593107fd20298b3598e0103054360d3b8d09d967defd8318f031185d68df1f95cfa68becf1390a9c5c6887665f1475142e3 checksum: 10c0/2f657d4edd6ddcb920579b72e7a5b127865d4c3fb4dda24f11d5c4f445a93ca481aebdbd6bf3291c536f5d034458dbcbb298ee3b698bc6c9dd02900fe87eec3c
languageName: node languageName: node
linkType: hard linkType: hard
@@ -59,27 +59,27 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"@eslint/eslintrc@npm:^3.3.5": "@eslint/eslintrc@npm:^3.3.1":
version: 3.3.5 version: 3.3.3
resolution: "@eslint/eslintrc@npm:3.3.5" resolution: "@eslint/eslintrc@npm:3.3.3"
dependencies: dependencies:
ajv: "npm:^6.14.0" ajv: "npm:^6.12.4"
debug: "npm:^4.3.2" debug: "npm:^4.3.2"
espree: "npm:^10.0.1" espree: "npm:^10.0.1"
globals: "npm:^14.0.0" globals: "npm:^14.0.0"
ignore: "npm:^5.2.0" ignore: "npm:^5.2.0"
import-fresh: "npm:^3.2.1" import-fresh: "npm:^3.2.1"
js-yaml: "npm:^4.1.1" js-yaml: "npm:^4.1.1"
minimatch: "npm:^3.1.5" minimatch: "npm:^3.1.2"
strip-json-comments: "npm:^3.1.1" strip-json-comments: "npm:^3.1.1"
checksum: 10c0/9fb9f1ca65e46d6173966e3aaa5bd353e3a65d7f1f582bebf77f578fab7d7960a399fac1ecfb1e7d52bd61f5cefd6531087ca52a3a3c388f2e1b4f1ebd3da8b7 checksum: 10c0/532c7acc7ddd042724c28b1f020bd7bf148fcd4653bb44c8314168b5f772508c842ce4ee070299cac51c5c5757d2124bdcfcef5551c8c58ff9986e3e17f2260d
languageName: node languageName: node
linkType: hard linkType: hard
"@eslint/js@npm:9.39.4, @eslint/js@npm:^9.39.2": "@eslint/js@npm:9.39.2, @eslint/js@npm:^9.39.2":
version: 9.39.4 version: 9.39.2
resolution: "@eslint/js@npm:9.39.4" resolution: "@eslint/js@npm:9.39.2"
checksum: 10c0/5aa7dea2cbc5decf7f5e3b0c6f86a084ccee0f792d288ca8e839f8bc1b64e03e227068968e49b26096e6f71fd857ab6e42691d1b993826b9a3883f1bdd7a0e46 checksum: 10c0/00f51c52b04ac79faebfaa65a9652b2093b9c924e945479f1f3945473f78aee83cbc76c8d70bbffbf06f7024626575b16d97b66eab16182e1d0d39daff2f26f5
languageName: node languageName: node
linkType: hard linkType: hard
@@ -191,11 +191,11 @@ __metadata:
linkType: hard linkType: hard
"@types/debug@npm:^4.0.0": "@types/debug@npm:^4.0.0":
version: 4.1.13 version: 4.1.12
resolution: "@types/debug@npm:4.1.13" resolution: "@types/debug@npm:4.1.12"
dependencies: dependencies:
"@types/ms": "npm:*" "@types/ms": "npm:*"
checksum: 10c0/e5e124021bbdb23a82727eee0a726ae0fc8a3ae1f57253cbcc47497f259afb357de7f6941375e773e1abbfa1604c1555b901a409d762ec2bb4c1612131d4afb7 checksum: 10c0/5dcd465edbb5a7f226e9a5efd1f399c6172407ef5840686b73e3608ce135eeca54ae8037dcd9f16bdb2768ac74925b820a8b9ecc588a58ca09eca6acabe33e2f
languageName: node languageName: node
linkType: hard linkType: hard
@@ -251,15 +251,15 @@ __metadata:
linkType: hard linkType: hard
"acorn@npm:^8.15.0": "acorn@npm:^8.15.0":
version: 8.16.0 version: 8.15.0
resolution: "acorn@npm:8.16.0" resolution: "acorn@npm:8.15.0"
bin: bin:
acorn: bin/acorn acorn: bin/acorn
checksum: 10c0/c9c52697227661b68d0debaf972222d4f622aa06b185824164e153438afa7b08273432ca43ea792cadb24dada1d46f6f6bb1ef8de9956979288cc1b96bf9914e checksum: 10c0/dec73ff59b7d6628a01eebaece7f2bdb8bb62b9b5926dcad0f8931f2b8b79c2be21f6c68ac095592adb5adb15831a3635d9343e6a91d028bbe85d564875ec3ec
languageName: node languageName: node
linkType: hard linkType: hard
"ajv@npm:^6.14.0": "ajv@npm:^6.12.4":
version: 6.14.0 version: 6.14.0
resolution: "ajv@npm:6.14.0" resolution: "ajv@npm:6.14.0"
dependencies: dependencies:
@@ -290,7 +290,7 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"ansi-regex@npm:^6.2.2": "ansi-regex@npm:^6.0.1":
version: 6.2.2 version: 6.2.2
resolution: "ansi-regex@npm:6.2.2" resolution: "ansi-regex@npm:6.2.2"
checksum: 10c0/05d4acb1d2f59ab2cf4b794339c7b168890d44dda4bf0ce01152a8da0213aca207802f930442ce8cd22d7a92f44907664aac6508904e75e038fa944d2601b30f checksum: 10c0/05d4acb1d2f59ab2cf4b794339c7b168890d44dda4bf0ce01152a8da0213aca207802f930442ce8cd22d7a92f44907664aac6508904e75e038fa944d2601b30f
@@ -354,11 +354,11 @@ __metadata:
linkType: hard linkType: hard
"brace-expansion@npm:^5.0.2": "brace-expansion@npm:^5.0.2":
version: 5.0.5 version: 5.0.4
resolution: "brace-expansion@npm:5.0.5" resolution: "brace-expansion@npm:5.0.4"
dependencies: dependencies:
balanced-match: "npm:^4.0.2" balanced-match: "npm:^4.0.2"
checksum: 10c0/4d238e14ed4f5cc9c07285550a41cef23121ca08ba99fa9eb5b55b580dcb6bf868b8210aa10526bdc9f8dc97f33ca2a7259039c4cc131a93042beddb424c48e3 checksum: 10c0/359cbcfa80b2eb914ca1f3440e92313fbfe7919ee6b274c35db55bec555aded69dac5ee78f102cec90c35f98c20fa43d10936d0cd9978158823c249257e1643a
languageName: node languageName: node
linkType: hard linkType: hard
@@ -628,22 +628,22 @@ __metadata:
linkType: hard linkType: hard
"eslint@npm:^9.39.2": "eslint@npm:^9.39.2":
version: 9.39.4 version: 9.39.2
resolution: "eslint@npm:9.39.4" resolution: "eslint@npm:9.39.2"
dependencies: dependencies:
"@eslint-community/eslint-utils": "npm:^4.8.0" "@eslint-community/eslint-utils": "npm:^4.8.0"
"@eslint-community/regexpp": "npm:^4.12.1" "@eslint-community/regexpp": "npm:^4.12.1"
"@eslint/config-array": "npm:^0.21.2" "@eslint/config-array": "npm:^0.21.1"
"@eslint/config-helpers": "npm:^0.4.2" "@eslint/config-helpers": "npm:^0.4.2"
"@eslint/core": "npm:^0.17.0" "@eslint/core": "npm:^0.17.0"
"@eslint/eslintrc": "npm:^3.3.5" "@eslint/eslintrc": "npm:^3.3.1"
"@eslint/js": "npm:9.39.4" "@eslint/js": "npm:9.39.2"
"@eslint/plugin-kit": "npm:^0.4.1" "@eslint/plugin-kit": "npm:^0.4.1"
"@humanfs/node": "npm:^0.16.6" "@humanfs/node": "npm:^0.16.6"
"@humanwhocodes/module-importer": "npm:^1.0.1" "@humanwhocodes/module-importer": "npm:^1.0.1"
"@humanwhocodes/retry": "npm:^0.4.2" "@humanwhocodes/retry": "npm:^0.4.2"
"@types/estree": "npm:^1.0.6" "@types/estree": "npm:^1.0.6"
ajv: "npm:^6.14.0" ajv: "npm:^6.12.4"
chalk: "npm:^4.0.0" chalk: "npm:^4.0.0"
cross-spawn: "npm:^7.0.6" cross-spawn: "npm:^7.0.6"
debug: "npm:^4.3.2" debug: "npm:^4.3.2"
@@ -662,7 +662,7 @@ __metadata:
is-glob: "npm:^4.0.0" is-glob: "npm:^4.0.0"
json-stable-stringify-without-jsonify: "npm:^1.0.1" json-stable-stringify-without-jsonify: "npm:^1.0.1"
lodash.merge: "npm:^4.6.2" lodash.merge: "npm:^4.6.2"
minimatch: "npm:^3.1.5" minimatch: "npm:^3.1.2"
natural-compare: "npm:^1.4.0" natural-compare: "npm:^1.4.0"
optionator: "npm:^0.9.3" optionator: "npm:^0.9.3"
peerDependencies: peerDependencies:
@@ -672,7 +672,7 @@ __metadata:
optional: true optional: true
bin: bin:
eslint: bin/eslint.js eslint: bin/eslint.js
checksum: 10c0/1955067c2d991f0c84f4c4abfafe31bb47fa3b717a7fd3e43fe1e511c6f859d7700cbca969f85661dc4c130f7aeced5e5444884314198a54428f5e5141db9337 checksum: 10c0/bb88ca8fd16bb7e1ac3e13804c54d41c583214460c0faa7b3e7c574e69c5600c7122295500fb4b0c06067831111db740931e98da1340329527658e1cf80073d3
languageName: node languageName: node
linkType: hard linkType: hard
@@ -813,9 +813,9 @@ __metadata:
linkType: hard linkType: hard
"get-east-asian-width@npm:^1.3.0": "get-east-asian-width@npm:^1.3.0":
version: 1.5.0 version: 1.4.0
resolution: "get-east-asian-width@npm:1.5.0" resolution: "get-east-asian-width@npm:1.4.0"
checksum: 10c0/bff8bbc8d81790b9477f7aa55b1806b9f082a8dc1359fff7bd8b96939622c86b729685afc2bfeb22def1fc6ef1e5228e4d87dd4e6da60bc43a5edfb03c4ee167 checksum: 10c0/4e481d418e5a32061c36fbb90d1b225a254cc5b2df5f0b25da215dcd335a3c111f0c2023ffda43140727a9cafb62dac41d022da82c08f31083ee89f714ee3b83
languageName: node languageName: node
linkType: hard linkType: hard
@@ -852,9 +852,9 @@ __metadata:
linkType: hard linkType: hard
"globals@npm:^17.1.0": "globals@npm:^17.1.0":
version: 17.4.0 version: 17.1.0
resolution: "globals@npm:17.4.0" resolution: "globals@npm:17.1.0"
checksum: 10c0/2be9e8c2b9035836f13d420b22f0247a328db82967d3bebfc01126d888ed609305f06c05895914e969653af5c6ba35fd7a0920f3e6c869afa60666c810630feb checksum: 10c0/4a4a17847676a09f164b8bdce7df105a4f484d6d44586e374087ba9ec7089cbf4c578b8648838dee9917074199c542ce157ea3c07b266f708dfb1652010900c8
languageName: node languageName: node
linkType: hard linkType: hard
@@ -1066,13 +1066,13 @@ __metadata:
linkType: hard linkType: hard
"katex@npm:^0.16.0": "katex@npm:^0.16.0":
version: 0.16.43 version: 0.16.28
resolution: "katex@npm:0.16.43" resolution: "katex@npm:0.16.28"
dependencies: dependencies:
commander: "npm:^8.3.0" commander: "npm:^8.3.0"
bin: bin:
katex: cli.js katex: cli.js
checksum: 10c0/62873b03107afd7b68054c92b414700b73bf606f42518d6f01f1f8104f5f876be0cbe096142742f8803638809ea3726d05812a4de17b5287e0f79a56ab932b91 checksum: 10c0/9c6e100ecb10c8e8315ab1d6ae16642b91e05642d821158149be520d629c3b47f30d8475fa8978d2d765a1d8e1bd66ab6afffe3a0409265de520edccab346b3e
languageName: node languageName: node
linkType: hard linkType: hard
@@ -1502,7 +1502,7 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"minimatch@npm:^3.1.5": "minimatch@npm:^3.1.2":
version: 3.1.5 version: 3.1.5
resolution: "minimatch@npm:3.1.5" resolution: "minimatch@npm:3.1.5"
dependencies: dependencies:
@@ -1795,11 +1795,11 @@ __metadata:
linkType: hard linkType: hard
"strip-ansi@npm:^7.0.1, strip-ansi@npm:^7.1.0": "strip-ansi@npm:^7.0.1, strip-ansi@npm:^7.1.0":
version: 7.2.0 version: 7.1.2
resolution: "strip-ansi@npm:7.2.0" resolution: "strip-ansi@npm:7.1.2"
dependencies: dependencies:
ansi-regex: "npm:^6.2.2" ansi-regex: "npm:^6.0.1"
checksum: 10c0/544d13b7582f8254811ea97db202f519e189e59d35740c46095897e254e4f1aa9fe1524a83ad6bc5ad67d4dd6c0281d2e0219ed62b880a6238a16a17d375f221 checksum: 10c0/0d6d7a023de33368fd042aab0bf48f4f4077abdfd60e5393e73c7c411e85e1b3a83507c11af2e656188511475776215df9ca589b4da2295c9455cc399ce1858b
languageName: node languageName: node
linkType: hard linkType: hard