mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
Compare commits
70 Commits
678fb6f0d3
...
fix/yarn-l
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9181382065 | ||
|
|
9434e07749 | ||
|
|
9cde3427e2 | ||
|
|
c6b4c719b2 | ||
|
|
f98207feea | ||
|
|
52e9bd58f1 | ||
|
|
4257c093ca | ||
|
|
23d743b92c | ||
|
|
414ea90e11 | ||
|
|
d473cf87e6 | ||
|
|
64847d0a21 | ||
|
|
c865d4c676 | ||
|
|
72de19effd | ||
|
|
56076edd48 | ||
|
|
04d7eeb16f | ||
|
|
4e7773c2ce | ||
|
|
a3fc90f7ac | ||
|
|
55efeb7f20 | ||
|
|
1e7c299706 | ||
|
|
47aa415b06 | ||
|
|
d7e6bb242a | ||
|
|
9f37a5d8c7 | ||
|
|
d9ec51c9e9 | ||
|
|
9033f2a997 | ||
|
|
67660540ac | ||
|
|
432788d0b5 | ||
|
|
6a7a115e18 | ||
|
|
1181d93498 | ||
|
|
80d6a89f12 | ||
|
|
28a1fbc3f2 | ||
|
|
7a4cb8c570 | ||
|
|
4b4f077d18 | ||
|
|
78c98dd4fd | ||
|
|
f07797533d | ||
|
|
87d883eb1b | ||
|
|
652f87c5b6 | ||
|
|
70b65a9d06 | ||
|
|
24674a7bd6 | ||
|
|
d49c95a5ec | ||
|
|
70a96bd363 | ||
|
|
8f7445a260 | ||
|
|
9ad4351f53 | ||
|
|
451732164f | ||
|
|
ebd14cde7d | ||
|
|
ae21a8df85 | ||
|
|
d8e3b9d593 | ||
|
|
7148d9006f | ||
|
|
c14765e701 | ||
|
|
194bc0000b | ||
|
|
1e44475458 | ||
|
|
31af1adcc8 | ||
|
|
c80631fc1d | ||
|
|
00f8628b83 | ||
|
|
ba09a34432 | ||
|
|
27e0d53f6d | ||
|
|
8b6140dedc | ||
|
|
7633386e04 | ||
|
|
b4296c7095 | ||
|
|
cc60bf6b65 | ||
|
|
160624d0ed | ||
|
|
73c10122fe | ||
|
|
fe6a6fc106 | ||
|
|
dafc9bcd60 | ||
|
|
2d0fddf174 | ||
|
|
f471f27658 | ||
|
|
925d830c53 | ||
|
|
2243f15581 | ||
|
|
6408511611 | ||
|
|
9348751b8e | ||
|
|
c96c4d2742 |
20
.agents/plugins/marketplace.json
Normal file
20
.agents/plugins/marketplace.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"source": {
|
||||
"source": "local",
|
||||
"path": "../.."
|
||||
},
|
||||
"policy": {
|
||||
"installation": "AVAILABLE",
|
||||
"authentication": "ON_INSTALL"
|
||||
},
|
||||
"category": "Productivity"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -21,5 +21,37 @@
|
||||
"workflow",
|
||||
"automation",
|
||||
"best-practices"
|
||||
]
|
||||
],
|
||||
"agents": [
|
||||
"./agents/architect.md",
|
||||
"./agents/build-error-resolver.md",
|
||||
"./agents/chief-of-staff.md",
|
||||
"./agents/code-reviewer.md",
|
||||
"./agents/cpp-build-resolver.md",
|
||||
"./agents/cpp-reviewer.md",
|
||||
"./agents/database-reviewer.md",
|
||||
"./agents/doc-updater.md",
|
||||
"./agents/docs-lookup.md",
|
||||
"./agents/e2e-runner.md",
|
||||
"./agents/flutter-reviewer.md",
|
||||
"./agents/go-build-resolver.md",
|
||||
"./agents/go-reviewer.md",
|
||||
"./agents/harness-optimizer.md",
|
||||
"./agents/java-build-resolver.md",
|
||||
"./agents/java-reviewer.md",
|
||||
"./agents/kotlin-build-resolver.md",
|
||||
"./agents/kotlin-reviewer.md",
|
||||
"./agents/loop-operator.md",
|
||||
"./agents/planner.md",
|
||||
"./agents/python-reviewer.md",
|
||||
"./agents/pytorch-build-resolver.md",
|
||||
"./agents/refactor-cleaner.md",
|
||||
"./agents/rust-build-resolver.md",
|
||||
"./agents/rust-reviewer.md",
|
||||
"./agents/security-reviewer.md",
|
||||
"./agents/tdd-guide.md",
|
||||
"./agents/typescript-reviewer.md"
|
||||
],
|
||||
"skills": ["./skills/"],
|
||||
"commands": ["./commands/"]
|
||||
}
|
||||
|
||||
47
.claude/rules/node.md
Normal file
47
.claude/rules/node.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Node.js Rules for everything-claude-code
|
||||
|
||||
> Project-specific rules for the ECC codebase. Extends common rules.
|
||||
|
||||
## Stack
|
||||
|
||||
- **Runtime**: Node.js >=18 (no transpilation, plain CommonJS)
|
||||
- **Test runner**: `node tests/run-all.js` — individual files via `node tests/**/*.test.js`
|
||||
- **Linter**: ESLint (`@eslint/js`, flat config)
|
||||
- **Coverage**: c8
|
||||
- **Lint**: markdownlint-cli for `.md` files
|
||||
|
||||
## File Conventions
|
||||
|
||||
- `scripts/` — Node.js utilities, hooks. CommonJS (`require`/`module.exports`)
|
||||
- `agents/`, `commands/`, `skills/`, `rules/` — Markdown with YAML frontmatter
|
||||
- `tests/` — Mirror the `scripts/` structure. Test files named `*.test.js`
|
||||
- File naming: **lowercase with hyphens** (e.g. `session-start.js`, `post-edit-format.js`)
|
||||
|
||||
## Code Style
|
||||
|
||||
- CommonJS only — no ESM (`import`/`export`) unless file ends in `.mjs`
|
||||
- No TypeScript — plain `.js` throughout
|
||||
- Prefer `const` over `let`; never `var`
|
||||
- Keep hook scripts under 200 lines — extract helpers to `scripts/lib/`
|
||||
- All hooks must `exit 0` on non-critical errors (never block tool execution unexpectedly)
|
||||
|
||||
## Hook Development
|
||||
|
||||
- Hook scripts normally receive JSON on stdin, but hooks routed through `scripts/hooks/run-with-flags.js` can export `run(rawInput)` and let the wrapper handle parsing/gating
|
||||
- Async hooks: mark `"async": true` in `settings.json` with a timeout ≤30s
|
||||
- Blocking hooks (PreToolUse, stop): keep fast (<200ms) — no network calls
|
||||
- Use `run-with-flags.js` wrapper for all hooks so `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` runtime gating works
|
||||
- Always exit 0 on parse errors; log to stderr with `[HookName]` prefix
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
- Run `node tests/run-all.js` before committing
|
||||
- New scripts in `scripts/lib/` require a matching test in `tests/lib/`
|
||||
- New hooks require at least one integration test in `tests/hooks/`
|
||||
|
||||
## Markdown / Agent Files
|
||||
|
||||
- Agents: YAML frontmatter with `name`, `description`, `tools`, `model`
|
||||
- Skills: sections — When to Use, How It Works, Examples
|
||||
- Commands: `description:` frontmatter line required
|
||||
- Run `npx markdownlint-cli '**/*.md' --ignore node_modules` before committing
|
||||
49
.codex-plugin/README.md
Normal file
49
.codex-plugin/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# .codex-plugin — Codex Native Plugin for ECC
|
||||
|
||||
This directory contains the **Codex plugin manifest** for Everything Claude Code.
|
||||
|
||||
## Structure
|
||||
|
||||
```
|
||||
.codex-plugin/
|
||||
└── plugin.json — Codex plugin manifest (name, version, skills ref, MCP ref)
|
||||
.mcp.json — MCP server configurations at plugin root (NOT inside .codex-plugin/)
|
||||
```
|
||||
|
||||
## What This Provides
|
||||
|
||||
- **125 skills** from `./skills/` — reusable Codex workflows for TDD, security,
|
||||
code review, architecture, and more
|
||||
- **6 MCP servers** — GitHub, Context7, Exa, Memory, Playwright, Sequential Thinking
|
||||
|
||||
## Installation
|
||||
|
||||
Codex plugin support is currently in preview. Once generally available:
|
||||
|
||||
```bash
|
||||
# Install from Codex CLI
|
||||
codex plugin install affaan-m/everything-claude-code
|
||||
|
||||
# Or reference locally during development
|
||||
codex plugin install ./
|
||||
|
||||
Run this from the repository root so `./` points to the repo root and `.mcp.json` resolves correctly.
|
||||
```
|
||||
|
||||
## MCP Servers Included
|
||||
|
||||
| Server | Purpose |
|
||||
|---|---|
|
||||
| `github` | GitHub API access |
|
||||
| `context7` | Live documentation lookup |
|
||||
| `exa` | Neural web search |
|
||||
| `memory` | Persistent memory across sessions |
|
||||
| `playwright` | Browser automation & E2E testing |
|
||||
| `sequential-thinking` | Step-by-step reasoning |
|
||||
|
||||
## Notes
|
||||
|
||||
- The `skills/` directory at the repo root is shared between Claude Code (`.claude-plugin/`)
|
||||
and Codex (`.codex-plugin/`) — same source of truth, no duplication
|
||||
- MCP server credentials are inherited from the launching environment (env vars)
|
||||
- This manifest does **not** override `~/.codex/config.toml` settings
|
||||
30
.codex-plugin/plugin.json
Normal file
30
.codex-plugin/plugin.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"version": "1.9.0",
|
||||
"description": "Battle-tested Codex workflows — 125 skills, production-ready MCP configs, and agent definitions for TDD, security scanning, code review, and autonomous development.",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"email": "me@affaanmustafa.com",
|
||||
"url": "https://x.com/affaanmustafa"
|
||||
},
|
||||
"homepage": "https://github.com/affaan-m/everything-claude-code",
|
||||
"repository": "https://github.com/affaan-m/everything-claude-code",
|
||||
"license": "MIT",
|
||||
"keywords": ["codex", "agents", "skills", "tdd", "code-review", "security", "workflow", "automation"],
|
||||
"skills": "./skills/",
|
||||
"mcpServers": "./.mcp.json",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code",
|
||||
"shortDescription": "125 battle-tested skills for TDD, security, code review, and autonomous development.",
|
||||
"longDescription": "Everything Claude Code (ECC) is a community-maintained collection of Codex skills and MCP configs evolved over 10+ months of intensive daily use. It covers TDD workflows, security scanning, code review, architecture decisions, and more — all in one installable plugin.",
|
||||
"developerName": "Affaan Mustafa",
|
||||
"category": "Productivity",
|
||||
"capabilities": ["Read", "Write"],
|
||||
"websiteURL": "https://github.com/affaan-m/everything-claude-code",
|
||||
"defaultPrompt": [
|
||||
"Use the tdd-workflow skill to write tests before implementation.",
|
||||
"Use the security-review skill to scan for OWASP Top 10 vulnerabilities.",
|
||||
"Use the code-review skill to review this PR for correctness and security."
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -46,12 +46,15 @@ Available skills:
|
||||
|
||||
Treat the project-local `.codex/config.toml` as the default Codex baseline for ECC. The current ECC baseline enables GitHub, Context7, Exa, Memory, Playwright, and Sequential Thinking; add heavier extras in `~/.codex/config.toml` only when a task actually needs them.
|
||||
|
||||
ECC's canonical Codex section name is `[mcp_servers.context7]`. The launcher package remains `@upstash/context7-mcp`; only the TOML section name is normalized for consistency with `codex mcp list` and the reference config.
|
||||
|
||||
### Automatic config.toml merging
|
||||
|
||||
The sync script (`scripts/sync-ecc-to-codex.sh`) uses a Node-based TOML parser to safely merge ECC MCP servers into `~/.codex/config.toml`:
|
||||
|
||||
- **Add-only by default** — missing ECC servers are appended; existing servers are never modified or removed.
|
||||
- **7 managed servers** — Supabase, Playwright, Context7, Exa, GitHub, Memory, Sequential Thinking.
|
||||
- **Canonical naming** — ECC manages Context7 as `[mcp_servers.context7]`; legacy `[mcp_servers.context7-mcp]` entries are treated as aliases during updates.
|
||||
- **Package-manager aware** — uses the project's configured package manager (npm/pnpm/yarn/bun) instead of hardcoding `pnpm`.
|
||||
- **Drift warnings** — if an existing server's config differs from the ECC recommendation, the script logs a warning.
|
||||
- **`--update-mcp`** — explicitly replaces all ECC-managed servers with the latest recommended config (safely removes subtables like `[mcp_servers.supabase.env]`).
|
||||
|
||||
@@ -27,7 +27,10 @@ notify = [
|
||||
"-sound", "default",
|
||||
]
|
||||
|
||||
# Prefer AGENTS.md and project-local .codex/AGENTS.md for instructions.
|
||||
# Persistent instructions are appended to every prompt (additive, unlike
|
||||
# model_instructions_file which replaces AGENTS.md).
|
||||
persistent_instructions = "Follow project AGENTS.md guidelines. Use available MCP servers when they can help."
|
||||
|
||||
# model_instructions_file replaces built-in instructions instead of AGENTS.md,
|
||||
# so leave it unset unless you intentionally want a single override file.
|
||||
# model_instructions_file = "/absolute/path/to/instructions.md"
|
||||
@@ -38,10 +41,14 @@ notify = [
|
||||
[mcp_servers.github]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-github"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.context7]
|
||||
command = "npx"
|
||||
# Canonical Codex section name is `context7`; the package itself remains
|
||||
# `@upstash/context7-mcp`.
|
||||
args = ["-y", "@upstash/context7-mcp@latest"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.exa]
|
||||
url = "https://mcp.exa.ai/mcp"
|
||||
@@ -49,14 +56,17 @@ url = "https://mcp.exa.ai/mcp"
|
||||
[mcp_servers.memory]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-memory"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.playwright]
|
||||
command = "npx"
|
||||
args = ["-y", "@playwright/mcp@latest", "--extension"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.sequential-thinking]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
# Additional MCP servers (uncomment as needed):
|
||||
# [mcp_servers.supabase]
|
||||
|
||||
21
.github/workflows/ci.yml
vendored
21
.github/workflows/ci.yml
vendored
@@ -44,13 +44,20 @@ jobs:
|
||||
# Package manager setup
|
||||
- name: Setup pnpm
|
||||
if: matrix.pm == 'pnpm'
|
||||
uses: pnpm/action-setup@v4
|
||||
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Setup Yarn (via Corepack)
|
||||
if: matrix.pm == 'yarn'
|
||||
shell: bash
|
||||
run: |
|
||||
corepack enable
|
||||
corepack prepare yarn@stable --activate
|
||||
|
||||
- name: Setup Bun
|
||||
if: matrix.pm == 'bun'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
|
||||
|
||||
# Cache configuration
|
||||
- name: Get npm cache directory
|
||||
@@ -114,14 +121,18 @@ jobs:
|
||||
${{ runner.os }}-bun-
|
||||
|
||||
# Install dependencies
|
||||
# COREPACK_ENABLE_STRICT=0 allows pnpm to install even though
|
||||
# package.json declares "packageManager": "yarn@..."
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
env:
|
||||
COREPACK_ENABLE_STRICT: '0'
|
||||
run: |
|
||||
case "${{ matrix.pm }}" in
|
||||
npm) npm ci ;;
|
||||
pnpm) pnpm install ;;
|
||||
# --ignore-engines required for Node 18 compat with some devDependencies (e.g., markdownlint-cli)
|
||||
yarn) yarn install --ignore-engines ;;
|
||||
pnpm) pnpm install --no-frozen-lockfile ;;
|
||||
# Yarn Berry (v4+) removed --ignore-engines; engine checking is no longer a core feature
|
||||
yarn) yarn install ;;
|
||||
bun) bun install ;;
|
||||
*) echo "Unsupported package manager: ${{ matrix.pm }}" && exit 1 ;;
|
||||
esac
|
||||
|
||||
6
.github/workflows/release.yml
vendored
6
.github/workflows/release.yml
vendored
@@ -20,11 +20,13 @@ jobs:
|
||||
|
||||
- name: Validate version tag
|
||||
run: |
|
||||
if ! [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
if ! [[ "${REF_NAME}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Invalid version tag format. Expected vX.Y.Z"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
env:
|
||||
REF_NAME: ${{ github.ref_name }}
|
||||
- name: Verify plugin.json version matches tag
|
||||
env:
|
||||
TAG_NAME: ${{ github.ref_name }}
|
||||
@@ -61,7 +63,7 @@ jobs:
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
|
||||
with:
|
||||
body_path: release_body.md
|
||||
generate_release_notes: true
|
||||
|
||||
2
.github/workflows/reusable-release.yml
vendored
2
.github/workflows/reusable-release.yml
vendored
@@ -49,7 +49,7 @@ jobs:
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
|
||||
with:
|
||||
tag_name: ${{ inputs.tag }}
|
||||
body_path: release_body.md
|
||||
|
||||
20
.github/workflows/reusable-test.yml
vendored
20
.github/workflows/reusable-test.yml
vendored
@@ -36,13 +36,20 @@ jobs:
|
||||
|
||||
- name: Setup pnpm
|
||||
if: inputs.package-manager == 'pnpm'
|
||||
uses: pnpm/action-setup@v4
|
||||
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Setup Yarn (via Corepack)
|
||||
if: inputs.package-manager == 'yarn'
|
||||
shell: bash
|
||||
run: |
|
||||
corepack enable
|
||||
corepack prepare yarn@stable --activate
|
||||
|
||||
- name: Setup Bun
|
||||
if: inputs.package-manager == 'bun'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
|
||||
|
||||
- name: Get npm cache directory
|
||||
if: inputs.package-manager == 'npm'
|
||||
@@ -104,13 +111,18 @@ jobs:
|
||||
restore-keys: |
|
||||
${{ runner.os }}-bun-
|
||||
|
||||
# COREPACK_ENABLE_STRICT=0 allows pnpm to install even though
|
||||
# package.json declares "packageManager": "yarn@..."
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
env:
|
||||
COREPACK_ENABLE_STRICT: '0'
|
||||
run: |
|
||||
case "${{ inputs.package-manager }}" in
|
||||
npm) npm ci ;;
|
||||
pnpm) pnpm install ;;
|
||||
yarn) yarn install --ignore-engines ;;
|
||||
pnpm) pnpm install --no-frozen-lockfile ;;
|
||||
# Yarn Berry (v4+) removed --ignore-engines; engine checking is no longer a core feature
|
||||
yarn) yarn install ;;
|
||||
bun) bun install ;;
|
||||
*) echo "Unsupported package manager: ${{ inputs.package-manager }}" && exit 1 ;;
|
||||
esac
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -83,6 +83,9 @@ temp/
|
||||
*.bak
|
||||
*.backup
|
||||
|
||||
# Observer temp files (continuous-learning-v2)
|
||||
.observer-tmp/
|
||||
|
||||
# Rust build artifacts
|
||||
ecc2/target/
|
||||
|
||||
|
||||
27
.mcp.json
Normal file
27
.mcp.json
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"github": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-github"]
|
||||
},
|
||||
"context7": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@upstash/context7-mcp@2.1.4"]
|
||||
},
|
||||
"exa": {
|
||||
"url": "https://mcp.exa.ai/mcp"
|
||||
},
|
||||
"memory": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-memory"]
|
||||
},
|
||||
"playwright": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@playwright/mcp@0.0.68", "--extension"]
|
||||
},
|
||||
"sequential-thinking": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
}
|
||||
}
|
||||
}
|
||||
1
.yarnrc.yml
Normal file
1
.yarnrc.yml
Normal file
@@ -0,0 +1 @@
|
||||
nodeLinker: node-modules
|
||||
159
COMMANDS-QUICK-REF.md
Normal file
159
COMMANDS-QUICK-REF.md
Normal file
@@ -0,0 +1,159 @@
|
||||
# Commands Quick Reference
|
||||
|
||||
> 59 slash commands installed globally. Type `/` in any Claude Code session to invoke.
|
||||
|
||||
---
|
||||
|
||||
## Core Workflow
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/plan` | Restate requirements, assess risks, write step-by-step implementation plan — **waits for your confirm before touching code** |
|
||||
| `/tdd` | Enforce test-driven development: scaffold interface → write failing test → implement → verify 80%+ coverage |
|
||||
| `/code-review` | Full code quality, security, and maintainability review of changed files |
|
||||
| `/build-fix` | Detect and fix build errors — delegates to the right build-resolver agent automatically |
|
||||
| `/verify` | Run the full verification loop: build → lint → test → type-check |
|
||||
| `/quality-gate` | Quality gate check against project standards |
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/tdd` | Universal TDD workflow (any language) |
|
||||
| `/e2e` | Generate + run Playwright end-to-end tests, capture screenshots/videos/traces |
|
||||
| `/test-coverage` | Report test coverage, identify gaps |
|
||||
| `/go-test` | TDD workflow for Go (table-driven, 80%+ coverage with `go test -cover`) |
|
||||
| `/kotlin-test` | TDD for Kotlin (Kotest + Kover) |
|
||||
| `/rust-test` | TDD for Rust (cargo test, integration tests) |
|
||||
| `/cpp-test` | TDD for C++ (GoogleTest + gcov/lcov) |
|
||||
|
||||
---
|
||||
|
||||
## Code Review
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/code-review` | Universal code review |
|
||||
| `/python-review` | Python — PEP 8, type hints, security, idiomatic patterns |
|
||||
| `/go-review` | Go — idiomatic patterns, concurrency safety, error handling |
|
||||
| `/kotlin-review` | Kotlin — null safety, coroutine safety, clean architecture |
|
||||
| `/rust-review` | Rust — ownership, lifetimes, unsafe usage |
|
||||
| `/cpp-review` | C++ — memory safety, modern idioms, concurrency |
|
||||
|
||||
---
|
||||
|
||||
## Build Fixers
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/build-fix` | Auto-detect language and fix build errors |
|
||||
| `/go-build` | Fix Go build errors and `go vet` warnings |
|
||||
| `/kotlin-build` | Fix Kotlin/Gradle compiler errors |
|
||||
| `/rust-build` | Fix Rust build + borrow checker issues |
|
||||
| `/cpp-build` | Fix C++ CMake and linker problems |
|
||||
| `/gradle-build` | Fix Gradle errors for Android / KMP |
|
||||
|
||||
---
|
||||
|
||||
## Planning & Architecture
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/plan` | Implementation plan with risk assessment |
|
||||
| `/multi-plan` | Multi-model collaborative planning |
|
||||
| `/multi-workflow` | Multi-model collaborative development |
|
||||
| `/multi-backend` | Backend-focused multi-model development |
|
||||
| `/multi-frontend` | Frontend-focused multi-model development |
|
||||
| `/multi-execute` | Multi-model collaborative execution |
|
||||
| `/orchestrate` | Guide for tmux/worktree multi-agent orchestration |
|
||||
| `/devfleet` | Orchestrate parallel Claude Code agents via DevFleet |
|
||||
|
||||
---
|
||||
|
||||
## Session Management
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/save-session` | Save current session state to `~/.claude/session-data/` |
|
||||
| `/resume-session` | Load the most recent saved session from the canonical session store and resume from where you left off |
|
||||
| `/sessions` | Browse, search, and manage session history with aliases from `~/.claude/session-data/` (with legacy reads from `~/.claude/sessions/`) |
|
||||
| `/checkpoint` | Mark a checkpoint in the current session |
|
||||
| `/aside` | Answer a quick side question without losing current task context |
|
||||
| `/context-budget` | Analyse context window usage — find token overhead, optimise |
|
||||
|
||||
---
|
||||
|
||||
## Learning & Improvement
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/learn` | Extract reusable patterns from the current session |
|
||||
| `/learn-eval` | Extract patterns + self-evaluate quality before saving |
|
||||
| `/evolve` | Analyse learned instincts, suggest evolved skill structures |
|
||||
| `/promote` | Promote project-scoped instincts to global scope |
|
||||
| `/instinct-status` | Show all learned instincts (project + global) with confidence scores |
|
||||
| `/instinct-export` | Export instincts to a file |
|
||||
| `/instinct-import` | Import instincts from a file or URL |
|
||||
| `/skill-create` | Analyse local git history → generate a reusable skill |
|
||||
| `/skill-health` | Skill portfolio health dashboard with analytics |
|
||||
| `/rules-distill` | Scan skills, extract cross-cutting principles, distill into rules |
|
||||
|
||||
---
|
||||
|
||||
## Refactoring & Cleanup
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/refactor-clean` | Remove dead code, consolidate duplicates, clean up structure |
|
||||
| `/prompt-optimize` | Analyse a draft prompt and output an optimised ECC-enriched version |
|
||||
|
||||
---
|
||||
|
||||
## Docs & Research
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/docs` | Look up current library/API documentation via Context7 |
|
||||
| `/update-docs` | Update project documentation |
|
||||
| `/update-codemaps` | Regenerate codemaps for the codebase |
|
||||
|
||||
---
|
||||
|
||||
## Loops & Automation
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/loop-start` | Start a recurring agent loop on an interval |
|
||||
| `/loop-status` | Check status of running loops |
|
||||
| `/claw` | Start NanoClaw v2 — persistent REPL with model routing, skill hot-load, branching, and metrics |
|
||||
|
||||
---
|
||||
|
||||
## Project & Infrastructure
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/projects` | List known projects and their instinct statistics |
|
||||
| `/harness-audit` | Audit the agent harness configuration for reliability and cost |
|
||||
| `/eval` | Run the evaluation harness |
|
||||
| `/model-route` | Route a task to the right model (Haiku / Sonnet / Opus) |
|
||||
| `/pm2` | PM2 process manager initialisation |
|
||||
| `/setup-pm` | Configure package manager (npm / pnpm / yarn / bun) |
|
||||
|
||||
---
|
||||
|
||||
## Quick Decision Guide
|
||||
|
||||
```
|
||||
Starting a new feature? → /plan first, then /tdd
|
||||
Code just written? → /code-review
|
||||
Build broken? → /build-fix
|
||||
Need live docs? → /docs <library>
|
||||
Session about to end? → /save-session or /learn-eval
|
||||
Resuming next day? → /resume-session
|
||||
Context getting heavy? → /context-budget then /checkpoint
|
||||
Want to extract what you learned? → /learn-eval then /evolve
|
||||
Running repeated tasks? → /loop-start
|
||||
```
|
||||
122
EVALUATION.md
Normal file
122
EVALUATION.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# Repo Evaluation vs Current Setup
|
||||
|
||||
**Date:** 2026-03-21
|
||||
**Branch:** `claude/evaluate-repo-comparison-ASZ9Y`
|
||||
|
||||
---
|
||||
|
||||
## Current Setup (`~/.claude/`)
|
||||
|
||||
The active Claude Code installation is near-minimal:
|
||||
|
||||
| Component | Current |
|
||||
|-----------|---------|
|
||||
| Agents | 0 |
|
||||
| Skills | 0 installed |
|
||||
| Commands | 0 |
|
||||
| Hooks | 1 (Stop: git check) |
|
||||
| Rules | 0 |
|
||||
| MCP configs | 0 |
|
||||
|
||||
**Installed hooks:**
|
||||
- `Stop` → `stop-hook-git-check.sh` — blocks session end if there are uncommitted changes or unpushed commits
|
||||
|
||||
**Installed permissions:**
|
||||
- `Skill` — allows skill invocations
|
||||
|
||||
**Plugins:** Only `blocklist.json` (no active plugins installed)
|
||||
|
||||
---
|
||||
|
||||
## This Repo (`everything-claude-code` v1.9.0)
|
||||
|
||||
| Component | Repo |
|
||||
|-----------|------|
|
||||
| Agents | 28 |
|
||||
| Skills | 116 |
|
||||
| Commands | 59 |
|
||||
| Rules sets | 12 languages + common (60+ rule files) |
|
||||
| Hooks | Comprehensive system (PreToolUse, PostToolUse, SessionStart, Stop) |
|
||||
| MCP configs | 1 (Context7 + others) |
|
||||
| Schemas | 9 JSON validators |
|
||||
| Scripts/CLI | 46+ Node.js modules + multiple CLIs |
|
||||
| Tests | 58 test files |
|
||||
| Install profiles | core, developer, security, research, full |
|
||||
| Supported harnesses | Claude Code, Codex, Cursor, OpenCode |
|
||||
|
||||
---
|
||||
|
||||
## Gap Analysis
|
||||
|
||||
### Hooks
|
||||
- **Current:** 1 Stop hook (git hygiene check)
|
||||
- **Repo:** Full hook matrix covering:
|
||||
- Dangerous command blocking (`rm -rf`, force pushes)
|
||||
- Auto-formatting on file edits
|
||||
- Dev server tmux enforcement
|
||||
- Cost tracking
|
||||
- Session evaluation and governance capture
|
||||
- MCP health monitoring
|
||||
|
||||
### Agents (28 missing)
|
||||
The repo provides specialized agents for every major workflow:
|
||||
- Language reviewers: TypeScript, Python, Go, Java, Kotlin, Rust, C++, Flutter
|
||||
- Build resolvers: Go, Java, Kotlin, Rust, C++, PyTorch
|
||||
- Workflow agents: planner, tdd-guide, code-reviewer, security-reviewer, architect
|
||||
- Automation: loop-operator, doc-updater, refactor-cleaner, harness-optimizer
|
||||
|
||||
### Skills (116 missing)
|
||||
Domain knowledge modules covering:
|
||||
- Language patterns (Python, Go, Kotlin, Rust, C++, Java, Swift, Perl, Laravel, Django)
|
||||
- Testing strategies (TDD, E2E, coverage)
|
||||
- Architecture patterns (backend, frontend, API design, database migrations)
|
||||
- AI/ML workflows (Claude API, eval harness, agent loops, cost-aware pipelines)
|
||||
- Business workflows (investor materials, market research, content engine)
|
||||
|
||||
### Commands (59 missing)
|
||||
- `/tdd`, `/plan`, `/e2e`, `/code-review` — core dev workflows
|
||||
- `/sessions`, `/save-session`, `/resume-session` — session persistence
|
||||
- `/orchestrate`, `/multi-plan`, `/multi-execute` — multi-agent coordination
|
||||
- `/learn`, `/skill-create`, `/evolve` — continuous improvement
|
||||
- `/build-fix`, `/verify`, `/quality-gate` — build/quality automation
|
||||
|
||||
### Rules (60+ files missing)
|
||||
Language-specific coding style, patterns, testing, and security guidelines for:
|
||||
TypeScript, Python, Go, Java, Kotlin, Rust, C++, C#, Swift, Perl, PHP, and common/cross-language rules.
|
||||
|
||||
---
|
||||
|
||||
## Recommendations
|
||||
|
||||
### Immediate value (core install)
|
||||
Run `ecc install --profile core` to get:
|
||||
- Core agents (code-reviewer, planner, tdd-guide, security-reviewer)
|
||||
- Essential skills (tdd-workflow, coding-standards, security-review)
|
||||
- Key commands (/tdd, /plan, /code-review, /build-fix)
|
||||
|
||||
### Full install
|
||||
Run `ecc install --profile full` to get all 28 agents, 116 skills, and 59 commands.
|
||||
|
||||
### Hooks upgrade
|
||||
The current Stop hook is solid. The repo's `hooks.json` adds:
|
||||
- Dangerous command blocking (safety)
|
||||
- Auto-formatting (quality)
|
||||
- Cost tracking (observability)
|
||||
- Session evaluation (learning)
|
||||
|
||||
### Rules
|
||||
Adding language rules (e.g., TypeScript, Python) provides always-on coding guidelines without relying on per-session prompts.
|
||||
|
||||
---
|
||||
|
||||
## What the Current Setup Does Well
|
||||
|
||||
- The `stop-hook-git-check.sh` Stop hook is production-quality and already enforces good git hygiene
|
||||
- The `Skill` permission is correctly configured
|
||||
- The setup is clean with no conflicts or cruft
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
The current setup is essentially a blank slate with one well-implemented git hygiene hook. This repo provides a complete, production-tested enhancement layer covering agents, skills, commands, hooks, and rules — with a selective install system so you can add exactly what you need without bloating the configuration.
|
||||
58
README.md
58
README.md
@@ -180,6 +180,11 @@ cd everything-claude-code
|
||||
npm install # or: pnpm install | yarn install | bun install
|
||||
|
||||
# macOS/Linux
|
||||
|
||||
# Recommended: install everything (full profile)
|
||||
./install.sh --profile full
|
||||
|
||||
# Or install for specific languages only
|
||||
./install.sh typescript # or python or golang or swift or php
|
||||
# ./install.sh typescript python golang swift php
|
||||
# ./install.sh --target cursor typescript
|
||||
@@ -188,6 +193,11 @@ npm install # or: pnpm install | yarn install | bun install
|
||||
|
||||
```powershell
|
||||
# Windows PowerShell
|
||||
|
||||
# Recommended: install everything (full profile)
|
||||
.\install.ps1 --profile full
|
||||
|
||||
# Or install for specific languages only
|
||||
.\install.ps1 typescript # or python or golang or swift or php
|
||||
# .\install.ps1 typescript python golang swift php
|
||||
# .\install.ps1 --target cursor typescript
|
||||
@@ -197,7 +207,7 @@ npm install # or: pnpm install | yarn install | bun install
|
||||
npx ecc-install typescript
|
||||
```
|
||||
|
||||
For manual install instructions see the README in the `rules/` folder.
|
||||
For manual install instructions see the README in the `rules/` folder. When copying rules manually, copy the whole language directory (for example `rules/common` or `rules/golang`), not the files inside it, so relative references keep working and filenames do not collide.
|
||||
|
||||
### Step 3: Start Using
|
||||
|
||||
@@ -214,6 +224,20 @@ For manual install instructions see the README in the `rules/` folder.
|
||||
|
||||
✨ **That's it!** You now have access to 28 agents, 125 skills, and 60 commands.
|
||||
|
||||
### Multi-model commands require additional setup
|
||||
|
||||
> ⚠️ `multi-*` commands are **not** covered by the base plugin/rules install above.
|
||||
>
|
||||
> To use `/multi-plan`, `/multi-execute`, `/multi-backend`, `/multi-frontend`, and `/multi-workflow`, you must also install the `ccg-workflow` runtime.
|
||||
>
|
||||
> Initialize it with `npx ccg-workflow`.
|
||||
>
|
||||
> That runtime provides the external dependencies these commands expect, including:
|
||||
> - `~/.claude/bin/codeagent-wrapper`
|
||||
> - `~/.claude/.ccg/prompts/*`
|
||||
>
|
||||
> Without `ccg-workflow`, these `multi-*` commands will not run correctly.
|
||||
|
||||
---
|
||||
|
||||
## 🌐 Cross-Platform Support
|
||||
@@ -614,16 +638,16 @@ This gives you instant access to all commands, agents, skills, and hooks.
|
||||
>
|
||||
> # Option A: User-level rules (applies to all projects)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php ~/.claude/rules/
|
||||
>
|
||||
> # Option B: Project-level rules (applies to current project only)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/ # pick your stack
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -639,12 +663,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# Copy agents to your Claude config
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Copy rules (common + language-specific)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
# Copy rules directories (common + language-specific)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php ~/.claude/rules/
|
||||
|
||||
# Copy commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -850,7 +875,8 @@ Yes. Use Option 2 (manual installation) and copy only what you need:
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Just rules
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
mkdir -p ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
```
|
||||
|
||||
Each component is fully independent.
|
||||
@@ -999,6 +1025,8 @@ cp .codex/config.toml ~/.codex/config.toml
|
||||
|
||||
The sync script safely merges ECC MCP servers into your existing `~/.codex/config.toml` using an **add-only** strategy — it never removes or modifies your existing servers. Run with `--dry-run` to preview changes, or `--update-mcp` to force-refresh ECC servers to the latest recommended config.
|
||||
|
||||
For Context7, ECC uses the canonical Codex section name `[mcp_servers.context7]` while still launching the `@upstash/context7-mcp` package. If you already have a legacy `[mcp_servers.context7-mcp]` entry, `--update-mcp` migrates it to the canonical section name.
|
||||
|
||||
Codex macOS app:
|
||||
- Open this repository as your workspace.
|
||||
- The root `AGENTS.md` is auto-detected.
|
||||
|
||||
@@ -82,14 +82,17 @@
|
||||
# 首先克隆仓库
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
# 复制规则目录(通用 + 语言特定)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
```
|
||||
|
||||
复制规则时,请复制整个目录(例如 `rules/common`、`rules/golang`),而不是复制目录内的文件;这样可以保留相对引用,并避免不同规则集中的同名文件互相覆盖。
|
||||
|
||||
### 第三步:开始使用
|
||||
|
||||
```bash
|
||||
@@ -105,6 +108,20 @@ cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
|
||||
✨ **完成!** 你现在可以使用 13 个代理、43 个技能和 31 个命令。
|
||||
|
||||
### multi-* 命令需要额外配置
|
||||
|
||||
> ⚠️ 上面的基础插件 / rules 安装**不包含** `multi-*` 命令所需的运行时。
|
||||
>
|
||||
> 如果要使用 `/multi-plan`、`/multi-execute`、`/multi-backend`、`/multi-frontend` 和 `/multi-workflow`,还需要额外安装 `ccg-workflow` 运行时。
|
||||
>
|
||||
> 可通过 `npx ccg-workflow` 完成初始化安装。
|
||||
>
|
||||
> 该运行时会提供这些命令依赖的关键组件,包括:
|
||||
> - `~/.claude/bin/codeagent-wrapper`
|
||||
> - `~/.claude/.ccg/prompts/*`
|
||||
>
|
||||
> 未安装 `ccg-workflow` 时,这些 `multi-*` 命令将无法正常运行。
|
||||
|
||||
---
|
||||
|
||||
## 🌐 跨平台支持
|
||||
@@ -352,11 +369,20 @@ everything-claude-code/
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # 选项 A:用户级规则(应用于所有项目)
|
||||
> cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
>
|
||||
> # 选项 B:项目级规则(仅应用于当前项目)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/
|
||||
> cp -r everything-claude-code/rules/python .claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang .claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl .claude/rules/
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -372,12 +398,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# 将代理复制到你的 Claude 配置
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
# 复制规则目录(通用 + 语言特定)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
|
||||
# 复制命令
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
196
REPO-ASSESSMENT.md
Normal file
196
REPO-ASSESSMENT.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Repo & Fork Assessment + Setup Recommendations
|
||||
|
||||
**Date:** 2026-03-21
|
||||
|
||||
---
|
||||
|
||||
## What's Available
|
||||
|
||||
### Repo: `Infiniteyieldai/everything-claude-code`
|
||||
|
||||
This is a **fork of `affaan-m/everything-claude-code`** (the upstream project with 50K+ stars, 6K+ forks).
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| Version | 1.9.0 (current) |
|
||||
| Status | Clean fork — 1 commit ahead of upstream `main` (the EVALUATION.md doc added in this session) |
|
||||
| Remote branches | `main`, `claude/evaluate-repo-comparison-ASZ9Y` |
|
||||
| Upstream sync | Fully synced — last upstream commit merged was the zh-CN docs PR (#728) |
|
||||
| License | MIT |
|
||||
|
||||
**This is the right repo to work from.** It's the latest upstream version with no divergence or merge conflicts.
|
||||
|
||||
---
|
||||
|
||||
### Current `~/.claude/` Installation
|
||||
|
||||
| Component | Installed | Available in Repo |
|
||||
|-----------|-----------|-------------------|
|
||||
| Agents | 0 | 28 |
|
||||
| Skills | 0 | 116 |
|
||||
| Commands | 0 | 59 |
|
||||
| Rules | 0 | 60+ files (12 languages) |
|
||||
| Hooks | 1 (git Stop check) | Full PreToolUse/PostToolUse matrix |
|
||||
| MCP configs | 0 | 1 (Context7) |
|
||||
|
||||
The existing Stop hook (`stop-hook-git-check.sh`) is solid — blocks session end on uncommitted/unpushed work. Keep it.
|
||||
|
||||
---
|
||||
|
||||
## Install Profile Recommendations
|
||||
|
||||
The repo ships 5 install profiles. Choose based on your primary use case:
|
||||
|
||||
### Profile: `core` (Minimum viable setup)
|
||||
> Fastest to install. Gets you commands, core agents, hooks runtime, and quality workflow.
|
||||
|
||||
**Best for:** Trying ECC out, minimal footprint, or a constrained environment.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile core
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
**Installs:** rules-core, agents-core, commands-core, hooks-runtime, platform-configs, workflow-quality
|
||||
|
||||
---
|
||||
|
||||
### Profile: `developer` (Recommended for daily dev work)
|
||||
> The default engineering profile for most ECC users.
|
||||
|
||||
**Best for:** General software development across app codebases.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile developer
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
**Adds over core:** framework-language skills, database patterns, orchestration commands
|
||||
|
||||
---
|
||||
|
||||
### Profile: `security`
|
||||
> Baseline runtime + security-specific agents and rules.
|
||||
|
||||
**Best for:** Security-focused workflows, code audits, vulnerability reviews.
|
||||
|
||||
---
|
||||
|
||||
### Profile: `research`
|
||||
> Investigation, synthesis, and publishing workflows.
|
||||
|
||||
**Best for:** Content creation, investor materials, market research, cross-posting.
|
||||
|
||||
---
|
||||
|
||||
### Profile: `full`
|
||||
> Everything — all 18 modules.
|
||||
|
||||
**Best for:** Power users who want the complete toolkit.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile full
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Priority Additions (High Value, Low Risk)
|
||||
|
||||
Regardless of profile, these components add immediate value:
|
||||
|
||||
### 1. Core Agents (highest ROI)
|
||||
|
||||
| Agent | Why it matters |
|
||||
|-------|----------------|
|
||||
| `planner.md` | Breaks complex tasks into implementation plans |
|
||||
| `code-reviewer.md` | Quality and maintainability review |
|
||||
| `tdd-guide.md` | TDD workflow (RED→GREEN→IMPROVE) |
|
||||
| `security-reviewer.md` | Vulnerability detection |
|
||||
| `architect.md` | System design & scalability decisions |
|
||||
|
||||
### 2. Key Commands
|
||||
|
||||
| Command | Why it matters |
|
||||
|---------|----------------|
|
||||
| `/plan` | Implementation planning before coding |
|
||||
| `/tdd` | Test-driven workflow |
|
||||
| `/code-review` | On-demand review |
|
||||
| `/build-fix` | Automated build error resolution |
|
||||
| `/learn` | Extract patterns from current session |
|
||||
|
||||
### 3. Hook Upgrades (from `hooks/hooks.json`)
|
||||
The repo's hook system adds these over the current single Stop hook:
|
||||
|
||||
| Hook | Trigger | Value |
|
||||
|------|---------|-------|
|
||||
| `block-no-verify` | PreToolUse: Bash | Blocks `--no-verify` git flag abuse |
|
||||
| `pre-bash-git-push-reminder` | PreToolUse: Bash | Pre-push review reminder |
|
||||
| `doc-file-warning` | PreToolUse: Write | Warns on non-standard doc files |
|
||||
| `suggest-compact` | PreToolUse: Edit/Write | Suggests compaction at logical intervals |
|
||||
| Continuous learning observer | PreToolUse: * | Captures tool use patterns for skill improvement |
|
||||
|
||||
### 4. Rules (Always-on guidelines)
|
||||
The `rules/common/` directory provides baseline guidelines that fire on every session:
|
||||
- `security.md` — Security guardrails
|
||||
- `testing.md` — 80%+ coverage requirement
|
||||
- `git-workflow.md` — Conventional commits, branch strategy
|
||||
- `coding-style.md` — Cross-language style standards
|
||||
|
||||
---
|
||||
|
||||
## What to Do With the Fork
|
||||
|
||||
### Option A: Use as upstream tracker (current state)
|
||||
Keep the fork synced with `affaan-m/everything-claude-code` upstream. Periodically merge upstream changes:
|
||||
```bash
|
||||
git fetch upstream
|
||||
git merge upstream/main
|
||||
```
|
||||
Install from the local clone. This is clean and maintainable.
|
||||
|
||||
### Option B: Customize the fork
|
||||
Add personal skills, agents, or commands to the fork. Good for:
|
||||
- Business-specific domain skills (your vertical)
|
||||
- Team-specific coding conventions
|
||||
- Custom hooks for your stack
|
||||
|
||||
The fork already has the EVALUATION.md and REPO-ASSESSMENT.md docs — that's fine for a working fork.
|
||||
|
||||
### Option C: Install from npm (simplest for fresh machines)
|
||||
```bash
|
||||
npx ecc-universal install --profile developer
|
||||
```
|
||||
No need to clone the repo. This is the recommended install method for most users.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Setup Steps
|
||||
|
||||
1. **Keep the existing Stop hook** — it's doing its job
|
||||
2. **Run the developer profile install** from the local fork:
|
||||
```bash
|
||||
cd /path/to/everything-claude-code
|
||||
node scripts/install-plan.js --profile developer
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
3. **Add language rules** for your primary stack (TypeScript, Python, Go, etc.):
|
||||
```bash
|
||||
node scripts/install-plan.js --add rules/typescript
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
4. **Enable MCP Context7** for live documentation lookup:
|
||||
- Copy `mcp-configs/mcp-servers.json` into your project's `.claude/` dir
|
||||
5. **Review hooks** — enable the `hooks/hooks.json` additions selectively, starting with `block-no-verify` and `pre-bash-git-push-reminder`
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| Is the fork healthy? | Yes — fully synced with upstream v1.9.0 |
|
||||
| Other forks to consider? | None visible in this environment; upstream `affaan-m/everything-claude-code` is the source of truth |
|
||||
| Best install profile? | `developer` for day-to-day dev work |
|
||||
| Biggest gap in current setup? | 0 agents installed — add at minimum: planner, code-reviewer, tdd-guide, security-reviewer |
|
||||
| Quickest win? | Run `node scripts/install-plan.js --profile core && node scripts/install-apply.js` |
|
||||
13
package-lock.json
generated
13
package-lock.json
generated
@@ -11,6 +11,7 @@
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.5",
|
||||
"ajv": "^8.18.0",
|
||||
"sql.js": "^1.14.1"
|
||||
},
|
||||
"bin": {
|
||||
@@ -19,7 +20,6 @@
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"ajv": "^8.18.0",
|
||||
"c8": "^10.1.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
@@ -449,7 +449,6 @@
|
||||
"version": "8.18.0",
|
||||
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
|
||||
"integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
@@ -1043,7 +1042,6 @@
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
|
||||
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/fast-json-stable-stringify": {
|
||||
@@ -1064,7 +1062,6 @@
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
|
||||
"integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==",
|
||||
"dev": true,
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
@@ -1491,7 +1488,6 @@
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
|
||||
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/json-stable-stringify-without-jsonify": {
|
||||
@@ -2457,9 +2453,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/picomatch": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
|
||||
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -2512,7 +2508,6 @@
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
|
||||
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
|
||||
@@ -89,6 +89,9 @@
|
||||
"AGENTS.md",
|
||||
".claude-plugin/plugin.json",
|
||||
".claude-plugin/README.md",
|
||||
".codex-plugin/plugin.json",
|
||||
".codex-plugin/README.md",
|
||||
".mcp.json",
|
||||
"install.sh",
|
||||
"install.ps1",
|
||||
"llms.txt"
|
||||
@@ -110,11 +113,11 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.5",
|
||||
"ajv": "^8.18.0",
|
||||
"sql.js": "^1.14.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"ajv": "^8.18.0",
|
||||
"c8": "^10.1.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
@@ -122,5 +125,6 @@
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"packageManager": "yarn@4.9.2+sha512.1fc009bc09d13cfd0e19efa44cbfc2b9cf6ca61482725eb35bbc5e257e093ebf4130db6dfe15d604ff4b79efd8e1e8e99b25fa7d0a6197c9f9826358d4d65c3c"
|
||||
}
|
||||
|
||||
172
research/ecc2-codebase-analysis.md
Normal file
172
research/ecc2-codebase-analysis.md
Normal file
@@ -0,0 +1,172 @@
|
||||
# ECC2 Codebase Research Report
|
||||
|
||||
**Date:** 2026-03-26
|
||||
**Subject:** `ecc-tui` v0.1.0 — Agentic IDE Control Plane
|
||||
**Total Lines:** 4,417 across 15 `.rs` files
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
ECC2 is a Rust TUI application that orchestrates AI coding agent sessions. It uses:
|
||||
- **ratatui 0.29** + **crossterm 0.28** for terminal UI
|
||||
- **rusqlite 0.32** (bundled) for local state persistence
|
||||
- **tokio 1** (full) for async runtime
|
||||
- **clap 4** (derive) for CLI
|
||||
|
||||
### Module Breakdown
|
||||
|
||||
| Module | Lines | Purpose |
|
||||
|--------|------:|---------|
|
||||
| `session/` | 1,974 | Session lifecycle, persistence, runtime, output |
|
||||
| `tui/` | 1,613 | Dashboard, app loop, custom widgets |
|
||||
| `observability/` | 409 | Tool call risk scoring and logging |
|
||||
| `config/` | 144 | Configuration (TOML file) |
|
||||
| `main.rs` | 142 | CLI entry point |
|
||||
| `worktree/` | 99 | Git worktree management |
|
||||
| `comms/` | 36 | Inter-agent messaging (send only) |
|
||||
|
||||
### Key Architectural Patterns
|
||||
|
||||
- **DbWriter thread** in `session/runtime.rs` — dedicated OS thread for SQLite writes from async context via `mpsc::unbounded_channel` with oneshot acknowledgements. Clean solution to the "SQLite from async" problem.
|
||||
- **Session state machine** with enforced transitions: `Pending → {Running, Failed, Stopped}`, `Running → {Idle, Completed, Failed, Stopped}`, etc.
|
||||
- **Ring buffer** for session output — `OUTPUT_BUFFER_LIMIT = 1000` lines per session with automatic eviction.
|
||||
- **Risk scoring** on tool calls — 4-axis analysis (base tool risk, file sensitivity, blast radius, irreversibility) producing composite 0.0–1.0 scores with suggested actions (Allow/Review/RequireConfirmation/Block).
|
||||
|
||||
## 2. Code Quality Metrics
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Total lines | 4,417 |
|
||||
| Test functions | 34 |
|
||||
| `unwrap()` calls | 3 |
|
||||
| `unsafe` blocks | 0 |
|
||||
| TODO/FIXME comments | 0 |
|
||||
| Max file size | 1,273 lines (`dashboard.rs`) |
|
||||
|
||||
**Assessment:** The codebase is clean. Only 3 `unwrap()` calls (2 in tests, 1 in config `default()`), zero `unsafe`, and all modules use proper `anyhow::Result` error propagation. The `dashboard.rs` file at 1,273 lines exceeds the repo's 800-line max-file guideline, but it is still manageable at the current scope.
|
||||
|
||||
## 3. Identified Gaps
|
||||
|
||||
### 3.1 Comms Module — Send Without Receive
|
||||
|
||||
`comms/mod.rs` (36 lines) has `send()` but no `receive()`, `poll()`, `inbox()`, or `subscribe()`. The `messages` table exists in SQLite, but nothing reads from it. The inter-agent messaging story is half-built.
|
||||
|
||||
**Impact:** Agents cannot coordinate. The `TaskHandoff`, `Query`, `Response`, and `Conflict` message types are defined but unusable.
|
||||
|
||||
### 3.2 New Session Dialog — Stub
|
||||
|
||||
`dashboard.rs:495` — `new_session()` logs `"New session dialog requested"` but does nothing. Users must use the CLI (`ecc start --task "..."`) to create sessions; the TUI dashboard cannot.
|
||||
|
||||
### 3.3 Single Agent Support
|
||||
|
||||
`session/manager.rs` — `agent_program()` only supports `"claude"`. The CLI accepts `--agent` but anything other than `"claude"` fails. No codex, opencode, or custom agent support.
|
||||
|
||||
### 3.4 Config — File-Only
|
||||
|
||||
`Config::load()` reads `~/.claude/ecc2.toml` only. The implementation lacks environment variable overrides (e.g., `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`) and CLI flags for configuration.
|
||||
|
||||
### 3.5 Legacy Dependency Candidate: `git2`
|
||||
|
||||
`git2 = "0.20"` is still declared in `Cargo.toml`, but the `worktree` module shells out to the `git` CLI instead. That makes `git2` a strong removal candidate rather than an already-completed cleanup.
|
||||
|
||||
### 3.6 No Metrics Aggregation
|
||||
|
||||
`SessionMetrics` tracks tokens, cost, duration, tool_calls, files_changed per session. But there's no aggregate view: total cost across sessions, average duration, top tools by usage, etc. The Metrics pane in the dashboard shows per-session detail only.
|
||||
|
||||
### 3.7 Daemon — No Health Reporting
|
||||
|
||||
`session/daemon.rs` runs an infinite loop checking session timeouts. No health endpoint, no log rotation, no PID file, no signal handling for graceful shutdown. `Ctrl+C` during daemon mode kills the process uncleanly.
|
||||
|
||||
## 4. Test Coverage Analysis
|
||||
|
||||
34 test functions across 10 source modules:
|
||||
|
||||
| Module | Tests | Coverage Focus |
|
||||
|--------|------:|----------------|
|
||||
| `main.rs` | 1 | CLI parsing |
|
||||
| `config/mod.rs` | 5 | Defaults, deserialization, legacy fallback |
|
||||
| `observability/mod.rs` | 5 | Risk scoring, persistence, pagination |
|
||||
| `session/daemon.rs` | 2 | Crash recovery / liveness handling |
|
||||
| `session/manager.rs` | 4 | Session lifecycle, resume, stop, latest status |
|
||||
| `session/output.rs` | 2 | Ring buffer, broadcast |
|
||||
| `session/runtime.rs` | 1 | Output capture persistence/events |
|
||||
| `session/store.rs` | 3 | Buffer window, migration, state transitions |
|
||||
| `tui/dashboard.rs` | 8 | Rendering, selection, pane navigation, scrolling |
|
||||
| `tui/widgets.rs` | 3 | Token meter rendering and thresholds |
|
||||
|
||||
**Direct coverage gaps:**
|
||||
- `comms/mod.rs` — 0 tests
|
||||
- `worktree/mod.rs` — 0 tests
|
||||
|
||||
The core I/O-heavy paths are no longer completely untested: `manager.rs`, `runtime.rs`, and `daemon.rs` each have targeted tests. The remaining gap is breadth rather than total absence, especially around `comms/`, `worktree/`, and more adversarial process/worktree failure cases.
|
||||
|
||||
## 5. Security Observations
|
||||
|
||||
- **No secrets in code.** Config reads from TOML file, no hardcoded credentials.
|
||||
- **Process spawning** uses `tokio::process::Command` with explicit `Stdio::piped()` — no shell injection vectors.
|
||||
- **Risk scoring** is a strong feature — catches `rm -rf`, `git push --force origin main`, file access to `.env`/secrets.
|
||||
- **No input sanitization on session task strings.** The task string is passed directly to `claude --print`. If the task contains shell metacharacters, it could be exploited depending on how `Command` handles argument quoting. Currently safe (arguments are not shell-interpreted), but worth auditing.
|
||||
|
||||
## 6. Dependency Health
|
||||
|
||||
| Crate | Version | Latest | Notes |
|
||||
|-------|---------|--------|-------|
|
||||
| ratatui | 0.29 | **0.30.0** | Update available |
|
||||
| crossterm | 0.28 | **0.29.0** | Update available |
|
||||
| rusqlite | 0.32 | **0.39.0** | Update available |
|
||||
| tokio | 1 | **1.50.0** | Update available |
|
||||
| serde | 1 | **1.0.228** | Update available |
|
||||
| clap | 4 | **4.6.0** | Update available |
|
||||
| chrono | 0.4 | **0.4.44** | Update available |
|
||||
| uuid | 1 | **1.22.0** | Update available |
|
||||
|
||||
`git2` is still present in `Cargo.toml` even though the `worktree` module shells out to the `git` CLI. Several other dependencies are outdated; either remove `git2` or start using it before the next release.
|
||||
|
||||
## 7. Recommendations (Prioritized)
|
||||
|
||||
### P0 — Quick Wins
|
||||
|
||||
1. **Add environment variable support to `Config::load()`** — `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`, `ECC_DEFAULT_AGENT`. Standard practice for CLI tools.
|
||||
|
||||
### P1 — Feature Completions
|
||||
|
||||
2. **Implement `comms::receive()` / `comms::poll()`** — read unread messages from the `messages` table, optionally with a `broadcast` channel for real-time delivery. Wire it into the dashboard.
|
||||
3. **Build the new-session dialog in the TUI** — modal form with task input, agent selector, worktree toggle. Should call `session::manager::create_session()`.
|
||||
4. **Add aggregate metrics** — total cost, average session duration, tool call frequency, cost per session. Show in the Metrics pane.
|
||||
|
||||
### P2 — Robustness
|
||||
|
||||
5. **Expand integration coverage for `manager.rs`, `runtime.rs`, and `daemon.rs`** — the repo now has baseline tests here, but it still needs failure-path coverage around process crashes, timeouts, and cleanup edge cases.
|
||||
6. **Add first-party tests for `worktree/mod.rs` and `comms/mod.rs`** — these are still uncovered and back important orchestration features.
|
||||
7. **Add daemon health reporting** — PID file, structured logging, graceful shutdown via signal handler.
|
||||
8. **Task string security audit** — The session task uses `claude --print` via `tokio::process::Command`. Verify arguments are never shell-interpreted. Checklist: confirm `Command` arg usage, threat-model metacharacter injection, input validation/escaping strategy, logging of raw inputs, and automated tests. Re-audit if invocation code changes.
|
||||
9. **Break up `dashboard.rs`** — extract SessionsPane, OutputPane, MetricsPane, LogPane into separate files under `tui/panes/`.
|
||||
|
||||
### P3 — Extensibility
|
||||
|
||||
10. **Multi-agent support** — make `agent_program()` pluggable. Add `codex`, `opencode`, `custom` agent types.
|
||||
11. **Config validation** — validate risk thresholds sum correctly, budget values are positive, paths exist.
|
||||
|
||||
## 8. Comparison with Ratatui 0.29 Best Practices
|
||||
|
||||
The codebase follows ratatui conventions well:
|
||||
- Uses `TableState` for stateful selection (correct pattern)
|
||||
- Custom `Widget` trait implementation for `TokenMeter` (idiomatic)
|
||||
- `tick()` method for periodic state sync (standard)
|
||||
- `broadcast::channel` for real-time output events (appropriate)
|
||||
|
||||
**Minor deviations:**
|
||||
- The `Dashboard` struct directly holds `StateStore` (SQLite connection). Ratatui best practice is to keep the state store behind an `Arc<Mutex<>>` to allow background updates. Currently the TUI owns the DB exclusively, which blocks adding a background metrics refresh task.
|
||||
- No `Clear` widget usage when rendering the help overlay — could cause rendering artifacts on some terminals.
|
||||
|
||||
## 9. Risk Assessment
|
||||
|
||||
| Risk | Likelihood | Impact | Mitigation |
|
||||
|------|-----------|--------|------------|
|
||||
| Dashboard file exceeds 1500 lines (projected) | High | Medium | At 1,273 lines currently (Section 2); extract panes into modules before it grows further |
|
||||
| SQLite lock contention | Low | High | DbWriter pattern already handles this |
|
||||
| No agent diversity | Medium | Medium | Pluggable agent support |
|
||||
| Task-string handling assumptions drift over time | Medium | Medium | Keep `Command` argument handling shell-free, document the threat model, and add regression tests for metacharacter-heavy task input |
|
||||
|
||||
---
|
||||
|
||||
**Bottom line:** ECC2 is a well-structured Rust project with clean error handling, good separation of concerns, and strong security features (risk scoring). The main gaps are incomplete features (comms, new-session dialog, single agent) rather than architectural problems. The codebase is ready for feature work on top of the solid foundation.
|
||||
186
scripts/catalog.js
Normal file
186
scripts/catalog.js
Normal file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const {
|
||||
getInstallComponent,
|
||||
listInstallComponents,
|
||||
listInstallProfiles,
|
||||
} = require('./lib/install-manifests');
|
||||
|
||||
// Maps user-supplied family names (plural or abbreviated forms included)
// to their canonical singular family identifier used by the manifests.
// Frozen so the alias table cannot be mutated at runtime.
const FAMILY_ALIASES = Object.freeze({
  baseline: 'baseline',
  baselines: 'baseline',
  language: 'language',
  languages: 'language',
  lang: 'language',
  framework: 'framework',
  frameworks: 'framework',
  capability: 'capability',
  capabilities: 'capability',
  agent: 'agent',
  agents: 'agent',
  skill: 'skill',
  skills: 'skill',
});
|
||||
|
||||
/**
 * Print CLI usage for the catalog tool, then terminate the process.
 *
 * @param {number} [exitCode=0] - Exit code passed to process.exit.
 */
function showHelp(exitCode = 0) {
  const usage = `
Discover ECC install components and profiles

Usage:
  node scripts/catalog.js profiles [--json]
  node scripts/catalog.js components [--family <family>] [--target <target>] [--json]
  node scripts/catalog.js show <component-id> [--json]

Examples:
  node scripts/catalog.js profiles
  node scripts/catalog.js components --family language
  node scripts/catalog.js show framework:nextjs
`;
  console.log(usage);

  process.exit(exitCode);
}
|
||||
|
||||
/**
 * Canonicalize a family name: trim, lowercase, and resolve plural or
 * abbreviated aliases (e.g. "languages" -> "language"). Unknown families
 * are returned as-is so callers can surface them in error messages.
 *
 * @param {string|null|undefined} value - Raw --family value from the CLI.
 * @returns {string|null} Canonical family name, or null for a falsy input.
 */
function normalizeFamily(value) {
  if (!value) {
    return null;
  }

  const normalized = String(value).trim().toLowerCase();
  // Guard with Object.hasOwn so inherited Object.prototype keys
  // (e.g. "constructor", "toString") are never mistaken for aliases.
  return Object.hasOwn(FAMILY_ALIASES, normalized)
    ? FAMILY_ALIASES[normalized]
    : normalized;
}
|
||||
|
||||
/**
 * Parse CLI arguments for the catalog tool.
 *
 * @param {string[]} argv - Full process.argv (first two entries skipped).
 * @returns {{command: ?string, componentId: ?string, family: ?string,
 *            target: ?string, json: boolean, help: boolean}}
 * @throws {Error} When a flag is missing its value or an argument is unknown.
 */
function parseArgs(argv) {
  const tokens = argv.slice(2);
  const result = {
    command: null,
    componentId: null,
    family: null,
    target: null,
    json: false,
    help: false,
  };

  // No arguments, or a leading help flag, short-circuits to help mode.
  if (tokens.length === 0 || tokens[0] === '--help' || tokens[0] === '-h') {
    result.help = true;
    return result;
  }

  result.command = tokens[0];

  let cursor = 1;
  while (cursor < tokens.length) {
    const token = tokens[cursor];

    if (token === '--help' || token === '-h') {
      result.help = true;
    } else if (token === '--json') {
      result.json = true;
    } else if (token === '--family') {
      if (!tokens[cursor + 1]) {
        throw new Error('Missing value for --family');
      }
      result.family = normalizeFamily(tokens[cursor + 1]);
      cursor += 1;
    } else if (token === '--target') {
      if (!tokens[cursor + 1]) {
        throw new Error('Missing value for --target');
      }
      result.target = tokens[cursor + 1];
      cursor += 1;
    } else if (result.command === 'show' && !result.componentId) {
      // First bare positional after "show" is the component ID.
      result.componentId = token;
    } else {
      throw new Error(`Unknown argument: ${token}`);
    }

    cursor += 1;
  }

  return result;
}
|
||||
|
||||
/**
 * Render the list of install profiles in human-readable form.
 *
 * @param {Array<{id: string, moduleCount: number, description: string}>} profiles
 */
function printProfiles(profiles) {
  console.log('Install profiles:\n');
  profiles.forEach((profile) => {
    console.log(`- ${profile.id} (${profile.moduleCount} modules)`);
    console.log(`  ${profile.description}`);
  });
}
|
||||
|
||||
/**
 * Render the list of install components in human-readable form.
 *
 * @param {Array<{id: string, family: string, targets: string[],
 *                moduleIds: string[], description: string}>} components
 */
function printComponents(components) {
  console.log('Install components:\n');
  components.forEach((component) => {
    console.log(`- ${component.id} [${component.family}]`);
    console.log(`  targets=${component.targets.join(', ')} modules=${component.moduleIds.join(', ')}`);
    console.log(`  ${component.description}`);
  });
}
|
||||
|
||||
/**
 * Render a single resolved install component, including per-module details
 * when the component resolved any modules.
 *
 * @param {object} component - Resolved component from getInstallComponent().
 */
function printComponent(component) {
  console.log(`Install component: ${component.id}\n`);
  console.log(`Family: ${component.family}`);
  console.log(`Targets: ${component.targets.join(', ')}`);
  console.log(`Modules: ${component.moduleIds.join(', ')}`);
  console.log(`Description: ${component.description}`);

  // Guard clause: nothing more to show for components with no modules.
  if (component.modules.length === 0) {
    return;
  }

  console.log('\nResolved modules:');
  for (const entry of component.modules) {
    console.log(`- ${entry.id} [${entry.kind}]`);
    console.log(
      `  targets=${entry.targets.join(', ')} default=${entry.defaultInstall} cost=${entry.cost} stability=${entry.stability}`
    );
    console.log(`  ${entry.description}`);
  }
}
|
||||
|
||||
/**
 * CLI entry point: dispatches to the profiles/components/show subcommands,
 * printing either JSON or human-readable output. Any error is reported to
 * stderr and the process exits with status 1.
 */
function main() {
  try {
    const options = parseArgs(process.argv);

    if (options.help) {
      showHelp(0);
    }

    switch (options.command) {
      case 'profiles': {
        const profiles = listInstallProfiles();
        if (options.json) {
          console.log(JSON.stringify({ profiles }, null, 2));
        } else {
          printProfiles(profiles);
        }
        return;
      }
      case 'components': {
        const components = listInstallComponents({
          family: options.family,
          target: options.target,
        });
        if (options.json) {
          console.log(JSON.stringify({ components }, null, 2));
        } else {
          printComponents(components);
        }
        return;
      }
      case 'show': {
        if (!options.componentId) {
          throw new Error('Catalog show requires an install component ID');
        }
        const component = getInstallComponent(options.componentId);
        if (options.json) {
          console.log(JSON.stringify(component, null, 2));
        } else {
          printComponent(component);
        }
        return;
      }
      default:
        throw new Error(`Unknown catalog command: ${options.command}`);
    }
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}

main();
|
||||
16
scripts/codex/check-codex-global-state.sh
Normal file → Executable file
16
scripts/codex/check-codex-global-state.sh
Normal file → Executable file
@@ -89,7 +89,13 @@ fi
|
||||
if [[ -f "$CONFIG_FILE" ]]; then
|
||||
check_config_pattern '^multi_agent\s*=\s*true' "multi_agent is enabled"
|
||||
check_config_absent '^\s*collab\s*=' "deprecated collab flag is absent"
|
||||
check_config_pattern '^persistent_instructions\s*=' "persistent_instructions is configured"
|
||||
# persistent_instructions is recommended but optional; warn instead of fail
|
||||
# so users who rely on AGENTS.md alone are not blocked (#967).
|
||||
if rg -n '^[[:space:]]*persistent_instructions\s*=' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
ok "persistent_instructions is configured"
|
||||
else
|
||||
warn "persistent_instructions is not set (recommended but optional)"
|
||||
fi
|
||||
check_config_pattern '^\[profiles\.strict\]' "profiles.strict exists"
|
||||
check_config_pattern '^\[profiles\.yolo\]' "profiles.yolo exists"
|
||||
|
||||
@@ -97,7 +103,7 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
'mcp_servers.github' \
|
||||
'mcp_servers.memory' \
|
||||
'mcp_servers.sequential-thinking' \
|
||||
'mcp_servers.context7-mcp'
|
||||
'mcp_servers.context7'
|
||||
do
|
||||
if rg -n "^\[$section\]" "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
ok "MCP section [$section] exists"
|
||||
@@ -106,10 +112,10 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
fi
|
||||
done
|
||||
|
||||
if rg -n '^\[mcp_servers\.context7\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
warn "Duplicate [mcp_servers.context7] exists (context7-mcp is preferred)"
|
||||
if rg -n '^\[mcp_servers\.context7-mcp\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
warn "Legacy [mcp_servers.context7-mcp] exists (context7 is preferred)"
|
||||
else
|
||||
ok "No duplicate [mcp_servers.context7] section"
|
||||
ok "No legacy [mcp_servers.context7-mcp] section"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
0
scripts/codex/install-global-git-hooks.sh
Normal file → Executable file
0
scripts/codex/install-global-git-hooks.sh
Normal file → Executable file
@@ -83,20 +83,23 @@ function dlxServer(name, pkg, extraFields, extraToml) {
|
||||
}
|
||||
|
||||
/** Each entry: key = section name under mcp_servers, value = { toml, fields } */
|
||||
const DEFAULT_MCP_STARTUP_TIMEOUT_SEC = 30;
|
||||
const DEFAULT_MCP_STARTUP_TIMEOUT_TOML = `startup_timeout_sec = ${DEFAULT_MCP_STARTUP_TIMEOUT_SEC}`;
|
||||
|
||||
const ECC_SERVERS = {
|
||||
supabase: dlxServer('supabase', '@supabase/mcp-server-supabase@latest', { startup_timeout_sec: 20.0, tool_timeout_sec: 120.0 }, 'startup_timeout_sec = 20.0\ntool_timeout_sec = 120.0'),
|
||||
playwright: dlxServer('playwright', '@playwright/mcp@latest'),
|
||||
'context7-mcp': dlxServer('context7-mcp', '@upstash/context7-mcp'),
|
||||
playwright: dlxServer('playwright', '@playwright/mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
context7: dlxServer('context7', '@upstash/context7-mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
exa: {
|
||||
fields: { url: 'https://mcp.exa.ai/mcp' },
|
||||
toml: `[mcp_servers.exa]\nurl = "https://mcp.exa.ai/mcp"`
|
||||
},
|
||||
github: {
|
||||
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP] },
|
||||
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]`
|
||||
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP], startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC },
|
||||
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]\n${DEFAULT_MCP_STARTUP_TIMEOUT_TOML}`
|
||||
},
|
||||
memory: dlxServer('memory', '@modelcontextprotocol/server-memory'),
|
||||
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking')
|
||||
memory: dlxServer('memory', '@modelcontextprotocol/server-memory', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML)
|
||||
};
|
||||
|
||||
// Append --features arg for supabase after dlxServer builds the base
|
||||
@@ -104,9 +107,9 @@ ECC_SERVERS.supabase.fields.args.push('--features=account,docs,database,debuggin
|
||||
ECC_SERVERS.supabase.toml = ECC_SERVERS.supabase.toml.replace(/^(args = \[.*)\]$/m, '$1, "--features=account,docs,database,debugging,development,functions,storage,branching"]');
|
||||
|
||||
// Legacy section names that should be treated as an existing ECC server.
|
||||
// e.g. old configs shipped [mcp_servers.context7] instead of [mcp_servers.context7-mcp].
|
||||
// e.g. older configs shipped [mcp_servers.context7-mcp] instead of [mcp_servers.context7].
|
||||
const LEGACY_ALIASES = {
|
||||
'context7-mcp': ['context7']
|
||||
context7: ['context7-mcp']
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -254,6 +257,10 @@ function main() {
|
||||
if (resolvedLabel !== name) {
|
||||
raw = removeServerFromText(raw, name, existing);
|
||||
}
|
||||
if (legacyName && hasCanonical) {
|
||||
toRemoveLog.push(`mcp_servers.${legacyName}`);
|
||||
raw = removeServerFromText(raw, legacyName, existing);
|
||||
}
|
||||
toAppend.push(spec.toml);
|
||||
} else {
|
||||
// Add-only mode: skip, but warn about drift
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { buildDoctorReport } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -88,7 +89,7 @@ function main() {
|
||||
|
||||
const report = buildDoctorReport({
|
||||
repoRoot: require('path').join(__dirname, '..'),
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
});
|
||||
|
||||
8
scripts/ecc.js
Normal file → Executable file
8
scripts/ecc.js
Normal file → Executable file
@@ -13,6 +13,10 @@ const COMMANDS = {
|
||||
script: 'install-plan.js',
|
||||
description: 'Inspect selective-install manifests and resolved plans',
|
||||
},
|
||||
catalog: {
|
||||
script: 'catalog.js',
|
||||
description: 'Discover install profiles and component IDs',
|
||||
},
|
||||
'install-plan': {
|
||||
script: 'install-plan.js',
|
||||
description: 'Alias for plan',
|
||||
@@ -50,6 +54,7 @@ const COMMANDS = {
|
||||
const PRIMARY_COMMANDS = [
|
||||
'install',
|
||||
'plan',
|
||||
'catalog',
|
||||
'list-installed',
|
||||
'doctor',
|
||||
'repair',
|
||||
@@ -79,6 +84,9 @@ Examples:
|
||||
ecc typescript
|
||||
ecc install --profile developer --target claude
|
||||
ecc plan --profile core --target cursor
|
||||
ecc catalog profiles
|
||||
ecc catalog components --family language
|
||||
ecc catalog show framework:nextjs
|
||||
ecc list-installed --json
|
||||
ecc doctor --target cursor
|
||||
ecc repair --dry-run
|
||||
|
||||
24
scripts/install-apply.js
Normal file → Executable file
24
scripts/install-apply.js
Normal file → Executable file
@@ -6,6 +6,7 @@
|
||||
* target-specific mutation logic into testable Node code.
|
||||
*/
|
||||
|
||||
const os = require('os');
|
||||
const {
|
||||
SUPPORTED_INSTALL_TARGETS,
|
||||
listLegacyCompatibilityLanguages,
|
||||
@@ -16,10 +17,10 @@ const {
|
||||
parseInstallArgs,
|
||||
} = require('./lib/install/request');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
function getHelpText() {
|
||||
const languages = listLegacyCompatibilityLanguages();
|
||||
|
||||
console.log(`
|
||||
return `
|
||||
Usage: install.sh [--target <${LEGACY_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] <language> [<language> ...]
|
||||
install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --profile <name> [--with <component>]... [--without <component>]...
|
||||
install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --modules <id,id,...> [--with <component>]... [--without <component>]...
|
||||
@@ -43,8 +44,11 @@ Options:
|
||||
|
||||
Available languages:
|
||||
${languages.map(language => ` - ${language}`).join('\n')}
|
||||
`);
|
||||
`;
|
||||
}
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
console.log(getHelpText());
|
||||
process.exit(exitCode);
|
||||
}
|
||||
|
||||
@@ -100,19 +104,25 @@ function main() {
|
||||
showHelp(0);
|
||||
}
|
||||
|
||||
const { loadInstallConfig } = require('./lib/install/config');
|
||||
const {
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
} = require('./lib/install/config');
|
||||
const { applyInstallPlan } = require('./lib/install-executor');
|
||||
const { createInstallPlanFromRequest } = require('./lib/install/runtime');
|
||||
const defaultConfigPath = options.configPath || options.languages.length > 0
|
||||
? null
|
||||
: findDefaultInstallConfigPath({ cwd: process.cwd() });
|
||||
const config = options.configPath
|
||||
? loadInstallConfig(options.configPath, { cwd: process.cwd() })
|
||||
: null;
|
||||
: (defaultConfigPath ? loadInstallConfig(defaultConfigPath, { cwd: process.cwd() }) : null);
|
||||
const request = normalizeInstallRequest({
|
||||
...options,
|
||||
config,
|
||||
});
|
||||
const plan = createInstallPlanFromRequest(request, {
|
||||
projectRoot: process.cwd(),
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
claudeRulesDir: process.env.CLAUDE_RULES_DIR || null,
|
||||
});
|
||||
|
||||
@@ -132,7 +142,7 @@ function main() {
|
||||
printHumanPlan(result, false);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`Error: ${error.message}`);
|
||||
process.stderr.write(`Error: ${error.message}${getHelpText()}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,10 @@ const {
|
||||
listInstallProfiles,
|
||||
resolveInstallPlan,
|
||||
} = require('./lib/install-manifests');
|
||||
const { loadInstallConfig } = require('./lib/install/config');
|
||||
const {
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
} = require('./lib/install/config');
|
||||
const { normalizeInstallRequest } = require('./lib/install/request');
|
||||
|
||||
function showHelp() {
|
||||
@@ -186,7 +189,7 @@ function main() {
|
||||
try {
|
||||
const options = parseArgs(process.argv);
|
||||
|
||||
if (options.help || process.argv.length <= 2) {
|
||||
if (options.help) {
|
||||
showHelp();
|
||||
process.exit(0);
|
||||
}
|
||||
@@ -224,9 +227,18 @@ function main() {
|
||||
return;
|
||||
}
|
||||
|
||||
const defaultConfigPath = options.configPath
|
||||
? null
|
||||
: findDefaultInstallConfigPath({ cwd: process.cwd() });
|
||||
const config = options.configPath
|
||||
? loadInstallConfig(options.configPath, { cwd: process.cwd() })
|
||||
: null;
|
||||
: (defaultConfigPath ? loadInstallConfig(defaultConfigPath, { cwd: process.cwd() }) : null);
|
||||
|
||||
if (process.argv.length <= 2 && !config) {
|
||||
showHelp();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const request = normalizeInstallRequest({
|
||||
...options,
|
||||
languages: [],
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
@@ -442,7 +443,7 @@ function planAntigravityLegacyInstall(context) {
|
||||
function createLegacyInstallPlan(options = {}) {
|
||||
const sourceRoot = options.sourceRoot || getSourceRoot();
|
||||
const projectRoot = options.projectRoot || process.cwd();
|
||||
const homeDir = options.homeDir || process.env.HOME;
|
||||
const homeDir = options.homeDir || process.env.HOME || os.homedir();
|
||||
const target = options.target || 'claude';
|
||||
|
||||
validateLegacyTarget(target);
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const { resolveInstallPlan, loadInstallManifests } = require('./install-manifests');
|
||||
@@ -696,7 +697,7 @@ function buildDiscoveryRecord(adapter, context) {
|
||||
|
||||
function discoverInstalledStates(options = {}) {
|
||||
const context = {
|
||||
homeDir: options.homeDir || process.env.HOME,
|
||||
homeDir: options.homeDir || process.env.HOME || os.homedir(),
|
||||
projectRoot: options.projectRoot || process.cwd(),
|
||||
};
|
||||
const targets = normalizeTargets(options.targets);
|
||||
@@ -904,7 +905,7 @@ function buildDoctorReport(options = {}) {
|
||||
}).filter(record => record.exists);
|
||||
const context = {
|
||||
repoRoot,
|
||||
homeDir: options.homeDir || process.env.HOME,
|
||||
homeDir: options.homeDir || process.env.HOME || os.homedir(),
|
||||
projectRoot: options.projectRoot || process.cwd(),
|
||||
manifestVersion: manifests.modulesVersion,
|
||||
packageVersion: readPackageVersion(repoRoot),
|
||||
@@ -988,7 +989,7 @@ function repairInstalledStates(options = {}) {
|
||||
const manifests = loadInstallManifests({ repoRoot });
|
||||
const context = {
|
||||
repoRoot,
|
||||
homeDir: options.homeDir || process.env.HOME,
|
||||
homeDir: options.homeDir || process.env.HOME || os.homedir(),
|
||||
projectRoot: options.projectRoot || process.cwd(),
|
||||
manifestVersion: manifests.modulesVersion,
|
||||
packageVersion: readPackageVersion(repoRoot),
|
||||
|
||||
@@ -216,6 +216,45 @@ function listInstallComponents(options = {}) {
|
||||
.filter(component => !target || component.targets.includes(target));
|
||||
}
|
||||
|
||||
function getInstallComponent(componentId, options = {}) {
|
||||
const manifests = loadInstallManifests(options);
|
||||
const normalizedComponentId = String(componentId || '').trim();
|
||||
|
||||
if (!normalizedComponentId) {
|
||||
throw new Error('An install component ID is required');
|
||||
}
|
||||
|
||||
const component = manifests.componentsById.get(normalizedComponentId);
|
||||
if (!component) {
|
||||
throw new Error(`Unknown install component: ${normalizedComponentId}`);
|
||||
}
|
||||
|
||||
const moduleIds = dedupeStrings(component.modules);
|
||||
const modules = moduleIds
|
||||
.map(moduleId => manifests.modulesById.get(moduleId))
|
||||
.filter(Boolean)
|
||||
.map(module => ({
|
||||
id: module.id,
|
||||
kind: module.kind,
|
||||
description: module.description,
|
||||
targets: module.targets,
|
||||
defaultInstall: module.defaultInstall,
|
||||
cost: module.cost,
|
||||
stability: module.stability,
|
||||
dependencies: dedupeStrings(module.dependencies),
|
||||
}));
|
||||
|
||||
return {
|
||||
id: component.id,
|
||||
family: component.family,
|
||||
description: component.description,
|
||||
moduleIds,
|
||||
moduleCount: moduleIds.length,
|
||||
targets: intersectTargets(modules),
|
||||
modules,
|
||||
};
|
||||
}
|
||||
|
||||
function expandComponentIdsToModuleIds(componentIds, manifests) {
|
||||
const expandedModuleIds = [];
|
||||
|
||||
@@ -438,6 +477,7 @@ module.exports = {
|
||||
SUPPORTED_INSTALL_TARGETS,
|
||||
getManifestPaths,
|
||||
loadInstallManifests,
|
||||
getInstallComponent,
|
||||
listInstallComponents,
|
||||
listLegacyCompatibilityLanguages,
|
||||
listInstallModules,
|
||||
|
||||
@@ -1,15 +1,109 @@
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const { writeInstallState } = require('../install-state');
|
||||
|
||||
function readJsonObject(filePath, label) {
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to parse ${label} at ${filePath}: ${error.message}`);
|
||||
}
|
||||
|
||||
if (!parsed || typeof parsed !== 'object' || Array.isArray(parsed)) {
|
||||
throw new Error(`Invalid ${label} at ${filePath}: expected a JSON object`);
|
||||
}
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function mergeHookEntries(existingEntries, incomingEntries) {
|
||||
const mergedEntries = [];
|
||||
const seenEntries = new Set();
|
||||
|
||||
for (const entry of [...existingEntries, ...incomingEntries]) {
|
||||
const entryKey = JSON.stringify(entry);
|
||||
if (seenEntries.has(entryKey)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
seenEntries.add(entryKey);
|
||||
mergedEntries.push(entry);
|
||||
}
|
||||
|
||||
return mergedEntries;
|
||||
}
|
||||
|
||||
function findHooksSourcePath(plan, hooksDestinationPath) {
|
||||
const operation = plan.operations.find(item => item.destinationPath === hooksDestinationPath);
|
||||
return operation ? operation.sourcePath : null;
|
||||
}
|
||||
|
||||
function buildMergedSettings(plan) {
|
||||
if (!plan.adapter || plan.adapter.target !== 'claude') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hooksDestinationPath = path.join(plan.targetRoot, 'hooks', 'hooks.json');
|
||||
const hooksSourcePath = findHooksSourcePath(plan, hooksDestinationPath) || hooksDestinationPath;
|
||||
if (!fs.existsSync(hooksSourcePath)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hooksConfig = readJsonObject(hooksSourcePath, 'hooks config');
|
||||
const incomingHooks = hooksConfig.hooks;
|
||||
if (!incomingHooks || typeof incomingHooks !== 'object' || Array.isArray(incomingHooks)) {
|
||||
throw new Error(`Invalid hooks config at ${hooksSourcePath}: expected "hooks" to be a JSON object`);
|
||||
}
|
||||
|
||||
const settingsPath = path.join(plan.targetRoot, 'settings.json');
|
||||
let settings = {};
|
||||
if (fs.existsSync(settingsPath)) {
|
||||
settings = readJsonObject(settingsPath, 'existing settings');
|
||||
}
|
||||
|
||||
const existingHooks = settings.hooks && typeof settings.hooks === 'object' && !Array.isArray(settings.hooks)
|
||||
? settings.hooks
|
||||
: {};
|
||||
const mergedHooks = { ...existingHooks };
|
||||
|
||||
for (const [eventName, incomingEntries] of Object.entries(incomingHooks)) {
|
||||
const currentEntries = Array.isArray(existingHooks[eventName]) ? existingHooks[eventName] : [];
|
||||
const nextEntries = Array.isArray(incomingEntries) ? incomingEntries : [];
|
||||
mergedHooks[eventName] = mergeHookEntries(currentEntries, nextEntries);
|
||||
}
|
||||
|
||||
const mergedSettings = {
|
||||
...settings,
|
||||
hooks: mergedHooks,
|
||||
};
|
||||
|
||||
return {
|
||||
settingsPath,
|
||||
mergedSettings,
|
||||
};
|
||||
}
|
||||
|
||||
function applyInstallPlan(plan) {
|
||||
const mergedSettingsPlan = buildMergedSettings(plan);
|
||||
|
||||
for (const operation of plan.operations) {
|
||||
fs.mkdirSync(require('path').dirname(operation.destinationPath), { recursive: true });
|
||||
fs.mkdirSync(path.dirname(operation.destinationPath), { recursive: true });
|
||||
fs.copyFileSync(operation.sourcePath, operation.destinationPath);
|
||||
}
|
||||
|
||||
if (mergedSettingsPlan) {
|
||||
fs.mkdirSync(path.dirname(mergedSettingsPlan.settingsPath), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
mergedSettingsPlan.settingsPath,
|
||||
JSON.stringify(mergedSettingsPlan.mergedSettings, null, 2) + '\n',
|
||||
'utf8'
|
||||
);
|
||||
}
|
||||
|
||||
writeInstallState(plan.installStatePath, plan.statePreview);
|
||||
|
||||
return {
|
||||
|
||||
@@ -47,6 +47,12 @@ function resolveInstallConfigPath(configPath, options = {}) {
|
||||
: path.normalize(path.join(cwd, configPath));
|
||||
}
|
||||
|
||||
function findDefaultInstallConfigPath(options = {}) {
|
||||
const cwd = options.cwd || process.cwd();
|
||||
const candidatePath = path.join(cwd, DEFAULT_INSTALL_CONFIG);
|
||||
return fs.existsSync(candidatePath) ? candidatePath : null;
|
||||
}
|
||||
|
||||
function loadInstallConfig(configPath, options = {}) {
|
||||
const resolvedPath = resolveInstallConfigPath(configPath, options);
|
||||
|
||||
@@ -77,6 +83,7 @@ function loadInstallConfig(configPath, options = {}) {
|
||||
|
||||
module.exports = {
|
||||
DEFAULT_INSTALL_CONFIG,
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
resolveInstallConfigPath,
|
||||
};
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { discoverInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -70,7 +71,7 @@ function main() {
|
||||
}
|
||||
|
||||
const records = discoverInstalledStates({
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
}).filter(record => record.exists);
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { repairInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -74,7 +75,7 @@ function main() {
|
||||
|
||||
const result = repairInstalledStates({
|
||||
repoRoot: require('path').join(__dirname, '..'),
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
dryRun: options.dryRun,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env node
|
||||
'use strict';
|
||||
|
||||
const os = require('os');
|
||||
const { createStateStore } = require('./lib/state-store');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
@@ -134,7 +135,7 @@ async function main() {
|
||||
|
||||
store = await createStateStore({
|
||||
dbPath: options.dbPath,
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
});
|
||||
|
||||
if (!options.sessionId) {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env node
|
||||
'use strict';
|
||||
|
||||
const os = require('os');
|
||||
const { createStateStore } = require('./lib/state-store');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
@@ -139,7 +140,7 @@ async function main() {
|
||||
|
||||
store = await createStateStore({
|
||||
dbPath: options.dbPath,
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
});
|
||||
|
||||
const payload = {
|
||||
|
||||
0
scripts/sync-ecc-to-codex.sh
Normal file → Executable file
0
scripts/sync-ecc-to-codex.sh
Normal file → Executable file
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { uninstallInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -73,7 +74,7 @@ function main() {
|
||||
}
|
||||
|
||||
const result = uninstallInstalledStates({
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
dryRun: options.dryRun,
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: benchmark
|
||||
description: Use this skill to measure performance baselines, detect regressions before/after PRs, and compare stack alternatives.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Benchmark — Performance Baseline & Regression Detection
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: browser-qa
|
||||
description: Use this skill to automate visual testing and UI interaction verification using browser automation after deploying features.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Browser QA — Automated Visual Testing & Interaction
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: canary-watch
|
||||
description: Use this skill to monitor a deployed URL for regressions after deploys, merges, or dependency upgrades.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Canary Watch — Post-Deploy Monitoring
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -62,9 +62,16 @@ analyze_observations() {
|
||||
analysis_count=$(wc -l < "$analysis_file" 2>/dev/null || echo 0)
|
||||
echo "[$(date)] Using last $analysis_count of $obs_count observations for analysis" >> "$LOG_FILE"
|
||||
|
||||
# Use relative path from PROJECT_DIR for cross-platform compatibility (#842).
|
||||
# On Windows (Git Bash/MSYS2), absolute paths from mktemp may use MSYS-style
|
||||
# prefixes (e.g. /c/Users/...) that the Claude subprocess cannot resolve.
|
||||
analysis_relpath=".observer-tmp/$(basename "$analysis_file")"
|
||||
|
||||
prompt_file="$(mktemp "${observer_tmp_dir}/ecc-observer-prompt.XXXXXX")"
|
||||
cat > "$prompt_file" <<PROMPT
|
||||
Read ${analysis_file} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
IMPORTANT: You are running in non-interactive --print mode. You MUST use the Write tool directly to create files. Do NOT ask for permission, do NOT ask for confirmation, do NOT output summaries instead of writing. Just read, analyze, and write.
|
||||
|
||||
Read ${analysis_relpath} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
If you find 3+ occurrences of the same pattern, you MUST write an instinct file directly to ${INSTINCTS_DIR}/<id>.md using the Write tool.
|
||||
Do NOT ask for permission to write files, do NOT describe what you would write, and do NOT stop at analysis when a qualifying pattern exists.
|
||||
|
||||
@@ -117,11 +124,19 @@ PROMPT
|
||||
max_turns=10
|
||||
fi
|
||||
|
||||
# Prevent observe.sh from recording this automated Haiku session as observations
|
||||
# Ensure CWD is PROJECT_DIR so the relative analysis_relpath resolves correctly
|
||||
# on all platforms, not just when the observer happens to be launched from the project root.
|
||||
cd "$PROJECT_DIR" || { echo "[$(date)] Failed to cd to PROJECT_DIR ($PROJECT_DIR), skipping analysis" >> "$LOG_FILE"; rm -f "$prompt_file" "$analysis_file"; return; }
|
||||
|
||||
# Prevent observe.sh from recording this automated Haiku session as observations.
|
||||
# Pass prompt via -p flag instead of stdin redirect for Windows compatibility (#842).
|
||||
ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print \
|
||||
--allowedTools "Read,Write" \
|
||||
< "$prompt_file" >> "$LOG_FILE" 2>&1 &
|
||||
-p "$(cat "$prompt_file")" >> "$LOG_FILE" 2>&1 &
|
||||
claude_pid=$!
|
||||
# prompt_file content was already expanded by the shell; remove early to avoid
|
||||
# leaving stale temp files during the (potentially long) analysis window.
|
||||
rm -f "$prompt_file"
|
||||
|
||||
(
|
||||
sleep "$timeout_seconds"
|
||||
@@ -135,7 +150,7 @@ PROMPT
|
||||
wait "$claude_pid"
|
||||
exit_code=$?
|
||||
kill "$watchdog_pid" 2>/dev/null || true
|
||||
rm -f "$prompt_file" "$analysis_file"
|
||||
rm -f "$analysis_file"
|
||||
|
||||
if [ "$exit_code" -ne 0 ]; then
|
||||
echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE"
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: design-system
|
||||
description: Use this skill to generate or audit design systems, check visual consistency, and review PRs that touch styling.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Design System — Generate & Audit Visual Systems
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: laravel-verification
|
||||
description: Verification loop for Laravel projects: env checks, linting, static analysis, tests with coverage, security scans, and deployment readiness.
|
||||
description: "Verification loop for Laravel projects: env checks, linting, static analysis, tests with coverage, security scans, and deployment readiness."
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: product-lens
|
||||
description: Use this skill to validate the "why" before building, run product diagnostics, and convert vague ideas into specs.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Product Lens — Think Before You Build
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: safety-guard
|
||||
description: Use this skill to prevent destructive operations when working on production systems or running agents autonomously.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Safety Guard — Prevent Destructive Operations
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -175,7 +175,7 @@ test('prompt references analysis_file not full OBSERVATIONS_FILE', () => {
|
||||
assert.ok(heredocStart > 0, 'Should find prompt heredoc start');
|
||||
assert.ok(heredocEnd > heredocStart, 'Should find prompt heredoc end');
|
||||
const promptSection = content.substring(heredocStart, heredocEnd);
|
||||
assert.ok(promptSection.includes('${analysis_file}'), 'Prompt should point Claude at the sampled analysis file, not the full observations file');
|
||||
assert.ok(promptSection.includes('${analysis_relpath}'), 'Prompt should point Claude at the sampled analysis file (via relative path), not the full observations file');
|
||||
});
|
||||
|
||||
// ──────────────────────────────────────────────────────
|
||||
|
||||
@@ -8,6 +8,7 @@ const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const {
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
resolveInstallConfigPath,
|
||||
} = require('../../scripts/lib/install/config');
|
||||
@@ -49,6 +50,32 @@ function runTests() {
|
||||
assert.strictEqual(resolved, path.join(cwd, 'configs', 'ecc-install.json'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('finds the default project install config in the provided cwd', () => {
|
||||
const cwd = createTempDir('install-config-');
|
||||
|
||||
try {
|
||||
const configPath = path.join(cwd, 'ecc-install.json');
|
||||
writeJson(configPath, {
|
||||
version: 1,
|
||||
profile: 'core',
|
||||
});
|
||||
|
||||
assert.strictEqual(findDefaultInstallConfigPath({ cwd }), configPath);
|
||||
} finally {
|
||||
cleanup(cwd);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('returns null when no default project install config exists', () => {
|
||||
const cwd = createTempDir('install-config-');
|
||||
|
||||
try {
|
||||
assert.strictEqual(findDefaultInstallConfigPath({ cwd }), null);
|
||||
} finally {
|
||||
cleanup(cwd);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('loads and normalizes a valid install config', () => {
|
||||
const cwd = createTempDir('install-config-');
|
||||
|
||||
|
||||
277
tests/plugin-manifest.test.js
Normal file
277
tests/plugin-manifest.test.js
Normal file
@@ -0,0 +1,277 @@
|
||||
/**
|
||||
* Tests for plugin manifests:
|
||||
* - .claude-plugin/plugin.json (Claude Code plugin)
|
||||
* - .codex-plugin/plugin.json (Codex native plugin)
|
||||
* - .mcp.json (MCP server config at plugin root)
|
||||
* - .agents/plugins/marketplace.json (Codex marketplace discovery)
|
||||
*
|
||||
* Enforces rules from:
|
||||
* - .claude-plugin/PLUGIN_SCHEMA_NOTES.md (Claude Code validator rules)
|
||||
* - https://platform.openai.com/docs/codex/plugins (Codex official docs)
|
||||
*
|
||||
* Run with: node tests/run-all.js
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const repoRoot = path.resolve(__dirname, '..');
|
||||
const repoRootWithSep = `${repoRoot}${path.sep}`;
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` ✓ ${name}`);
|
||||
passed++;
|
||||
} catch (err) {
|
||||
console.log(` ✗ ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
|
||||
function loadJsonObject(filePath, label) {
|
||||
assert.ok(fs.existsSync(filePath), `Expected ${label} to exist`);
|
||||
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
} catch (error) {
|
||||
assert.fail(`Expected ${label} to contain valid JSON: ${error.message}`);
|
||||
}
|
||||
|
||||
assert.ok(
|
||||
parsed && typeof parsed === 'object' && !Array.isArray(parsed),
|
||||
`Expected ${label} to contain a JSON object`,
|
||||
);
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function assertSafeRepoRelativePath(relativePath, label) {
|
||||
const normalized = path.posix.normalize(relativePath.replace(/\\/g, '/'));
|
||||
|
||||
assert.ok(!path.isAbsolute(relativePath), `${label} must not be absolute: ${relativePath}`);
|
||||
assert.ok(
|
||||
!normalized.startsWith('../') && !normalized.includes('/../'),
|
||||
`${label} must not traverse directories: ${relativePath}`,
|
||||
);
|
||||
}
|
||||
|
||||
// ── Claude plugin manifest ────────────────────────────────────────────────────
|
||||
console.log('\n=== .claude-plugin/plugin.json ===\n');
|
||||
|
||||
const claudePluginPath = path.join(repoRoot, '.claude-plugin', 'plugin.json');
|
||||
|
||||
test('claude plugin.json exists', () => {
|
||||
assert.ok(fs.existsSync(claudePluginPath), 'Expected .claude-plugin/plugin.json to exist');
|
||||
});
|
||||
|
||||
const claudePlugin = loadJsonObject(claudePluginPath, '.claude-plugin/plugin.json');
|
||||
|
||||
test('claude plugin.json has version field', () => {
|
||||
assert.ok(claudePlugin.version, 'Expected version field');
|
||||
});
|
||||
|
||||
test('claude plugin.json agents is an array', () => {
|
||||
assert.ok(Array.isArray(claudePlugin.agents), 'Expected agents to be an array (not a string/directory)');
|
||||
});
|
||||
|
||||
test('claude plugin.json agents uses explicit file paths (not directories)', () => {
|
||||
for (const agentPath of claudePlugin.agents) {
|
||||
assertSafeRepoRelativePath(agentPath, 'Agent path');
|
||||
assert.ok(
|
||||
agentPath.endsWith('.md'),
|
||||
`Expected explicit .md file path, got: ${agentPath}`,
|
||||
);
|
||||
assert.ok(
|
||||
!agentPath.endsWith('/'),
|
||||
`Expected explicit file path, not directory, got: ${agentPath}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
test('claude plugin.json all agent files exist', () => {
|
||||
for (const agentRelPath of claudePlugin.agents) {
|
||||
assertSafeRepoRelativePath(agentRelPath, 'Agent path');
|
||||
const absolute = path.resolve(repoRoot, agentRelPath);
|
||||
assert.ok(
|
||||
absolute === repoRoot || absolute.startsWith(repoRootWithSep),
|
||||
`Agent path resolves outside repo root: ${agentRelPath}`,
|
||||
);
|
||||
assert.ok(
|
||||
fs.existsSync(absolute),
|
||||
`Agent file missing: ${agentRelPath}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
test('claude plugin.json skills is an array', () => {
|
||||
assert.ok(Array.isArray(claudePlugin.skills), 'Expected skills to be an array');
|
||||
});
|
||||
|
||||
test('claude plugin.json commands is an array', () => {
|
||||
assert.ok(Array.isArray(claudePlugin.commands), 'Expected commands to be an array');
|
||||
});
|
||||
|
||||
test('claude plugin.json does NOT have explicit hooks declaration', () => {
|
||||
assert.ok(
|
||||
!('hooks' in claudePlugin),
|
||||
'hooks field must NOT be declared — Claude Code v2.1+ auto-loads hooks/hooks.json by convention',
|
||||
);
|
||||
});
|
||||
|
||||
// ── Codex plugin manifest ─────────────────────────────────────────────────────
|
||||
// Per official docs: https://platform.openai.com/docs/codex/plugins
|
||||
// - .codex-plugin/plugin.json is the required manifest
|
||||
// - skills, mcpServers, apps are STRING paths relative to plugin root (not arrays)
|
||||
// - .mcp.json must be at plugin root (NOT inside .codex-plugin/)
|
||||
console.log('\n=== .codex-plugin/plugin.json ===\n');
|
||||
|
||||
const codexPluginPath = path.join(repoRoot, '.codex-plugin', 'plugin.json');
|
||||
|
||||
test('codex plugin.json exists', () => {
|
||||
assert.ok(fs.existsSync(codexPluginPath), 'Expected .codex-plugin/plugin.json to exist');
|
||||
});
|
||||
|
||||
const codexPlugin = loadJsonObject(codexPluginPath, '.codex-plugin/plugin.json');
|
||||
|
||||
test('codex plugin.json has name field', () => {
|
||||
assert.ok(codexPlugin.name, 'Expected name field');
|
||||
});
|
||||
|
||||
test('codex plugin.json has version field', () => {
|
||||
assert.ok(codexPlugin.version, 'Expected version field');
|
||||
});
|
||||
|
||||
test('codex plugin.json skills is a string (not array) per official spec', () => {
|
||||
assert.strictEqual(
|
||||
typeof codexPlugin.skills,
|
||||
'string',
|
||||
'skills must be a string path per Codex official docs, not an array',
|
||||
);
|
||||
});
|
||||
|
||||
test('codex plugin.json mcpServers is a string path (not array) per official spec', () => {
|
||||
assert.strictEqual(
|
||||
typeof codexPlugin.mcpServers,
|
||||
'string',
|
||||
'mcpServers must be a string path per Codex official docs',
|
||||
);
|
||||
});
|
||||
|
||||
test('codex plugin.json mcpServers exactly matches "./.mcp.json"', () => {
|
||||
assert.strictEqual(
|
||||
codexPlugin.mcpServers,
|
||||
'./.mcp.json',
|
||||
'mcpServers must point exactly to "./.mcp.json" per official docs',
|
||||
);
|
||||
const mcpPath = path.join(repoRoot, codexPlugin.mcpServers.replace(/^\.\//, ''));
|
||||
assert.ok(
|
||||
fs.existsSync(mcpPath),
|
||||
`mcpServers file missing at plugin root: ${codexPlugin.mcpServers}`,
|
||||
);
|
||||
});
|
||||
|
||||
test('codex plugin.json has interface.displayName', () => {
|
||||
assert.ok(
|
||||
codexPlugin.interface && codexPlugin.interface.displayName,
|
||||
'Expected interface.displayName for plugin directory presentation',
|
||||
);
|
||||
});
|
||||
|
||||
// ── .mcp.json at plugin root ──────────────────────────────────────────────────
|
||||
// Per official docs: keep .mcp.json at plugin root, NOT inside .codex-plugin/
|
||||
console.log('\n=== .mcp.json (plugin root) ===\n');
|
||||
|
||||
const mcpJsonPath = path.join(repoRoot, '.mcp.json');
|
||||
|
||||
test('.mcp.json exists at plugin root (not inside .codex-plugin/)', () => {
|
||||
assert.ok(fs.existsSync(mcpJsonPath), 'Expected .mcp.json at repo root (plugin root)');
|
||||
assert.ok(
|
||||
!fs.existsSync(path.join(repoRoot, '.codex-plugin', '.mcp.json')),
|
||||
'.mcp.json must NOT be inside .codex-plugin/ — only plugin.json belongs there',
|
||||
);
|
||||
});
|
||||
|
||||
const mcpConfig = loadJsonObject(mcpJsonPath, '.mcp.json');
|
||||
|
||||
test('.mcp.json has mcpServers object', () => {
|
||||
assert.ok(
|
||||
mcpConfig.mcpServers && typeof mcpConfig.mcpServers === 'object',
|
||||
'Expected mcpServers object',
|
||||
);
|
||||
});
|
||||
|
||||
test('.mcp.json includes at least github, context7, and exa servers', () => {
|
||||
const servers = Object.keys(mcpConfig.mcpServers);
|
||||
assert.ok(servers.includes('github'), 'Expected github MCP server');
|
||||
assert.ok(servers.includes('context7'), 'Expected context7 MCP server');
|
||||
assert.ok(servers.includes('exa'), 'Expected exa MCP server');
|
||||
});
|
||||
|
||||
// ── Codex marketplace file ────────────────────────────────────────────────────
|
||||
// Per official docs: repo marketplace lives at $REPO_ROOT/.agents/plugins/marketplace.json
|
||||
console.log('\n=== .agents/plugins/marketplace.json ===\n');
|
||||
|
||||
const marketplacePath = path.join(repoRoot, '.agents', 'plugins', 'marketplace.json');
|
||||
|
||||
test('marketplace.json exists at .agents/plugins/', () => {
|
||||
assert.ok(
|
||||
fs.existsSync(marketplacePath),
|
||||
'Expected .agents/plugins/marketplace.json for Codex repo marketplace discovery',
|
||||
);
|
||||
});
|
||||
|
||||
const marketplace = loadJsonObject(marketplacePath, '.agents/plugins/marketplace.json');
|
||||
|
||||
test('marketplace.json has name field', () => {
|
||||
assert.ok(marketplace.name, 'Expected name field');
|
||||
});
|
||||
|
||||
test('marketplace.json has plugins array with at least one entry', () => {
|
||||
assert.ok(Array.isArray(marketplace.plugins) && marketplace.plugins.length > 0, 'Expected plugins array');
|
||||
});
|
||||
|
||||
test('marketplace.json plugin entries have required fields', () => {
|
||||
for (const plugin of marketplace.plugins) {
|
||||
assert.ok(plugin.name, `Plugin entry missing name`);
|
||||
assert.ok(plugin.source && plugin.source.source, `Plugin "${plugin.name}" missing source.source`);
|
||||
assert.ok(plugin.policy && plugin.policy.installation, `Plugin "${plugin.name}" missing policy.installation`);
|
||||
assert.ok(plugin.category, `Plugin "${plugin.name}" missing category`);
|
||||
}
|
||||
});
|
||||
|
||||
test('marketplace local plugin path resolves to the repo-root Codex bundle', () => {
|
||||
for (const plugin of marketplace.plugins) {
|
||||
if (!plugin.source || plugin.source.source !== 'local') {
|
||||
continue;
|
||||
}
|
||||
|
||||
const resolvedRoot = path.resolve(path.dirname(marketplacePath), plugin.source.path);
|
||||
assert.strictEqual(
|
||||
resolvedRoot,
|
||||
repoRoot,
|
||||
`Expected local marketplace path to resolve to repo root, got: ${plugin.source.path}`,
|
||||
);
|
||||
assert.ok(
|
||||
fs.existsSync(path.join(resolvedRoot, '.codex-plugin', 'plugin.json')),
|
||||
`Codex plugin manifest missing under resolved marketplace root: ${plugin.source.path}`,
|
||||
);
|
||||
assert.ok(
|
||||
fs.existsSync(path.join(resolvedRoot, '.mcp.json')),
|
||||
`Root MCP config missing under resolved marketplace root: ${plugin.source.path}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// ── Summary ───────────────────────────────────────────────────────────────────
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
104
tests/scripts/catalog.test.js
Normal file
104
tests/scripts/catalog.test.js
Normal file
@@ -0,0 +1,104 @@
|
||||
/**
|
||||
* Tests for scripts/catalog.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'catalog.js');
|
||||
|
||||
function run(args = []) {
|
||||
try {
|
||||
const stdout = execFileSync('node', [SCRIPT, ...args], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 10000,
|
||||
});
|
||||
return { code: 0, stdout, stderr: '' };
|
||||
} catch (error) {
|
||||
return {
|
||||
code: error.status || 1,
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` \u2713 ${name}`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing catalog.js ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
if (test('shows help with no arguments', () => {
|
||||
const result = run();
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Discover ECC install components and profiles'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('shows help with an explicit help flag', () => {
|
||||
const result = run(['--help']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Usage:'));
|
||||
assert.ok(result.stdout.includes('node scripts/catalog.js show <component-id>'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('lists install profiles', () => {
|
||||
const result = run(['profiles']);
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Install profiles'));
|
||||
assert.ok(result.stdout.includes('core'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('filters components by family and emits JSON', () => {
|
||||
const result = run(['components', '--family', 'language', '--json']);
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
assert.ok(Array.isArray(parsed.components));
|
||||
assert.ok(parsed.components.length > 0);
|
||||
assert.ok(parsed.components.every(component => component.family === 'language'));
|
||||
assert.ok(parsed.components.some(component => component.id === 'lang:typescript'));
|
||||
assert.ok(parsed.components.every(component => component.id !== 'framework:nextjs'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('shows a resolved component payload', () => {
|
||||
const result = run(['show', 'framework:nextjs', '--json']);
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
assert.strictEqual(parsed.id, 'framework:nextjs');
|
||||
assert.strictEqual(parsed.family, 'framework');
|
||||
assert.deepStrictEqual(parsed.moduleIds, ['framework-language']);
|
||||
assert.ok(Array.isArray(parsed.modules));
|
||||
assert.strictEqual(parsed.modules[0].id, 'framework-language');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails on unknown subcommands', () => {
|
||||
const result = run(['bogus']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('Unknown catalog command'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails on unknown component ids', () => {
|
||||
const result = run(['show', 'framework:not-real']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('Unknown install component'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
runTests();
|
||||
@@ -65,6 +65,7 @@ function main() {
|
||||
const result = runCli(['--help']);
|
||||
assert.strictEqual(result.status, 0);
|
||||
assert.match(result.stdout, /ECC selective-install CLI/);
|
||||
assert.match(result.stdout, /catalog/);
|
||||
assert.match(result.stdout, /list-installed/);
|
||||
assert.match(result.stdout, /doctor/);
|
||||
}],
|
||||
@@ -93,6 +94,13 @@ function main() {
|
||||
assert.ok(Array.isArray(payload.profiles));
|
||||
assert.ok(payload.profiles.length > 0);
|
||||
}],
|
||||
['delegates catalog command', () => {
|
||||
const result = runCli(['catalog', 'show', 'framework:nextjs', '--json']);
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
const payload = parseJson(result.stdout);
|
||||
assert.strictEqual(payload.id, 'framework:nextjs');
|
||||
assert.deepStrictEqual(payload.moduleIds, ['framework-language']);
|
||||
}],
|
||||
['delegates lifecycle commands', () => {
|
||||
const homeDir = createTempDir('ecc-cli-home-');
|
||||
const projectRoot = createTempDir('ecc-cli-project-');
|
||||
@@ -127,6 +135,11 @@ function main() {
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
assert.match(result.stdout, /Usage: node scripts\/repair\.js/);
|
||||
}],
|
||||
['supports help for the catalog subcommand', () => {
|
||||
const result = runCli(['help', 'catalog']);
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
assert.match(result.stdout, /node scripts\/catalog\.js show <component-id>/);
|
||||
}],
|
||||
['fails on unknown commands instead of treating them as installs', () => {
|
||||
const result = runCli(['bogus']);
|
||||
assert.strictEqual(result.status, 1);
|
||||
|
||||
@@ -22,6 +22,8 @@ function readJson(filePath) {
|
||||
return JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
}
|
||||
|
||||
const REPO_ROOT = path.join(__dirname, '..', '..');
|
||||
|
||||
function run(args = [], options = {}) {
|
||||
const env = {
|
||||
...process.env,
|
||||
@@ -326,6 +328,170 @@ function runTests() {
|
||||
assert.ok(result.stderr.includes('Unknown install module: ghost-module'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('merges hooks into settings.json for claude target install', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
|
||||
try {
|
||||
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
|
||||
const claudeRoot = path.join(homeDir, '.claude');
|
||||
assert.ok(fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should be copied');
|
||||
|
||||
const settingsPath = path.join(claudeRoot, 'settings.json');
|
||||
assert.ok(fs.existsSync(settingsPath), 'settings.json should exist after install');
|
||||
|
||||
const settings = readJson(settingsPath);
|
||||
assert.ok(settings.hooks, 'settings.json should contain hooks key');
|
||||
assert.ok(settings.hooks.PreToolUse, 'hooks should include PreToolUse');
|
||||
assert.ok(Array.isArray(settings.hooks.PreToolUse), 'PreToolUse should be an array');
|
||||
assert.ok(settings.hooks.PreToolUse.length > 0, 'PreToolUse should have entries');
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('preserves existing settings fields and hook entries when merging hooks', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
|
||||
try {
|
||||
const claudeRoot = path.join(homeDir, '.claude');
|
||||
fs.mkdirSync(claudeRoot, { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(claudeRoot, 'settings.json'),
|
||||
JSON.stringify({
|
||||
effortLevel: 'high',
|
||||
env: { MY_VAR: '1' },
|
||||
hooks: {
|
||||
PreToolUse: [{ matcher: 'Write', hooks: [{ type: 'command', command: 'echo custom-pretool' }] }],
|
||||
UserPromptSubmit: [{ matcher: '*', hooks: [{ type: 'command', command: 'echo custom-submit' }] }],
|
||||
},
|
||||
}, null, 2)
|
||||
);
|
||||
|
||||
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
|
||||
const settings = readJson(path.join(claudeRoot, 'settings.json'));
|
||||
assert.strictEqual(settings.effortLevel, 'high', 'existing effortLevel should be preserved');
|
||||
assert.deepStrictEqual(settings.env, { MY_VAR: '1' }, 'existing env should be preserved');
|
||||
assert.ok(settings.hooks, 'hooks should be merged in');
|
||||
assert.ok(settings.hooks.PreToolUse, 'PreToolUse hooks should exist');
|
||||
assert.ok(
|
||||
settings.hooks.PreToolUse.some(entry => JSON.stringify(entry).includes('echo custom-pretool')),
|
||||
'existing PreToolUse entries should be preserved'
|
||||
);
|
||||
assert.ok(settings.hooks.PreToolUse.length > 1, 'ECC PreToolUse hooks should be appended');
|
||||
assert.deepStrictEqual(
|
||||
settings.hooks.UserPromptSubmit,
|
||||
[{ matcher: '*', hooks: [{ type: 'command', command: 'echo custom-submit' }] }],
|
||||
'user-defined hook event types should be preserved'
|
||||
);
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('reinstall does not duplicate managed hook entries', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
|
||||
try {
|
||||
const firstInstall = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(firstInstall.code, 0, firstInstall.stderr);
|
||||
|
||||
const settingsPath = path.join(homeDir, '.claude', 'settings.json');
|
||||
const afterFirstInstall = readJson(settingsPath);
|
||||
const preToolUseLength = afterFirstInstall.hooks.PreToolUse.length;
|
||||
|
||||
const secondInstall = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(secondInstall.code, 0, secondInstall.stderr);
|
||||
|
||||
const afterSecondInstall = readJson(settingsPath);
|
||||
assert.strictEqual(
|
||||
afterSecondInstall.hooks.PreToolUse.length,
|
||||
preToolUseLength,
|
||||
'managed hook entries should not duplicate on reinstall'
|
||||
);
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails when existing settings.json is malformed', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
|
||||
try {
|
||||
const claudeRoot = path.join(homeDir, '.claude');
|
||||
fs.mkdirSync(claudeRoot, { recursive: true });
|
||||
const settingsPath = path.join(claudeRoot, 'settings.json');
|
||||
fs.writeFileSync(settingsPath, '{ invalid json\n');
|
||||
|
||||
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('Failed to parse existing settings at'));
|
||||
assert.strictEqual(fs.readFileSync(settingsPath, 'utf8'), '{ invalid json\n');
|
||||
assert.ok(!fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should not be copied on validation failure');
|
||||
assert.ok(!fs.existsSync(path.join(claudeRoot, 'ecc', 'install-state.json')), 'install state should not be written on validation failure');
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails when existing settings.json root is not an object', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
|
||||
try {
|
||||
const claudeRoot = path.join(homeDir, '.claude');
|
||||
fs.mkdirSync(claudeRoot, { recursive: true });
|
||||
const settingsPath = path.join(claudeRoot, 'settings.json');
|
||||
fs.writeFileSync(settingsPath, '[]\n');
|
||||
|
||||
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('Invalid existing settings at'));
|
||||
assert.ok(result.stderr.includes('expected a JSON object'));
|
||||
assert.strictEqual(fs.readFileSync(settingsPath, 'utf8'), '[]\n');
|
||||
assert.ok(!fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should not be copied on validation failure');
|
||||
assert.ok(!fs.existsSync(path.join(claudeRoot, 'ecc', 'install-state.json')), 'install state should not be written on validation failure');
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails when source hooks.json root is not an object before copying files', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
const sourceHooksPath = path.join(REPO_ROOT, 'hooks', 'hooks.json');
|
||||
const originalHooks = fs.readFileSync(sourceHooksPath, 'utf8');
|
||||
|
||||
try {
|
||||
fs.writeFileSync(sourceHooksPath, '[]\n');
|
||||
|
||||
const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 1);
|
||||
assert.ok(result.stderr.includes('Invalid hooks config at'));
|
||||
assert.ok(result.stderr.includes('expected a JSON object'));
|
||||
|
||||
const claudeRoot = path.join(homeDir, '.claude');
|
||||
assert.ok(!fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')), 'hooks.json should not be copied when source hooks are invalid');
|
||||
assert.ok(!fs.existsSync(path.join(claudeRoot, 'ecc', 'install-state.json')), 'install state should not be written when source hooks are invalid');
|
||||
} finally {
|
||||
fs.writeFileSync(sourceHooksPath, originalHooks);
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('installs from ecc-install.json and persists component selections', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
@@ -358,6 +524,67 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('auto-detects ecc-install.json from the project root', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
const configPath = path.join(projectDir, 'ecc-install.json');
|
||||
|
||||
try {
|
||||
fs.writeFileSync(configPath, JSON.stringify({
|
||||
version: 1,
|
||||
target: 'claude',
|
||||
profile: 'developer',
|
||||
include: ['capability:security'],
|
||||
exclude: ['capability:orchestration'],
|
||||
}, null, 2));
|
||||
|
||||
const result = run([], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
|
||||
assert.ok(fs.existsSync(path.join(homeDir, '.claude', 'skills', 'security-review', 'SKILL.md')));
|
||||
assert.ok(!fs.existsSync(path.join(homeDir, '.claude', 'skills', 'dmux-workflows', 'SKILL.md')));
|
||||
|
||||
const state = readJson(path.join(homeDir, '.claude', 'ecc', 'install-state.json'));
|
||||
assert.strictEqual(state.request.profile, 'developer');
|
||||
assert.deepStrictEqual(state.request.includeComponents, ['capability:security']);
|
||||
assert.deepStrictEqual(state.request.excludeComponents, ['capability:orchestration']);
|
||||
assert.ok(state.resolution.selectedModules.includes('security'));
|
||||
assert.ok(!state.resolution.selectedModules.includes('orchestration'));
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('preserves legacy language installs when a project config is present', () => {
|
||||
const homeDir = createTempDir('install-apply-home-');
|
||||
const projectDir = createTempDir('install-apply-project-');
|
||||
const configPath = path.join(projectDir, 'ecc-install.json');
|
||||
|
||||
try {
|
||||
fs.writeFileSync(configPath, JSON.stringify({
|
||||
version: 1,
|
||||
target: 'claude',
|
||||
profile: 'developer',
|
||||
include: ['capability:security'],
|
||||
}, null, 2));
|
||||
|
||||
const result = run(['typescript'], { cwd: projectDir, homeDir });
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
|
||||
const state = readJson(path.join(homeDir, '.claude', 'ecc', 'install-state.json'));
|
||||
assert.strictEqual(state.request.legacyMode, true);
|
||||
assert.deepStrictEqual(state.request.legacyLanguages, ['typescript']);
|
||||
assert.strictEqual(state.request.profile, null);
|
||||
assert.deepStrictEqual(state.request.includeComponents, []);
|
||||
assert.ok(state.resolution.selectedModules.includes('framework-language'));
|
||||
assert.ok(!state.resolution.selectedModules.includes('security'));
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
cleanup(projectDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
@@ -8,11 +8,12 @@ const { execFileSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-plan.js');
|
||||
|
||||
function run(args = []) {
|
||||
function run(args = [], options = {}) {
|
||||
try {
|
||||
const stdout = execFileSync('node', [SCRIPT, ...args], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
cwd: options.cwd,
|
||||
timeout: 10000,
|
||||
});
|
||||
return { code: 0, stdout, stderr: '' };
|
||||
@@ -135,6 +136,31 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('auto-detects planning intent from project ecc-install.json', () => {
|
||||
const configDir = path.join(__dirname, '..', 'fixtures', 'tmp-install-plan-autodetect');
|
||||
const configPath = path.join(configDir, 'ecc-install.json');
|
||||
|
||||
try {
|
||||
require('fs').mkdirSync(configDir, { recursive: true });
|
||||
require('fs').writeFileSync(configPath, JSON.stringify({
|
||||
version: 1,
|
||||
target: 'cursor',
|
||||
profile: 'core',
|
||||
include: ['capability:security'],
|
||||
}, null, 2));
|
||||
|
||||
const result = run(['--json'], { cwd: configDir });
|
||||
assert.strictEqual(result.code, 0, result.stderr);
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
assert.strictEqual(parsed.target, 'cursor');
|
||||
assert.strictEqual(parsed.profileId, 'core');
|
||||
assert.deepStrictEqual(parsed.includedComponentIds, ['capability:security']);
|
||||
assert.ok(parsed.selectedModuleIds.includes('security'));
|
||||
} finally {
|
||||
require('fs').rmSync(configDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('fails on unknown arguments', () => {
|
||||
const result = run(['--unknown-flag']);
|
||||
assert.strictEqual(result.code, 1);
|
||||
|
||||
Reference in New Issue
Block a user