mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
Compare commits
107 Commits
feat/herme
...
fix/ecc2-a
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9cde3427e2 | ||
|
|
c6b4c719b2 | ||
|
|
f98207feea | ||
|
|
52e9bd58f1 | ||
|
|
4257c093ca | ||
|
|
23d743b92c | ||
|
|
414ea90e11 | ||
|
|
d473cf87e6 | ||
|
|
64847d0a21 | ||
|
|
c865d4c676 | ||
|
|
72de19effd | ||
|
|
56076edd48 | ||
|
|
04d7eeb16f | ||
|
|
4e7773c2ce | ||
|
|
a3fc90f7ac | ||
|
|
55efeb7f20 | ||
|
|
1e7c299706 | ||
|
|
47aa415b06 | ||
|
|
d7e6bb242a | ||
|
|
9f37a5d8c7 | ||
|
|
d9ec51c9e9 | ||
|
|
9033f2a997 | ||
|
|
67660540ac | ||
|
|
432788d0b5 | ||
|
|
6a7a115e18 | ||
|
|
1181d93498 | ||
|
|
80d6a89f12 | ||
|
|
28a1fbc3f2 | ||
|
|
7a4cb8c570 | ||
|
|
4b4f077d18 | ||
|
|
78c98dd4fd | ||
|
|
f07797533d | ||
|
|
87d883eb1b | ||
|
|
652f87c5b6 | ||
|
|
70b65a9d06 | ||
|
|
24674a7bd6 | ||
|
|
d49c95a5ec | ||
|
|
70a96bd363 | ||
|
|
8f7445a260 | ||
|
|
9ad4351f53 | ||
|
|
451732164f | ||
|
|
ebd14cde7d | ||
|
|
ae21a8df85 | ||
|
|
d8e3b9d593 | ||
|
|
7148d9006f | ||
|
|
c14765e701 | ||
|
|
194bc0000b | ||
|
|
1e44475458 | ||
|
|
31af1adcc8 | ||
|
|
c80631fc1d | ||
|
|
00f8628b83 | ||
|
|
ba09a34432 | ||
|
|
27e0d53f6d | ||
|
|
8b6140dedc | ||
|
|
7633386e04 | ||
|
|
b4296c7095 | ||
|
|
cc60bf6b65 | ||
|
|
160624d0ed | ||
|
|
73c10122fe | ||
|
|
fe6a6fc106 | ||
|
|
dafc9bcd60 | ||
|
|
2d0fddf174 | ||
|
|
f471f27658 | ||
|
|
925d830c53 | ||
|
|
2243f15581 | ||
|
|
6408511611 | ||
|
|
9348751b8e | ||
|
|
c96c4d2742 | ||
|
|
678fb6f0d3 | ||
|
|
401e26a45a | ||
|
|
eb934afbb5 | ||
|
|
8303970258 | ||
|
|
319f9efafb | ||
|
|
6c2a3a2bae | ||
|
|
adaeab9dba | ||
|
|
8981dd6067 | ||
|
|
4105a2f36c | ||
|
|
0166231ddb | ||
|
|
cf439dd481 | ||
|
|
9903ae528b | ||
|
|
44c2bf6f7b | ||
|
|
e78c092499 | ||
|
|
61f70de479 | ||
|
|
776ac439f3 | ||
|
|
b19b4c6b5e | ||
|
|
b5157f4ed1 | ||
|
|
2d1e384eef | ||
|
|
9c5ca92e6e | ||
|
|
7b510c886e | ||
|
|
c1b47ac9db | ||
|
|
3f02fa439a | ||
|
|
f6b10481f3 | ||
|
|
d3699f9010 | ||
|
|
445ae5099d | ||
|
|
00bc7f30be | ||
|
|
1d0aa5ac2a | ||
|
|
7f7e319d9f | ||
|
|
d7bcc92007 | ||
|
|
e883385ab0 | ||
|
|
e7d827548c | ||
|
|
2787b8e92f | ||
|
|
2166d80d58 | ||
|
|
67306c22cd | ||
|
|
b2407ab3f5 | ||
|
|
00dce30d3b | ||
|
|
7726c25e46 | ||
|
|
df4f2df297 |
20
.agents/plugins/marketplace.json
Normal file
20
.agents/plugins/marketplace.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"source": {
|
||||
"source": "local",
|
||||
"path": "../.."
|
||||
},
|
||||
"policy": {
|
||||
"installation": "AVAILABLE",
|
||||
"authentication": "ON_INSTALL"
|
||||
},
|
||||
"category": "Productivity"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -21,5 +21,37 @@
|
||||
"workflow",
|
||||
"automation",
|
||||
"best-practices"
|
||||
]
|
||||
],
|
||||
"agents": [
|
||||
"./agents/architect.md",
|
||||
"./agents/build-error-resolver.md",
|
||||
"./agents/chief-of-staff.md",
|
||||
"./agents/code-reviewer.md",
|
||||
"./agents/cpp-build-resolver.md",
|
||||
"./agents/cpp-reviewer.md",
|
||||
"./agents/database-reviewer.md",
|
||||
"./agents/doc-updater.md",
|
||||
"./agents/docs-lookup.md",
|
||||
"./agents/e2e-runner.md",
|
||||
"./agents/flutter-reviewer.md",
|
||||
"./agents/go-build-resolver.md",
|
||||
"./agents/go-reviewer.md",
|
||||
"./agents/harness-optimizer.md",
|
||||
"./agents/java-build-resolver.md",
|
||||
"./agents/java-reviewer.md",
|
||||
"./agents/kotlin-build-resolver.md",
|
||||
"./agents/kotlin-reviewer.md",
|
||||
"./agents/loop-operator.md",
|
||||
"./agents/planner.md",
|
||||
"./agents/python-reviewer.md",
|
||||
"./agents/pytorch-build-resolver.md",
|
||||
"./agents/refactor-cleaner.md",
|
||||
"./agents/rust-build-resolver.md",
|
||||
"./agents/rust-reviewer.md",
|
||||
"./agents/security-reviewer.md",
|
||||
"./agents/tdd-guide.md",
|
||||
"./agents/typescript-reviewer.md"
|
||||
],
|
||||
"skills": ["./skills/"],
|
||||
"commands": ["./commands/"]
|
||||
}
|
||||
|
||||
47
.claude/rules/node.md
Normal file
47
.claude/rules/node.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Node.js Rules for everything-claude-code
|
||||
|
||||
> Project-specific rules for the ECC codebase. Extends common rules.
|
||||
|
||||
## Stack
|
||||
|
||||
- **Runtime**: Node.js >=18 (no transpilation, plain CommonJS)
|
||||
- **Test runner**: `node tests/run-all.js` — individual files via `node tests/**/*.test.js`
|
||||
- **Linter**: ESLint (`@eslint/js`, flat config)
|
||||
- **Coverage**: c8
|
||||
- **Lint**: markdownlint-cli for `.md` files
|
||||
|
||||
## File Conventions
|
||||
|
||||
- `scripts/` — Node.js utilities, hooks. CommonJS (`require`/`module.exports`)
|
||||
- `agents/`, `commands/`, `skills/`, `rules/` — Markdown with YAML frontmatter
|
||||
- `tests/` — Mirror the `scripts/` structure. Test files named `*.test.js`
|
||||
- File naming: **lowercase with hyphens** (e.g. `session-start.js`, `post-edit-format.js`)
|
||||
|
||||
## Code Style
|
||||
|
||||
- CommonJS only — no ESM (`import`/`export`) unless file ends in `.mjs`
|
||||
- No TypeScript — plain `.js` throughout
|
||||
- Prefer `const` over `let`; never `var`
|
||||
- Keep hook scripts under 200 lines — extract helpers to `scripts/lib/`
|
||||
- All hooks must `exit 0` on non-critical errors (never block tool execution unexpectedly)
|
||||
|
||||
## Hook Development
|
||||
|
||||
- Hook scripts normally receive JSON on stdin, but hooks routed through `scripts/hooks/run-with-flags.js` can export `run(rawInput)` and let the wrapper handle parsing/gating
|
||||
- Async hooks: mark `"async": true` in `settings.json` with a timeout ≤30s
|
||||
- Blocking hooks (PreToolUse, stop): keep fast (<200ms) — no network calls
|
||||
- Use `run-with-flags.js` wrapper for all hooks so `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` runtime gating works
|
||||
- Always exit 0 on parse errors; log to stderr with `[HookName]` prefix
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
- Run `node tests/run-all.js` before committing
|
||||
- New scripts in `scripts/lib/` require a matching test in `tests/lib/`
|
||||
- New hooks require at least one integration test in `tests/hooks/`
|
||||
|
||||
## Markdown / Agent Files
|
||||
|
||||
- Agents: YAML frontmatter with `name`, `description`, `tools`, `model`
|
||||
- Skills: sections — When to Use, How It Works, Examples
|
||||
- Commands: `description:` frontmatter line required
|
||||
- Run `npx markdownlint-cli '**/*.md' --ignore node_modules` before committing
|
||||
49
.codex-plugin/README.md
Normal file
49
.codex-plugin/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# .codex-plugin — Codex Native Plugin for ECC
|
||||
|
||||
This directory contains the **Codex plugin manifest** for Everything Claude Code.
|
||||
|
||||
## Structure
|
||||
|
||||
```
|
||||
.codex-plugin/
|
||||
└── plugin.json — Codex plugin manifest (name, version, skills ref, MCP ref)
|
||||
.mcp.json — MCP server configurations at plugin root (NOT inside .codex-plugin/)
|
||||
```
|
||||
|
||||
## What This Provides
|
||||
|
||||
- **125 skills** from `./skills/` — reusable Codex workflows for TDD, security,
|
||||
code review, architecture, and more
|
||||
- **6 MCP servers** — GitHub, Context7, Exa, Memory, Playwright, Sequential Thinking
|
||||
|
||||
## Installation
|
||||
|
||||
Codex plugin support is currently in preview. Once generally available:
|
||||
|
||||
```bash
|
||||
# Install from Codex CLI
|
||||
codex plugin install affaan-m/everything-claude-code
|
||||
|
||||
# Or reference locally during development
|
||||
codex plugin install ./
|
||||
|
||||
Run this from the repository root so `./` points to the repo root and `.mcp.json` resolves correctly.
|
||||
```
|
||||
|
||||
## MCP Servers Included
|
||||
|
||||
| Server | Purpose |
|
||||
|---|---|
|
||||
| `github` | GitHub API access |
|
||||
| `context7` | Live documentation lookup |
|
||||
| `exa` | Neural web search |
|
||||
| `memory` | Persistent memory across sessions |
|
||||
| `playwright` | Browser automation & E2E testing |
|
||||
| `sequential-thinking` | Step-by-step reasoning |
|
||||
|
||||
## Notes
|
||||
|
||||
- The `skills/` directory at the repo root is shared between Claude Code (`.claude-plugin/`)
|
||||
and Codex (`.codex-plugin/`) — same source of truth, no duplication
|
||||
- MCP server credentials are inherited from the launching environment (env vars)
|
||||
- This manifest does **not** override `~/.codex/config.toml` settings
|
||||
30
.codex-plugin/plugin.json
Normal file
30
.codex-plugin/plugin.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"version": "1.9.0",
|
||||
"description": "Battle-tested Codex workflows — 125 skills, production-ready MCP configs, and agent definitions for TDD, security scanning, code review, and autonomous development.",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"email": "me@affaanmustafa.com",
|
||||
"url": "https://x.com/affaanmustafa"
|
||||
},
|
||||
"homepage": "https://github.com/affaan-m/everything-claude-code",
|
||||
"repository": "https://github.com/affaan-m/everything-claude-code",
|
||||
"license": "MIT",
|
||||
"keywords": ["codex", "agents", "skills", "tdd", "code-review", "security", "workflow", "automation"],
|
||||
"skills": "./skills/",
|
||||
"mcpServers": "./.mcp.json",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code",
|
||||
"shortDescription": "125 battle-tested skills for TDD, security, code review, and autonomous development.",
|
||||
"longDescription": "Everything Claude Code (ECC) is a community-maintained collection of Codex skills and MCP configs evolved over 10+ months of intensive daily use. It covers TDD workflows, security scanning, code review, architecture decisions, and more — all in one installable plugin.",
|
||||
"developerName": "Affaan Mustafa",
|
||||
"category": "Productivity",
|
||||
"capabilities": ["Read", "Write"],
|
||||
"websiteURL": "https://github.com/affaan-m/everything-claude-code",
|
||||
"defaultPrompt": [
|
||||
"Use the tdd-workflow skill to write tests before implementation.",
|
||||
"Use the security-review skill to scan for OWASP Top 10 vulnerabilities.",
|
||||
"Use the code-review skill to review this PR for correctness and security."
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -46,12 +46,15 @@ Available skills:
|
||||
|
||||
Treat the project-local `.codex/config.toml` as the default Codex baseline for ECC. The current ECC baseline enables GitHub, Context7, Exa, Memory, Playwright, and Sequential Thinking; add heavier extras in `~/.codex/config.toml` only when a task actually needs them.
|
||||
|
||||
ECC's canonical Codex section name is `[mcp_servers.context7]`. The launcher package remains `@upstash/context7-mcp`; only the TOML section name is normalized for consistency with `codex mcp list` and the reference config.
|
||||
|
||||
### Automatic config.toml merging
|
||||
|
||||
The sync script (`scripts/sync-ecc-to-codex.sh`) uses a Node-based TOML parser to safely merge ECC MCP servers into `~/.codex/config.toml`:
|
||||
|
||||
- **Add-only by default** — missing ECC servers are appended; existing servers are never modified or removed.
|
||||
- **7 managed servers** — Supabase, Playwright, Context7, Exa, GitHub, Memory, Sequential Thinking.
|
||||
- **Canonical naming** — ECC manages Context7 as `[mcp_servers.context7]`; legacy `[mcp_servers.context7-mcp]` entries are treated as aliases during updates.
|
||||
- **Package-manager aware** — uses the project's configured package manager (npm/pnpm/yarn/bun) instead of hardcoding `pnpm`.
|
||||
- **Drift warnings** — if an existing server's config differs from the ECC recommendation, the script logs a warning.
|
||||
- **`--update-mcp`** — explicitly replaces all ECC-managed servers with the latest recommended config (safely removes subtables like `[mcp_servers.supabase.env]`).
|
||||
|
||||
@@ -27,7 +27,10 @@ notify = [
|
||||
"-sound", "default",
|
||||
]
|
||||
|
||||
# Prefer AGENTS.md and project-local .codex/AGENTS.md for instructions.
|
||||
# Persistent instructions are appended to every prompt (additive, unlike
|
||||
# model_instructions_file which replaces AGENTS.md).
|
||||
persistent_instructions = "Follow project AGENTS.md guidelines. Use available MCP servers when they can help."
|
||||
|
||||
# model_instructions_file replaces built-in instructions instead of AGENTS.md,
|
||||
# so leave it unset unless you intentionally want a single override file.
|
||||
# model_instructions_file = "/absolute/path/to/instructions.md"
|
||||
@@ -38,10 +41,14 @@ notify = [
|
||||
[mcp_servers.github]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-github"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.context7]
|
||||
command = "npx"
|
||||
# Canonical Codex section name is `context7`; the package itself remains
|
||||
# `@upstash/context7-mcp`.
|
||||
args = ["-y", "@upstash/context7-mcp@latest"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.exa]
|
||||
url = "https://mcp.exa.ai/mcp"
|
||||
@@ -49,14 +56,17 @@ url = "https://mcp.exa.ai/mcp"
|
||||
[mcp_servers.memory]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-memory"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.playwright]
|
||||
command = "npx"
|
||||
args = ["-y", "@playwright/mcp@latest", "--extension"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.sequential-thinking]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
# Additional MCP servers (uncomment as needed):
|
||||
# [mcp_servers.supabase]
|
||||
|
||||
25
.github/workflows/ci.yml
vendored
25
.github/workflows/ci.yml
vendored
@@ -44,13 +44,20 @@ jobs:
|
||||
# Package manager setup
|
||||
- name: Setup pnpm
|
||||
if: matrix.pm == 'pnpm'
|
||||
uses: pnpm/action-setup@v4
|
||||
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Setup Yarn (via Corepack)
|
||||
if: matrix.pm == 'yarn'
|
||||
shell: bash
|
||||
run: |
|
||||
corepack enable
|
||||
corepack prepare yarn@stable --activate
|
||||
|
||||
- name: Setup Bun
|
||||
if: matrix.pm == 'bun'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
|
||||
|
||||
# Cache configuration
|
||||
- name: Get npm cache directory
|
||||
@@ -114,14 +121,18 @@ jobs:
|
||||
${{ runner.os }}-bun-
|
||||
|
||||
# Install dependencies
|
||||
# COREPACK_ENABLE_STRICT=0 allows pnpm to install even though
|
||||
# package.json declares "packageManager": "yarn@..."
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
env:
|
||||
COREPACK_ENABLE_STRICT: '0'
|
||||
run: |
|
||||
case "${{ matrix.pm }}" in
|
||||
npm) npm ci ;;
|
||||
pnpm) pnpm install ;;
|
||||
# --ignore-engines required for Node 18 compat with some devDependencies (e.g., markdownlint-cli)
|
||||
yarn) yarn install --ignore-engines ;;
|
||||
pnpm) pnpm install --no-frozen-lockfile ;;
|
||||
# Yarn Berry (v4+) removed --ignore-engines; engine checking is no longer a core feature
|
||||
yarn) yarn install ;;
|
||||
bun) bun install ;;
|
||||
*) echo "Unsupported package manager: ${{ matrix.pm }}" && exit 1 ;;
|
||||
esac
|
||||
@@ -175,6 +186,10 @@ jobs:
|
||||
run: node scripts/ci/validate-skills.js
|
||||
continue-on-error: false
|
||||
|
||||
- name: Validate install manifests
|
||||
run: node scripts/ci/validate-install-manifests.js
|
||||
continue-on-error: false
|
||||
|
||||
- name: Validate rules
|
||||
run: node scripts/ci/validate-rules.js
|
||||
continue-on-error: false
|
||||
|
||||
6
.github/workflows/release.yml
vendored
6
.github/workflows/release.yml
vendored
@@ -20,11 +20,13 @@ jobs:
|
||||
|
||||
- name: Validate version tag
|
||||
run: |
|
||||
if ! [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
if ! [[ "${REF_NAME}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Invalid version tag format. Expected vX.Y.Z"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
env:
|
||||
REF_NAME: ${{ github.ref_name }}
|
||||
- name: Verify plugin.json version matches tag
|
||||
env:
|
||||
TAG_NAME: ${{ github.ref_name }}
|
||||
@@ -61,7 +63,7 @@ jobs:
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
|
||||
with:
|
||||
body_path: release_body.md
|
||||
generate_release_notes: true
|
||||
|
||||
2
.github/workflows/reusable-release.yml
vendored
2
.github/workflows/reusable-release.yml
vendored
@@ -49,7 +49,7 @@ jobs:
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
|
||||
with:
|
||||
tag_name: ${{ inputs.tag }}
|
||||
body_path: release_body.md
|
||||
|
||||
20
.github/workflows/reusable-test.yml
vendored
20
.github/workflows/reusable-test.yml
vendored
@@ -36,13 +36,20 @@ jobs:
|
||||
|
||||
- name: Setup pnpm
|
||||
if: inputs.package-manager == 'pnpm'
|
||||
uses: pnpm/action-setup@v4
|
||||
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
- name: Setup Yarn (via Corepack)
|
||||
if: inputs.package-manager == 'yarn'
|
||||
shell: bash
|
||||
run: |
|
||||
corepack enable
|
||||
corepack prepare yarn@stable --activate
|
||||
|
||||
- name: Setup Bun
|
||||
if: inputs.package-manager == 'bun'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
|
||||
|
||||
- name: Get npm cache directory
|
||||
if: inputs.package-manager == 'npm'
|
||||
@@ -104,13 +111,18 @@ jobs:
|
||||
restore-keys: |
|
||||
${{ runner.os }}-bun-
|
||||
|
||||
# COREPACK_ENABLE_STRICT=0 allows pnpm to install even though
|
||||
# package.json declares "packageManager": "yarn@..."
|
||||
- name: Install dependencies
|
||||
shell: bash
|
||||
env:
|
||||
COREPACK_ENABLE_STRICT: '0'
|
||||
run: |
|
||||
case "${{ inputs.package-manager }}" in
|
||||
npm) npm ci ;;
|
||||
pnpm) pnpm install ;;
|
||||
yarn) yarn install --ignore-engines ;;
|
||||
pnpm) pnpm install --no-frozen-lockfile ;;
|
||||
# Yarn Berry (v4+) removed --ignore-engines; engine checking is no longer a core feature
|
||||
yarn) yarn install ;;
|
||||
bun) bun install ;;
|
||||
*) echo "Unsupported package manager: ${{ inputs.package-manager }}" && exit 1 ;;
|
||||
esac
|
||||
|
||||
3
.github/workflows/reusable-validate.yml
vendored
3
.github/workflows/reusable-validate.yml
vendored
@@ -39,5 +39,8 @@ jobs:
|
||||
- name: Validate skills
|
||||
run: node scripts/ci/validate-skills.js
|
||||
|
||||
- name: Validate install manifests
|
||||
run: node scripts/ci/validate-install-manifests.js
|
||||
|
||||
- name: Validate rules
|
||||
run: node scripts/ci/validate-rules.js
|
||||
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -83,6 +83,12 @@ temp/
|
||||
*.bak
|
||||
*.backup
|
||||
|
||||
# Observer temp files (continuous-learning-v2)
|
||||
.observer-tmp/
|
||||
|
||||
# Rust build artifacts
|
||||
ecc2/target/
|
||||
|
||||
# Bootstrap pipeline outputs
|
||||
# Generated lock files in tool subdirectories
|
||||
.opencode/package-lock.json
|
||||
|
||||
27
.mcp.json
Normal file
27
.mcp.json
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"github": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-github"]
|
||||
},
|
||||
"context7": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@upstash/context7-mcp@2.1.4"]
|
||||
},
|
||||
"exa": {
|
||||
"url": "https://mcp.exa.ai/mcp"
|
||||
},
|
||||
"memory": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-memory"]
|
||||
},
|
||||
"playwright": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@playwright/mcp@0.0.68", "--extension"]
|
||||
},
|
||||
"sequential-thinking": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
}
|
||||
}
|
||||
}
|
||||
1
.yarnrc.yml
Normal file
1
.yarnrc.yml
Normal file
@@ -0,0 +1 @@
|
||||
nodeLinker: node-modules
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — Agent Instructions
|
||||
|
||||
This is a **production-ready AI coding plugin** providing 28 specialized agents, 119 skills, 60 commands, and automated hook workflows for software development.
|
||||
This is a **production-ready AI coding plugin** providing 28 specialized agents, 125 skills, 60 commands, and automated hook workflows for software development.
|
||||
|
||||
**Version:** 1.9.0
|
||||
|
||||
@@ -142,7 +142,7 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
|
||||
|
||||
```
|
||||
agents/ — 28 specialized subagents
|
||||
skills/ — 117 workflow skills and domain knowledge
|
||||
skills/ — 125 workflow skills and domain knowledge
|
||||
commands/ — 60 slash commands
|
||||
hooks/ — Trigger-based automations
|
||||
rules/ — Always-follow guidelines (common + per-language)
|
||||
|
||||
159
COMMANDS-QUICK-REF.md
Normal file
159
COMMANDS-QUICK-REF.md
Normal file
@@ -0,0 +1,159 @@
|
||||
# Commands Quick Reference
|
||||
|
||||
> 59 slash commands installed globally. Type `/` in any Claude Code session to invoke.
|
||||
|
||||
---
|
||||
|
||||
## Core Workflow
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/plan` | Restate requirements, assess risks, write step-by-step implementation plan — **waits for your confirm before touching code** |
|
||||
| `/tdd` | Enforce test-driven development: scaffold interface → write failing test → implement → verify 80%+ coverage |
|
||||
| `/code-review` | Full code quality, security, and maintainability review of changed files |
|
||||
| `/build-fix` | Detect and fix build errors — delegates to the right build-resolver agent automatically |
|
||||
| `/verify` | Run the full verification loop: build → lint → test → type-check |
|
||||
| `/quality-gate` | Quality gate check against project standards |
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/tdd` | Universal TDD workflow (any language) |
|
||||
| `/e2e` | Generate + run Playwright end-to-end tests, capture screenshots/videos/traces |
|
||||
| `/test-coverage` | Report test coverage, identify gaps |
|
||||
| `/go-test` | TDD workflow for Go (table-driven, 80%+ coverage with `go test -cover`) |
|
||||
| `/kotlin-test` | TDD for Kotlin (Kotest + Kover) |
|
||||
| `/rust-test` | TDD for Rust (cargo test, integration tests) |
|
||||
| `/cpp-test` | TDD for C++ (GoogleTest + gcov/lcov) |
|
||||
|
||||
---
|
||||
|
||||
## Code Review
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/code-review` | Universal code review |
|
||||
| `/python-review` | Python — PEP 8, type hints, security, idiomatic patterns |
|
||||
| `/go-review` | Go — idiomatic patterns, concurrency safety, error handling |
|
||||
| `/kotlin-review` | Kotlin — null safety, coroutine safety, clean architecture |
|
||||
| `/rust-review` | Rust — ownership, lifetimes, unsafe usage |
|
||||
| `/cpp-review` | C++ — memory safety, modern idioms, concurrency |
|
||||
|
||||
---
|
||||
|
||||
## Build Fixers
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/build-fix` | Auto-detect language and fix build errors |
|
||||
| `/go-build` | Fix Go build errors and `go vet` warnings |
|
||||
| `/kotlin-build` | Fix Kotlin/Gradle compiler errors |
|
||||
| `/rust-build` | Fix Rust build + borrow checker issues |
|
||||
| `/cpp-build` | Fix C++ CMake and linker problems |
|
||||
| `/gradle-build` | Fix Gradle errors for Android / KMP |
|
||||
|
||||
---
|
||||
|
||||
## Planning & Architecture
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/plan` | Implementation plan with risk assessment |
|
||||
| `/multi-plan` | Multi-model collaborative planning |
|
||||
| `/multi-workflow` | Multi-model collaborative development |
|
||||
| `/multi-backend` | Backend-focused multi-model development |
|
||||
| `/multi-frontend` | Frontend-focused multi-model development |
|
||||
| `/multi-execute` | Multi-model collaborative execution |
|
||||
| `/orchestrate` | Guide for tmux/worktree multi-agent orchestration |
|
||||
| `/devfleet` | Orchestrate parallel Claude Code agents via DevFleet |
|
||||
|
||||
---
|
||||
|
||||
## Session Management
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/save-session` | Save current session state to `~/.claude/session-data/` |
|
||||
| `/resume-session` | Load the most recent saved session from the canonical session store and resume from where you left off |
|
||||
| `/sessions` | Browse, search, and manage session history with aliases from `~/.claude/session-data/` (with legacy reads from `~/.claude/sessions/`) |
|
||||
| `/checkpoint` | Mark a checkpoint in the current session |
|
||||
| `/aside` | Answer a quick side question without losing current task context |
|
||||
| `/context-budget` | Analyse context window usage — find token overhead, optimise |
|
||||
|
||||
---
|
||||
|
||||
## Learning & Improvement
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/learn` | Extract reusable patterns from the current session |
|
||||
| `/learn-eval` | Extract patterns + self-evaluate quality before saving |
|
||||
| `/evolve` | Analyse learned instincts, suggest evolved skill structures |
|
||||
| `/promote` | Promote project-scoped instincts to global scope |
|
||||
| `/instinct-status` | Show all learned instincts (project + global) with confidence scores |
|
||||
| `/instinct-export` | Export instincts to a file |
|
||||
| `/instinct-import` | Import instincts from a file or URL |
|
||||
| `/skill-create` | Analyse local git history → generate a reusable skill |
|
||||
| `/skill-health` | Skill portfolio health dashboard with analytics |
|
||||
| `/rules-distill` | Scan skills, extract cross-cutting principles, distill into rules |
|
||||
|
||||
---
|
||||
|
||||
## Refactoring & Cleanup
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/refactor-clean` | Remove dead code, consolidate duplicates, clean up structure |
|
||||
| `/prompt-optimize` | Analyse a draft prompt and output an optimised ECC-enriched version |
|
||||
|
||||
---
|
||||
|
||||
## Docs & Research
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/docs` | Look up current library/API documentation via Context7 |
|
||||
| `/update-docs` | Update project documentation |
|
||||
| `/update-codemaps` | Regenerate codemaps for the codebase |
|
||||
|
||||
---
|
||||
|
||||
## Loops & Automation
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/loop-start` | Start a recurring agent loop on an interval |
|
||||
| `/loop-status` | Check status of running loops |
|
||||
| `/claw` | Start NanoClaw v2 — persistent REPL with model routing, skill hot-load, branching, and metrics |
|
||||
|
||||
---
|
||||
|
||||
## Project & Infrastructure
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/projects` | List known projects and their instinct statistics |
|
||||
| `/harness-audit` | Audit the agent harness configuration for reliability and cost |
|
||||
| `/eval` | Run the evaluation harness |
|
||||
| `/model-route` | Route a task to the right model (Haiku / Sonnet / Opus) |
|
||||
| `/pm2` | PM2 process manager initialisation |
|
||||
| `/setup-pm` | Configure package manager (npm / pnpm / yarn / bun) |
|
||||
|
||||
---
|
||||
|
||||
## Quick Decision Guide
|
||||
|
||||
```
|
||||
Starting a new feature? → /plan first, then /tdd
|
||||
Code just written? → /code-review
|
||||
Build broken? → /build-fix
|
||||
Need live docs? → /docs <library>
|
||||
Session about to end? → /save-session or /learn-eval
|
||||
Resuming next day? → /resume-session
|
||||
Context getting heavy? → /context-budget then /checkpoint
|
||||
Want to extract what you learned? → /learn-eval then /evolve
|
||||
Running repeated tasks? → /loop-start
|
||||
```
|
||||
122
EVALUATION.md
Normal file
122
EVALUATION.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# Repo Evaluation vs Current Setup
|
||||
|
||||
**Date:** 2026-03-21
|
||||
**Branch:** `claude/evaluate-repo-comparison-ASZ9Y`
|
||||
|
||||
---
|
||||
|
||||
## Current Setup (`~/.claude/`)
|
||||
|
||||
The active Claude Code installation is near-minimal:
|
||||
|
||||
| Component | Current |
|
||||
|-----------|---------|
|
||||
| Agents | 0 |
|
||||
| Skills | 0 installed |
|
||||
| Commands | 0 |
|
||||
| Hooks | 1 (Stop: git check) |
|
||||
| Rules | 0 |
|
||||
| MCP configs | 0 |
|
||||
|
||||
**Installed hooks:**
|
||||
- `Stop` → `stop-hook-git-check.sh` — blocks session end if there are uncommitted changes or unpushed commits
|
||||
|
||||
**Installed permissions:**
|
||||
- `Skill` — allows skill invocations
|
||||
|
||||
**Plugins:** Only `blocklist.json` (no active plugins installed)
|
||||
|
||||
---
|
||||
|
||||
## This Repo (`everything-claude-code` v1.9.0)
|
||||
|
||||
| Component | Repo |
|
||||
|-----------|------|
|
||||
| Agents | 28 |
|
||||
| Skills | 116 |
|
||||
| Commands | 59 |
|
||||
| Rules sets | 12 languages + common (60+ rule files) |
|
||||
| Hooks | Comprehensive system (PreToolUse, PostToolUse, SessionStart, Stop) |
|
||||
| MCP configs | 1 (Context7 + others) |
|
||||
| Schemas | 9 JSON validators |
|
||||
| Scripts/CLI | 46+ Node.js modules + multiple CLIs |
|
||||
| Tests | 58 test files |
|
||||
| Install profiles | core, developer, security, research, full |
|
||||
| Supported harnesses | Claude Code, Codex, Cursor, OpenCode |
|
||||
|
||||
---
|
||||
|
||||
## Gap Analysis
|
||||
|
||||
### Hooks
|
||||
- **Current:** 1 Stop hook (git hygiene check)
|
||||
- **Repo:** Full hook matrix covering:
|
||||
- Dangerous command blocking (`rm -rf`, force pushes)
|
||||
- Auto-formatting on file edits
|
||||
- Dev server tmux enforcement
|
||||
- Cost tracking
|
||||
- Session evaluation and governance capture
|
||||
- MCP health monitoring
|
||||
|
||||
### Agents (28 missing)
|
||||
The repo provides specialized agents for every major workflow:
|
||||
- Language reviewers: TypeScript, Python, Go, Java, Kotlin, Rust, C++, Flutter
|
||||
- Build resolvers: Go, Java, Kotlin, Rust, C++, PyTorch
|
||||
- Workflow agents: planner, tdd-guide, code-reviewer, security-reviewer, architect
|
||||
- Automation: loop-operator, doc-updater, refactor-cleaner, harness-optimizer
|
||||
|
||||
### Skills (116 missing)
|
||||
Domain knowledge modules covering:
|
||||
- Language patterns (Python, Go, Kotlin, Rust, C++, Java, Swift, Perl, Laravel, Django)
|
||||
- Testing strategies (TDD, E2E, coverage)
|
||||
- Architecture patterns (backend, frontend, API design, database migrations)
|
||||
- AI/ML workflows (Claude API, eval harness, agent loops, cost-aware pipelines)
|
||||
- Business workflows (investor materials, market research, content engine)
|
||||
|
||||
### Commands (59 missing)
|
||||
- `/tdd`, `/plan`, `/e2e`, `/code-review` — core dev workflows
|
||||
- `/sessions`, `/save-session`, `/resume-session` — session persistence
|
||||
- `/orchestrate`, `/multi-plan`, `/multi-execute` — multi-agent coordination
|
||||
- `/learn`, `/skill-create`, `/evolve` — continuous improvement
|
||||
- `/build-fix`, `/verify`, `/quality-gate` — build/quality automation
|
||||
|
||||
### Rules (60+ files missing)
|
||||
Language-specific coding style, patterns, testing, and security guidelines for:
|
||||
TypeScript, Python, Go, Java, Kotlin, Rust, C++, C#, Swift, Perl, PHP, and common/cross-language rules.
|
||||
|
||||
---
|
||||
|
||||
## Recommendations
|
||||
|
||||
### Immediate value (core install)
|
||||
Run `ecc install --profile core` to get:
|
||||
- Core agents (code-reviewer, planner, tdd-guide, security-reviewer)
|
||||
- Essential skills (tdd-workflow, coding-standards, security-review)
|
||||
- Key commands (/tdd, /plan, /code-review, /build-fix)
|
||||
|
||||
### Full install
|
||||
Run `ecc install --profile full` to get all 28 agents, 116 skills, and 59 commands.
|
||||
|
||||
### Hooks upgrade
|
||||
The current Stop hook is solid. The repo's `hooks.json` adds:
|
||||
- Dangerous command blocking (safety)
|
||||
- Auto-formatting (quality)
|
||||
- Cost tracking (observability)
|
||||
- Session evaluation (learning)
|
||||
|
||||
### Rules
|
||||
Adding language rules (e.g., TypeScript, Python) provides always-on coding guidelines without relying on per-session prompts.
|
||||
|
||||
---
|
||||
|
||||
## What the Current Setup Does Well
|
||||
|
||||
- The `stop-hook-git-check.sh` Stop hook is production-quality and already enforces good git hygiene
|
||||
- The `Skill` permission is correctly configured
|
||||
- The setup is clean with no conflicts or cruft
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
The current setup is essentially a blank slate with one well-implemented git hygiene hook. This repo provides a complete, production-tested enhancement layer covering agents, skills, commands, hooks, and rules — with a selective install system so you can add exactly what you need without bloating the configuration.
|
||||
62
README.md
62
README.md
@@ -180,6 +180,11 @@ cd everything-claude-code
|
||||
npm install # or: pnpm install | yarn install | bun install
|
||||
|
||||
# macOS/Linux
|
||||
|
||||
# Recommended: install everything (full profile)
|
||||
./install.sh --profile full
|
||||
|
||||
# Or install for specific languages only
|
||||
./install.sh typescript # or python or golang or swift or php
|
||||
# ./install.sh typescript python golang swift php
|
||||
# ./install.sh --target cursor typescript
|
||||
@@ -188,6 +193,11 @@ npm install # or: pnpm install | yarn install | bun install
|
||||
|
||||
```powershell
|
||||
# Windows PowerShell
|
||||
|
||||
# Recommended: install everything (full profile)
|
||||
.\install.ps1 --profile full
|
||||
|
||||
# Or install for specific languages only
|
||||
.\install.ps1 typescript # or python or golang or swift or php
|
||||
# .\install.ps1 typescript python golang swift php
|
||||
# .\install.ps1 --target cursor typescript
|
||||
@@ -197,7 +207,7 @@ npm install # or: pnpm install | yarn install | bun install
|
||||
npx ecc-install typescript
|
||||
```
|
||||
|
||||
For manual install instructions see the README in the `rules/` folder.
|
||||
For manual install instructions see the README in the `rules/` folder. When copying rules manually, copy the whole language directory (for example `rules/common` or `rules/golang`), not the files inside it, so relative references keep working and filenames do not collide.
|
||||
|
||||
### Step 3: Start Using
|
||||
|
||||
@@ -212,7 +222,21 @@ For manual install instructions see the README in the `rules/` folder.
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **That's it!** You now have access to 28 agents, 119 skills, and 60 commands.
|
||||
✨ **That's it!** You now have access to 28 agents, 125 skills, and 60 commands.
|
||||
|
||||
### Multi-model commands require additional setup
|
||||
|
||||
> ⚠️ `multi-*` commands are **not** covered by the base plugin/rules install above.
|
||||
>
|
||||
> To use `/multi-plan`, `/multi-execute`, `/multi-backend`, `/multi-frontend`, and `/multi-workflow`, you must also install the `ccg-workflow` runtime.
|
||||
>
|
||||
> Initialize it with `npx ccg-workflow`.
|
||||
>
|
||||
> That runtime provides the external dependencies these commands expect, including:
|
||||
> - `~/.claude/bin/codeagent-wrapper`
|
||||
> - `~/.claude/.ccg/prompts/*`
|
||||
>
|
||||
> Without `ccg-workflow`, these `multi-*` commands will not run correctly.
|
||||
|
||||
---
|
||||
|
||||
@@ -614,16 +638,16 @@ This gives you instant access to all commands, agents, skills, and hooks.
|
||||
>
|
||||
> # Option A: User-level rules (applies to all projects)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php ~/.claude/rules/
|
||||
>
|
||||
> # Option B: Project-level rules (applies to current project only)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/ # pick your stack
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -639,12 +663,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# Copy agents to your Claude config
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Copy rules (common + language-specific)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
# Copy rules directories (common + language-specific)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php ~/.claude/rules/
|
||||
|
||||
# Copy commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -850,7 +875,8 @@ Yes. Use Option 2 (manual installation) and copy only what you need:
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Just rules
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
mkdir -p ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
```
|
||||
|
||||
Each component is fully independent.
|
||||
@@ -999,6 +1025,8 @@ cp .codex/config.toml ~/.codex/config.toml
|
||||
|
||||
The sync script safely merges ECC MCP servers into your existing `~/.codex/config.toml` using an **add-only** strategy — it never removes or modifies your existing servers. Run with `--dry-run` to preview changes, or `--update-mcp` to force-refresh ECC servers to the latest recommended config.
|
||||
|
||||
For Context7, ECC uses the canonical Codex section name `[mcp_servers.context7]` while still launching the `@upstash/context7-mcp` package. If you already have a legacy `[mcp_servers.context7-mcp]` entry, `--update-mcp` migrates it to the canonical section name.
|
||||
|
||||
Codex macOS app:
|
||||
- Open this repository as your workspace.
|
||||
- The root `AGENTS.md` is auto-detected.
|
||||
@@ -1085,7 +1113,7 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | ✅ 28 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Commands | ✅ 60 commands | ✅ 31 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 119 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Skills | ✅ 125 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** |
|
||||
| Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** |
|
||||
| MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** |
|
||||
|
||||
@@ -82,14 +82,17 @@
|
||||
# 首先克隆仓库
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
# 复制规则目录(通用 + 语言特定)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
```
|
||||
|
||||
复制规则时,请复制整个目录(例如 `rules/common`、`rules/golang`),而不是复制目录内的文件;这样可以保留相对引用,并避免不同规则集中的同名文件互相覆盖。
|
||||
|
||||
### 第三步:开始使用
|
||||
|
||||
```bash
|
||||
@@ -105,6 +108,20 @@ cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
|
||||
✨ **完成!** 你现在可以使用 13 个代理、43 个技能和 31 个命令。
|
||||
|
||||
### multi-* 命令需要额外配置
|
||||
|
||||
> ⚠️ 上面的基础插件 / rules 安装**不包含** `multi-*` 命令所需的运行时。
|
||||
>
|
||||
> 如果要使用 `/multi-plan`、`/multi-execute`、`/multi-backend`、`/multi-frontend` 和 `/multi-workflow`,还需要额外安装 `ccg-workflow` 运行时。
|
||||
>
|
||||
> 可通过 `npx ccg-workflow` 完成初始化安装。
|
||||
>
|
||||
> 该运行时会提供这些命令依赖的关键组件,包括:
|
||||
> - `~/.claude/bin/codeagent-wrapper`
|
||||
> - `~/.claude/.ccg/prompts/*`
|
||||
>
|
||||
> 未安装 `ccg-workflow` 时,这些 `multi-*` 命令将无法正常运行。
|
||||
|
||||
---
|
||||
|
||||
## 🌐 跨平台支持
|
||||
@@ -352,11 +369,20 @@ everything-claude-code/
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # 选项 A:用户级规则(应用于所有项目)
|
||||
> cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
>
|
||||
> # 选项 B:项目级规则(仅应用于当前项目)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/
|
||||
> cp -r everything-claude-code/rules/python .claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang .claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl .claude/rules/
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -372,12 +398,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# 将代理复制到你的 Claude 配置
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
# 复制规则目录(通用 + 语言特定)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
|
||||
# 复制命令
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
196
REPO-ASSESSMENT.md
Normal file
196
REPO-ASSESSMENT.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Repo & Fork Assessment + Setup Recommendations
|
||||
|
||||
**Date:** 2026-03-21
|
||||
|
||||
---
|
||||
|
||||
## What's Available
|
||||
|
||||
### Repo: `Infiniteyieldai/everything-claude-code`
|
||||
|
||||
This is a **fork of `affaan-m/everything-claude-code`** (the upstream project with 50K+ stars, 6K+ forks).
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| Version | 1.9.0 (current) |
|
||||
| Status | Clean fork — 1 commit ahead of upstream `main` (the EVALUATION.md doc added in this session) |
|
||||
| Remote branches | `main`, `claude/evaluate-repo-comparison-ASZ9Y` |
|
||||
| Upstream sync | Fully synced — last upstream commit merged was the zh-CN docs PR (#728) |
|
||||
| License | MIT |
|
||||
|
||||
**This is the right repo to work from.** It's the latest upstream version with no divergence or merge conflicts.
|
||||
|
||||
---
|
||||
|
||||
### Current `~/.claude/` Installation
|
||||
|
||||
| Component | Installed | Available in Repo |
|
||||
|-----------|-----------|-------------------|
|
||||
| Agents | 0 | 28 |
|
||||
| Skills | 0 | 116 |
|
||||
| Commands | 0 | 59 |
|
||||
| Rules | 0 | 60+ files (12 languages) |
|
||||
| Hooks | 1 (git Stop check) | Full PreToolUse/PostToolUse matrix |
|
||||
| MCP configs | 0 | 1 (Context7) |
|
||||
|
||||
The existing Stop hook (`stop-hook-git-check.sh`) is solid — blocks session end on uncommitted/unpushed work. Keep it.
|
||||
|
||||
---
|
||||
|
||||
## Install Profile Recommendations
|
||||
|
||||
The repo ships 5 install profiles. Choose based on your primary use case:
|
||||
|
||||
### Profile: `core` (Minimum viable setup)
|
||||
> Fastest to install. Gets you commands, core agents, hooks runtime, and quality workflow.
|
||||
|
||||
**Best for:** Trying ECC out, minimal footprint, or a constrained environment.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile core
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
**Installs:** rules-core, agents-core, commands-core, hooks-runtime, platform-configs, workflow-quality
|
||||
|
||||
---
|
||||
|
||||
### Profile: `developer` (Recommended for daily dev work)
|
||||
> The default engineering profile for most ECC users.
|
||||
|
||||
**Best for:** General software development across app codebases.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile developer
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
**Adds over core:** framework-language skills, database patterns, orchestration commands
|
||||
|
||||
---
|
||||
|
||||
### Profile: `security`
|
||||
> Baseline runtime + security-specific agents and rules.
|
||||
|
||||
**Best for:** Security-focused workflows, code audits, vulnerability reviews.
|
||||
|
||||
---
|
||||
|
||||
### Profile: `research`
|
||||
> Investigation, synthesis, and publishing workflows.
|
||||
|
||||
**Best for:** Content creation, investor materials, market research, cross-posting.
|
||||
|
||||
---
|
||||
|
||||
### Profile: `full`
|
||||
> Everything — all 18 modules.
|
||||
|
||||
**Best for:** Power users who want the complete toolkit.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile full
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Priority Additions (High Value, Low Risk)
|
||||
|
||||
Regardless of profile, these components add immediate value:
|
||||
|
||||
### 1. Core Agents (highest ROI)
|
||||
|
||||
| Agent | Why it matters |
|
||||
|-------|----------------|
|
||||
| `planner.md` | Breaks complex tasks into implementation plans |
|
||||
| `code-reviewer.md` | Quality and maintainability review |
|
||||
| `tdd-guide.md` | TDD workflow (RED→GREEN→IMPROVE) |
|
||||
| `security-reviewer.md` | Vulnerability detection |
|
||||
| `architect.md` | System design & scalability decisions |
|
||||
|
||||
### 2. Key Commands
|
||||
|
||||
| Command | Why it matters |
|
||||
|---------|----------------|
|
||||
| `/plan` | Implementation planning before coding |
|
||||
| `/tdd` | Test-driven workflow |
|
||||
| `/code-review` | On-demand review |
|
||||
| `/build-fix` | Automated build error resolution |
|
||||
| `/learn` | Extract patterns from current session |
|
||||
|
||||
### 3. Hook Upgrades (from `hooks/hooks.json`)
|
||||
The repo's hook system adds these over the current single Stop hook:
|
||||
|
||||
| Hook | Trigger | Value |
|
||||
|------|---------|-------|
|
||||
| `block-no-verify` | PreToolUse: Bash | Blocks `--no-verify` git flag abuse |
|
||||
| `pre-bash-git-push-reminder` | PreToolUse: Bash | Pre-push review reminder |
|
||||
| `doc-file-warning` | PreToolUse: Write | Warns on non-standard doc files |
|
||||
| `suggest-compact` | PreToolUse: Edit/Write | Suggests compaction at logical intervals |
|
||||
| Continuous learning observer | PreToolUse: * | Captures tool use patterns for skill improvement |
|
||||
|
||||
### 4. Rules (Always-on guidelines)
|
||||
The `rules/common/` directory provides baseline guidelines that fire on every session:
|
||||
- `security.md` — Security guardrails
|
||||
- `testing.md` — 80%+ coverage requirement
|
||||
- `git-workflow.md` — Conventional commits, branch strategy
|
||||
- `coding-style.md` — Cross-language style standards
|
||||
|
||||
---
|
||||
|
||||
## What to Do With the Fork
|
||||
|
||||
### Option A: Use as upstream tracker (current state)
|
||||
Keep the fork synced with `affaan-m/everything-claude-code` upstream. Periodically merge upstream changes:
|
||||
```bash
|
||||
git fetch upstream
|
||||
git merge upstream/main
|
||||
```
|
||||
Install from the local clone. This is clean and maintainable.
|
||||
|
||||
### Option B: Customize the fork
|
||||
Add personal skills, agents, or commands to the fork. Good for:
|
||||
- Business-specific domain skills (your vertical)
|
||||
- Team-specific coding conventions
|
||||
- Custom hooks for your stack
|
||||
|
||||
The fork already has the EVALUATION.md and REPO-ASSESSMENT.md docs — that's fine for a working fork.
|
||||
|
||||
### Option C: Install from npm (simplest for fresh machines)
|
||||
```bash
|
||||
npx ecc-universal install --profile developer
|
||||
```
|
||||
No need to clone the repo. This is the recommended install method for most users.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Setup Steps
|
||||
|
||||
1. **Keep the existing Stop hook** — it's doing its job
|
||||
2. **Run the developer profile install** from the local fork:
|
||||
```bash
|
||||
cd /path/to/everything-claude-code
|
||||
node scripts/install-plan.js --profile developer
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
3. **Add language rules** for your primary stack (TypeScript, Python, Go, etc.):
|
||||
```bash
|
||||
node scripts/install-plan.js --add rules/typescript
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
4. **Enable MCP Context7** for live documentation lookup:
|
||||
- Copy `mcp-configs/mcp-servers.json` into your project's `.claude/` dir
|
||||
5. **Review hooks** — enable the `hooks/hooks.json` additions selectively, starting with `block-no-verify` and `pre-bash-git-push-reminder`
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| Is the fork healthy? | Yes — fully synced with upstream v1.9.0 |
|
||||
| Other forks to consider? | None visible in this environment; upstream `affaan-m/everything-claude-code` is the source of truth |
|
||||
| Best install profile? | `developer` for day-to-day dev work |
|
||||
| Biggest gap in current setup? | 0 agents installed — add at minimum: planner, code-reviewer, tdd-guide, security-reviewer |
|
||||
| Quickest win? | Run `node scripts/install-plan.js --profile core && node scripts/install-apply.js` |
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
description: Load the most recent session file from ~/.claude/sessions/ and resume work with full context from where the last session ended.
|
||||
description: Load the most recent session file from ~/.claude/session-data/ and resume work with full context from where the last session ended.
|
||||
---
|
||||
|
||||
# Resume Session Command
|
||||
@@ -17,10 +17,10 @@ This command is the counterpart to `/save-session`.
|
||||
## Usage
|
||||
|
||||
```
|
||||
/resume-session # loads most recent file in ~/.claude/sessions/
|
||||
/resume-session # loads most recent file in ~/.claude/session-data/
|
||||
/resume-session 2024-01-15 # loads most recent session for that date
|
||||
/resume-session ~/.claude/sessions/2024-01-15-session.tmp # loads a specific legacy-format file
|
||||
/resume-session ~/.claude/sessions/2024-01-15-abc123de-session.tmp # loads a current short-id session file
|
||||
/resume-session ~/.claude/session-data/2024-01-15-abc123de-session.tmp # loads a current short-id session file
|
||||
/resume-session ~/.claude/sessions/2024-01-15-session.tmp # loads a specific legacy-format file
|
||||
```
|
||||
|
||||
## Process
|
||||
@@ -29,19 +29,20 @@ This command is the counterpart to `/save-session`.
|
||||
|
||||
If no argument provided:
|
||||
|
||||
1. Check `~/.claude/sessions/`
|
||||
1. Check `~/.claude/session-data/`
|
||||
2. Pick the most recently modified `*-session.tmp` file
|
||||
3. If the folder does not exist or has no matching files, tell the user:
|
||||
```
|
||||
No session files found in ~/.claude/sessions/
|
||||
No session files found in ~/.claude/session-data/
|
||||
Run /save-session at the end of a session to create one.
|
||||
```
|
||||
Then stop.
|
||||
|
||||
If an argument is provided:
|
||||
|
||||
- If it looks like a date (`YYYY-MM-DD`), search `~/.claude/sessions/` for files matching
|
||||
`YYYY-MM-DD-session.tmp` (legacy format) or `YYYY-MM-DD-<shortid>-session.tmp` (current format)
|
||||
- If it looks like a date (`YYYY-MM-DD`), search `~/.claude/session-data/` first, then the legacy
|
||||
`~/.claude/sessions/`, for files matching `YYYY-MM-DD-session.tmp` (legacy format) or
|
||||
`YYYY-MM-DD-<shortid>-session.tmp` (current format)
|
||||
and load the most recently modified variant for that date
|
||||
- If it looks like a file path, read that file directly
|
||||
- If not found, report clearly and stop
|
||||
@@ -114,7 +115,7 @@ Report: "Session file found but appears empty or unreadable. You may need to cre
|
||||
## Example Output
|
||||
|
||||
```
|
||||
SESSION LOADED: /Users/you/.claude/sessions/2024-01-15-abc123de-session.tmp
|
||||
SESSION LOADED: /Users/you/.claude/session-data/2024-01-15-abc123de-session.tmp
|
||||
════════════════════════════════════════════════
|
||||
|
||||
PROJECT: my-app — JWT Authentication
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
description: Save current session state to a dated file in ~/.claude/sessions/ so work can be resumed in a future session with full context.
|
||||
description: Save current session state to a dated file in ~/.claude/session-data/ so work can be resumed in a future session with full context.
|
||||
---
|
||||
|
||||
# Save Session Command
|
||||
@@ -29,19 +29,19 @@ Before writing the file, collect:
|
||||
Create the canonical sessions folder in the user's Claude home directory:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.claude/sessions
|
||||
mkdir -p ~/.claude/session-data
|
||||
```
|
||||
|
||||
### Step 3: Write the session file
|
||||
|
||||
Create `~/.claude/sessions/YYYY-MM-DD-<short-id>-session.tmp`, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`:
|
||||
Create `~/.claude/session-data/YYYY-MM-DD-<short-id>-session.tmp`, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`:
|
||||
|
||||
- Allowed characters: lowercase `a-z`, digits `0-9`, hyphens `-`
|
||||
- Minimum length: 8 characters
|
||||
- No uppercase letters, no underscores, no spaces
|
||||
- Compatibility characters: letters `a-z` / `A-Z`, digits `0-9`, hyphens `-`, underscores `_`
|
||||
- Compatibility minimum length: 1 character
|
||||
- Recommended style for new files: lowercase letters, digits, and hyphens with 8+ characters to avoid collisions
|
||||
|
||||
Valid examples: `abc123de`, `a1b2c3d4`, `frontend-worktree-1`
|
||||
Invalid examples: `ABC123de` (uppercase), `short` (under 8 chars), `test_id1` (underscore)
|
||||
Valid examples: `abc123de`, `a1b2c3d4`, `frontend-worktree-1`, `ChezMoi_2`
|
||||
Avoid for new files: `A`, `test_id1`, `ABC123de`
|
||||
|
||||
Full valid filename example: `2024-01-15-abc123de-session.tmp`
|
||||
|
||||
@@ -271,5 +271,5 @@ Then test with Postman — the response should include a `Set-Cookie` header.
|
||||
- The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it
|
||||
- If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly
|
||||
- The file is meant to be read by Claude at the start of the next session via `/resume-session`
|
||||
- Use the canonical global session store: `~/.claude/sessions/`
|
||||
- Use the canonical global session store: `~/.claude/session-data/`
|
||||
- Prefer the short-id filename form (`YYYY-MM-DD-<short-id>-session.tmp`) for any new session file
|
||||
|
||||
@@ -4,7 +4,7 @@ description: Manage Claude Code session history, aliases, and session metadata.
|
||||
|
||||
# Sessions Command
|
||||
|
||||
Manage Claude Code session history - list, load, alias, and edit sessions stored in `~/.claude/sessions/`.
|
||||
Manage Claude Code session history - list, load, alias, and edit sessions stored in `~/.claude/session-data/` with legacy reads from `~/.claude/sessions/`.
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -89,7 +89,7 @@ const size = sm.getSessionSize(session.sessionPath);
|
||||
const aliases = aa.getAliasesForSession(session.filename);
|
||||
|
||||
console.log('Session: ' + session.filename);
|
||||
console.log('Path: ~/.claude/sessions/' + session.filename);
|
||||
console.log('Path: ' + session.sessionPath);
|
||||
console.log('');
|
||||
console.log('Statistics:');
|
||||
console.log(' Lines: ' + stats.lineCount);
|
||||
@@ -327,7 +327,7 @@ $ARGUMENTS:
|
||||
|
||||
## Notes
|
||||
|
||||
- Sessions are stored as markdown files in `~/.claude/sessions/`
|
||||
- Sessions are stored as markdown files in `~/.claude/session-data/` with legacy reads from `~/.claude/sessions/`
|
||||
- Aliases are stored in `~/.claude/session-aliases.json`
|
||||
- Session IDs can be shortened (first 4-8 characters usually unique enough)
|
||||
- Use aliases for frequently referenced sessions
|
||||
|
||||
2017
ecc2/Cargo.lock
generated
Normal file
2017
ecc2/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
53
ecc2/Cargo.toml
Normal file
53
ecc2/Cargo.toml
Normal file
@@ -0,0 +1,53 @@
|
||||
[package]
|
||||
name = "ecc-tui"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
description = "ECC 2.0 — Agentic IDE control plane with TUI dashboard"
|
||||
license = "MIT"
|
||||
authors = ["Affaan Mustafa <me@affaanmustafa.com>"]
|
||||
repository = "https://github.com/affaan-m/everything-claude-code"
|
||||
|
||||
[dependencies]
|
||||
# TUI
|
||||
ratatui = "0.29"
|
||||
crossterm = "0.28"
|
||||
|
||||
# Async runtime
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
||||
# State store
|
||||
rusqlite = { version = "0.32", features = ["bundled"] }
|
||||
|
||||
# Git integration
|
||||
git2 = "0.20"
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
toml = "0.8"
|
||||
|
||||
# CLI
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
|
||||
# Logging & tracing
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
|
||||
# Error handling
|
||||
anyhow = "1"
|
||||
thiserror = "2"
|
||||
libc = "0.2"
|
||||
|
||||
# Time
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
|
||||
# UUID for session IDs
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
|
||||
# Directory paths
|
||||
dirs = "6"
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
codegen-units = 1
|
||||
strip = true
|
||||
36
ecc2/src/comms/mod.rs
Normal file
36
ecc2/src/comms/mod.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
use anyhow::Result;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::session::store::StateStore;
|
||||
|
||||
/// Message types for inter-agent communication.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum MessageType {
|
||||
/// Task handoff from one agent to another
|
||||
TaskHandoff { task: String, context: String },
|
||||
/// Agent requesting information from another
|
||||
Query { question: String },
|
||||
/// Response to a query
|
||||
Response { answer: String },
|
||||
/// Notification of completion
|
||||
Completed {
|
||||
summary: String,
|
||||
files_changed: Vec<String>,
|
||||
},
|
||||
/// Conflict detected (e.g., two agents editing the same file)
|
||||
Conflict { file: String, description: String },
|
||||
}
|
||||
|
||||
/// Send a structured message between sessions.
|
||||
pub fn send(db: &StateStore, from: &str, to: &str, msg: &MessageType) -> Result<()> {
|
||||
let content = serde_json::to_string(msg)?;
|
||||
let msg_type = match msg {
|
||||
MessageType::TaskHandoff { .. } => "task_handoff",
|
||||
MessageType::Query { .. } => "query",
|
||||
MessageType::Response { .. } => "response",
|
||||
MessageType::Completed { .. } => "completed",
|
||||
MessageType::Conflict { .. } => "conflict",
|
||||
};
|
||||
db.send_message(from, to, &content, msg_type)?;
|
||||
Ok(())
|
||||
}
|
||||
144
ecc2/src/config/mod.rs
Normal file
144
ecc2/src/config/mod.rs
Normal file
@@ -0,0 +1,144 @@
|
||||
use anyhow::Result;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum PaneLayout {
|
||||
#[default]
|
||||
Horizontal,
|
||||
Vertical,
|
||||
Grid,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct RiskThresholds {
|
||||
pub review: f64,
|
||||
pub confirm: f64,
|
||||
pub block: f64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(default)]
|
||||
pub struct Config {
|
||||
pub db_path: PathBuf,
|
||||
pub worktree_root: PathBuf,
|
||||
pub max_parallel_sessions: usize,
|
||||
pub max_parallel_worktrees: usize,
|
||||
pub session_timeout_secs: u64,
|
||||
pub heartbeat_interval_secs: u64,
|
||||
pub default_agent: String,
|
||||
pub cost_budget_usd: f64,
|
||||
pub token_budget: u64,
|
||||
pub theme: Theme,
|
||||
pub pane_layout: PaneLayout,
|
||||
pub risk_thresholds: RiskThresholds,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum Theme {
|
||||
Dark,
|
||||
Light,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
let home = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
|
||||
Self {
|
||||
db_path: home.join(".claude").join("ecc2.db"),
|
||||
worktree_root: PathBuf::from("/tmp/ecc-worktrees"),
|
||||
max_parallel_sessions: 8,
|
||||
max_parallel_worktrees: 6,
|
||||
session_timeout_secs: 3600,
|
||||
heartbeat_interval_secs: 30,
|
||||
default_agent: "claude".to_string(),
|
||||
cost_budget_usd: 10.0,
|
||||
token_budget: 500_000,
|
||||
theme: Theme::Dark,
|
||||
pane_layout: PaneLayout::Horizontal,
|
||||
risk_thresholds: Self::RISK_THRESHOLDS,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub const RISK_THRESHOLDS: RiskThresholds = RiskThresholds {
|
||||
review: 0.35,
|
||||
confirm: 0.60,
|
||||
block: 0.85,
|
||||
};
|
||||
|
||||
pub fn load() -> Result<Self> {
|
||||
let config_path = dirs::home_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join(".claude")
|
||||
.join("ecc2.toml");
|
||||
|
||||
if config_path.exists() {
|
||||
let content = std::fs::read_to_string(&config_path)?;
|
||||
let config: Config = toml::from_str(&content)?;
|
||||
Ok(config)
|
||||
} else {
|
||||
Ok(Config::default())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RiskThresholds {
|
||||
fn default() -> Self {
|
||||
Config::RISK_THRESHOLDS
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{Config, PaneLayout};

    // Defaults must carry usable (non-zero) spend limits.
    #[test]
    fn default_includes_positive_budget_thresholds() {
        let config = Config::default();

        assert!(config.cost_budget_usd > 0.0);
        assert!(config.token_budget > 0);
    }

    // A config written before the budget/layout/risk fields existed must
    // still deserialize, picking up defaults for the missing fields.
    #[test]
    fn missing_budget_fields_fall_back_to_defaults() {
        let legacy_config = r#"
db_path = "/tmp/ecc2.db"
worktree_root = "/tmp/ecc-worktrees"
max_parallel_sessions = 8
max_parallel_worktrees = 6
session_timeout_secs = 3600
heartbeat_interval_secs = 30
default_agent = "claude"
theme = "Dark"
"#;

        let config: Config = toml::from_str(legacy_config).unwrap();
        let defaults = Config::default();

        assert_eq!(config.cost_budget_usd, defaults.cost_budget_usd);
        assert_eq!(config.token_budget, defaults.token_budget);
        assert_eq!(config.pane_layout, defaults.pane_layout);
        assert_eq!(config.risk_thresholds, defaults.risk_thresholds);
    }

    #[test]
    fn default_pane_layout_is_horizontal() {
        assert_eq!(Config::default().pane_layout, PaneLayout::Horizontal);
    }

    // The TOML representation of the layout enum is lowercase.
    #[test]
    fn pane_layout_deserializes_from_toml() {
        let config: Config = toml::from_str(r#"pane_layout = "grid""#).unwrap();

        assert_eq!(config.pane_layout, PaneLayout::Grid);
    }

    #[test]
    fn default_risk_thresholds_are_applied() {
        assert_eq!(Config::default().risk_thresholds, Config::RISK_THRESHOLDS);
    }
}
|
||||
142
ecc2/src/main.rs
Normal file
142
ecc2/src/main.rs
Normal file
@@ -0,0 +1,142 @@
|
||||
mod comms;
|
||||
mod config;
|
||||
mod observability;
|
||||
mod session;
|
||||
mod tui;
|
||||
mod worktree;
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::Parser;
|
||||
use std::path::PathBuf;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
// Top-level CLI arguments. Plain `//` comments are used here on purpose:
// clap turns `///` doc comments into user-visible help text.
#[derive(Parser, Debug)]
#[command(name = "ecc", version, about = "ECC 2.0 — Agentic IDE control plane")]
struct Cli {
    // Optional so that a bare `ecc` invocation is valid (it opens the
    // dashboard — see the `None` arm in `main`).
    #[command(subcommand)]
    command: Option<Commands>,
}
|
||||
|
||||
// Subcommands of the `ecc` binary. `///` comments on variants/fields are
// clap help text and are therefore left byte-identical.
#[derive(clap::Subcommand, Debug)]
enum Commands {
    /// Launch the TUI dashboard
    Dashboard,
    /// Start a new agent session
    Start {
        /// Task description for the agent
        #[arg(short, long)]
        task: String,
        /// Agent type (claude, codex, custom)
        #[arg(short, long, default_value = "claude")]
        agent: String,
        /// Create a dedicated worktree for this session
        #[arg(short, long)]
        worktree: bool,
    },
    /// List active sessions
    Sessions,
    /// Show session details
    Status {
        /// Session ID or alias
        session_id: Option<String>,
    },
    /// Stop a running session
    Stop {
        /// Session ID or alias
        session_id: String,
    },
    /// Resume a failed or stopped session
    Resume {
        /// Session ID or alias
        session_id: String,
    },
    /// Run as background daemon
    Daemon,
    // Internal runner subcommand spawned by the session manager; hidden
    // from `--help` because users never invoke it directly.
    #[command(hide = true)]
    RunSession {
        #[arg(long)]
        session_id: String,
        #[arg(long)]
        task: String,
        #[arg(long)]
        agent: String,
        #[arg(long)]
        cwd: PathBuf,
    },
}
|
||||
|
||||
// Process entry point: initialize tracing from the environment, load the
// config, open the session store, then dispatch on the parsed subcommand.
#[tokio::main]
async fn main() -> Result<()> {
    // Log filtering comes from the default env filter (RUST_LOG).
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();

    let cli = Cli::parse();

    let cfg = config::Config::load()?;
    let db = session::store::StateStore::open(&cfg.db_path)?;

    match cli.command {
        // Running `ecc` with no subcommand behaves like `ecc dashboard`.
        Some(Commands::Dashboard) | None => {
            tui::app::run(db, cfg).await?;
        }
        Some(Commands::Start {
            task,
            agent,
            worktree: use_worktree,
        }) => {
            let session_id =
                session::manager::create_session(&db, &cfg, &task, &agent, use_worktree).await?;
            println!("Session started: {session_id}");
        }
        Some(Commands::Sessions) => {
            let sessions = session::manager::list_sessions(&db)?;
            for s in sessions {
                println!("{} [{}] {}", s.id, s.state, s.task);
            }
        }
        Some(Commands::Status { session_id }) => {
            // An omitted id falls back to the "latest" alias understood by
            // the session manager.
            let id = session_id.unwrap_or_else(|| "latest".to_string());
            let status = session::manager::get_status(&db, &id)?;
            println!("{status}");
        }
        Some(Commands::Stop { session_id }) => {
            session::manager::stop_session(&db, &session_id).await?;
            println!("Session stopped: {session_id}");
        }
        Some(Commands::Resume { session_id }) => {
            let resumed_id = session::manager::resume_session(&db, &session_id).await?;
            println!("Session resumed: {resumed_id}");
        }
        Some(Commands::Daemon) => {
            println!("Starting ECC daemon...");
            session::daemon::run(db, cfg).await?;
        }
        // Hidden runner path: executes a queued session in-process.
        Some(Commands::RunSession {
            session_id,
            task,
            agent,
            cwd,
        }) => {
            session::manager::run_session(&cfg, &session_id, &task, &agent, &cwd).await?;
        }
    }

    Ok(())
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke-test the clap derive wiring for the `resume` subcommand.
    #[test]
    fn cli_parses_resume_command() {
        let cli = Cli::try_parse_from(["ecc", "resume", "deadbeef"])
            .expect("resume subcommand should parse");

        match cli.command {
            Some(Commands::Resume { session_id }) => assert_eq!(session_id, "deadbeef"),
            _ => panic!("expected resume subcommand"),
        }
    }
}
|
||||
409
ecc2/src/observability/mod.rs
Normal file
409
ecc2/src/observability/mod.rs
Normal file
@@ -0,0 +1,409 @@
|
||||
use anyhow::{bail, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::config::{Config, RiskThresholds};
|
||||
use crate::session::store::StateStore;
|
||||
|
||||
/// A single tool invocation made by an agent, as observed by the control
/// plane, with a risk score precomputed at construction time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCallEvent {
    /// Session this call belongs to.
    pub session_id: String,
    /// Name of the invoked tool (e.g. "Bash", "Write").
    pub tool_name: String,
    /// Truncated/summarized tool input used for risk scoring and display.
    pub input_summary: String,
    /// Summarized tool output.
    pub output_summary: String,
    /// Wall-clock duration of the call in milliseconds.
    pub duration_ms: u64,
    /// Risk score in [0.0, 1.0] computed by `ToolCallEvent::compute_risk`.
    pub risk_score: f64,
}
|
||||
|
||||
/// Result of scoring a tool call: the clamped score, the human-readable
/// reasons that contributed to it, and the recommended gate.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct RiskAssessment {
    /// Aggregate risk in [0.0, 1.0].
    pub score: f64,
    /// One entry per risk signal that fired.
    pub reasons: Vec<String>,
    /// Gate derived from `score` via the configured thresholds.
    pub suggested_action: SuggestedAction,
}
|
||||
|
||||
/// Gate recommended for a tool call, ordered from least to most restrictive.
/// Serialized in snake_case (e.g. `require_confirmation`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum SuggestedAction {
    /// Below the review threshold: proceed silently.
    Allow,
    /// At/above `review`: surface for human review.
    Review,
    /// At/above `confirm`: require explicit confirmation.
    RequireConfirmation,
    /// At/above `block`: refuse the call.
    Block,
}
|
||||
|
||||
impl ToolCallEvent {
|
||||
pub fn new(
|
||||
session_id: impl Into<String>,
|
||||
tool_name: impl Into<String>,
|
||||
input_summary: impl Into<String>,
|
||||
output_summary: impl Into<String>,
|
||||
duration_ms: u64,
|
||||
) -> Self {
|
||||
let tool_name = tool_name.into();
|
||||
let input_summary = input_summary.into();
|
||||
|
||||
Self {
|
||||
session_id: session_id.into(),
|
||||
risk_score: Self::compute_risk(&tool_name, &input_summary, &Config::RISK_THRESHOLDS)
|
||||
.score,
|
||||
tool_name,
|
||||
input_summary,
|
||||
output_summary: output_summary.into(),
|
||||
duration_ms,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute risk from the tool type and input characteristics.
|
||||
pub fn compute_risk(
|
||||
tool_name: &str,
|
||||
input: &str,
|
||||
thresholds: &RiskThresholds,
|
||||
) -> RiskAssessment {
|
||||
let normalized_tool = tool_name.to_ascii_lowercase();
|
||||
let normalized_input = input.to_ascii_lowercase();
|
||||
let mut score = 0.0;
|
||||
let mut reasons = Vec::new();
|
||||
|
||||
let (base_score, base_reason) = base_tool_risk(&normalized_tool);
|
||||
score += base_score;
|
||||
if let Some(reason) = base_reason {
|
||||
reasons.push(reason.to_string());
|
||||
}
|
||||
|
||||
let (file_sensitivity_score, file_sensitivity_reason) =
|
||||
assess_file_sensitivity(&normalized_input);
|
||||
score += file_sensitivity_score;
|
||||
if let Some(reason) = file_sensitivity_reason {
|
||||
reasons.push(reason);
|
||||
}
|
||||
|
||||
let (blast_radius_score, blast_radius_reason) = assess_blast_radius(&normalized_input);
|
||||
score += blast_radius_score;
|
||||
if let Some(reason) = blast_radius_reason {
|
||||
reasons.push(reason);
|
||||
}
|
||||
|
||||
let (irreversibility_score, irreversibility_reason) =
|
||||
assess_irreversibility(&normalized_input);
|
||||
score += irreversibility_score;
|
||||
if let Some(reason) = irreversibility_reason {
|
||||
reasons.push(reason);
|
||||
}
|
||||
|
||||
let score = score.clamp(0.0, 1.0);
|
||||
let suggested_action = SuggestedAction::from_score(score, thresholds);
|
||||
|
||||
RiskAssessment {
|
||||
score,
|
||||
reasons,
|
||||
suggested_action,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SuggestedAction {
|
||||
fn from_score(score: f64, thresholds: &RiskThresholds) -> Self {
|
||||
if score >= thresholds.block {
|
||||
Self::Block
|
||||
} else if score >= thresholds.confirm {
|
||||
Self::RequireConfirmation
|
||||
} else if score >= thresholds.review {
|
||||
Self::Review
|
||||
} else {
|
||||
Self::Allow
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Baseline risk contributed by the tool itself, before inspecting input.
///
/// Shell access is riskiest, then direct writes, then edits; any other
/// tool (reads, searches, ...) carries a small ambient score and no reason.
fn base_tool_risk(tool_name: &str) -> (f64, Option<&'static str>) {
    if tool_name == "bash" {
        return (0.20, Some("shell execution can modify local or shared state"));
    }
    if tool_name == "write" || tool_name == "multiedit" {
        return (0.15, Some("writes files directly"));
    }
    if tool_name == "edit" {
        return (0.10, Some("modifies existing files"));
    }
    (0.05, None)
}
|
||||
|
||||
/// Score how sensitive the files referenced by `input` appear to be.
///
/// Credential-bearing paths outweigh shared infrastructure files; the
/// caller is expected to pass lowercased input.
fn assess_file_sensitivity(input: &str) -> (f64, Option<String>) {
    const SECRET_PATTERNS: &[&str] = &[
        ".env",
        "secret",
        "credential",
        "token",
        "api_key",
        "apikey",
        "auth",
        "id_rsa",
        ".pem",
        ".key",
    ];
    const SHARED_INFRA_PATTERNS: &[&str] = &[
        "cargo.toml",
        "package.json",
        "dockerfile",
        ".github/workflows",
        "schema",
        "migration",
        "production",
    ];

    let hits = |patterns: &[&str]| patterns.iter().any(|p| input.contains(p));

    if hits(SECRET_PATTERNS) {
        (
            0.25,
            Some("targets a sensitive file or credential surface".to_string()),
        )
    } else if hits(SHARED_INFRA_PATTERNS) {
        (
            0.15,
            Some("targets shared infrastructure or release-critical files".to_string()),
        )
    } else {
        (0.0, None)
    }
}
|
||||
|
||||
/// Score how widely an operation could spread across files or shared state.
///
/// Force-pushes and recursive deletes (shared state) outrank globbing and
/// recursive flags (many local files); input is expected lowercased.
fn assess_blast_radius(input: &str) -> (f64, Option<String>) {
    const LARGE_SCOPE_PATTERNS: &[&str] = &[
        "**",
        "/*",
        "--all",
        "--recursive",
        "entire repo",
        "all files",
        "across src/",
        "find ",
        " xargs ",
    ];
    const SHARED_STATE_PATTERNS: &[&str] = &[
        "git push --force",
        "git push -f",
        "origin main",
        "origin master",
        "rm -rf .",
        "rm -rf /",
    ];

    let hits = |patterns: &[&str]| patterns.iter().any(|p| input.contains(p));

    if hits(SHARED_STATE_PATTERNS) {
        (
            0.35,
            Some("has a broad blast radius across shared state or history".to_string()),
        )
    } else if hits(LARGE_SCOPE_PATTERNS) {
        (
            0.25,
            Some("has a broad blast radius across multiple files or directories".to_string()),
        )
    } else {
        (0.0, None)
    }
}
|
||||
|
||||
/// Score how hard an operation would be to undo.
///
/// Destructive data loss (recursive delete, hard reset, DDL drops) scores
/// highest; forced deletes and history rewrites slightly lower. Input is
/// expected lowercased.
fn assess_irreversibility(input: &str) -> (f64, Option<String>) {
    const HIGH_IRREVERSIBILITY_PATTERNS: &[&str] = &[
        "rm -rf",
        "git reset --hard",
        "git clean -fd",
        "drop database",
        "drop table",
        "truncate ",
        "shred ",
    ];
    const MODERATE_IRREVERSIBILITY_PATTERNS: &[&str] =
        &["rm -f", "git push --force", "git push -f", "delete from"];

    let hits = |patterns: &[&str]| patterns.iter().any(|p| input.contains(p));

    if hits(HIGH_IRREVERSIBILITY_PATTERNS) {
        (
            0.45,
            Some("includes an irreversible or destructive operation".to_string()),
        )
    } else if hits(MODERATE_IRREVERSIBILITY_PATTERNS) {
        (
            0.40,
            Some("includes an irreversible or difficult-to-undo operation".to_string()),
        )
    } else {
        (0.0, None)
    }
}
|
||||
|
||||
/// True when `input` contains at least one of `patterns` as a substring.
fn contains_any(input: &str, patterns: &[&str]) -> bool {
    for pattern in patterns {
        if input.contains(pattern) {
            return true;
        }
    }
    false
}
|
||||
|
||||
/// A persisted tool-call log row, as stored by `StateStore::insert_tool_log`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ToolLogEntry {
    /// Database-assigned row id.
    pub id: i64,
    /// Session the call belongs to.
    pub session_id: String,
    /// Name of the invoked tool.
    pub tool_name: String,
    /// Summarized tool input.
    pub input_summary: String,
    /// Summarized tool output.
    pub output_summary: String,
    /// Call duration in milliseconds.
    pub duration_ms: u64,
    /// Risk score recorded at log time.
    pub risk_score: f64,
    /// RFC 3339 UTC timestamp assigned when the entry was logged.
    pub timestamp: String,
}
|
||||
|
||||
/// One page of tool-call logs plus the pagination metadata needed to
/// render controls (`page` is 1-based; `total` counts all matching rows).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ToolLogPage {
    /// Entries on this page.
    pub entries: Vec<ToolLogEntry>,
    /// 1-based page number that was fetched.
    pub page: u64,
    /// Requested page size (the last page may hold fewer entries).
    pub page_size: u64,
    /// Total number of matching entries across all pages.
    pub total: u64,
}
|
||||
|
||||
/// Thin facade over the state store for writing and paging tool-call logs.
pub struct ToolLogger<'a> {
    // Borrowed store; the logger adds no state of its own.
    db: &'a StateStore,
}
|
||||
|
||||
impl<'a> ToolLogger<'a> {
|
||||
pub fn new(db: &'a StateStore) -> Self {
|
||||
Self { db }
|
||||
}
|
||||
|
||||
pub fn log(&self, event: &ToolCallEvent) -> Result<ToolLogEntry> {
|
||||
let timestamp = chrono::Utc::now().to_rfc3339();
|
||||
|
||||
self.db.insert_tool_log(
|
||||
&event.session_id,
|
||||
&event.tool_name,
|
||||
&event.input_summary,
|
||||
&event.output_summary,
|
||||
event.duration_ms,
|
||||
event.risk_score,
|
||||
×tamp,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn query(&self, session_id: &str, page: u64, page_size: u64) -> Result<ToolLogPage> {
|
||||
if page_size == 0 {
|
||||
bail!("page_size must be greater than 0");
|
||||
}
|
||||
|
||||
self.db.query_tool_logs(session_id, page.max(1), page_size)
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience free function: log a single tool-call event against `db`
/// without holding a `ToolLogger`.
pub fn log_tool_call(db: &StateStore, event: &ToolCallEvent) -> Result<ToolLogEntry> {
    ToolLogger::new(db).log(event)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{SuggestedAction, ToolCallEvent, ToolLogger};
    use crate::config::Config;
    use crate::session::store::StateStore;
    use crate::session::{Session, SessionMetrics, SessionState};
    use std::path::PathBuf;

    // Unique on-disk database path per test run so tests don't collide.
    fn test_db_path() -> PathBuf {
        std::env::temp_dir().join(format!("ecc2-observability-{}.db", uuid::Uuid::new_v4()))
    }

    // Minimal pending-session fixture; presumably tool logs require an
    // existing session row — confirm against StateStore's schema.
    fn test_session(id: &str) -> Session {
        let now = chrono::Utc::now();

        Session {
            id: id.to_string(),
            task: "test task".to_string(),
            agent_type: "claude".to_string(),
            state: SessionState::Pending,
            pid: None,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        }
    }

    // Credential-surface input must at least reach the review gate.
    #[test]
    fn computes_sensitive_file_risk() {
        let assessment = ToolCallEvent::compute_risk(
            "Write",
            "Update .env.production with rotated API token",
            &Config::RISK_THRESHOLDS,
        );

        assert!(assessment.score >= Config::RISK_THRESHOLDS.review);
        assert_eq!(assessment.suggested_action, SuggestedAction::Review);
        assert!(assessment
            .reasons
            .iter()
            .any(|reason| reason.contains("sensitive file")));
    }

    // Repo-wide glob edits must at least reach the review gate.
    #[test]
    fn computes_blast_radius_risk() {
        let assessment = ToolCallEvent::compute_risk(
            "Edit",
            "Apply the same replacement across src/**/*.rs",
            &Config::RISK_THRESHOLDS,
        );

        assert!(assessment.score >= Config::RISK_THRESHOLDS.review);
        assert_eq!(assessment.suggested_action, SuggestedAction::Review);
        assert!(assessment
            .reasons
            .iter()
            .any(|reason| reason.contains("blast radius")));
    }

    // Forced deletes must require confirmation.
    #[test]
    fn computes_irreversible_risk() {
        let assessment = ToolCallEvent::compute_risk(
            "Bash",
            "rm -f /tmp/ecc-temp.txt",
            &Config::RISK_THRESHOLDS,
        );

        assert!(assessment.score >= Config::RISK_THRESHOLDS.confirm);
        assert_eq!(
            assessment.suggested_action,
            SuggestedAction::RequireConfirmation,
        );
        assert!(assessment
            .reasons
            .iter()
            .any(|reason| reason.contains("irreversible")));
    }

    // Stacking destructive + shared-state signals must cross the block gate.
    #[test]
    fn blocks_combined_high_risk_operations() {
        let assessment = ToolCallEvent::compute_risk(
            "Bash",
            "rm -rf . && git push --force origin main",
            &Config::RISK_THRESHOLDS,
        );

        assert!(assessment.score >= Config::RISK_THRESHOLDS.block);
        assert_eq!(assessment.suggested_action, SuggestedAction::Block);
    }

    // End-to-end: log three entries, then page through them. The
    // assertions show logs come back newest-first.
    #[test]
    fn logger_persists_entries_and_paginates() -> anyhow::Result<()> {
        let db_path = test_db_path();
        let db = StateStore::open(&db_path)?;
        db.insert_session(&test_session("sess-1"))?;

        let logger = ToolLogger::new(&db);

        logger.log(&ToolCallEvent::new("sess-1", "Read", "first", "ok", 5))?;
        logger.log(&ToolCallEvent::new("sess-1", "Write", "second", "ok", 15))?;
        logger.log(&ToolCallEvent::new("sess-1", "Bash", "third", "ok", 25))?;

        let first_page = logger.query("sess-1", 1, 2)?;
        assert_eq!(first_page.total, 3);
        assert_eq!(first_page.entries.len(), 2);
        assert_eq!(first_page.entries[0].tool_name, "Bash");
        assert_eq!(first_page.entries[1].tool_name, "Write");

        let second_page = logger.query("sess-1", 2, 2)?;
        assert_eq!(second_page.total, 3);
        assert_eq!(second_page.entries.len(), 1);
        assert_eq!(second_page.entries[0].tool_name, "Read");

        // Best-effort cleanup of the temp database file.
        std::fs::remove_file(&db_path).ok();

        Ok(())
    }
}
|
||||
177
ecc2/src/session/daemon.rs
Normal file
177
ecc2/src/session/daemon.rs
Normal file
@@ -0,0 +1,177 @@
|
||||
use anyhow::Result;
|
||||
use std::time::Duration;
|
||||
use tokio::time;
|
||||
|
||||
use super::store::StateStore;
|
||||
use super::SessionState;
|
||||
use crate::config::Config;
|
||||
|
||||
/// Background daemon that monitors sessions, handles heartbeats,
/// and cleans up stale resources.
///
/// On startup, running sessions whose recorded pid is dead are marked
/// failed. Afterwards the loop re-checks session timeouts once per
/// heartbeat interval; per-iteration errors are logged and the loop keeps
/// going, so this function only returns early via startup errors.
pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
    tracing::info!("ECC daemon started");
    resume_crashed_sessions(&db)?;

    let heartbeat_interval = Duration::from_secs(cfg.heartbeat_interval_secs);
    let timeout = Duration::from_secs(cfg.session_timeout_secs);

    loop {
        if let Err(e) = check_sessions(&db, timeout) {
            tracing::error!("Session check failed: {e}");
        }

        time::sleep(heartbeat_interval).await;
    }
}
|
||||
|
||||
/// Mark running sessions whose process is no longer alive as failed.
///
/// Called at daemon startup; delegates to the injectable variant with the
/// real pid probe and logs a summary when anything was cleaned up.
pub fn resume_crashed_sessions(db: &StateStore) -> Result<()> {
    let failed_sessions = resume_crashed_sessions_with(db, pid_is_alive)?;
    if failed_sessions > 0 {
        tracing::warn!("Marked {failed_sessions} crashed sessions as failed during daemon startup");
    }
    Ok(())
}
|
||||
|
||||
fn resume_crashed_sessions_with<F>(db: &StateStore, is_pid_alive: F) -> Result<usize>
|
||||
where
|
||||
F: Fn(u32) -> bool,
|
||||
{
|
||||
let sessions = db.list_sessions()?;
|
||||
let mut failed_sessions = 0;
|
||||
|
||||
for session in sessions {
|
||||
if session.state != SessionState::Running {
|
||||
continue;
|
||||
}
|
||||
|
||||
let is_alive = session.pid.is_some_and(&is_pid_alive);
|
||||
if is_alive {
|
||||
continue;
|
||||
}
|
||||
|
||||
tracing::warn!(
|
||||
"Session {} was left running with stale pid {:?}; marking it failed",
|
||||
session.id,
|
||||
session.pid
|
||||
);
|
||||
db.update_state_and_pid(&session.id, &SessionState::Failed, None)?;
|
||||
failed_sessions += 1;
|
||||
}
|
||||
|
||||
Ok(failed_sessions)
|
||||
}
|
||||
|
||||
fn check_sessions(db: &StateStore, timeout: Duration) -> Result<()> {
|
||||
let sessions = db.list_sessions()?;
|
||||
|
||||
for session in sessions {
|
||||
if session.state != SessionState::Running {
|
||||
continue;
|
||||
}
|
||||
|
||||
let elapsed = chrono::Utc::now()
|
||||
.signed_duration_since(session.updated_at)
|
||||
.to_std()
|
||||
.unwrap_or(Duration::ZERO);
|
||||
|
||||
if elapsed > timeout {
|
||||
tracing::warn!("Session {} timed out after {:?}", session.id, elapsed);
|
||||
db.update_state_and_pid(&session.id, &SessionState::Failed, None)?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Best-effort liveness probe for a pid (Unix only).
///
/// Uses `kill(pid, 0)`: success means the process exists; an EPERM error
/// means it exists but we lack permission to signal it; any other error
/// (e.g. ESRCH) means it is gone. Pid 0 is rejected up front because
/// `kill(0, ...)` addresses the caller's whole process group.
#[cfg(unix)]
fn pid_is_alive(pid: u32) -> bool {
    if pid == 0 {
        return false;
    }

    // SAFETY: kill(pid, 0) probes process existence without delivering a signal.
    let result = unsafe { libc::kill(pid as libc::pid_t, 0) };
    if result == 0 {
        return true;
    }

    // EPERM still proves the process exists (owned by another user).
    matches!(
        std::io::Error::last_os_error().raw_os_error(),
        Some(code) if code == libc::EPERM
    )
}
|
||||
|
||||
/// Non-Unix fallback: no portable liveness probe is implemented, so every
/// pid is reported dead and startup recovery will mark such sessions failed.
#[cfg(not(unix))]
fn pid_is_alive(_pid: u32) -> bool {
    false
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::session::{Session, SessionMetrics, SessionState};
    use std::path::PathBuf;

    // Unique temp database per test run to avoid cross-test interference.
    fn temp_db_path() -> PathBuf {
        std::env::temp_dir().join(format!("ecc2-daemon-test-{}.db", uuid::Uuid::new_v4()))
    }

    // Session fixture with configurable state and pid.
    fn sample_session(id: &str, state: SessionState, pid: Option<u32>) -> Session {
        let now = chrono::Utc::now();
        Session {
            id: id.to_string(),
            task: "Recover crashed worker".to_string(),
            agent_type: "claude".to_string(),
            state,
            pid,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        }
    }

    // A Running session with a dead pid must transition to Failed with its
    // pid cleared.
    #[test]
    fn resume_crashed_sessions_marks_dead_running_sessions_failed() -> Result<()> {
        let path = temp_db_path();
        let store = StateStore::open(&path)?;
        store.insert_session(&sample_session(
            "deadbeef",
            SessionState::Running,
            Some(4242),
        ))?;

        // Probe always reports the pid dead.
        resume_crashed_sessions_with(&store, |_| false)?;

        let session = store
            .get_session("deadbeef")?
            .expect("session should still exist");
        assert_eq!(session.state, SessionState::Failed);
        assert_eq!(session.pid, None);

        let _ = std::fs::remove_file(path);
        Ok(())
    }

    // A Running session whose pid is alive must be left untouched.
    #[test]
    fn resume_crashed_sessions_keeps_live_running_sessions_running() -> Result<()> {
        let path = temp_db_path();
        let store = StateStore::open(&path)?;
        store.insert_session(&sample_session(
            "alive123",
            SessionState::Running,
            Some(7777),
        ))?;

        // Probe always reports the pid alive.
        resume_crashed_sessions_with(&store, |_| true)?;

        let session = store
            .get_session("alive123")?
            .expect("session should still exist");
        assert_eq!(session.state, SessionState::Running);
        assert_eq!(session.pid, Some(7777));

        let _ = std::fs::remove_file(path);
        Ok(())
    }
}
|
||||
680
ecc2/src/session/manager.rs
Normal file
680
ecc2/src/session/manager.rs
Normal file
@@ -0,0 +1,680 @@
|
||||
use anyhow::{Context, Result};
|
||||
use std::fmt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Stdio;
|
||||
use tokio::process::Command;
|
||||
|
||||
use super::output::SessionOutputStore;
|
||||
use super::runtime::capture_command_output;
|
||||
use super::store::StateStore;
|
||||
use super::{Session, SessionMetrics, SessionState};
|
||||
use crate::config::Config;
|
||||
use crate::observability::{log_tool_call, ToolCallEvent, ToolLogEntry, ToolLogPage, ToolLogger};
|
||||
use crate::worktree;
|
||||
|
||||
/// Queue a new agent session rooted at the current working directory.
///
/// Returns the new session's id. Errors if the working directory cannot be
/// resolved or if queueing (record insert / runner spawn) fails.
pub async fn create_session(
    db: &StateStore,
    cfg: &Config,
    task: &str,
    agent_type: &str,
    use_worktree: bool,
) -> Result<String> {
    let repo_root =
        std::env::current_dir().context("Failed to resolve current working directory")?;
    queue_session_in_dir(db, cfg, task, agent_type, use_worktree, &repo_root).await
}
|
||||
|
||||
/// Thin wrapper over the store's session listing.
pub fn list_sessions(db: &StateStore) -> Result<Vec<Session>> {
    db.list_sessions()
}
|
||||
|
||||
/// Resolve a session by id (or the `latest` alias) and wrap it in the
/// displayable `SessionStatus` newtype.
pub fn get_status(db: &StateStore, id: &str) -> Result<SessionStatus> {
    let session = resolve_session(db, id)?;
    Ok(SessionStatus(session))
}
|
||||
|
||||
/// Stop a session: kill its process (if any) and remove its worktree.
pub async fn stop_session(db: &StateStore, id: &str) -> Result<()> {
    stop_session_with_options(db, id, true).await
}
|
||||
|
||||
/// Record a completed tool call against an existing session.
///
/// Errors when the session is unknown. Builds a risk-scored event, persists
/// it to the tool log, and bumps the session's tool-call counter so the
/// metrics stay in step with the log table. Returns the stored row.
pub fn record_tool_call(
    db: &StateStore,
    session_id: &str,
    tool_name: &str,
    input_summary: &str,
    output_summary: &str,
    duration_ms: u64,
) -> Result<ToolLogEntry> {
    let session = db
        .get_session(session_id)?
        .ok_or_else(|| anyhow::anyhow!("Session not found: {session_id}"))?;

    let event = ToolCallEvent::new(
        session.id.clone(),
        tool_name,
        input_summary,
        output_summary,
        duration_ms,
    );
    let entry = log_tool_call(db, &event)?;
    db.increment_tool_calls(&session.id)?;

    Ok(entry)
}
|
||||
|
||||
/// Fetch a page of tool-call logs for a session.
///
/// Errors when the session does not exist; pagination rules (1-based page,
/// non-zero page size) are enforced by `ToolLogger::query`.
pub fn query_tool_calls(
    db: &StateStore,
    session_id: &str,
    page: u64,
    page_size: u64,
) -> Result<ToolLogPage> {
    let session = db
        .get_session(session_id)?
        .ok_or_else(|| anyhow::anyhow!("Session not found: {session_id}"))?;

    ToolLogger::new(db).query(&session.id, page, page_size)
}
|
||||
|
||||
pub async fn resume_session(db: &StateStore, id: &str) -> Result<String> {
|
||||
let session = resolve_session(db, id)?;
|
||||
|
||||
if session.state == SessionState::Completed {
|
||||
anyhow::bail!("Completed sessions cannot be resumed: {}", session.id);
|
||||
}
|
||||
|
||||
if session.state == SessionState::Running {
|
||||
anyhow::bail!("Session is already running: {}", session.id);
|
||||
}
|
||||
|
||||
db.update_state_and_pid(&session.id, &SessionState::Pending, None)?;
|
||||
Ok(session.id)
|
||||
}
|
||||
|
||||
fn agent_program(agent_type: &str) -> Result<PathBuf> {
|
||||
match agent_type {
|
||||
"claude" => Ok(PathBuf::from("claude")),
|
||||
other => anyhow::bail!("Unsupported agent type: {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Look up a session by id, treating the literal `latest` as an alias for
/// the store's latest session.
fn resolve_session(db: &StateStore, id: &str) -> Result<Session> {
    let session = if id == "latest" {
        db.get_latest_session()?
    } else {
        db.get_session(id)?
    };

    session.ok_or_else(|| anyhow::anyhow!("Session not found: {id}"))
}
|
||||
|
||||
/// Entry point for the hidden `run-session` subcommand: execute the agent
/// for an already-queued session inside `working_dir`.
///
/// Only `Pending` sessions are executed; any other state is logged and
/// treated as a no-op so duplicate or stale runner invocations cannot
/// double-run a session.
pub async fn run_session(
    cfg: &Config,
    session_id: &str,
    task: &str,
    agent_type: &str,
    working_dir: &Path,
) -> Result<()> {
    // The runner is a separate process, so it opens its own store handle.
    let db = StateStore::open(&cfg.db_path)?;
    let session = resolve_session(&db, session_id)?;

    if session.state != SessionState::Pending {
        tracing::info!(
            "Skipping run_session for {} because state is {}",
            session_id,
            session.state
        );
        return Ok(());
    }

    let agent_program = agent_program(agent_type)?;
    let command = build_agent_command(&agent_program, task, session_id, working_dir);
    // NOTE(review): capture_command_output presumably streams the agent's
    // output and drives the session to a terminal state — confirm in the
    // runtime module.
    capture_command_output(
        cfg.db_path.clone(),
        session_id.to_string(),
        command,
        SessionOutputStore::default(),
    )
    .await?;
    Ok(())
}
|
||||
|
||||
/// Persist a new session record and detach a runner process for it.
///
/// The runner executes in the session's worktree when one was created,
/// otherwise in `repo_root`. If spawning fails, the session is marked
/// failed and its worktree (if any) is removed best-effort before the
/// error is propagated.
async fn queue_session_in_dir(
    db: &StateStore,
    cfg: &Config,
    task: &str,
    agent_type: &str,
    use_worktree: bool,
    repo_root: &Path,
) -> Result<String> {
    let session = build_session_record(task, agent_type, use_worktree, cfg, repo_root)?;
    db.insert_session(&session)?;

    let working_dir = session
        .worktree
        .as_ref()
        .map(|worktree| worktree.path.as_path())
        .unwrap_or(repo_root);

    match spawn_session_runner(task, &session.id, agent_type, working_dir).await {
        Ok(()) => Ok(session.id),
        Err(error) => {
            db.update_state(&session.id, &SessionState::Failed)?;

            // Best-effort cleanup; the original error is what matters.
            if let Some(worktree) = session.worktree.as_ref() {
                let _ = crate::worktree::remove(&worktree.path);
            }

            Err(error.context(format!("Failed to queue session {}", session.id)))
        }
    }
}
|
||||
|
||||
/// Build an in-memory `Session` record in the `Pending` state, optionally
/// creating a dedicated git worktree for it.
///
/// The id is the first 8 characters of a v4 UUID — short enough to type,
/// with a small collision chance accepted for local use.
fn build_session_record(
    task: &str,
    agent_type: &str,
    use_worktree: bool,
    cfg: &Config,
    repo_root: &Path,
) -> Result<Session> {
    let id = uuid::Uuid::new_v4().to_string()[..8].to_string();
    let now = chrono::Utc::now();

    let worktree = if use_worktree {
        Some(worktree::create_for_session_in_repo(&id, cfg, repo_root)?)
    } else {
        None
    };

    Ok(Session {
        id,
        task: task.to_string(),
        agent_type: agent_type.to_string(),
        state: SessionState::Pending,
        pid: None,
        worktree,
        created_at: now,
        updated_at: now,
        metrics: SessionMetrics::default(),
    })
}
|
||||
|
||||
// NOTE(review): this path spawns the agent directly and records its pid,
// unlike `queue_session_in_dir`, which detaches a run-session subprocess.
// It is not referenced elsewhere in this chunk — confirm it is still used
// (e.g. by tests further down the file) before removing.
/// Create a session and launch the agent process in the foreground path:
/// insert the record, spawn the agent, store its pid, and mark it Running.
/// On spawn failure the session is marked failed and its worktree removed.
async fn create_session_in_dir(
    db: &StateStore,
    cfg: &Config,
    task: &str,
    agent_type: &str,
    use_worktree: bool,
    repo_root: &Path,
    agent_program: &Path,
) -> Result<String> {
    let session = build_session_record(task, agent_type, use_worktree, cfg, repo_root)?;

    db.insert_session(&session)?;

    let working_dir = session
        .worktree
        .as_ref()
        .map(|worktree| worktree.path.as_path())
        .unwrap_or(repo_root);

    match spawn_claude_code(agent_program, task, &session.id, working_dir).await {
        Ok(pid) => {
            db.update_pid(&session.id, Some(pid))?;
            db.update_state(&session.id, &SessionState::Running)?;
            Ok(session.id)
        }
        Err(error) => {
            db.update_state(&session.id, &SessionState::Failed)?;

            // Best-effort worktree cleanup; propagate the spawn error.
            if let Some(worktree) = session.worktree.as_ref() {
                let _ = crate::worktree::remove(&worktree.path);
            }

            Err(error.context(format!("Failed to start session {}", session.id)))
        }
    }
}
|
||||
|
||||
async fn spawn_session_runner(
|
||||
task: &str,
|
||||
session_id: &str,
|
||||
agent_type: &str,
|
||||
working_dir: &Path,
|
||||
) -> Result<()> {
|
||||
let current_exe = std::env::current_exe().context("Failed to resolve ECC executable path")?;
|
||||
let child = Command::new(¤t_exe)
|
||||
.arg("run-session")
|
||||
.arg("--session-id")
|
||||
.arg(session_id)
|
||||
.arg("--task")
|
||||
.arg(task)
|
||||
.arg("--agent")
|
||||
.arg(agent_type)
|
||||
.arg("--cwd")
|
||||
.arg(working_dir)
|
||||
.stdin(Stdio::null())
|
||||
.stdout(Stdio::null())
|
||||
.stderr(Stdio::null())
|
||||
.spawn()
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to spawn ECC runner from {}",
|
||||
current_exe.display()
|
||||
)
|
||||
})?;
|
||||
|
||||
child
|
||||
.id()
|
||||
.ok_or_else(|| anyhow::anyhow!("ECC runner did not expose a process id"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn build_agent_command(agent_program: &Path, task: &str, session_id: &str, working_dir: &Path) -> Command {
|
||||
let mut command = Command::new(agent_program);
|
||||
command
|
||||
.arg("--print")
|
||||
.arg("--name")
|
||||
.arg(format!("ecc-{session_id}"))
|
||||
.arg(task)
|
||||
.current_dir(working_dir)
|
||||
.stdin(Stdio::null());
|
||||
command
|
||||
}
|
||||
|
||||
/// Spawn the agent process directly (stdout/stderr discarded) and return
/// its OS pid. Errors if the spawn fails or the child exposes no pid.
async fn spawn_claude_code(
    agent_program: &Path,
    task: &str,
    session_id: &str,
    working_dir: &Path,
) -> Result<u32> {
    let mut command = build_agent_command(agent_program, task, session_id, working_dir);
    let child = command
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn()
        .with_context(|| {
            format!(
                "Failed to spawn Claude Code from {}",
                agent_program.display()
            )
        })?;

    // `id()` is None once the child has been reaped; treat that as failure.
    child
        .id()
        .ok_or_else(|| anyhow::anyhow!("Claude Code did not expose a process id"))
}
|
||||
|
||||
/// Shared stop path: kill the tracked process (if any), clear the pid,
/// mark the session `Stopped`, and optionally remove its worktree.
async fn stop_session_with_options(
    db: &StateStore,
    id: &str,
    cleanup_worktree: bool,
) -> Result<()> {
    let session = resolve_session(db, id)?;

    if let Some(pid) = session.pid {
        kill_process(pid).await?;
    }

    db.update_pid(&session.id, None)?;
    db.update_state(&session.id, &SessionState::Stopped)?;

    if cleanup_worktree {
        if let Some(worktree) = session.worktree.as_ref() {
            crate::worktree::remove(&worktree.path)?;
        }
    }

    Ok(())
}
|
||||
|
||||
/// Terminates `pid` gracefully: SIGTERM, a grace period, then SIGKILL.
/// Both signals tolerate an already-dead process (see `send_signal`).
/// NOTE(review): if the pid is recycled during the grace period the SIGKILL
/// could hit an unrelated process — inherent pid-based TOCTOU.
#[cfg(unix)]
async fn kill_process(pid: u32) -> Result<()> {
    send_signal(pid, libc::SIGTERM)?;
    // Give the agent ~1.2s to exit cleanly before the hard kill.
    tokio::time::sleep(std::time::Duration::from_millis(1200)).await;
    send_signal(pid, libc::SIGKILL)?;
    Ok(())
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn send_signal(pid: u32, signal: i32) -> Result<()> {
|
||||
let outcome = unsafe { libc::kill(pid as i32, signal) };
|
||||
if outcome == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let error = std::io::Error::last_os_error();
|
||||
if error.raw_os_error() == Some(libc::ESRCH) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(error).with_context(|| format!("Failed to kill process {pid}"))
|
||||
}
|
||||
|
||||
/// Non-unix fallback: force-terminates `pid` with `taskkill /F /PID`.
/// NOTE(review): unlike the unix path, a process that already exited makes
/// taskkill fail, which surfaces here as an error — confirm that is intended.
#[cfg(not(unix))]
async fn kill_process(pid: u32) -> Result<()> {
    let status = Command::new("taskkill")
        .args(["/F", "/PID", &pid.to_string()])
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .await
        .with_context(|| format!("Failed to invoke taskkill for process {pid}"))?;

    if status.success() {
        Ok(())
    } else {
        anyhow::bail!("taskkill failed for process {pid}");
    }
}
|
||||
|
||||
/// Newtype over `Session` that renders a human-readable status report via
/// its `Display` implementation.
pub struct SessionStatus(Session);
|
||||
|
||||
impl fmt::Display for SessionStatus {
    /// Multi-line report: identity, state, optional pid/worktree, metrics,
    /// and timestamps. The last line intentionally has no trailing newline.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = &self.0;
        writeln!(f, "Session: {}", s.id)?;
        writeln!(f, "Task: {}", s.task)?;
        writeln!(f, "Agent: {}", s.agent_type)?;
        writeln!(f, "State: {}", s.state)?;
        if let Some(pid) = s.pid {
            writeln!(f, "PID: {}", pid)?;
        }
        if let Some(ref wt) = s.worktree {
            writeln!(f, "Branch: {}", wt.branch)?;
            writeln!(f, "Worktree: {}", wt.path.display())?;
        }
        writeln!(f, "Tokens: {}", s.metrics.tokens_used)?;
        writeln!(f, "Tools: {}", s.metrics.tool_calls)?;
        writeln!(f, "Files: {}", s.metrics.files_changed)?;
        writeln!(f, "Cost: ${:.4}", s.metrics.cost_usd)?;
        writeln!(f, "Created: {}", s.created_at)?;
        write!(f, "Updated: {}", s.updated_at)
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{Config, PaneLayout, Theme};
    use crate::session::{Session, SessionMetrics, SessionState};
    use anyhow::{Context, Result};
    use chrono::{Duration, Utc};
    use std::fs;
    use std::os::unix::fs::PermissionsExt;
    use std::path::{Path, PathBuf};
    use std::process::Command as StdCommand;
    use std::thread;
    use std::time::Duration as StdDuration;

    /// Self-cleaning temporary directory for a single test run.
    struct TestDir {
        path: PathBuf,
    }

    impl TestDir {
        /// Creates a uniquely named temp dir tagged with `label`.
        fn new(label: &str) -> Result<Self> {
            let path =
                std::env::temp_dir().join(format!("ecc2-{}-{}", label, uuid::Uuid::new_v4()));
            fs::create_dir_all(&path)?;
            Ok(Self { path })
        }

        fn path(&self) -> &Path {
            &self.path
        }
    }

    impl Drop for TestDir {
        // Best-effort cleanup; removal failures are deliberately ignored.
        fn drop(&mut self) {
            let _ = fs::remove_dir_all(&self.path);
        }
    }

    /// Builds a `Config` with every path rooted under the test directory.
    fn build_config(root: &Path) -> Config {
        Config {
            db_path: root.join("state.db"),
            worktree_root: root.join("worktrees"),
            max_parallel_sessions: 4,
            max_parallel_worktrees: 4,
            session_timeout_secs: 60,
            heartbeat_interval_secs: 5,
            default_agent: "claude".to_string(),
            cost_budget_usd: 10.0,
            token_budget: 500_000,
            theme: Theme::Dark,
            pane_layout: PaneLayout::Horizontal,
            risk_thresholds: Config::RISK_THRESHOLDS,
        }
    }

    /// Builds a minimal session fixture in the given state, created one
    /// minute before `updated_at`.
    fn build_session(id: &str, state: SessionState, updated_at: chrono::DateTime<Utc>) -> Session {
        Session {
            id: id.to_string(),
            task: format!("task-{id}"),
            agent_type: "claude".to_string(),
            state,
            pid: None,
            worktree: None,
            created_at: updated_at - Duration::minutes(1),
            updated_at,
            metrics: SessionMetrics::default(),
        }
    }

    /// Initializes a git repo with one commit so worktrees can be created.
    fn init_git_repo(path: &Path) -> Result<()> {
        fs::create_dir_all(path)?;
        run_git(path, ["init", "-q"])?;
        fs::write(path.join("README.md"), "hello\n")?;
        run_git(path, ["add", "README.md"])?;
        run_git(
            path,
            [
                "-c",
                "user.name=ECC Tests",
                "-c",
                "user.email=ecc-tests@example.com",
                "commit",
                "-qm",
                "init",
            ],
        )?;
        Ok(())
    }

    /// Runs a git command in `path`, failing on spawn error or non-zero exit.
    fn run_git<const N: usize>(path: &Path, args: [&str; N]) -> Result<()> {
        let status = StdCommand::new("git")
            .args(args)
            .current_dir(path)
            .status()
            .with_context(|| format!("failed to run git in {}", path.display()))?;

        if !status.success() {
            anyhow::bail!("git command failed in {}", path.display());
        }

        Ok(())
    }

    /// Writes an executable stand-in for the agent CLI that logs its cwd and
    /// argv, then idles until SIGTERM. Returns (script path, log path).
    /// NOTE(review): the script has a `.sh` name but a python3 shebang —
    /// works, but the name is misleading.
    fn write_fake_claude(root: &Path) -> Result<(PathBuf, PathBuf)> {
        let script_path = root.join("fake-claude.sh");
        let log_path = root.join("fake-claude.log");
        let script = format!(
            "#!/usr/bin/env python3\nimport os\nimport pathlib\nimport signal\nimport sys\nimport time\n\nlog_path = pathlib.Path(r\"{}\")\nlog_path.write_text(os.getcwd() + \"\\n\", encoding=\"utf-8\")\nwith log_path.open(\"a\", encoding=\"utf-8\") as handle:\n    handle.write(\" \".join(sys.argv[1:]) + \"\\n\")\n\ndef handle_term(signum, frame):\n    raise SystemExit(0)\n\nsignal.signal(signal.SIGTERM, handle_term)\nwhile True:\n    time.sleep(0.1)\n",
            log_path.display()
        );

        fs::write(&script_path, script)?;
        let mut permissions = fs::metadata(&script_path)?.permissions();
        permissions.set_mode(0o755);
        fs::set_permissions(&script_path, permissions)?;

        Ok((script_path, log_path))
    }

    /// Polls (50 x 20ms, ~1s total) for `path` to appear and returns its text.
    fn wait_for_file(path: &Path) -> Result<String> {
        for _ in 0..50 {
            if path.exists() {
                return fs::read_to_string(path)
                    .with_context(|| format!("failed to read {}", path.display()));
            }

            thread::sleep(StdDuration::from_millis(20));
        }

        anyhow::bail!("timed out waiting for {}", path.display());
    }

    /// Creating a session must spawn the agent, persist a pid, mark the
    /// session Running, and pass cwd/args through to the agent process.
    #[tokio::test(flavor = "current_thread")]
    async fn create_session_spawns_process_and_marks_session_running() -> Result<()> {
        let tempdir = TestDir::new("manager-create-session")?;
        let repo_root = tempdir.path().join("repo");
        init_git_repo(&repo_root)?;

        let cfg = build_config(tempdir.path());
        let db = StateStore::open(&cfg.db_path)?;
        let (fake_claude, log_path) = write_fake_claude(tempdir.path())?;

        let session_id = create_session_in_dir(
            &db,
            &cfg,
            "implement lifecycle",
            "claude",
            false,
            &repo_root,
            &fake_claude,
        )
        .await?;

        let session = db
            .get_session(&session_id)?
            .context("session should exist")?;
        assert_eq!(session.state, SessionState::Running);
        assert!(
            session.pid.is_some(),
            "spawned session should persist a pid"
        );

        let log = wait_for_file(&log_path)?;
        assert!(log.contains(repo_root.to_string_lossy().as_ref()));
        assert!(log.contains("--print"));
        assert!(log.contains("implement lifecycle"));

        stop_session_with_options(&db, &session_id, false).await?;
        Ok(())
    }

    /// Stopping must kill the process and clear the pid; the worktree is
    /// removed only when cleanup is requested.
    #[tokio::test(flavor = "current_thread")]
    async fn stop_session_kills_process_and_optionally_cleans_worktree() -> Result<()> {
        let tempdir = TestDir::new("manager-stop-session")?;
        let repo_root = tempdir.path().join("repo");
        init_git_repo(&repo_root)?;

        let cfg = build_config(tempdir.path());
        let db = StateStore::open(&cfg.db_path)?;
        let (fake_claude, _) = write_fake_claude(tempdir.path())?;

        let keep_id = create_session_in_dir(
            &db,
            &cfg,
            "keep worktree",
            "claude",
            true,
            &repo_root,
            &fake_claude,
        )
        .await?;
        let keep_session = db.get_session(&keep_id)?.context("keep session missing")?;
        keep_session.pid.context("keep session pid missing")?;
        let keep_worktree = keep_session
            .worktree
            .clone()
            .context("keep session worktree missing")?
            .path;

        stop_session_with_options(&db, &keep_id, false).await?;

        let stopped_keep = db
            .get_session(&keep_id)?
            .context("stopped keep session missing")?;
        assert_eq!(stopped_keep.state, SessionState::Stopped);
        assert_eq!(stopped_keep.pid, None);
        assert!(
            keep_worktree.exists(),
            "worktree should remain when cleanup is disabled"
        );

        let cleanup_id = create_session_in_dir(
            &db,
            &cfg,
            "cleanup worktree",
            "claude",
            true,
            &repo_root,
            &fake_claude,
        )
        .await?;
        let cleanup_session = db
            .get_session(&cleanup_id)?
            .context("cleanup session missing")?;
        let cleanup_worktree = cleanup_session
            .worktree
            .clone()
            .context("cleanup session worktree missing")?
            .path;

        stop_session_with_options(&db, &cleanup_id, true).await?;
        assert!(
            !cleanup_worktree.exists(),
            "worktree should be removed when cleanup is enabled"
        );

        Ok(())
    }

    /// Resuming a failed session requeues it as Pending with no pid.
    #[tokio::test(flavor = "current_thread")]
    async fn resume_session_requeues_failed_session() -> Result<()> {
        let tempdir = TestDir::new("manager-resume-session")?;
        let cfg = build_config(tempdir.path());
        let db = StateStore::open(&cfg.db_path)?;
        let now = Utc::now();

        db.insert_session(&Session {
            id: "deadbeef".to_string(),
            task: "resume previous task".to_string(),
            agent_type: "claude".to_string(),
            state: SessionState::Failed,
            pid: Some(31337),
            worktree: None,
            created_at: now - Duration::minutes(1),
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;

        let resumed_id = resume_session(&db, "deadbeef").await?;
        let resumed = db
            .get_session(&resumed_id)?
            .context("resumed session should exist")?;

        assert_eq!(resumed.state, SessionState::Pending);
        assert_eq!(resumed.pid, None);

        Ok(())
    }

    /// The "latest" alias must resolve to the most recently updated session.
    #[test]
    fn get_status_supports_latest_alias() -> Result<()> {
        let tempdir = TestDir::new("manager-latest-status")?;
        let cfg = build_config(tempdir.path());
        let db = StateStore::open(&cfg.db_path)?;
        let older = Utc::now() - Duration::minutes(2);
        let newer = Utc::now();

        db.insert_session(&build_session("older", SessionState::Running, older))?;
        db.insert_session(&build_session("newer", SessionState::Idle, newer))?;

        let status = get_status(&db, "latest")?;
        assert_eq!(status.0.id, "newer");

        Ok(())
    }
}
|
||||
102
ecc2/src/session/mod.rs
Normal file
102
ecc2/src/session/mod.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
pub mod daemon;
|
||||
pub mod manager;
|
||||
pub mod output;
|
||||
pub mod runtime;
|
||||
pub mod store;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Persistent record of one agent session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Session {
    pub id: String,
    // Free-form task description given at creation time.
    pub task: String,
    // Which agent backend runs this session (e.g. "claude").
    pub agent_type: String,
    pub state: SessionState,
    // OS process id while the agent is running; None otherwise.
    pub pid: Option<u32>,
    // Git worktree assigned to the session, if one was created.
    pub worktree: Option<WorktreeInfo>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
    pub metrics: SessionMetrics,
}
|
||||
|
||||
/// Lifecycle state of a session; legal transitions are encoded in
/// `can_transition_to`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum SessionState {
    Pending,
    Running,
    Idle,
    Completed,
    Failed,
    Stopped,
}
|
||||
|
||||
impl fmt::Display for SessionState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
SessionState::Pending => write!(f, "pending"),
|
||||
SessionState::Running => write!(f, "running"),
|
||||
SessionState::Idle => write!(f, "idle"),
|
||||
SessionState::Completed => write!(f, "completed"),
|
||||
SessionState::Failed => write!(f, "failed"),
|
||||
SessionState::Stopped => write!(f, "stopped"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SessionState {
    /// Returns true when moving from `self` to `next` is a legal lifecycle
    /// transition. Self-transitions are always allowed; every non-terminal
    /// state may move to `Stopped`.
    pub fn can_transition_to(&self, next: &Self) -> bool {
        if self == next {
            return true;
        }

        matches!(
            (self, next),
            (
                SessionState::Pending,
                SessionState::Running | SessionState::Failed | SessionState::Stopped
            ) | (
                SessionState::Running,
                SessionState::Idle
                    | SessionState::Completed
                    | SessionState::Failed
                    | SessionState::Stopped
            ) | (
                SessionState::Idle,
                SessionState::Running
                    | SessionState::Completed
                    | SessionState::Failed
                    | SessionState::Stopped
            ) | (SessionState::Completed, SessionState::Stopped)
                | (SessionState::Failed, SessionState::Stopped)
        )
    }

    /// Parses the lowercase DB label; unknown values default to `Pending`.
    pub fn from_db_value(value: &str) -> Self {
        match value {
            "running" => SessionState::Running,
            "idle" => SessionState::Idle,
            "completed" => SessionState::Completed,
            "failed" => SessionState::Failed,
            "stopped" => SessionState::Stopped,
            _ => SessionState::Pending,
        }
    }
}
|
||||
|
||||
/// Git worktree assigned to a session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WorktreeInfo {
    pub path: PathBuf,
    // Branch checked out in the worktree.
    pub branch: String,
    // Branch the worktree was created from.
    pub base_branch: String,
}
|
||||
|
||||
/// Accumulated usage counters for a session; all zero by default.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct SessionMetrics {
    pub tokens_used: u64,
    pub tool_calls: u64,
    pub files_changed: u32,
    pub duration_secs: u64,
    pub cost_usd: f64,
}
|
||||
149
ecc2/src/session/output.rs
Normal file
149
ecc2/src/session/output.rs
Normal file
@@ -0,0 +1,149 @@
|
||||
use std::collections::{HashMap, VecDeque};
|
||||
use std::sync::{Arc, Mutex, MutexGuard};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::broadcast;
|
||||
|
||||
/// Maximum number of output lines retained in memory per session.
pub const OUTPUT_BUFFER_LIMIT: usize = 1000;
|
||||
|
||||
/// Which standard stream a captured output line came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum OutputStream {
    Stdout,
    Stderr,
}
|
||||
|
||||
impl OutputStream {
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
Self::Stdout => "stdout",
|
||||
Self::Stderr => "stderr",
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_db_value(value: &str) -> Self {
|
||||
match value {
|
||||
"stderr" => Self::Stderr,
|
||||
_ => Self::Stdout,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// One captured line of agent output.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct OutputLine {
    pub stream: OutputStream,
    pub text: String,
}
|
||||
|
||||
/// Broadcast payload: an output line tagged with its session id.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OutputEvent {
    pub session_id: String,
    pub line: OutputLine,
}
|
||||
|
||||
/// In-memory, clonable store of recent output per session: a bounded ring
/// buffer per session plus a broadcast channel for live subscribers.
#[derive(Clone)]
pub struct SessionOutputStore {
    // Max lines kept per session.
    capacity: usize,
    // session id -> ring buffer of recent lines (shared across clones).
    buffers: Arc<Mutex<HashMap<String, VecDeque<OutputLine>>>>,
    // Fan-out channel for live output events.
    tx: broadcast::Sender<OutputEvent>,
}
|
||||
|
||||
impl Default for SessionOutputStore {
    /// Uses the shared `OUTPUT_BUFFER_LIMIT` capacity.
    fn default() -> Self {
        Self::new(OUTPUT_BUFFER_LIMIT)
    }
}
|
||||
|
||||
impl SessionOutputStore {
    /// Creates a store keeping at most `capacity` lines per session
    /// (clamped to >= 1); the broadcast channel holds at least 16 events.
    pub fn new(capacity: usize) -> Self {
        let capacity = capacity.max(1);
        let (tx, _) = broadcast::channel(capacity.max(16));

        Self {
            capacity,
            buffers: Arc::new(Mutex::new(HashMap::new())),
            tx,
        }
    }

    /// Subscribes to live output events for all sessions.
    pub fn subscribe(&self) -> broadcast::Receiver<OutputEvent> {
        self.tx.subscribe()
    }

    /// Appends a line to the session's ring buffer, evicting the oldest
    /// lines beyond capacity, then broadcasts it to subscribers.
    pub fn push_line(&self, session_id: &str, stream: OutputStream, text: impl Into<String>) {
        let line = OutputLine {
            stream,
            text: text.into(),
        };

        // Scope the lock so it is released before broadcasting.
        {
            let mut buffers = self.lock_buffers();
            let buffer = buffers.entry(session_id.to_string()).or_default();
            buffer.push_back(line.clone());

            while buffer.len() > self.capacity {
                let _ = buffer.pop_front();
            }
        }

        // A send error only means there are no subscribers; ignore it.
        let _ = self.tx.send(OutputEvent {
            session_id: session_id.to_string(),
            line,
        });
    }

    /// Replaces the session's buffer wholesale (e.g. after reloading from
    /// the DB), trimming the oldest lines to fit capacity. No broadcast.
    pub fn replace_lines(&self, session_id: &str, lines: Vec<OutputLine>) {
        let mut buffer: VecDeque<OutputLine> = lines.into_iter().collect();

        while buffer.len() > self.capacity {
            let _ = buffer.pop_front();
        }

        self.lock_buffers().insert(session_id.to_string(), buffer);
    }

    /// Returns a snapshot (oldest first) of the session's buffered lines.
    pub fn lines(&self, session_id: &str) -> Vec<OutputLine> {
        self.lock_buffers()
            .get(session_id)
            .map(|buffer| buffer.iter().cloned().collect())
            .unwrap_or_default()
    }

    /// Locks the buffer map, recovering the data from a poisoned mutex
    /// rather than propagating the panic.
    fn lock_buffers(&self) -> MutexGuard<'_, HashMap<String, VecDeque<OutputLine>>> {
        self.buffers
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{OutputStream, SessionOutputStore};

    /// Pushing past capacity must evict the oldest lines.
    #[test]
    fn ring_buffer_keeps_most_recent_lines() {
        let store = SessionOutputStore::new(3);

        store.push_line("session-1", OutputStream::Stdout, "line-1");
        store.push_line("session-1", OutputStream::Stdout, "line-2");
        store.push_line("session-1", OutputStream::Stdout, "line-3");
        store.push_line("session-1", OutputStream::Stdout, "line-4");

        let lines = store.lines("session-1");
        let texts: Vec<_> = lines.iter().map(|line| line.text.as_str()).collect();

        assert_eq!(texts, vec!["line-2", "line-3", "line-4"]);
    }

    /// A subscriber registered before the push must receive the event.
    #[tokio::test]
    async fn pushing_output_broadcasts_events() {
        let store = SessionOutputStore::new(8);
        let mut rx = store.subscribe();

        store.push_line("session-1", OutputStream::Stderr, "problem");

        let event = rx.recv().await.expect("broadcast event");
        assert_eq!(event.session_id, "session-1");
        assert_eq!(event.line.stream, OutputStream::Stderr);
        assert_eq!(event.line.text, "problem");
    }
}
|
||||
290
ecc2/src/session/runtime.rs
Normal file
290
ecc2/src/session/runtime.rs
Normal file
@@ -0,0 +1,290 @@
|
||||
use std::path::PathBuf;
|
||||
use std::process::{ExitStatus, Stdio};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use tokio::io::{AsyncBufReadExt, AsyncRead, BufReader};
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::{mpsc, oneshot};
|
||||
|
||||
use super::output::{OutputStream, SessionOutputStore};
|
||||
use super::store::StateStore;
|
||||
use super::SessionState;
|
||||
|
||||
/// Outcome carried back over an ack channel; errors are stringified so they
/// can cross the thread boundary.
type DbAck = std::result::Result<(), String>;
|
||||
|
||||
/// Requests handled by the dedicated DB-writer thread; each carries a
/// oneshot `ack` so the async caller can await the persisted outcome.
enum DbMessage {
    UpdateState {
        state: SessionState,
        ack: oneshot::Sender<DbAck>,
    },
    UpdatePid {
        pid: Option<u32>,
        ack: oneshot::Sender<DbAck>,
    },
    AppendOutputLine {
        stream: OutputStream,
        line: String,
        ack: oneshot::Sender<DbAck>,
    },
}
|
||||
|
||||
/// Async handle to the blocking DB-writer thread (see `run_db_writer`).
#[derive(Clone)]
struct DbWriter {
    tx: mpsc::UnboundedSender<DbMessage>,
}
|
||||
|
||||
impl DbWriter {
    /// Spawns the dedicated writer thread for `session_id` backed by the
    /// SQLite file at `db_path`.
    fn start(db_path: PathBuf, session_id: String) -> Self {
        let (tx, rx) = mpsc::unbounded_channel();
        std::thread::spawn(move || run_db_writer(db_path, session_id, rx));
        Self { tx }
    }

    /// Persists a new session state.
    async fn update_state(&self, state: SessionState) -> Result<()> {
        self.send(|ack| DbMessage::UpdateState { state, ack }).await
    }

    /// Persists (or clears, with `None`) the session pid.
    async fn update_pid(&self, pid: Option<u32>) -> Result<()> {
        self.send(|ack| DbMessage::UpdatePid { pid, ack }).await
    }

    /// Appends one captured output line to the session's log.
    async fn append_output_line(&self, stream: OutputStream, line: String) -> Result<()> {
        self.send(|ack| DbMessage::AppendOutputLine { stream, line, ack })
            .await
    }

    /// Sends a message to the writer thread and awaits its acknowledgement,
    /// translating a closed channel or dropped ack into an error.
    async fn send<F>(&self, build: F) -> Result<()>
    where
        F: FnOnce(oneshot::Sender<DbAck>) -> DbMessage,
    {
        let (ack_tx, ack_rx) = oneshot::channel();
        self.tx
            .send(build(ack_tx))
            .map_err(|_| anyhow::anyhow!("DB writer channel closed"))?;

        match ack_rx.await {
            Ok(Ok(())) => Ok(()),
            Ok(Err(error)) => Err(anyhow::anyhow!(error)),
            Err(_) => Err(anyhow::anyhow!("DB writer acknowledgement dropped")),
        }
    }
}
|
||||
|
||||
fn run_db_writer(
|
||||
db_path: PathBuf,
|
||||
session_id: String,
|
||||
mut rx: mpsc::UnboundedReceiver<DbMessage>,
|
||||
) {
|
||||
let (opened, open_error) = match StateStore::open(&db_path) {
|
||||
Ok(db) => (Some(db), None),
|
||||
Err(error) => (None, Some(error.to_string())),
|
||||
};
|
||||
|
||||
while let Some(message) = rx.blocking_recv() {
|
||||
match message {
|
||||
DbMessage::UpdateState { state, ack } => {
|
||||
let result = match opened.as_ref() {
|
||||
Some(db) => db.update_state(&session_id, &state).map_err(|error| error.to_string()),
|
||||
None => Err(open_error
|
||||
.clone()
|
||||
.unwrap_or_else(|| "Failed to open state store".to_string())),
|
||||
};
|
||||
let _ = ack.send(result);
|
||||
}
|
||||
DbMessage::UpdatePid { pid, ack } => {
|
||||
let result = match opened.as_ref() {
|
||||
Some(db) => db.update_pid(&session_id, pid).map_err(|error| error.to_string()),
|
||||
None => Err(open_error
|
||||
.clone()
|
||||
.unwrap_or_else(|| "Failed to open state store".to_string())),
|
||||
};
|
||||
let _ = ack.send(result);
|
||||
}
|
||||
DbMessage::AppendOutputLine { stream, line, ack } => {
|
||||
let result = match opened.as_ref() {
|
||||
Some(db) => db
|
||||
.append_output_line(&session_id, stream, &line)
|
||||
.map_err(|error| error.to_string()),
|
||||
None => Err(open_error
|
||||
.clone()
|
||||
.unwrap_or_else(|| "Failed to open state store".to_string())),
|
||||
};
|
||||
let _ = ack.send(result);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Runs `command` with piped stdout/stderr, streaming every line into the
/// in-memory `output_store` and the SQLite log, and keeping the session row
/// (pid + state) in sync over the process lifetime.
///
/// Returns the child's exit status; on success the session ends `Completed`,
/// on non-zero exit `Failed`. Any error along the way best-effort resets the
/// pid and marks the session `Failed` before propagating.
pub async fn capture_command_output(
    db_path: PathBuf,
    session_id: String,
    mut command: Command,
    output_store: SessionOutputStore,
) -> Result<ExitStatus> {
    let db_writer = DbWriter::start(db_path, session_id.clone());

    // Main flow in an inner async block so the error path below can run the
    // common failure cleanup exactly once.
    let result = async {
        let mut child = command
            .stdout(Stdio::piped())
            .stderr(Stdio::piped())
            .spawn()
            .with_context(|| format!("Failed to start process for session {}", session_id))?;

        // Both pipes must exist since we requested them; if either is
        // missing, kill and reap the child before bailing so it cannot leak.
        let stdout = match child.stdout.take() {
            Some(stdout) => stdout,
            None => {
                let _ = child.kill().await;
                let _ = child.wait().await;
                anyhow::bail!("Child stdout was not piped");
            }
        };
        let stderr = match child.stderr.take() {
            Some(stderr) => stderr,
            None => {
                let _ = child.kill().await;
                let _ = child.wait().await;
                anyhow::bail!("Child stderr was not piped");
            }
        };

        let pid = child
            .id()
            .ok_or_else(|| anyhow::anyhow!("Spawned process did not expose a process id"))?;
        db_writer.update_pid(Some(pid)).await?;
        db_writer.update_state(SessionState::Running).await?;

        // Drain stdout and stderr concurrently so neither pipe can fill up
        // and block the child.
        let stdout_task = tokio::spawn(capture_stream(
            session_id.clone(),
            stdout,
            OutputStream::Stdout,
            output_store.clone(),
            db_writer.clone(),
        ));
        let stderr_task = tokio::spawn(capture_stream(
            session_id.clone(),
            stderr,
            OutputStream::Stderr,
            output_store,
            db_writer.clone(),
        ));

        let status = child.wait().await?;
        stdout_task.await??;
        stderr_task.await??;

        let final_state = if status.success() {
            SessionState::Completed
        } else {
            SessionState::Failed
        };
        db_writer.update_pid(None).await?;
        db_writer.update_state(final_state).await?;

        Ok(status)
    }
    .await;

    // Best-effort failure bookkeeping; the original error is what matters.
    if result.is_err() {
        let _ = db_writer.update_pid(None).await;
        let _ = db_writer.update_state(SessionState::Failed).await;
    }

    result
}
|
||||
|
||||
/// Reads `reader` line by line until EOF, persisting each line to the DB
/// first and then publishing it to the in-memory store/broadcast.
async fn capture_stream<R>(
    session_id: String,
    reader: R,
    stream: OutputStream,
    output_store: SessionOutputStore,
    db_writer: DbWriter,
) -> Result<()>
where
    R: AsyncRead + Unpin,
{
    let mut lines = BufReader::new(reader).lines();

    while let Some(line) = lines.next_line().await? {
        // DB write first so a broadcast is never emitted for an unpersisted line.
        db_writer
            .append_output_line(stream, line.clone())
            .await?;
        output_store.push_line(&session_id, stream, line);
    }

    Ok(())
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::env;

    use anyhow::Result;
    use chrono::Utc;
    use tokio::process::Command;
    use uuid::Uuid;

    use super::capture_command_output;
    use crate::session::output::{SessionOutputStore, OUTPUT_BUFFER_LIMIT};
    use crate::session::store::StateStore;
    use crate::session::{Session, SessionMetrics, SessionState};

    /// End-to-end: run a shell that writes one line to stdout and one to
    /// stderr, then verify the final session row, the persisted output lines,
    /// and the broadcast events.
    #[tokio::test]
    async fn capture_command_output_persists_lines_and_events() -> Result<()> {
        let db_path = env::temp_dir().join(format!("ecc2-runtime-{}.db", Uuid::new_v4()));
        let db = StateStore::open(&db_path)?;
        let session_id = "session-1".to_string();
        let now = Utc::now();

        db.insert_session(&Session {
            id: session_id.clone(),
            task: "stream output".to_string(),
            agent_type: "test".to_string(),
            state: SessionState::Pending,
            pid: None,
            worktree: None,
            created_at: now,
            updated_at: now,
            metrics: SessionMetrics::default(),
        })?;

        let output_store = SessionOutputStore::default();
        let mut rx = output_store.subscribe();
        let mut command = Command::new("/bin/sh");
        command
            .arg("-c")
            .arg("printf 'alpha\\n'; printf 'beta\\n' >&2");

        let status =
            capture_command_output(db_path.clone(), session_id.clone(), command, output_store)
                .await?;

        assert!(status.success());

        // Reopen the store: the runner wrote through its own connection.
        let db = StateStore::open(&db_path)?;
        let session = db
            .get_session(&session_id)?
            .expect("session should still exist");
        assert_eq!(session.state, SessionState::Completed);
        assert_eq!(session.pid, None);

        // stdout/stderr interleaving is nondeterministic, so compare as a set.
        let lines = db.get_output_lines(&session_id, OUTPUT_BUFFER_LIMIT)?;
        let texts: HashSet<_> = lines.iter().map(|line| line.text.as_str()).collect();
        assert_eq!(lines.len(), 2);
        assert!(texts.contains("alpha"));
        assert!(texts.contains("beta"));

        let mut events = Vec::new();
        while let Ok(event) = rx.try_recv() {
            events.push(event.line.text);
        }

        assert_eq!(events.len(), 2);
        assert!(events.iter().any(|line| line == "alpha"));
        assert!(events.iter().any(|line| line == "beta"));

        let _ = std::fs::remove_file(db_path);

        Ok(())
    }
}
|
||||
576
ecc2/src/session/store.rs
Normal file
576
ecc2/src/session/store.rs
Normal file
@@ -0,0 +1,576 @@
|
||||
use anyhow::{Context, Result};
|
||||
use rusqlite::{Connection, OptionalExtension};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::observability::{ToolLogEntry, ToolLogPage};
|
||||
|
||||
use super::output::{OutputLine, OutputStream, OUTPUT_BUFFER_LIMIT};
|
||||
use super::{Session, SessionMetrics, SessionState};
|
||||
|
||||
/// SQLite-backed persistence for sessions, tool logs, messages, and
/// captured session output.
pub struct StateStore {
    // Owned SQLite connection.
    conn: Connection,
}
|
||||
|
||||
impl StateStore {
|
||||
    /// Opens (or creates) the SQLite database at `path`, enables foreign
    /// keys, sets a 5s busy timeout, and ensures the schema exists.
    pub fn open(path: &Path) -> Result<Self> {
        let conn = Connection::open(path)?;
        conn.execute_batch("PRAGMA foreign_keys = ON;")?;
        conn.busy_timeout(Duration::from_secs(5))?;
        let store = Self { conn };
        store.init_schema()?;
        Ok(store)
    }
|
||||
|
||||
    /// Creates all tables and indexes if they do not exist, then applies
    /// in-place column migrations for databases created by older builds.
    fn init_schema(&self) -> Result<()> {
        self.conn.execute_batch(
            "
            CREATE TABLE IF NOT EXISTS sessions (
                id TEXT PRIMARY KEY,
                task TEXT NOT NULL,
                agent_type TEXT NOT NULL,
                state TEXT NOT NULL DEFAULT 'pending',
                pid INTEGER,
                worktree_path TEXT,
                worktree_branch TEXT,
                worktree_base TEXT,
                tokens_used INTEGER DEFAULT 0,
                tool_calls INTEGER DEFAULT 0,
                files_changed INTEGER DEFAULT 0,
                duration_secs INTEGER DEFAULT 0,
                cost_usd REAL DEFAULT 0.0,
                created_at TEXT NOT NULL,
                updated_at TEXT NOT NULL
            );

            CREATE TABLE IF NOT EXISTS tool_log (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL REFERENCES sessions(id),
                tool_name TEXT NOT NULL,
                input_summary TEXT,
                output_summary TEXT,
                duration_ms INTEGER,
                risk_score REAL DEFAULT 0.0,
                timestamp TEXT NOT NULL
            );

            CREATE TABLE IF NOT EXISTS messages (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                from_session TEXT NOT NULL,
                to_session TEXT NOT NULL,
                content TEXT NOT NULL,
                msg_type TEXT NOT NULL DEFAULT 'info',
                read INTEGER DEFAULT 0,
                timestamp TEXT NOT NULL
            );

            CREATE TABLE IF NOT EXISTS session_output (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                session_id TEXT NOT NULL REFERENCES sessions(id),
                stream TEXT NOT NULL,
                line TEXT NOT NULL,
                timestamp TEXT NOT NULL
            );

            CREATE INDEX IF NOT EXISTS idx_sessions_state ON sessions(state);
            CREATE INDEX IF NOT EXISTS idx_tool_log_session ON tool_log(session_id);
            CREATE INDEX IF NOT EXISTS idx_messages_to ON messages(to_session, read);
            CREATE INDEX IF NOT EXISTS idx_session_output_session
                ON session_output(session_id, id);
            ",
        )?;
        self.ensure_session_columns()?;
        Ok(())
    }
|
||||
|
||||
    /// Migration shim: older databases predate the `pid` column, and
    /// `CREATE TABLE IF NOT EXISTS` does not add it, so add it here.
    fn ensure_session_columns(&self) -> Result<()> {
        if !self.has_column("sessions", "pid")? {
            self.conn
                .execute("ALTER TABLE sessions ADD COLUMN pid INTEGER", [])
                .context("Failed to add pid column to sessions table")?;
        }

        Ok(())
    }
|
||||
|
||||
fn has_column(&self, table: &str, column: &str) -> Result<bool> {
|
||||
let pragma = format!("PRAGMA table_info({table})");
|
||||
let mut stmt = self.conn.prepare(&pragma)?;
|
||||
let columns = stmt
|
||||
.query_map([], |row| row.get::<_, String>(1))?
|
||||
.collect::<std::result::Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(columns.iter().any(|existing| existing == column))
|
||||
}
|
||||
|
||||
pub fn insert_session(&self, session: &Session) -> Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO sessions (id, task, agent_type, state, pid, worktree_path, worktree_branch, worktree_base, created_at, updated_at)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)",
|
||||
rusqlite::params![
|
||||
session.id,
|
||||
session.task,
|
||||
session.agent_type,
|
||||
session.state.to_string(),
|
||||
session.pid.map(i64::from),
|
||||
session
|
||||
.worktree
|
||||
.as_ref()
|
||||
.map(|w| w.path.to_string_lossy().to_string()),
|
||||
session.worktree.as_ref().map(|w| w.branch.clone()),
|
||||
session.worktree.as_ref().map(|w| w.base_branch.clone()),
|
||||
session.created_at.to_rfc3339(),
|
||||
session.updated_at.to_rfc3339(),
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_state_and_pid(
|
||||
&self,
|
||||
session_id: &str,
|
||||
state: &SessionState,
|
||||
pid: Option<u32>,
|
||||
) -> Result<()> {
|
||||
let updated = self.conn.execute(
|
||||
"UPDATE sessions SET state = ?1, pid = ?2, updated_at = ?3 WHERE id = ?4",
|
||||
rusqlite::params![
|
||||
state.to_string(),
|
||||
pid.map(i64::from),
|
||||
chrono::Utc::now().to_rfc3339(),
|
||||
session_id,
|
||||
],
|
||||
)?;
|
||||
|
||||
if updated == 0 {
|
||||
anyhow::bail!("Session not found: {session_id}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_state(&self, session_id: &str, state: &SessionState) -> Result<()> {
|
||||
let current_state = self
|
||||
.conn
|
||||
.query_row(
|
||||
"SELECT state FROM sessions WHERE id = ?1",
|
||||
[session_id],
|
||||
|row| row.get::<_, String>(0),
|
||||
)
|
||||
.optional()?
|
||||
.map(|raw| SessionState::from_db_value(&raw))
|
||||
.ok_or_else(|| anyhow::anyhow!("Session not found: {session_id}"))?;
|
||||
|
||||
if !current_state.can_transition_to(state) {
|
||||
anyhow::bail!(
|
||||
"Invalid session state transition: {} -> {}",
|
||||
current_state,
|
||||
state
|
||||
);
|
||||
}
|
||||
|
||||
let updated = self.conn.execute(
|
||||
"UPDATE sessions SET state = ?1, updated_at = ?2 WHERE id = ?3",
|
||||
rusqlite::params![
|
||||
state.to_string(),
|
||||
chrono::Utc::now().to_rfc3339(),
|
||||
session_id,
|
||||
],
|
||||
)?;
|
||||
|
||||
if updated == 0 {
|
||||
anyhow::bail!("Session not found: {session_id}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_pid(&self, session_id: &str, pid: Option<u32>) -> Result<()> {
|
||||
let updated = self.conn.execute(
|
||||
"UPDATE sessions SET pid = ?1, updated_at = ?2 WHERE id = ?3",
|
||||
rusqlite::params![
|
||||
pid.map(i64::from),
|
||||
chrono::Utc::now().to_rfc3339(),
|
||||
session_id,
|
||||
],
|
||||
)?;
|
||||
|
||||
if updated == 0 {
|
||||
anyhow::bail!("Session not found: {session_id}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn update_metrics(&self, session_id: &str, metrics: &SessionMetrics) -> Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE sessions SET tokens_used = ?1, tool_calls = ?2, files_changed = ?3, duration_secs = ?4, cost_usd = ?5, updated_at = ?6 WHERE id = ?7",
|
||||
rusqlite::params![
|
||||
metrics.tokens_used,
|
||||
metrics.tool_calls,
|
||||
metrics.files_changed,
|
||||
metrics.duration_secs,
|
||||
metrics.cost_usd,
|
||||
chrono::Utc::now().to_rfc3339(),
|
||||
session_id,
|
||||
],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn increment_tool_calls(&self, session_id: &str) -> Result<()> {
|
||||
self.conn.execute(
|
||||
"UPDATE sessions SET tool_calls = tool_calls + 1, updated_at = ?1 WHERE id = ?2",
|
||||
rusqlite::params![chrono::Utc::now().to_rfc3339(), session_id],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn list_sessions(&self) -> Result<Vec<Session>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT id, task, agent_type, state, pid, worktree_path, worktree_branch, worktree_base,
|
||||
tokens_used, tool_calls, files_changed, duration_secs, cost_usd,
|
||||
created_at, updated_at
|
||||
FROM sessions ORDER BY updated_at DESC",
|
||||
)?;
|
||||
|
||||
let sessions = stmt
|
||||
.query_map([], |row| {
|
||||
let state_str: String = row.get(3)?;
|
||||
let state = SessionState::from_db_value(&state_str);
|
||||
|
||||
let worktree_path: Option<String> = row.get(5)?;
|
||||
let worktree = worktree_path.map(|path| super::WorktreeInfo {
|
||||
path: PathBuf::from(path),
|
||||
branch: row.get::<_, String>(6).unwrap_or_default(),
|
||||
base_branch: row.get::<_, String>(7).unwrap_or_default(),
|
||||
});
|
||||
|
||||
let created_str: String = row.get(13)?;
|
||||
let updated_str: String = row.get(14)?;
|
||||
|
||||
Ok(Session {
|
||||
id: row.get(0)?,
|
||||
task: row.get(1)?,
|
||||
agent_type: row.get(2)?,
|
||||
state,
|
||||
pid: row.get::<_, Option<u32>>(4)?,
|
||||
worktree,
|
||||
created_at: chrono::DateTime::parse_from_rfc3339(&created_str)
|
||||
.unwrap_or_default()
|
||||
.with_timezone(&chrono::Utc),
|
||||
updated_at: chrono::DateTime::parse_from_rfc3339(&updated_str)
|
||||
.unwrap_or_default()
|
||||
.with_timezone(&chrono::Utc),
|
||||
metrics: SessionMetrics {
|
||||
tokens_used: row.get(8)?,
|
||||
tool_calls: row.get(9)?,
|
||||
files_changed: row.get(10)?,
|
||||
duration_secs: row.get(11)?,
|
||||
cost_usd: row.get(12)?,
|
||||
},
|
||||
})
|
||||
})?
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(sessions)
|
||||
}
|
||||
|
||||
pub fn get_latest_session(&self) -> Result<Option<Session>> {
|
||||
Ok(self.list_sessions()?.into_iter().next())
|
||||
}
|
||||
|
||||
pub fn get_session(&self, id: &str) -> Result<Option<Session>> {
|
||||
let sessions = self.list_sessions()?;
|
||||
Ok(sessions
|
||||
.into_iter()
|
||||
.find(|session| session.id == id || session.id.starts_with(id)))
|
||||
}
|
||||
|
||||
pub fn send_message(&self, from: &str, to: &str, content: &str, msg_type: &str) -> Result<()> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO messages (from_session, to_session, content, msg_type, timestamp)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5)",
|
||||
rusqlite::params![from, to, content, msg_type, chrono::Utc::now().to_rfc3339()],
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn append_output_line(
|
||||
&self,
|
||||
session_id: &str,
|
||||
stream: OutputStream,
|
||||
line: &str,
|
||||
) -> Result<()> {
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
|
||||
self.conn.execute(
|
||||
"INSERT INTO session_output (session_id, stream, line, timestamp)
|
||||
VALUES (?1, ?2, ?3, ?4)",
|
||||
rusqlite::params![session_id, stream.as_str(), line, now],
|
||||
)?;
|
||||
|
||||
self.conn.execute(
|
||||
"DELETE FROM session_output
|
||||
WHERE session_id = ?1
|
||||
AND id NOT IN (
|
||||
SELECT id
|
||||
FROM session_output
|
||||
WHERE session_id = ?1
|
||||
ORDER BY id DESC
|
||||
LIMIT ?2
|
||||
)",
|
||||
rusqlite::params![session_id, OUTPUT_BUFFER_LIMIT as i64],
|
||||
)?;
|
||||
|
||||
self.conn.execute(
|
||||
"UPDATE sessions SET updated_at = ?1 WHERE id = ?2",
|
||||
rusqlite::params![chrono::Utc::now().to_rfc3339(), session_id],
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_output_lines(&self, session_id: &str, limit: usize) -> Result<Vec<OutputLine>> {
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT stream, line
|
||||
FROM (
|
||||
SELECT id, stream, line
|
||||
FROM session_output
|
||||
WHERE session_id = ?1
|
||||
ORDER BY id DESC
|
||||
LIMIT ?2
|
||||
)
|
||||
ORDER BY id ASC",
|
||||
)?;
|
||||
|
||||
let lines = stmt
|
||||
.query_map(rusqlite::params![session_id, limit as i64], |row| {
|
||||
let stream: String = row.get(0)?;
|
||||
let text: String = row.get(1)?;
|
||||
|
||||
Ok(OutputLine {
|
||||
stream: OutputStream::from_db_value(&stream),
|
||||
text,
|
||||
})
|
||||
})?
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(lines)
|
||||
}
|
||||
|
||||
pub fn insert_tool_log(
|
||||
&self,
|
||||
session_id: &str,
|
||||
tool_name: &str,
|
||||
input_summary: &str,
|
||||
output_summary: &str,
|
||||
duration_ms: u64,
|
||||
risk_score: f64,
|
||||
timestamp: &str,
|
||||
) -> Result<ToolLogEntry> {
|
||||
self.conn.execute(
|
||||
"INSERT INTO tool_log (session_id, tool_name, input_summary, output_summary, duration_ms, risk_score, timestamp)
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
|
||||
rusqlite::params![
|
||||
session_id,
|
||||
tool_name,
|
||||
input_summary,
|
||||
output_summary,
|
||||
duration_ms,
|
||||
risk_score,
|
||||
timestamp,
|
||||
],
|
||||
)?;
|
||||
|
||||
Ok(ToolLogEntry {
|
||||
id: self.conn.last_insert_rowid(),
|
||||
session_id: session_id.to_string(),
|
||||
tool_name: tool_name.to_string(),
|
||||
input_summary: input_summary.to_string(),
|
||||
output_summary: output_summary.to_string(),
|
||||
duration_ms,
|
||||
risk_score,
|
||||
timestamp: timestamp.to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn query_tool_logs(
|
||||
&self,
|
||||
session_id: &str,
|
||||
page: u64,
|
||||
page_size: u64,
|
||||
) -> Result<ToolLogPage> {
|
||||
let page = page.max(1);
|
||||
let offset = (page - 1) * page_size;
|
||||
|
||||
let total: u64 = self.conn.query_row(
|
||||
"SELECT COUNT(*) FROM tool_log WHERE session_id = ?1",
|
||||
rusqlite::params![session_id],
|
||||
|row| row.get(0),
|
||||
)?;
|
||||
|
||||
let mut stmt = self.conn.prepare(
|
||||
"SELECT id, session_id, tool_name, input_summary, output_summary, duration_ms, risk_score, timestamp
|
||||
FROM tool_log
|
||||
WHERE session_id = ?1
|
||||
ORDER BY timestamp DESC, id DESC
|
||||
LIMIT ?2 OFFSET ?3",
|
||||
)?;
|
||||
|
||||
let entries = stmt
|
||||
.query_map(rusqlite::params![session_id, page_size, offset], |row| {
|
||||
Ok(ToolLogEntry {
|
||||
id: row.get(0)?,
|
||||
session_id: row.get(1)?,
|
||||
tool_name: row.get(2)?,
|
||||
input_summary: row.get::<_, Option<String>>(3)?.unwrap_or_default(),
|
||||
output_summary: row.get::<_, Option<String>>(4)?.unwrap_or_default(),
|
||||
duration_ms: row.get::<_, Option<u64>>(5)?.unwrap_or_default(),
|
||||
risk_score: row.get::<_, Option<f64>>(6)?.unwrap_or_default(),
|
||||
timestamp: row.get(7)?,
|
||||
})
|
||||
})?
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
Ok(ToolLogPage {
|
||||
entries,
|
||||
page,
|
||||
page_size,
|
||||
total,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use chrono::{Duration as ChronoDuration, Utc};
|
||||
use std::fs;
|
||||
|
||||
struct TestDir {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl TestDir {
|
||||
fn new(label: &str) -> Result<Self> {
|
||||
let path =
|
||||
std::env::temp_dir().join(format!("ecc2-{}-{}", label, uuid::Uuid::new_v4()));
|
||||
fs::create_dir_all(&path)?;
|
||||
Ok(Self { path })
|
||||
}
|
||||
|
||||
fn path(&self) -> &Path {
|
||||
&self.path
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TestDir {
|
||||
fn drop(&mut self) {
|
||||
let _ = fs::remove_dir_all(&self.path);
|
||||
}
|
||||
}
|
||||
|
||||
fn build_session(id: &str, state: SessionState) -> Session {
|
||||
let now = Utc::now();
|
||||
Session {
|
||||
id: id.to_string(),
|
||||
task: "task".to_string(),
|
||||
agent_type: "claude".to_string(),
|
||||
state,
|
||||
pid: None,
|
||||
worktree: None,
|
||||
created_at: now - ChronoDuration::minutes(1),
|
||||
updated_at: now,
|
||||
metrics: SessionMetrics::default(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn update_state_rejects_invalid_terminal_transition() -> Result<()> {
|
||||
let tempdir = TestDir::new("store-invalid-transition")?;
|
||||
let db = StateStore::open(&tempdir.path().join("state.db"))?;
|
||||
|
||||
db.insert_session(&build_session("done", SessionState::Completed))?;
|
||||
|
||||
let error = db
|
||||
.update_state("done", &SessionState::Running)
|
||||
.expect_err("completed sessions must not transition back to running");
|
||||
|
||||
assert!(error
|
||||
.to_string()
|
||||
.contains("Invalid session state transition"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn open_migrates_existing_sessions_table_with_pid_column() -> Result<()> {
|
||||
let tempdir = TestDir::new("store-migration")?;
|
||||
let db_path = tempdir.path().join("state.db");
|
||||
|
||||
let conn = Connection::open(&db_path)?;
|
||||
conn.execute_batch(
|
||||
"
|
||||
CREATE TABLE sessions (
|
||||
id TEXT PRIMARY KEY,
|
||||
task TEXT NOT NULL,
|
||||
agent_type TEXT NOT NULL,
|
||||
state TEXT NOT NULL DEFAULT 'pending',
|
||||
worktree_path TEXT,
|
||||
worktree_branch TEXT,
|
||||
worktree_base TEXT,
|
||||
tokens_used INTEGER DEFAULT 0,
|
||||
tool_calls INTEGER DEFAULT 0,
|
||||
files_changed INTEGER DEFAULT 0,
|
||||
duration_secs INTEGER DEFAULT 0,
|
||||
cost_usd REAL DEFAULT 0.0,
|
||||
created_at TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL
|
||||
);
|
||||
",
|
||||
)?;
|
||||
drop(conn);
|
||||
|
||||
let db = StateStore::open(&db_path)?;
|
||||
let mut stmt = db.conn.prepare("PRAGMA table_info(sessions)")?;
|
||||
let column_names = stmt
|
||||
.query_map([], |row| row.get::<_, String>(1))?
|
||||
.collect::<std::result::Result<Vec<_>, _>>()?;
|
||||
|
||||
assert!(column_names.iter().any(|column| column == "pid"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn append_output_line_keeps_latest_buffer_window() -> Result<()> {
|
||||
let tempdir = TestDir::new("store-output")?;
|
||||
let db = StateStore::open(&tempdir.path().join("state.db"))?;
|
||||
let now = Utc::now();
|
||||
|
||||
db.insert_session(&Session {
|
||||
id: "session-1".to_string(),
|
||||
task: "buffer output".to_string(),
|
||||
agent_type: "claude".to_string(),
|
||||
state: SessionState::Running,
|
||||
pid: None,
|
||||
worktree: None,
|
||||
created_at: now,
|
||||
updated_at: now,
|
||||
metrics: SessionMetrics::default(),
|
||||
})?;
|
||||
|
||||
for index in 0..(OUTPUT_BUFFER_LIMIT + 5) {
|
||||
db.append_output_line("session-1", OutputStream::Stdout, &format!("line-{index}"))?;
|
||||
}
|
||||
|
||||
let lines = db.get_output_lines("session-1", OUTPUT_BUFFER_LIMIT)?;
|
||||
let texts: Vec<_> = lines.iter().map(|line| line.text.as_str()).collect();
|
||||
|
||||
assert_eq!(lines.len(), OUTPUT_BUFFER_LIMIT);
|
||||
assert_eq!(texts.first().copied(), Some("line-5"));
|
||||
let expected_last_line = format!("line-{}", OUTPUT_BUFFER_LIMIT + 4);
|
||||
assert_eq!(texts.last().copied(), Some(expected_last_line.as_str()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
56
ecc2/src/tui/app.rs
Normal file
56
ecc2/src/tui/app.rs
Normal file
@@ -0,0 +1,56 @@
|
||||
use anyhow::Result;
|
||||
use crossterm::{
|
||||
event::{self, Event, KeyCode, KeyModifiers},
|
||||
execute,
|
||||
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
|
||||
};
|
||||
use ratatui::prelude::*;
|
||||
use std::io;
|
||||
use std::time::Duration;
|
||||
|
||||
use super::dashboard::Dashboard;
|
||||
use crate::config::Config;
|
||||
use crate::session::store::StateStore;
|
||||
|
||||
pub async fn run(db: StateStore, cfg: Config) -> Result<()> {
|
||||
enable_raw_mode()?;
|
||||
let mut stdout = io::stdout();
|
||||
execute!(stdout, EnterAlternateScreen)?;
|
||||
|
||||
let backend = CrosstermBackend::new(stdout);
|
||||
let mut terminal = Terminal::new(backend)?;
|
||||
|
||||
let mut dashboard = Dashboard::new(db, cfg);
|
||||
|
||||
loop {
|
||||
terminal.draw(|frame| dashboard.render(frame))?;
|
||||
|
||||
if event::poll(Duration::from_millis(250))? {
|
||||
if let Event::Key(key) = event::read()? {
|
||||
match (key.modifiers, key.code) {
|
||||
(KeyModifiers::CONTROL, KeyCode::Char('c')) => break,
|
||||
(_, KeyCode::Char('q')) => break,
|
||||
(_, KeyCode::Tab) => dashboard.next_pane(),
|
||||
(KeyModifiers::SHIFT, KeyCode::BackTab) => dashboard.prev_pane(),
|
||||
(_, KeyCode::Char('+')) | (_, KeyCode::Char('=')) => {
|
||||
dashboard.increase_pane_size()
|
||||
}
|
||||
(_, KeyCode::Char('-')) => dashboard.decrease_pane_size(),
|
||||
(_, KeyCode::Char('j')) | (_, KeyCode::Down) => dashboard.scroll_down(),
|
||||
(_, KeyCode::Char('k')) | (_, KeyCode::Up) => dashboard.scroll_up(),
|
||||
(_, KeyCode::Char('n')) => dashboard.new_session(),
|
||||
(_, KeyCode::Char('s')) => dashboard.stop_selected(),
|
||||
(_, KeyCode::Char('r')) => dashboard.refresh(),
|
||||
(_, KeyCode::Char('?')) => dashboard.toggle_help(),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dashboard.tick().await;
|
||||
}
|
||||
|
||||
disable_raw_mode()?;
|
||||
execute!(terminal.backend_mut(), LeaveAlternateScreen)?;
|
||||
Ok(())
|
||||
}
|
||||
1273
ecc2/src/tui/dashboard.rs
Normal file
1273
ecc2/src/tui/dashboard.rs
Normal file
File diff suppressed because it is too large
Load Diff
3
ecc2/src/tui/mod.rs
Normal file
3
ecc2/src/tui/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod app;
|
||||
mod dashboard;
|
||||
mod widgets;
|
||||
281
ecc2/src/tui/widgets.rs
Normal file
281
ecc2/src/tui/widgets.rs
Normal file
@@ -0,0 +1,281 @@
|
||||
use ratatui::{
|
||||
prelude::*,
|
||||
text::{Line, Span},
|
||||
widgets::{Gauge, Paragraph, Widget},
|
||||
};
|
||||
|
||||
pub(crate) const WARNING_THRESHOLD: f64 = 0.8;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub(crate) enum BudgetState {
|
||||
Unconfigured,
|
||||
Normal,
|
||||
Warning,
|
||||
OverBudget,
|
||||
}
|
||||
|
||||
impl BudgetState {
|
||||
pub(crate) const fn is_warning(self) -> bool {
|
||||
matches!(self, Self::Warning | Self::OverBudget)
|
||||
}
|
||||
|
||||
fn badge(self) -> Option<&'static str> {
|
||||
match self {
|
||||
Self::Warning => Some("warning"),
|
||||
Self::OverBudget => Some("over budget"),
|
||||
Self::Unconfigured => Some("no budget"),
|
||||
Self::Normal => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn style(self) -> Style {
|
||||
let base = Style::default().fg(match self {
|
||||
Self::Unconfigured => Color::DarkGray,
|
||||
Self::Normal => Color::DarkGray,
|
||||
Self::Warning => Color::Yellow,
|
||||
Self::OverBudget => Color::Red,
|
||||
});
|
||||
|
||||
if self.is_warning() {
|
||||
base.add_modifier(Modifier::BOLD)
|
||||
} else {
|
||||
base
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum MeterFormat {
|
||||
Tokens,
|
||||
Currency,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct TokenMeter<'a> {
|
||||
title: &'a str,
|
||||
used: f64,
|
||||
budget: f64,
|
||||
format: MeterFormat,
|
||||
}
|
||||
|
||||
impl<'a> TokenMeter<'a> {
|
||||
pub(crate) fn tokens(title: &'a str, used: u64, budget: u64) -> Self {
|
||||
Self {
|
||||
title,
|
||||
used: used as f64,
|
||||
budget: budget as f64,
|
||||
format: MeterFormat::Tokens,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn currency(title: &'a str, used: f64, budget: f64) -> Self {
|
||||
Self {
|
||||
title,
|
||||
used,
|
||||
budget,
|
||||
format: MeterFormat::Currency,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn state(&self) -> BudgetState {
|
||||
budget_state(self.used, self.budget)
|
||||
}
|
||||
|
||||
fn ratio(&self) -> f64 {
|
||||
budget_ratio(self.used, self.budget)
|
||||
}
|
||||
|
||||
fn clamped_ratio(&self) -> f64 {
|
||||
self.ratio().clamp(0.0, 1.0)
|
||||
}
|
||||
|
||||
fn title_line(&self) -> Line<'static> {
|
||||
let mut spans = vec![Span::styled(
|
||||
self.title.to_string(),
|
||||
Style::default()
|
||||
.fg(Color::Gray)
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)];
|
||||
|
||||
if let Some(badge) = self.state().badge() {
|
||||
spans.push(Span::raw(" "));
|
||||
spans.push(Span::styled(format!("[{badge}]"), self.state().style()));
|
||||
}
|
||||
|
||||
Line::from(spans)
|
||||
}
|
||||
|
||||
fn display_label(&self) -> String {
|
||||
if self.budget <= 0.0 {
|
||||
return match self.format {
|
||||
MeterFormat::Tokens => format!("{} tok used | no budget", self.used_label()),
|
||||
MeterFormat::Currency => format!("{} spent | no budget", self.used_label()),
|
||||
};
|
||||
}
|
||||
|
||||
format!(
|
||||
"{} / {}{} ({}%)",
|
||||
self.used_label(),
|
||||
self.budget_label(),
|
||||
self.unit_suffix(),
|
||||
(self.ratio() * 100.0).round() as u64
|
||||
)
|
||||
}
|
||||
|
||||
fn used_label(&self) -> String {
|
||||
match self.format {
|
||||
MeterFormat::Tokens => format_token_count(self.used.max(0.0).round() as u64),
|
||||
MeterFormat::Currency => format_currency(self.used.max(0.0)),
|
||||
}
|
||||
}
|
||||
|
||||
fn budget_label(&self) -> String {
|
||||
match self.format {
|
||||
MeterFormat::Tokens => format_token_count(self.budget.max(0.0).round() as u64),
|
||||
MeterFormat::Currency => format_currency(self.budget.max(0.0)),
|
||||
}
|
||||
}
|
||||
|
||||
fn unit_suffix(&self) -> &'static str {
|
||||
match self.format {
|
||||
MeterFormat::Tokens => " tok",
|
||||
MeterFormat::Currency => "",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Widget for TokenMeter<'_> {
|
||||
fn render(self, area: Rect, buf: &mut Buffer) {
|
||||
if area.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut gauge_area = area;
|
||||
if area.height > 1 {
|
||||
let chunks = Layout::default()
|
||||
.direction(Direction::Vertical)
|
||||
.constraints([Constraint::Length(1), Constraint::Min(1)])
|
||||
.split(area);
|
||||
Paragraph::new(self.title_line()).render(chunks[0], buf);
|
||||
gauge_area = chunks[1];
|
||||
}
|
||||
|
||||
Gauge::default()
|
||||
.ratio(self.clamped_ratio())
|
||||
.label(self.display_label())
|
||||
.gauge_style(
|
||||
Style::default()
|
||||
.fg(gradient_color(self.ratio()))
|
||||
.add_modifier(Modifier::BOLD),
|
||||
)
|
||||
.style(Style::default().fg(Color::DarkGray))
|
||||
.use_unicode(true)
|
||||
.render(gauge_area, buf);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn budget_ratio(used: f64, budget: f64) -> f64 {
|
||||
if budget <= 0.0 {
|
||||
0.0
|
||||
} else {
|
||||
used / budget
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn budget_state(used: f64, budget: f64) -> BudgetState {
|
||||
if budget <= 0.0 {
|
||||
BudgetState::Unconfigured
|
||||
} else if used / budget >= 1.0 {
|
||||
BudgetState::OverBudget
|
||||
} else if used / budget >= WARNING_THRESHOLD {
|
||||
BudgetState::Warning
|
||||
} else {
|
||||
BudgetState::Normal
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn gradient_color(ratio: f64) -> Color {
|
||||
const GREEN: (u8, u8, u8) = (34, 197, 94);
|
||||
const YELLOW: (u8, u8, u8) = (234, 179, 8);
|
||||
const RED: (u8, u8, u8) = (239, 68, 68);
|
||||
|
||||
let clamped = ratio.clamp(0.0, 1.0);
|
||||
if clamped <= WARNING_THRESHOLD {
|
||||
interpolate_rgb(GREEN, YELLOW, clamped / WARNING_THRESHOLD)
|
||||
} else {
|
||||
interpolate_rgb(
|
||||
YELLOW,
|
||||
RED,
|
||||
(clamped - WARNING_THRESHOLD) / (1.0 - WARNING_THRESHOLD),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn format_currency(value: f64) -> String {
|
||||
format!("${value:.2}")
|
||||
}
|
||||
|
||||
pub(crate) fn format_token_count(value: u64) -> String {
|
||||
let digits = value.to_string();
|
||||
let mut formatted = String::with_capacity(digits.len() + digits.len() / 3);
|
||||
|
||||
for (index, ch) in digits.chars().rev().enumerate() {
|
||||
if index != 0 && index % 3 == 0 {
|
||||
formatted.push(',');
|
||||
}
|
||||
formatted.push(ch);
|
||||
}
|
||||
|
||||
formatted.chars().rev().collect()
|
||||
}
|
||||
|
||||
fn interpolate_rgb(from: (u8, u8, u8), to: (u8, u8, u8), ratio: f64) -> Color {
|
||||
let ratio = ratio.clamp(0.0, 1.0);
|
||||
let channel = |start: u8, end: u8| -> u8 {
|
||||
(f64::from(start) + (f64::from(end) - f64::from(start)) * ratio).round() as u8
|
||||
};
|
||||
|
||||
Color::Rgb(
|
||||
channel(from.0, to.0),
|
||||
channel(from.1, to.1),
|
||||
channel(from.2, to.2),
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use ratatui::{buffer::Buffer, layout::Rect, style::Color, widgets::Widget};
|
||||
|
||||
use super::{gradient_color, BudgetState, TokenMeter};
|
||||
|
||||
#[test]
|
||||
fn warning_state_starts_at_eighty_percent() {
|
||||
let meter = TokenMeter::tokens("Token Budget", 80, 100);
|
||||
|
||||
assert_eq!(meter.state(), BudgetState::Warning);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn gradient_runs_from_green_to_yellow_to_red() {
|
||||
assert_eq!(gradient_color(0.0), Color::Rgb(34, 197, 94));
|
||||
assert_eq!(gradient_color(0.8), Color::Rgb(234, 179, 8));
|
||||
assert_eq!(gradient_color(1.0), Color::Rgb(239, 68, 68));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn token_meter_renders_compact_usage_label() {
|
||||
let meter = TokenMeter::tokens("Token Budget", 4_000, 10_000);
|
||||
let area = Rect::new(0, 0, 48, 2);
|
||||
let mut buffer = Buffer::empty(area);
|
||||
|
||||
meter.render(area, &mut buffer);
|
||||
|
||||
let rendered = buffer
|
||||
.content()
|
||||
.chunks(area.width as usize)
|
||||
.flat_map(|row| row.iter().map(|cell| cell.symbol()))
|
||||
.collect::<String>();
|
||||
|
||||
assert!(rendered.contains("4,000 / 10,000 tok (40%)"));
|
||||
}
|
||||
}
|
||||
99
ecc2/src/worktree/mod.rs
Normal file
99
ecc2/src/worktree/mod.rs
Normal file
@@ -0,0 +1,99 @@
|
||||
use anyhow::{Context, Result};
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::session::WorktreeInfo;
|
||||
|
||||
/// Create a new git worktree for an agent session.
|
||||
pub fn create_for_session(session_id: &str, cfg: &Config) -> Result<WorktreeInfo> {
|
||||
let repo_root = std::env::current_dir().context("Failed to resolve repository root")?;
|
||||
create_for_session_in_repo(session_id, cfg, &repo_root)
|
||||
}
|
||||
|
||||
pub(crate) fn create_for_session_in_repo(
|
||||
session_id: &str,
|
||||
cfg: &Config,
|
||||
repo_root: &Path,
|
||||
) -> Result<WorktreeInfo> {
|
||||
let branch = format!("ecc/{session_id}");
|
||||
let path = cfg.worktree_root.join(session_id);
|
||||
|
||||
// Get current branch as base
|
||||
let base = get_current_branch(repo_root)?;
|
||||
|
||||
std::fs::create_dir_all(&cfg.worktree_root)
|
||||
.context("Failed to create worktree root directory")?;
|
||||
|
||||
let output = Command::new("git")
|
||||
.arg("-C")
|
||||
.arg(repo_root)
|
||||
.args(["worktree", "add", "-b", &branch])
|
||||
.arg(&path)
|
||||
.arg("HEAD")
|
||||
.output()
|
||||
.context("Failed to run git worktree add")?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
anyhow::bail!("git worktree add failed: {stderr}");
|
||||
}
|
||||
|
||||
tracing::info!(
|
||||
"Created worktree at {} on branch {}",
|
||||
path.display(),
|
||||
branch
|
||||
);
|
||||
|
||||
Ok(WorktreeInfo {
|
||||
path,
|
||||
branch,
|
||||
base_branch: base,
|
||||
})
|
||||
}
|
||||
|
||||
/// Remove a worktree and its branch.
|
||||
pub fn remove(path: &Path) -> Result<()> {
|
||||
let output = Command::new("git")
|
||||
.arg("-C")
|
||||
.arg(path)
|
||||
.args(["worktree", "remove", "--force"])
|
||||
.arg(path)
|
||||
.output()
|
||||
.context("Failed to remove worktree")?;
|
||||
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
tracing::warn!("Worktree removal warning: {stderr}");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// List all active worktrees.
|
||||
pub fn list() -> Result<Vec<String>> {
|
||||
let output = Command::new("git")
|
||||
.args(["worktree", "list", "--porcelain"])
|
||||
.output()
|
||||
.context("Failed to list worktrees")?;
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let worktrees: Vec<String> = stdout
|
||||
.lines()
|
||||
.filter(|l| l.starts_with("worktree "))
|
||||
.map(|l| l.trim_start_matches("worktree ").to_string())
|
||||
.collect();
|
||||
|
||||
Ok(worktrees)
|
||||
}
|
||||
|
||||
fn get_current_branch(repo_root: &Path) -> Result<String> {
|
||||
let output = Command::new("git")
|
||||
.arg("-C")
|
||||
.arg(repo_root)
|
||||
.args(["rev-parse", "--abbrev-ref", "HEAD"])
|
||||
.output()
|
||||
.context("Failed to get current branch")?;
|
||||
|
||||
Ok(String::from_utf8_lossy(&output.stdout).trim().to_string())
|
||||
}
|
||||
@@ -48,6 +48,7 @@ User request → Claude picks a tool → PreToolUse hook runs → Tool executes
|
||||
| **Session summary** | `Stop` | Persists session state when transcript path is available |
|
||||
| **Pattern extraction** | `Stop` | Evaluates session for extractable patterns (continuous learning) |
|
||||
| **Cost tracker** | `Stop` | Emits lightweight run-cost telemetry markers |
|
||||
| **Desktop notify** | `Stop` | Sends macOS desktop notification with task summary (standard+) |
|
||||
| **Session end marker** | `SessionEnd` | Lifecycle marker and cleanup log |
|
||||
|
||||
## Customizing Hooks
|
||||
|
||||
@@ -136,7 +136,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "bash -lc 'input=$(cat); for root in \"${CLAUDE_PLUGIN_ROOT:-}\" \"$HOME/.claude/plugins/everything-claude-code\" \"$HOME/.claude/plugins/everything-claude-code@everything-claude-code\" \"$HOME/.claude/plugins/marketplace/everything-claude-code\"; do if [ -n \"$root\" ] && [ -f \"$root/scripts/hooks/run-with-flags.js\" ]; then printf \"%s\" \"$input\" | node \"$root/scripts/hooks/run-with-flags.js\" \"session:start\" \"scripts/hooks/session-start.js\" \"minimal,standard,strict\"; exit $?; fi; done; for parent in \"$HOME/.claude/plugins\" \"$HOME/.claude/plugins/marketplace\"; do if [ -d \"$parent\" ]; then candidate=$(find \"$parent\" -maxdepth 2 -type f -path \"*/scripts/hooks/run-with-flags.js\" 2>/dev/null | head -n 1); if [ -n \"$candidate\" ]; then root=$(dirname \"$(dirname \"$(dirname \"$candidate\")\")\"); printf \"%s\" \"$input\" | node \"$root/scripts/hooks/run-with-flags.js\" \"session:start\" \"scripts/hooks/session-start.js\" \"minimal,standard,strict\"; exit $?; fi; fi; done; echo \"[SessionStart] WARNING: could not resolve ECC plugin root; skipping session-start hook\" >&2; printf \"%s\" \"$input\"; exit 0'"
|
||||
"command": "node -e \"const fs=require('fs');const path=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const rel=path.join('scripts','hooks','run-with-flags.js');const hasRunnerRoot=candidate=>{const value=typeof candidate==='string'?candidate.trim():'';return value.length>0&&fs.existsSync(path.join(path.resolve(value),rel));};const root=(()=>{const envRoot=process.env.CLAUDE_PLUGIN_ROOT||'';if(hasRunnerRoot(envRoot))return path.resolve(envRoot.trim());const home=require('os').homedir();const claudeDir=path.join(home,'.claude');if(hasRunnerRoot(claudeDir))return claudeDir;for(const candidate of [path.join(claudeDir,'plugins','everything-claude-code'),path.join(claudeDir,'plugins','everything-claude-code@everything-claude-code'),path.join(claudeDir,'plugins','marketplace','everything-claude-code')]){if(hasRunnerRoot(candidate))return candidate;}try{const cacheBase=path.join(claudeDir,'plugins','cache','everything-claude-code');for(const org of fs.readdirSync(cacheBase,{withFileTypes:true})){if(!org.isDirectory())continue;for(const version of fs.readdirSync(path.join(cacheBase,org.name),{withFileTypes:true})){if(!version.isDirectory())continue;const candidate=path.join(cacheBase,org.name,version.name);if(hasRunnerRoot(candidate))return candidate;}}}catch{}return claudeDir;})();const script=path.join(root,rel);if(fs.existsSync(script)){const result=spawnSync(process.execPath,[script,'session:start','scripts/hooks/session-start.js','minimal,standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});const stdout=typeof result.stdout==='string'?result.stdout:'';if(stdout)process.stdout.write(stdout);else process.stdout.write(raw);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[SessionStart] ERROR: 
session-start hook failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);}process.stderr.write('[SessionStart] WARNING: could not resolve ECC plugin root; skipping session-start hook'+String.fromCharCode(10));process.stdout.write(raw);\""
|
||||
}
|
||||
],
|
||||
"description": "Load previous context and detect package manager on new session"
|
||||
@@ -289,6 +289,18 @@
|
||||
}
|
||||
],
|
||||
"description": "Track token and cost metrics per session"
|
||||
},
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:desktop-notify\" \"scripts/hooks/desktop-notify.js\" \"standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
],
|
||||
"description": "Send macOS desktop notification with task summary when Claude responds"
|
||||
}
|
||||
],
|
||||
"SessionEnd": [
|
||||
|
||||
@@ -63,7 +63,8 @@
|
||||
"description": "Runtime hook configs and hook script helpers.",
|
||||
"paths": [
|
||||
"hooks",
|
||||
"scripts/hooks"
|
||||
"scripts/hooks",
|
||||
"scripts/lib"
|
||||
],
|
||||
"targets": [
|
||||
"claude",
|
||||
|
||||
13
package-lock.json
generated
13
package-lock.json
generated
@@ -11,6 +11,7 @@
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.5",
|
||||
"ajv": "^8.18.0",
|
||||
"sql.js": "^1.14.1"
|
||||
},
|
||||
"bin": {
|
||||
@@ -19,7 +20,6 @@
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"ajv": "^8.18.0",
|
||||
"c8": "^10.1.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
@@ -449,7 +449,6 @@
|
||||
"version": "8.18.0",
|
||||
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
|
||||
"integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
@@ -1043,7 +1042,6 @@
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
|
||||
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/fast-json-stable-stringify": {
|
||||
@@ -1064,7 +1062,6 @@
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
|
||||
"integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==",
|
||||
"dev": true,
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
@@ -1491,7 +1488,6 @@
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
|
||||
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/json-stable-stringify-without-jsonify": {
|
||||
@@ -2457,9 +2453,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/picomatch": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
|
||||
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -2512,7 +2508,6 @@
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
|
||||
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
|
||||
@@ -89,6 +89,9 @@
|
||||
"AGENTS.md",
|
||||
".claude-plugin/plugin.json",
|
||||
".claude-plugin/README.md",
|
||||
".codex-plugin/plugin.json",
|
||||
".codex-plugin/README.md",
|
||||
".mcp.json",
|
||||
"install.sh",
|
||||
"install.ps1",
|
||||
"llms.txt"
|
||||
@@ -110,11 +113,11 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.5",
|
||||
"ajv": "^8.18.0",
|
||||
"sql.js": "^1.14.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"ajv": "^8.18.0",
|
||||
"c8": "^10.1.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
@@ -122,5 +125,6 @@
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
}
|
||||
},
|
||||
"packageManager": "yarn@4.9.2+sha512.1fc009bc09d13cfd0e19efa44cbfc2b9cf6ca61482725eb35bbc5e257e093ebf4130db6dfe15d604ff4b79efd8e1e8e99b25fa7d0a6197c9f9826358d4d65c3c"
|
||||
}
|
||||
|
||||
172
research/ecc2-codebase-analysis.md
Normal file
172
research/ecc2-codebase-analysis.md
Normal file
@@ -0,0 +1,172 @@
|
||||
# ECC2 Codebase Research Report
|
||||
|
||||
**Date:** 2026-03-26
|
||||
**Subject:** `ecc-tui` v0.1.0 — Agentic IDE Control Plane
|
||||
**Total Lines:** 4,417 across 15 `.rs` files
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
ECC2 is a Rust TUI application that orchestrates AI coding agent sessions. It uses:
|
||||
- **ratatui 0.29** + **crossterm 0.28** for terminal UI
|
||||
- **rusqlite 0.32** (bundled) for local state persistence
|
||||
- **tokio 1** (full) for async runtime
|
||||
- **clap 4** (derive) for CLI
|
||||
|
||||
### Module Breakdown
|
||||
|
||||
| Module | Lines | Purpose |
|
||||
|--------|------:|---------|
|
||||
| `session/` | 1,974 | Session lifecycle, persistence, runtime, output |
|
||||
| `tui/` | 1,613 | Dashboard, app loop, custom widgets |
|
||||
| `observability/` | 409 | Tool call risk scoring and logging |
|
||||
| `config/` | 144 | Configuration (TOML file) |
|
||||
| `main.rs` | 142 | CLI entry point |
|
||||
| `worktree/` | 99 | Git worktree management |
|
||||
| `comms/` | 36 | Inter-agent messaging (send only) |
|
||||
|
||||
### Key Architectural Patterns
|
||||
|
||||
- **DbWriter thread** in `session/runtime.rs` — dedicated OS thread for SQLite writes from async context via `mpsc::unbounded_channel` with oneshot acknowledgements. Clean solution to the "SQLite from async" problem.
|
||||
- **Session state machine** with enforced transitions: `Pending → {Running, Failed, Stopped}`, `Running → {Idle, Completed, Failed, Stopped}`, etc.
|
||||
- **Ring buffer** for session output — `OUTPUT_BUFFER_LIMIT = 1000` lines per session with automatic eviction.
|
||||
- **Risk scoring** on tool calls — 4-axis analysis (base tool risk, file sensitivity, blast radius, irreversibility) producing composite 0.0–1.0 scores with suggested actions (Allow/Review/RequireConfirmation/Block).
|
||||
|
||||
## 2. Code Quality Metrics
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Total lines | 4,417 |
|
||||
| Test functions | 34 |
|
||||
| `unwrap()` calls | 3 |
|
||||
| `unsafe` blocks | 0 |
|
||||
| TODO/FIXME comments | 0 |
|
||||
| Max file size | 1,273 lines (`dashboard.rs`) |
|
||||
|
||||
**Assessment:** The codebase is clean. Only 3 `unwrap()` calls (2 in tests, 1 in config `default()`), zero `unsafe`, and all modules use proper `anyhow::Result` error propagation. The `dashboard.rs` file at 1,273 lines exceeds the repo's 800-line max-file guideline, but it is still manageable at the current scope.
|
||||
|
||||
## 3. Identified Gaps
|
||||
|
||||
### 3.1 Comms Module — Send Without Receive
|
||||
|
||||
`comms/mod.rs` (36 lines) has `send()` but no `receive()`, `poll()`, `inbox()`, or `subscribe()`. The `messages` table exists in SQLite, but nothing reads from it. The inter-agent messaging story is half-built.
|
||||
|
||||
**Impact:** Agents cannot coordinate. The `TaskHandoff`, `Query`, `Response`, and `Conflict` message types are defined but unusable.
|
||||
|
||||
### 3.2 New Session Dialog — Stub
|
||||
|
||||
`dashboard.rs:495` — `new_session()` logs `"New session dialog requested"` but does nothing. Users must use the CLI (`ecc start --task "..."`) to create sessions; the TUI dashboard cannot.
|
||||
|
||||
### 3.3 Single Agent Support
|
||||
|
||||
`session/manager.rs` — `agent_program()` only supports `"claude"`. The CLI accepts `--agent` but anything other than `"claude"` fails. No codex, opencode, or custom agent support.
|
||||
|
||||
### 3.4 Config — File-Only
|
||||
|
||||
`Config::load()` reads `~/.claude/ecc2.toml` only. The implementation lacks environment variable overrides (e.g., `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`) and CLI flags for configuration.
|
||||
|
||||
### 3.5 Legacy Dependency Candidate: `git2`
|
||||
|
||||
`git2 = "0.20"` is still declared in `Cargo.toml`, but the `worktree` module shells out to the `git` CLI instead. That makes `git2` a strong removal candidate rather than an already-completed cleanup.
|
||||
|
||||
### 3.6 No Metrics Aggregation
|
||||
|
||||
`SessionMetrics` tracks tokens, cost, duration, tool_calls, files_changed per session. But there's no aggregate view: total cost across sessions, average duration, top tools by usage, etc. The Metrics pane in the dashboard shows per-session detail only.
|
||||
|
||||
### 3.7 Daemon — No Health Reporting
|
||||
|
||||
`session/daemon.rs` runs an infinite loop checking session timeouts. No health endpoint, no log rotation, no PID file, no signal handling for graceful shutdown. `Ctrl+C` during daemon mode kills the process uncleanly.
|
||||
|
||||
## 4. Test Coverage Analysis
|
||||
|
||||
34 test functions across 10 source modules:
|
||||
|
||||
| Module | Tests | Coverage Focus |
|
||||
|--------|------:|----------------|
|
||||
| `main.rs` | 1 | CLI parsing |
|
||||
| `config/mod.rs` | 5 | Defaults, deserialization, legacy fallback |
|
||||
| `observability/mod.rs` | 5 | Risk scoring, persistence, pagination |
|
||||
| `session/daemon.rs` | 2 | Crash recovery / liveness handling |
|
||||
| `session/manager.rs` | 4 | Session lifecycle, resume, stop, latest status |
|
||||
| `session/output.rs` | 2 | Ring buffer, broadcast |
|
||||
| `session/runtime.rs` | 1 | Output capture persistence/events |
|
||||
| `session/store.rs` | 3 | Buffer window, migration, state transitions |
|
||||
| `tui/dashboard.rs` | 8 | Rendering, selection, pane navigation, scrolling |
|
||||
| `tui/widgets.rs` | 3 | Token meter rendering and thresholds |
|
||||
|
||||
**Direct coverage gaps:**
|
||||
- `comms/mod.rs` — 0 tests
|
||||
- `worktree/mod.rs` — 0 tests
|
||||
|
||||
The core I/O-heavy paths are no longer completely untested: `manager.rs`, `runtime.rs`, and `daemon.rs` each have targeted tests. The remaining gap is breadth rather than total absence, especially around `comms/`, `worktree/`, and more adversarial process/worktree failure cases.
|
||||
|
||||
## 5. Security Observations
|
||||
|
||||
- **No secrets in code.** Config reads from TOML file, no hardcoded credentials.
|
||||
- **Process spawning** uses `tokio::process::Command` with explicit `Stdio::piped()` — no shell injection vectors.
|
||||
- **Risk scoring** is a strong feature — catches `rm -rf`, `git push --force origin main`, file access to `.env`/secrets.
|
||||
- **No input sanitization on session task strings.** The task string is passed directly to `claude --print`. If the task contains shell metacharacters, it could be exploited depending on how `Command` handles argument quoting. Currently safe (arguments are not shell-interpreted), but worth auditing.
|
||||
|
||||
## 6. Dependency Health
|
||||
|
||||
| Crate | Version | Latest | Notes |
|
||||
|-------|---------|--------|-------|
|
||||
| ratatui | 0.29 | **0.30.0** | Update available |
|
||||
| crossterm | 0.28 | **0.29.0** | Update available |
|
||||
| rusqlite | 0.32 | **0.39.0** | Update available |
|
||||
| tokio | 1 | **1.50.0** | Update available |
|
||||
| serde | 1 | **1.0.228** | Update available |
|
||||
| clap | 4 | **4.6.0** | Update available |
|
||||
| chrono | 0.4 | **0.4.44** | Update available |
|
||||
| uuid | 1 | **1.22.0** | Update available |
|
||||
|
||||
`git2` is still present in `Cargo.toml` even though the `worktree` module shells out to the `git` CLI. Several other dependencies are outdated; either remove `git2` or start using it before the next release.
|
||||
|
||||
## 7. Recommendations (Prioritized)
|
||||
|
||||
### P0 — Quick Wins
|
||||
|
||||
1. **Add environment variable support to `Config::load()`** — `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`, `ECC_DEFAULT_AGENT`. Standard practice for CLI tools.
|
||||
|
||||
### P1 — Feature Completions
|
||||
|
||||
2. **Implement `comms::receive()` / `comms::poll()`** — read unread messages from the `messages` table, optionally with a `broadcast` channel for real-time delivery. Wire it into the dashboard.
|
||||
3. **Build the new-session dialog in the TUI** — modal form with task input, agent selector, worktree toggle. Should call `session::manager::create_session()`.
|
||||
4. **Add aggregate metrics** — total cost, average session duration, tool call frequency, cost per session. Show in the Metrics pane.
|
||||
|
||||
### P2 — Robustness
|
||||
|
||||
5. **Expand integration coverage for `manager.rs`, `runtime.rs`, and `daemon.rs`** — the repo now has baseline tests here, but it still needs failure-path coverage around process crashes, timeouts, and cleanup edge cases.
|
||||
6. **Add first-party tests for `worktree/mod.rs` and `comms/mod.rs`** — these are still uncovered and back important orchestration features.
|
||||
7. **Add daemon health reporting** — PID file, structured logging, graceful shutdown via signal handler.
|
||||
8. **Task string security audit** — The session task uses `claude --print` via `tokio::process::Command`. Verify arguments are never shell-interpreted. Checklist: confirm `Command` arg usage, threat-model metacharacter injection, input validation/escaping strategy, logging of raw inputs, and automated tests. Re-audit if invocation code changes.
|
||||
9. **Break up `dashboard.rs`** — extract SessionsPane, OutputPane, MetricsPane, LogPane into separate files under `tui/panes/`.
|
||||
|
||||
### P3 — Extensibility
|
||||
|
||||
10. **Multi-agent support** — make `agent_program()` pluggable. Add `codex`, `opencode`, `custom` agent types.
|
||||
11. **Config validation** — validate risk thresholds sum correctly, budget values are positive, paths exist.
|
||||
|
||||
## 8. Comparison with Ratatui 0.29 Best Practices
|
||||
|
||||
The codebase follows ratatui conventions well:
|
||||
- Uses `TableState` for stateful selection (correct pattern)
|
||||
- Custom `Widget` trait implementation for `TokenMeter` (idiomatic)
|
||||
- `tick()` method for periodic state sync (standard)
|
||||
- `broadcast::channel` for real-time output events (appropriate)
|
||||
|
||||
**Minor deviations:**
|
||||
- The `Dashboard` struct directly holds `StateStore` (SQLite connection). Ratatui best practice is to keep the state store behind an `Arc<Mutex<>>` to allow background updates. Currently the TUI owns the DB exclusively, which blocks adding a background metrics refresh task.
|
||||
- No `Clear` widget usage when rendering the help overlay — could cause rendering artifacts on some terminals.
|
||||
|
||||
## 9. Risk Assessment
|
||||
|
||||
| Risk | Likelihood | Impact | Mitigation |
|
||||
|------|-----------|--------|------------|
|
||||
| Dashboard file exceeds 1500 lines (projected) | High | Medium | At 1,273 lines currently (Section 2); extract panes into modules before it grows further |
|
||||
| SQLite lock contention | Low | High | DbWriter pattern already handles this |
|
||||
| No agent diversity | Medium | Medium | Pluggable agent support |
|
||||
| Task-string handling assumptions drift over time | Medium | Medium | Keep `Command` argument handling shell-free, document the threat model, and add regression tests for metacharacter-heavy task input |
|
||||
|
||||
---
|
||||
|
||||
**Bottom line:** ECC2 is a well-structured Rust project with clean error handling, good separation of concerns, and strong security features (risk scoring). The main gaps are incomplete features (comms, new-session dialog, single agent) rather than architectural problems. The codebase is ready for feature work on top of the solid foundation.
|
||||
186
scripts/catalog.js
Normal file
186
scripts/catalog.js
Normal file
@@ -0,0 +1,186 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const {
|
||||
getInstallComponent,
|
||||
listInstallComponents,
|
||||
listInstallProfiles,
|
||||
} = require('./lib/install-manifests');
|
||||
|
||||
/**
 * Canonical component-family names keyed by every accepted spelling
 * (singular, plural, and the `lang` shorthand). Frozen so the lookup
 * table cannot be mutated at runtime.
 */
const FAMILY_ALIASES = Object.freeze({
  baseline: 'baseline',
  baselines: 'baseline',
  language: 'language',
  languages: 'language',
  lang: 'language',
  framework: 'framework',
  frameworks: 'framework',
  capability: 'capability',
  capabilities: 'capability',
  agent: 'agent',
  agents: 'agent',
  skill: 'skill',
  skills: 'skill',
});

/**
 * Print CLI usage to stdout and terminate the process.
 *
 * @param {number} [exitCode=0] - Status passed to process.exit.
 */
function showHelp(exitCode = 0) {
  console.log(`
Discover ECC install components and profiles

Usage:
  node scripts/catalog.js profiles [--json]
  node scripts/catalog.js components [--family <family>] [--target <target>] [--json]
  node scripts/catalog.js show <component-id> [--json]

Examples:
  node scripts/catalog.js profiles
  node scripts/catalog.js components --family language
  node scripts/catalog.js show framework:nextjs
`);

  process.exit(exitCode);
}

/**
 * Map a user-supplied family spelling to its canonical name.
 *
 * Unknown families pass through trimmed and lowercased; falsy input
 * yields null so callers can treat "no filter" uniformly.
 *
 * @param {string|null|undefined} value - Raw --family argument.
 * @returns {string|null} Canonical family name, or null for falsy input.
 */
function normalizeFamily(value) {
  if (!value) {
    return null;
  }

  const normalized = String(value).trim().toLowerCase();
  // Object.hasOwn guards against inherited Object.prototype keys:
  // without it, `--family constructor` would resolve to the Object
  // constructor function instead of the literal string "constructor".
  return Object.hasOwn(FAMILY_ALIASES, normalized)
    ? FAMILY_ALIASES[normalized]
    : normalized;
}

/**
 * Parse raw process.argv into a catalog command descriptor.
 *
 * Recognized flags: --json, --family <family>, --target <target>,
 * --help/-h. For the `show` command, the first bare (non-flag)
 * argument becomes the component ID. No arguments, or --help/-h as
 * the first argument, sets `help` and returns immediately.
 *
 * @param {string[]} argv - Full process.argv (node, script, ...args).
 * @returns {{command: string|null, componentId: string|null,
 *   family: string|null, target: string|null, json: boolean,
 *   help: boolean}} Parsed options.
 * @throws {Error} When a flag is missing its value or an argument is
 *   not recognized for the active command.
 */
function parseArgs(argv) {
  const args = argv.slice(2);
  const parsed = {
    command: null,
    componentId: null,
    family: null,
    target: null,
    json: false,
    help: false,
  };

  if (args.length === 0 || args[0] === '--help' || args[0] === '-h') {
    parsed.help = true;
    return parsed;
  }

  parsed.command = args[0];

  for (let index = 1; index < args.length; index += 1) {
    const arg = args[index];

    if (arg === '--help' || arg === '-h') {
      parsed.help = true;
    } else if (arg === '--json') {
      parsed.json = true;
    } else if (arg === '--family') {
      if (!args[index + 1]) {
        throw new Error('Missing value for --family');
      }
      parsed.family = normalizeFamily(args[index + 1]);
      index += 1; // consume the flag's value
    } else if (arg === '--target') {
      if (!args[index + 1]) {
        throw new Error('Missing value for --target');
      }
      parsed.target = args[index + 1];
      index += 1; // consume the flag's value
    } else if (parsed.command === 'show' && !parsed.componentId) {
      parsed.componentId = arg;
    } else {
      throw new Error(`Unknown argument: ${arg}`);
    }
  }

  return parsed;
}
|
||||
|
||||
/**
 * Render a human-readable list of install profiles to stdout.
 *
 * @param {Array<{id: string, moduleCount: number, description: string}>} profiles
 */
function printProfiles(profiles) {
  console.log('Install profiles:\n');
  profiles.forEach((profile) => {
    console.log(`- ${profile.id} (${profile.moduleCount} modules)`);
    console.log(`  ${profile.description}`);
  });
}
|
||||
|
||||
/**
 * Render a human-readable list of install components to stdout,
 * one entry per component with its family, targets, and modules.
 *
 * @param {Array<{id: string, family: string, targets: string[],
 *   moduleIds: string[], description: string}>} components
 */
function printComponents(components) {
  console.log('Install components:\n');
  for (const entry of components) {
    const { id, family, targets, moduleIds, description } = entry;
    console.log(`- ${id} [${family}]`);
    console.log(`  targets=${targets.join(', ')} modules=${moduleIds.join(', ')}`);
    console.log(`  ${description}`);
  }
}
|
||||
|
||||
/**
 * Render the full detail view for a single install component:
 * header fields first, then each resolved module (if any).
 *
 * @param {{id: string, family: string, targets: string[],
 *   moduleIds: string[], description: string,
 *   modules: Array<{id: string, kind: string, targets: string[],
 *     defaultInstall: boolean, cost: *, stability: *,
 *     description: string}>}} component
 */
function printComponent(component) {
  const headerLines = [
    `Install component: ${component.id}\n`,
    `Family: ${component.family}`,
    `Targets: ${component.targets.join(', ')}`,
    `Modules: ${component.moduleIds.join(', ')}`,
    `Description: ${component.description}`,
  ];
  for (const line of headerLines) {
    console.log(line);
  }

  if (component.modules.length === 0) {
    return;
  }

  console.log('\nResolved modules:');
  // `mod` avoids shadowing the CommonJS `module` object.
  for (const mod of component.modules) {
    console.log(`- ${mod.id} [${mod.kind}]`);
    console.log(
      `  targets=${mod.targets.join(', ')} default=${mod.defaultInstall} cost=${mod.cost} stability=${mod.stability}`
    );
    console.log(`  ${mod.description}`);
  }
}
|
||||
|
||||
/**
 * CLI entry point: parse arguments, dispatch to the requested
 * subcommand (profiles / components / show), and print either JSON
 * or a human-readable rendering. Any thrown error is reported to
 * stderr and the process exits with status 1.
 */
function main() {
  try {
    const options = parseArgs(process.argv);

    if (options.help) {
      showHelp(0);
    }

    // Print `payload` as pretty JSON when --json was given, otherwise
    // fall back to the human-readable renderer.
    const emit = (payload, renderText) => {
      if (options.json) {
        console.log(JSON.stringify(payload, null, 2));
      } else {
        renderText();
      }
    };

    switch (options.command) {
      case 'profiles': {
        const profiles = listInstallProfiles();
        emit({ profiles }, () => printProfiles(profiles));
        return;
      }
      case 'components': {
        const components = listInstallComponents({
          family: options.family,
          target: options.target,
        });
        emit({ components }, () => printComponents(components));
        return;
      }
      case 'show': {
        if (!options.componentId) {
          throw new Error('Catalog show requires an install component ID');
        }
        const component = getInstallComponent(options.componentId);
        emit(component, () => printComponent(component));
        return;
      }
      default:
        throw new Error(`Unknown catalog command: ${options.command}`);
    }
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
|
||||
|
||||
// Entry point: parse CLI arguments and dispatch the requested catalog command.
main();
|
||||
16
scripts/codex/check-codex-global-state.sh
Normal file → Executable file
16
scripts/codex/check-codex-global-state.sh
Normal file → Executable file
@@ -89,7 +89,13 @@ fi
|
||||
if [[ -f "$CONFIG_FILE" ]]; then
|
||||
check_config_pattern '^multi_agent\s*=\s*true' "multi_agent is enabled"
|
||||
check_config_absent '^\s*collab\s*=' "deprecated collab flag is absent"
|
||||
check_config_pattern '^persistent_instructions\s*=' "persistent_instructions is configured"
|
||||
# persistent_instructions is recommended but optional; warn instead of fail
|
||||
# so users who rely on AGENTS.md alone are not blocked (#967).
|
||||
if rg -n '^[[:space:]]*persistent_instructions\s*=' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
ok "persistent_instructions is configured"
|
||||
else
|
||||
warn "persistent_instructions is not set (recommended but optional)"
|
||||
fi
|
||||
check_config_pattern '^\[profiles\.strict\]' "profiles.strict exists"
|
||||
check_config_pattern '^\[profiles\.yolo\]' "profiles.yolo exists"
|
||||
|
||||
@@ -97,7 +103,7 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
'mcp_servers.github' \
|
||||
'mcp_servers.memory' \
|
||||
'mcp_servers.sequential-thinking' \
|
||||
'mcp_servers.context7-mcp'
|
||||
'mcp_servers.context7'
|
||||
do
|
||||
if rg -n "^\[$section\]" "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
ok "MCP section [$section] exists"
|
||||
@@ -106,10 +112,10 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
fi
|
||||
done
|
||||
|
||||
if rg -n '^\[mcp_servers\.context7\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
warn "Duplicate [mcp_servers.context7] exists (context7-mcp is preferred)"
|
||||
if rg -n '^\[mcp_servers\.context7-mcp\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
warn "Legacy [mcp_servers.context7-mcp] exists (context7 is preferred)"
|
||||
else
|
||||
ok "No duplicate [mcp_servers.context7] section"
|
||||
ok "No legacy [mcp_servers.context7-mcp] section"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
20
scripts/codex/install-global-git-hooks.sh
Normal file → Executable file
20
scripts/codex/install-global-git-hooks.sh
Normal file → Executable file
@@ -24,9 +24,11 @@ log() {
|
||||
|
||||
run_or_echo() {
|
||||
if [[ "$MODE" == "dry-run" ]]; then
|
||||
printf '[dry-run] %s\n' "$*"
|
||||
printf '[dry-run]'
|
||||
printf ' %q' "$@"
|
||||
printf '\n'
|
||||
else
|
||||
eval "$*"
|
||||
"$@"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -41,14 +43,14 @@ log "Global hooks destination: $DEST_DIR"
|
||||
|
||||
if [[ -d "$DEST_DIR" ]]; then
|
||||
log "Backing up existing hooks directory to $BACKUP_DIR"
|
||||
run_or_echo "mkdir -p \"$BACKUP_DIR\""
|
||||
run_or_echo "cp -R \"$DEST_DIR\" \"$BACKUP_DIR/hooks\""
|
||||
run_or_echo mkdir -p "$BACKUP_DIR"
|
||||
run_or_echo cp -R "$DEST_DIR" "$BACKUP_DIR/hooks"
|
||||
fi
|
||||
|
||||
run_or_echo "mkdir -p \"$DEST_DIR\""
|
||||
run_or_echo "cp \"$SOURCE_DIR/pre-commit\" \"$DEST_DIR/pre-commit\""
|
||||
run_or_echo "cp \"$SOURCE_DIR/pre-push\" \"$DEST_DIR/pre-push\""
|
||||
run_or_echo "chmod +x \"$DEST_DIR/pre-commit\" \"$DEST_DIR/pre-push\""
|
||||
run_or_echo mkdir -p "$DEST_DIR"
|
||||
run_or_echo cp "$SOURCE_DIR/pre-commit" "$DEST_DIR/pre-commit"
|
||||
run_or_echo cp "$SOURCE_DIR/pre-push" "$DEST_DIR/pre-push"
|
||||
run_or_echo chmod +x "$DEST_DIR/pre-commit" "$DEST_DIR/pre-push"
|
||||
|
||||
if [[ "$MODE" == "apply" ]]; then
|
||||
prev_hooks_path="$(git config --global core.hooksPath || true)"
|
||||
@@ -56,7 +58,7 @@ if [[ "$MODE" == "apply" ]]; then
|
||||
log "Previous global hooksPath: $prev_hooks_path"
|
||||
fi
|
||||
fi
|
||||
run_or_echo "git config --global core.hooksPath \"$DEST_DIR\""
|
||||
run_or_echo git config --global core.hooksPath "$DEST_DIR"
|
||||
|
||||
log "Installed ECC global git hooks."
|
||||
log "Disable per repo by creating .ecc-hooks-disable in project root."
|
||||
|
||||
@@ -83,20 +83,23 @@ function dlxServer(name, pkg, extraFields, extraToml) {
|
||||
}
|
||||
|
||||
/** Each entry: key = section name under mcp_servers, value = { toml, fields } */
|
||||
const DEFAULT_MCP_STARTUP_TIMEOUT_SEC = 30;
|
||||
const DEFAULT_MCP_STARTUP_TIMEOUT_TOML = `startup_timeout_sec = ${DEFAULT_MCP_STARTUP_TIMEOUT_SEC}`;
|
||||
|
||||
const ECC_SERVERS = {
|
||||
supabase: dlxServer('supabase', '@supabase/mcp-server-supabase@latest', { startup_timeout_sec: 20.0, tool_timeout_sec: 120.0 }, 'startup_timeout_sec = 20.0\ntool_timeout_sec = 120.0'),
|
||||
playwright: dlxServer('playwright', '@playwright/mcp@latest'),
|
||||
'context7-mcp': dlxServer('context7-mcp', '@upstash/context7-mcp'),
|
||||
playwright: dlxServer('playwright', '@playwright/mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
context7: dlxServer('context7', '@upstash/context7-mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
exa: {
|
||||
fields: { url: 'https://mcp.exa.ai/mcp' },
|
||||
toml: `[mcp_servers.exa]\nurl = "https://mcp.exa.ai/mcp"`
|
||||
},
|
||||
github: {
|
||||
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP] },
|
||||
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]`
|
||||
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP], startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC },
|
||||
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]\n${DEFAULT_MCP_STARTUP_TIMEOUT_TOML}`
|
||||
},
|
||||
memory: dlxServer('memory', '@modelcontextprotocol/server-memory'),
|
||||
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking')
|
||||
memory: dlxServer('memory', '@modelcontextprotocol/server-memory', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML)
|
||||
};
|
||||
|
||||
// Append --features arg for supabase after dlxServer builds the base
|
||||
@@ -104,9 +107,9 @@ ECC_SERVERS.supabase.fields.args.push('--features=account,docs,database,debuggin
|
||||
ECC_SERVERS.supabase.toml = ECC_SERVERS.supabase.toml.replace(/^(args = \[.*)\]$/m, '$1, "--features=account,docs,database,debugging,development,functions,storage,branching"]');
|
||||
|
||||
// Legacy section names that should be treated as an existing ECC server.
|
||||
// e.g. old configs shipped [mcp_servers.context7] instead of [mcp_servers.context7-mcp].
|
||||
// e.g. older configs shipped [mcp_servers.context7-mcp] instead of [mcp_servers.context7].
|
||||
const LEGACY_ALIASES = {
|
||||
'context7-mcp': ['context7']
|
||||
context7: ['context7-mcp']
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -254,6 +257,10 @@ function main() {
|
||||
if (resolvedLabel !== name) {
|
||||
raw = removeServerFromText(raw, name, existing);
|
||||
}
|
||||
if (legacyName && hasCanonical) {
|
||||
toRemoveLog.push(`mcp_servers.${legacyName}`);
|
||||
raw = removeServerFromText(raw, legacyName, existing);
|
||||
}
|
||||
toAppend.push(spec.toml);
|
||||
} else {
|
||||
// Add-only mode: skip, but warn about drift
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { buildDoctorReport } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -88,7 +89,7 @@ function main() {
|
||||
|
||||
const report = buildDoctorReport({
|
||||
repoRoot: require('path').join(__dirname, '..'),
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
});
|
||||
|
||||
8
scripts/ecc.js
Normal file → Executable file
8
scripts/ecc.js
Normal file → Executable file
@@ -13,6 +13,10 @@ const COMMANDS = {
|
||||
script: 'install-plan.js',
|
||||
description: 'Inspect selective-install manifests and resolved plans',
|
||||
},
|
||||
catalog: {
|
||||
script: 'catalog.js',
|
||||
description: 'Discover install profiles and component IDs',
|
||||
},
|
||||
'install-plan': {
|
||||
script: 'install-plan.js',
|
||||
description: 'Alias for plan',
|
||||
@@ -50,6 +54,7 @@ const COMMANDS = {
|
||||
const PRIMARY_COMMANDS = [
|
||||
'install',
|
||||
'plan',
|
||||
'catalog',
|
||||
'list-installed',
|
||||
'doctor',
|
||||
'repair',
|
||||
@@ -79,6 +84,9 @@ Examples:
|
||||
ecc typescript
|
||||
ecc install --profile developer --target claude
|
||||
ecc plan --profile core --target cursor
|
||||
ecc catalog profiles
|
||||
ecc catalog components --family language
|
||||
ecc catalog show framework:nextjs
|
||||
ecc list-installed --json
|
||||
ecc doctor --target cursor
|
||||
ecc repair --dry-run
|
||||
|
||||
@@ -61,11 +61,34 @@ const PROTECTED_FILES = new Set([
|
||||
'.markdownlintrc',
|
||||
]);
|
||||
|
||||
function parseInput(inputOrRaw) {
|
||||
if (typeof inputOrRaw === 'string') {
|
||||
try {
|
||||
return inputOrRaw.trim() ? JSON.parse(inputOrRaw) : {};
|
||||
} catch {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
return inputOrRaw && typeof inputOrRaw === 'object' ? inputOrRaw : {};
|
||||
}
|
||||
|
||||
/**
|
||||
* Exportable run() for in-process execution via run-with-flags.js.
|
||||
* Avoids the ~50-100ms spawnSync overhead when available.
|
||||
*/
|
||||
function run(input) {
|
||||
function run(inputOrRaw, options = {}) {
|
||||
if (options.truncated) {
|
||||
return {
|
||||
exitCode: 2,
|
||||
stderr:
|
||||
`BLOCKED: Hook input exceeded ${options.maxStdin || MAX_STDIN} bytes. ` +
|
||||
'Refusing to bypass config-protection on a truncated payload. ' +
|
||||
'Retry with a smaller edit or disable the config-protection hook temporarily.'
|
||||
};
|
||||
}
|
||||
|
||||
const input = parseInput(inputOrRaw);
|
||||
const filePath = input?.tool_input?.file_path || input?.tool_input?.file || '';
|
||||
if (!filePath) return { exitCode: 0 };
|
||||
|
||||
@@ -75,9 +98,9 @@ function run(input) {
|
||||
exitCode: 2,
|
||||
stderr:
|
||||
`BLOCKED: Modifying ${basename} is not allowed. ` +
|
||||
`Fix the source code to satisfy linter/formatter rules instead of ` +
|
||||
`weakening the config. If this is a legitimate config change, ` +
|
||||
`disable the config-protection hook temporarily.`,
|
||||
'Fix the source code to satisfy linter/formatter rules instead of ' +
|
||||
'weakening the config. If this is a legitimate config change, ' +
|
||||
'disable the config-protection hook temporarily.',
|
||||
};
|
||||
}
|
||||
|
||||
@@ -87,7 +110,7 @@ function run(input) {
|
||||
module.exports = { run };
|
||||
|
||||
// Stdin fallback for spawnSync execution
|
||||
let truncated = false;
|
||||
let truncated = /^(1|true|yes)$/i.test(String(process.env.ECC_HOOK_INPUT_TRUNCATED || ''));
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
@@ -100,25 +123,17 @@ process.stdin.on('data', chunk => {
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
// If stdin was truncated, the JSON is likely malformed. Fail open but
|
||||
// log a warning so the issue is visible. The run() path (used by
|
||||
// run-with-flags.js in-process) is not affected by this.
|
||||
if (truncated) {
|
||||
process.stderr.write('[config-protection] Warning: stdin exceeded 1MB, skipping check\n');
|
||||
process.stdout.write(raw);
|
||||
return;
|
||||
const result = run(raw, {
|
||||
truncated,
|
||||
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN,
|
||||
});
|
||||
|
||||
if (result.stderr) {
|
||||
process.stderr.write(result.stderr + '\n');
|
||||
}
|
||||
|
||||
try {
|
||||
const input = raw.trim() ? JSON.parse(raw) : {};
|
||||
const result = run(input);
|
||||
|
||||
if (result.exitCode === 2) {
|
||||
process.stderr.write(result.stderr + '\n');
|
||||
process.exit(2);
|
||||
}
|
||||
} catch {
|
||||
// Keep hook non-blocking on parse errors.
|
||||
if (result.exitCode === 2) {
|
||||
process.exit(2);
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
|
||||
94
scripts/hooks/desktop-notify.js
Normal file
94
scripts/hooks/desktop-notify.js
Normal file
@@ -0,0 +1,94 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Desktop Notification Hook (Stop)
|
||||
*
|
||||
* Sends a native desktop notification with the task summary when Claude
|
||||
* finishes responding. Currently supports macOS (osascript); other
|
||||
* platforms exit silently. Windows (PowerShell) and Linux (notify-send)
|
||||
* support is planned.
|
||||
*
|
||||
* Hook ID : stop:desktop-notify
|
||||
* Profiles: standard, strict
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const { spawnSync } = require('child_process');
|
||||
const { isMacOS, log } = require('../lib/utils');
|
||||
|
||||
const TITLE = 'Claude Code';
|
||||
const MAX_BODY_LENGTH = 100;
|
||||
|
||||
/**
|
||||
* Extract a short summary from the last assistant message.
|
||||
* Takes the first non-empty line and truncates to MAX_BODY_LENGTH chars.
|
||||
*/
|
||||
function extractSummary(message) {
|
||||
if (!message || typeof message !== 'string') return 'Done';
|
||||
|
||||
const firstLine = message
|
||||
.split('\n')
|
||||
.map(l => l.trim())
|
||||
.find(l => l.length > 0);
|
||||
|
||||
if (!firstLine) return 'Done';
|
||||
|
||||
return firstLine.length > MAX_BODY_LENGTH
|
||||
? `${firstLine.slice(0, MAX_BODY_LENGTH)}...`
|
||||
: firstLine;
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a macOS notification via osascript.
|
||||
* AppleScript strings do not support backslash escapes, so we replace
|
||||
* double quotes with curly quotes and strip backslashes before embedding.
|
||||
*/
|
||||
function notifyMacOS(title, body) {
|
||||
const safeBody = body.replace(/\\/g, '').replace(/"/g, '\u201C');
|
||||
const safeTitle = title.replace(/\\/g, '').replace(/"/g, '\u201C');
|
||||
const script = `display notification "${safeBody}" with title "${safeTitle}"`;
|
||||
const result = spawnSync('osascript', ['-e', script], { stdio: 'ignore', timeout: 5000 });
|
||||
if (result.error || result.status !== 0) {
|
||||
log(`[DesktopNotify] osascript failed: ${result.error ? result.error.message : `exit ${result.status}`}`);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: future platform support
|
||||
// function notifyWindows(title, body) { ... }
|
||||
// function notifyLinux(title, body) { ... }
|
||||
|
||||
/**
|
||||
* Fast-path entry point for run-with-flags.js (avoids extra process spawn).
|
||||
*/
|
||||
function run(raw) {
|
||||
try {
|
||||
if (!isMacOS) return raw;
|
||||
|
||||
const input = raw.trim() ? JSON.parse(raw) : {};
|
||||
const summary = extractSummary(input.last_assistant_message);
|
||||
notifyMacOS(TITLE, summary);
|
||||
} catch (err) {
|
||||
log(`[DesktopNotify] Error: ${err.message}`);
|
||||
}
|
||||
|
||||
return raw;
|
||||
}
|
||||
|
||||
module.exports = { run };
|
||||
|
||||
// Legacy stdin path (when invoked directly rather than via run-with-flags)
|
||||
if (require.main === module) {
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let data = '';
|
||||
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
data += chunk.substring(0, MAX_STDIN - data.length);
|
||||
}
|
||||
});
|
||||
process.stdin.on('end', () => {
|
||||
const output = run(data);
|
||||
if (output) process.stdout.write(output);
|
||||
});
|
||||
}
|
||||
@@ -10,6 +10,7 @@
|
||||
* - policy_violation: Actions that violate configured policies
|
||||
* - security_finding: Security-relevant tool invocations
|
||||
* - approval_requested: Operations requiring explicit approval
|
||||
* - hook_input_truncated: Hook input exceeded the safe inspection limit
|
||||
*
|
||||
* Enable: Set ECC_GOVERNANCE_CAPTURE=1
|
||||
* Configure session: Set ECC_SESSION_ID for session correlation
|
||||
@@ -101,6 +102,37 @@ function detectSensitivePath(filePath) {
|
||||
return SENSITIVE_PATHS.some(pattern => pattern.test(filePath));
|
||||
}
|
||||
|
||||
function fingerprintCommand(command) {
|
||||
if (!command || typeof command !== 'string') return null;
|
||||
return crypto.createHash('sha256').update(command).digest('hex').slice(0, 12);
|
||||
}
|
||||
|
||||
function summarizeCommand(command) {
|
||||
if (!command || typeof command !== 'string') {
|
||||
return {
|
||||
commandName: null,
|
||||
commandFingerprint: null,
|
||||
};
|
||||
}
|
||||
|
||||
const trimmed = command.trim();
|
||||
if (!trimmed) {
|
||||
return {
|
||||
commandName: null,
|
||||
commandFingerprint: null,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
commandName: trimmed.split(/\s+/)[0] || null,
|
||||
commandFingerprint: fingerprintCommand(trimmed),
|
||||
};
|
||||
}
|
||||
|
||||
function emitGovernanceEvent(event) {
|
||||
process.stderr.write(`[governance] ${JSON.stringify(event)}\n`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Analyze a hook input payload and return governance events to capture.
|
||||
*
|
||||
@@ -146,6 +178,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
|
||||
if (toolName === 'Bash') {
|
||||
const command = toolInput.command || '';
|
||||
const approvalFindings = detectApprovalRequired(command);
|
||||
const commandSummary = summarizeCommand(command);
|
||||
|
||||
if (approvalFindings.length > 0) {
|
||||
events.push({
|
||||
@@ -155,7 +188,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
|
||||
payload: {
|
||||
toolName,
|
||||
hookPhase,
|
||||
command: command.slice(0, 200),
|
||||
...commandSummary,
|
||||
matchedPatterns: approvalFindings.map(f => f.pattern),
|
||||
severity: 'high',
|
||||
},
|
||||
@@ -188,6 +221,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
|
||||
if (SECURITY_RELEVANT_TOOLS.has(toolName) && hookPhase === 'post') {
|
||||
const command = toolInput.command || '';
|
||||
const hasElevated = /sudo\s/.test(command) || /chmod\s/.test(command) || /chown\s/.test(command);
|
||||
const commandSummary = summarizeCommand(command);
|
||||
|
||||
if (hasElevated) {
|
||||
events.push({
|
||||
@@ -197,7 +231,7 @@ function analyzeForGovernanceEvents(input, context = {}) {
|
||||
payload: {
|
||||
toolName,
|
||||
hookPhase,
|
||||
command: command.slice(0, 200),
|
||||
...commandSummary,
|
||||
reason: 'elevated_privilege_command',
|
||||
severity: 'medium',
|
||||
},
|
||||
@@ -216,16 +250,32 @@ function analyzeForGovernanceEvents(input, context = {}) {
|
||||
* @param {string} rawInput - Raw JSON string from stdin
|
||||
* @returns {string} The original input (pass-through)
|
||||
*/
|
||||
function run(rawInput) {
|
||||
function run(rawInput, options = {}) {
|
||||
// Gate on feature flag
|
||||
if (String(process.env.ECC_GOVERNANCE_CAPTURE || '').toLowerCase() !== '1') {
|
||||
return rawInput;
|
||||
}
|
||||
|
||||
const sessionId = process.env.ECC_SESSION_ID || null;
|
||||
const hookPhase = process.env.CLAUDE_HOOK_EVENT_NAME || 'unknown';
|
||||
|
||||
if (options.truncated) {
|
||||
emitGovernanceEvent({
|
||||
id: generateEventId(),
|
||||
sessionId,
|
||||
eventType: 'hook_input_truncated',
|
||||
payload: {
|
||||
hookPhase: hookPhase.startsWith('Pre') ? 'pre' : 'post',
|
||||
sizeLimitBytes: options.maxStdin || MAX_STDIN,
|
||||
severity: 'warning',
|
||||
},
|
||||
resolvedAt: null,
|
||||
resolution: null,
|
||||
});
|
||||
}
|
||||
|
||||
try {
|
||||
const input = JSON.parse(rawInput);
|
||||
const sessionId = process.env.ECC_SESSION_ID || null;
|
||||
const hookPhase = process.env.CLAUDE_HOOK_EVENT_NAME || 'unknown';
|
||||
|
||||
const events = analyzeForGovernanceEvents(input, {
|
||||
sessionId,
|
||||
@@ -233,13 +283,8 @@ function run(rawInput) {
|
||||
});
|
||||
|
||||
if (events.length > 0) {
|
||||
// Write events to stderr as JSON-lines for the caller to capture.
|
||||
// The state store write is async and handled by a separate process
|
||||
// to avoid blocking the hook pipeline.
|
||||
for (const event of events) {
|
||||
process.stderr.write(
|
||||
`[governance] ${JSON.stringify(event)}\n`
|
||||
);
|
||||
emitGovernanceEvent(event);
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
@@ -252,16 +297,25 @@ function run(rawInput) {
|
||||
// ── stdin entry point ────────────────────────────────
|
||||
if (require.main === module) {
|
||||
let raw = '';
|
||||
let truncated = /^(1|true|yes)$/i.test(String(process.env.ECC_HOOK_INPUT_TRUNCATED || ''));
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - raw.length;
|
||||
raw += chunk.substring(0, remaining);
|
||||
if (chunk.length > remaining) {
|
||||
truncated = true;
|
||||
}
|
||||
} else {
|
||||
truncated = true;
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
const result = run(raw);
|
||||
const result = run(raw, {
|
||||
truncated,
|
||||
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN,
|
||||
});
|
||||
process.stdout.write(result);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -99,15 +99,21 @@ function saveState(filePath, state) {
|
||||
function readRawStdin() {
|
||||
return new Promise(resolve => {
|
||||
let raw = '';
|
||||
let truncated = /^(1|true|yes)$/i.test(String(process.env.ECC_HOOK_INPUT_TRUNCATED || ''));
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - raw.length;
|
||||
raw += chunk.substring(0, remaining);
|
||||
if (chunk.length > remaining) {
|
||||
truncated = true;
|
||||
}
|
||||
} else {
|
||||
truncated = true;
|
||||
}
|
||||
});
|
||||
process.stdin.on('end', () => resolve(raw));
|
||||
process.stdin.on('error', () => resolve(raw));
|
||||
process.stdin.on('end', () => resolve({ raw, truncated }));
|
||||
process.stdin.on('error', () => resolve({ raw, truncated }));
|
||||
});
|
||||
}
|
||||
|
||||
@@ -155,6 +161,18 @@ function extractMcpTarget(input) {
|
||||
};
|
||||
}
|
||||
|
||||
function extractMcpTargetFromRaw(raw) {
|
||||
const toolNameMatch = raw.match(/"(?:tool_name|name)"\s*:\s*"([^"]+)"/);
|
||||
const serverMatch = raw.match(/"(?:server|mcp_server|connector)"\s*:\s*"([^"]+)"/);
|
||||
const toolMatch = raw.match(/"(?:tool|mcp_tool)"\s*:\s*"([^"]+)"/);
|
||||
|
||||
return extractMcpTarget({
|
||||
tool_name: toolNameMatch ? toolNameMatch[1] : '',
|
||||
server: serverMatch ? serverMatch[1] : undefined,
|
||||
tool: toolMatch ? toolMatch[1] : undefined
|
||||
});
|
||||
}
|
||||
|
||||
function resolveServerConfig(serverName) {
|
||||
for (const filePath of configPaths()) {
|
||||
const data = readJsonFile(filePath);
|
||||
@@ -559,9 +577,9 @@ async function handlePostToolUseFailure(rawInput, input, target, statePathValue,
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const rawInput = await readRawStdin();
|
||||
const { raw: rawInput, truncated } = await readRawStdin();
|
||||
const input = safeParse(rawInput);
|
||||
const target = extractMcpTarget(input);
|
||||
const target = extractMcpTarget(input) || (truncated ? extractMcpTargetFromRaw(rawInput) : null);
|
||||
|
||||
if (!target) {
|
||||
process.stdout.write(rawInput);
|
||||
@@ -569,6 +587,19 @@ async function main() {
|
||||
return;
|
||||
}
|
||||
|
||||
if (truncated) {
|
||||
const limit = Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN;
|
||||
const logs = [
|
||||
shouldFailOpen()
|
||||
? `[MCPHealthCheck] Hook input exceeded ${limit} bytes while checking ${target.server}; allowing ${target.tool || 'tool'} because fail-open mode is enabled`
|
||||
: `[MCPHealthCheck] Hook input exceeded ${limit} bytes while checking ${target.server}; blocking ${target.tool || 'tool'} to avoid bypassing MCP health checks`
|
||||
];
|
||||
emitLogs(logs);
|
||||
process.stdout.write(rawInput);
|
||||
process.exit(shouldFailOpen() ? 0 : 2);
|
||||
return;
|
||||
}
|
||||
|
||||
const eventName = process.env.CLAUDE_HOOK_EVENT_NAME || 'PreToolUse';
|
||||
const now = Date.now();
|
||||
const statePathValue = stateFilePath();
|
||||
|
||||
@@ -18,18 +18,66 @@ const MAX_STDIN = 1024 * 1024;
|
||||
function readStdinRaw() {
|
||||
return new Promise(resolve => {
|
||||
let raw = '';
|
||||
let truncated = false;
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - raw.length;
|
||||
raw += chunk.substring(0, remaining);
|
||||
if (chunk.length > remaining) {
|
||||
truncated = true;
|
||||
}
|
||||
} else {
|
||||
truncated = true;
|
||||
}
|
||||
});
|
||||
process.stdin.on('end', () => resolve(raw));
|
||||
process.stdin.on('error', () => resolve(raw));
|
||||
process.stdin.on('end', () => resolve({ raw, truncated }));
|
||||
process.stdin.on('error', () => resolve({ raw, truncated }));
|
||||
});
|
||||
}
|
||||
|
||||
function writeStderr(stderr) {
|
||||
if (typeof stderr !== 'string' || stderr.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
process.stderr.write(stderr.endsWith('\n') ? stderr : `${stderr}\n`);
|
||||
}
|
||||
|
||||
function emitHookResult(raw, output) {
|
||||
if (typeof output === 'string' || Buffer.isBuffer(output)) {
|
||||
process.stdout.write(String(output));
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (output && typeof output === 'object') {
|
||||
writeStderr(output.stderr);
|
||||
|
||||
if (Object.prototype.hasOwnProperty.call(output, 'stdout')) {
|
||||
process.stdout.write(String(output.stdout ?? ''));
|
||||
} else if (!Number.isInteger(output.exitCode) || output.exitCode === 0) {
|
||||
process.stdout.write(raw);
|
||||
}
|
||||
|
||||
return Number.isInteger(output.exitCode) ? output.exitCode : 0;
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
return 0;
|
||||
}
|
||||
|
||||
function writeLegacySpawnOutput(raw, result) {
|
||||
const stdout = typeof result.stdout === 'string' ? result.stdout : '';
|
||||
if (stdout) {
|
||||
process.stdout.write(stdout);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Number.isInteger(result.status) && result.status === 0) {
|
||||
process.stdout.write(raw);
|
||||
}
|
||||
}
|
||||
|
||||
function getPluginRoot() {
|
||||
if (process.env.CLAUDE_PLUGIN_ROOT && process.env.CLAUDE_PLUGIN_ROOT.trim()) {
|
||||
return process.env.CLAUDE_PLUGIN_ROOT;
|
||||
@@ -39,7 +87,7 @@ function getPluginRoot() {
|
||||
|
||||
async function main() {
|
||||
const [, , hookId, relScriptPath, profilesCsv] = process.argv;
|
||||
const raw = await readStdinRaw();
|
||||
const { raw, truncated } = await readStdinRaw();
|
||||
|
||||
if (!hookId || !relScriptPath) {
|
||||
process.stdout.write(raw);
|
||||
@@ -89,8 +137,8 @@ async function main() {
|
||||
|
||||
if (hookModule && typeof hookModule.run === 'function') {
|
||||
try {
|
||||
const output = hookModule.run(raw);
|
||||
if (output !== null && output !== undefined) process.stdout.write(output);
|
||||
const output = hookModule.run(raw, { truncated, maxStdin: MAX_STDIN });
|
||||
process.exit(emitHookResult(raw, output));
|
||||
} catch (runErr) {
|
||||
process.stderr.write(`[Hook] run() error for ${hookId}: ${runErr.message}\n`);
|
||||
process.stdout.write(raw);
|
||||
@@ -99,19 +147,32 @@ async function main() {
|
||||
}
|
||||
|
||||
// Legacy path: spawn a child Node process for hooks without run() export
|
||||
const result = spawnSync('node', [scriptPath], {
|
||||
const result = spawnSync(process.execPath, [scriptPath], {
|
||||
input: raw,
|
||||
encoding: 'utf8',
|
||||
env: process.env,
|
||||
env: {
|
||||
...process.env,
|
||||
ECC_HOOK_INPUT_TRUNCATED: truncated ? '1' : '0',
|
||||
ECC_HOOK_INPUT_MAX_BYTES: String(MAX_STDIN)
|
||||
},
|
||||
cwd: process.cwd(),
|
||||
timeout: 30000
|
||||
});
|
||||
|
||||
if (result.stdout) process.stdout.write(result.stdout);
|
||||
writeLegacySpawnOutput(raw, result);
|
||||
if (result.stderr) process.stderr.write(result.stderr);
|
||||
|
||||
const code = Number.isInteger(result.status) ? result.status : 0;
|
||||
process.exit(code);
|
||||
if (result.error || result.signal || result.status === null) {
|
||||
const failureDetail = result.error
|
||||
? result.error.message
|
||||
: result.signal
|
||||
? `terminated by signal ${result.signal}`
|
||||
: 'missing exit status';
|
||||
writeStderr(`[Hook] legacy hook execution failed for ${hookId}: ${failureDetail}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
process.exit(Number.isInteger(result.status) ? result.status : 0);
|
||||
}
|
||||
|
||||
main().catch(err => {
|
||||
|
||||
@@ -11,28 +11,59 @@
|
||||
|
||||
const {
|
||||
getSessionsDir,
|
||||
getSessionSearchDirs,
|
||||
getLearnedSkillsDir,
|
||||
findFiles,
|
||||
ensureDir,
|
||||
readFile,
|
||||
stripAnsi,
|
||||
log,
|
||||
output
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
const { getPackageManager, getSelectionPrompt } = require('../lib/package-manager');
|
||||
const { listAliases } = require('../lib/session-aliases');
|
||||
const { detectProjectType } = require('../lib/project-detect');
|
||||
const path = require('path');
|
||||
|
||||
function dedupeRecentSessions(searchDirs) {
|
||||
const recentSessionsByName = new Map();
|
||||
|
||||
for (const [dirIndex, dir] of searchDirs.entries()) {
|
||||
const matches = findFiles(dir, '*-session.tmp', { maxAge: 7 });
|
||||
|
||||
for (const match of matches) {
|
||||
const basename = path.basename(match.path);
|
||||
const current = {
|
||||
...match,
|
||||
basename,
|
||||
dirIndex,
|
||||
};
|
||||
const existing = recentSessionsByName.get(basename);
|
||||
|
||||
if (
|
||||
!existing
|
||||
|| current.mtime > existing.mtime
|
||||
|| (current.mtime === existing.mtime && current.dirIndex < existing.dirIndex)
|
||||
) {
|
||||
recentSessionsByName.set(basename, current);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return Array.from(recentSessionsByName.values())
|
||||
.sort((left, right) => right.mtime - left.mtime || left.dirIndex - right.dirIndex);
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const sessionsDir = getSessionsDir();
|
||||
const learnedDir = getLearnedSkillsDir();
|
||||
const additionalContextParts = [];
|
||||
|
||||
// Ensure directories exist
|
||||
ensureDir(sessionsDir);
|
||||
ensureDir(learnedDir);
|
||||
|
||||
// Check for recent session files (last 7 days)
|
||||
const recentSessions = findFiles(sessionsDir, '*-session.tmp', { maxAge: 7 });
|
||||
const recentSessions = dedupeRecentSessions(getSessionSearchDirs());
|
||||
|
||||
if (recentSessions.length > 0) {
|
||||
const latest = recentSessions[0];
|
||||
@@ -43,7 +74,7 @@ async function main() {
|
||||
const content = stripAnsi(readFile(latest.path));
|
||||
if (content && !content.includes('[Session context goes here]')) {
|
||||
// Only inject if the session has actual content (not the blank template)
|
||||
output(`Previous session summary:\n${content}`);
|
||||
additionalContextParts.push(`Previous session summary:\n${content}`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,15 +115,49 @@ async function main() {
|
||||
parts.push(`frameworks: ${projectInfo.frameworks.join(', ')}`);
|
||||
}
|
||||
log(`[SessionStart] Project detected — ${parts.join('; ')}`);
|
||||
output(`Project type: ${JSON.stringify(projectInfo)}`);
|
||||
additionalContextParts.push(`Project type: ${JSON.stringify(projectInfo)}`);
|
||||
} else {
|
||||
log('[SessionStart] No specific project type detected');
|
||||
}
|
||||
|
||||
process.exit(0);
|
||||
await writeSessionStartPayload(additionalContextParts.join('\n\n'));
|
||||
}
|
||||
|
||||
function writeSessionStartPayload(additionalContext) {
|
||||
return new Promise((resolve, reject) => {
|
||||
let settled = false;
|
||||
const payload = JSON.stringify({
|
||||
hookSpecificOutput: {
|
||||
hookEventName: 'SessionStart',
|
||||
additionalContext
|
||||
}
|
||||
});
|
||||
|
||||
const handleError = (err) => {
|
||||
if (settled) return;
|
||||
settled = true;
|
||||
if (err) {
|
||||
log(`[SessionStart] stdout write error: ${err.message}`);
|
||||
}
|
||||
reject(err || new Error('stdout stream error'));
|
||||
};
|
||||
|
||||
process.stdout.once('error', handleError);
|
||||
process.stdout.write(payload, (err) => {
|
||||
process.stdout.removeListener('error', handleError);
|
||||
if (settled) return;
|
||||
settled = true;
|
||||
if (err) {
|
||||
log(`[SessionStart] stdout write error: ${err.message}`);
|
||||
reject(err);
|
||||
return;
|
||||
}
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
main().catch(err => {
|
||||
console.error('[SessionStart] Error:', err.message);
|
||||
process.exit(0); // Don't block on errors
|
||||
process.exitCode = 0; // Don't block on errors
|
||||
});
|
||||
|
||||
24
scripts/install-apply.js
Normal file → Executable file
24
scripts/install-apply.js
Normal file → Executable file
@@ -6,6 +6,7 @@
|
||||
* target-specific mutation logic into testable Node code.
|
||||
*/
|
||||
|
||||
const os = require('os');
|
||||
const {
|
||||
SUPPORTED_INSTALL_TARGETS,
|
||||
listLegacyCompatibilityLanguages,
|
||||
@@ -16,10 +17,10 @@ const {
|
||||
parseInstallArgs,
|
||||
} = require('./lib/install/request');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
function getHelpText() {
|
||||
const languages = listLegacyCompatibilityLanguages();
|
||||
|
||||
console.log(`
|
||||
return `
|
||||
Usage: install.sh [--target <${LEGACY_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] <language> [<language> ...]
|
||||
install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --profile <name> [--with <component>]... [--without <component>]...
|
||||
install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --modules <id,id,...> [--with <component>]... [--without <component>]...
|
||||
@@ -43,8 +44,11 @@ Options:
|
||||
|
||||
Available languages:
|
||||
${languages.map(language => ` - ${language}`).join('\n')}
|
||||
`);
|
||||
`;
|
||||
}
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
console.log(getHelpText());
|
||||
process.exit(exitCode);
|
||||
}
|
||||
|
||||
@@ -100,19 +104,25 @@ function main() {
|
||||
showHelp(0);
|
||||
}
|
||||
|
||||
const { loadInstallConfig } = require('./lib/install/config');
|
||||
const {
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
} = require('./lib/install/config');
|
||||
const { applyInstallPlan } = require('./lib/install-executor');
|
||||
const { createInstallPlanFromRequest } = require('./lib/install/runtime');
|
||||
const defaultConfigPath = options.configPath || options.languages.length > 0
|
||||
? null
|
||||
: findDefaultInstallConfigPath({ cwd: process.cwd() });
|
||||
const config = options.configPath
|
||||
? loadInstallConfig(options.configPath, { cwd: process.cwd() })
|
||||
: null;
|
||||
: (defaultConfigPath ? loadInstallConfig(defaultConfigPath, { cwd: process.cwd() }) : null);
|
||||
const request = normalizeInstallRequest({
|
||||
...options,
|
||||
config,
|
||||
});
|
||||
const plan = createInstallPlanFromRequest(request, {
|
||||
projectRoot: process.cwd(),
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
claudeRulesDir: process.env.CLAUDE_RULES_DIR || null,
|
||||
});
|
||||
|
||||
@@ -132,7 +142,7 @@ function main() {
|
||||
printHumanPlan(result, false);
|
||||
}
|
||||
} catch (error) {
|
||||
console.error(`Error: ${error.message}`);
|
||||
process.stderr.write(`Error: ${error.message}${getHelpText()}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,10 @@ const {
|
||||
listInstallProfiles,
|
||||
resolveInstallPlan,
|
||||
} = require('./lib/install-manifests');
|
||||
const { loadInstallConfig } = require('./lib/install/config');
|
||||
const {
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
} = require('./lib/install/config');
|
||||
const { normalizeInstallRequest } = require('./lib/install/request');
|
||||
|
||||
function showHelp() {
|
||||
@@ -186,7 +189,7 @@ function main() {
|
||||
try {
|
||||
const options = parseArgs(process.argv);
|
||||
|
||||
if (options.help || process.argv.length <= 2) {
|
||||
if (options.help) {
|
||||
showHelp();
|
||||
process.exit(0);
|
||||
}
|
||||
@@ -224,9 +227,18 @@ function main() {
|
||||
return;
|
||||
}
|
||||
|
||||
const defaultConfigPath = options.configPath
|
||||
? null
|
||||
: findDefaultInstallConfigPath({ cwd: process.cwd() });
|
||||
const config = options.configPath
|
||||
? loadInstallConfig(options.configPath, { cwd: process.cwd() })
|
||||
: null;
|
||||
: (defaultConfigPath ? loadInstallConfig(defaultConfigPath, { cwd: process.cwd() }) : null);
|
||||
|
||||
if (process.argv.length <= 2 && !config) {
|
||||
showHelp();
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const request = normalizeInstallRequest({
|
||||
...options,
|
||||
languages: [],
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
@@ -442,7 +443,7 @@ function planAntigravityLegacyInstall(context) {
|
||||
function createLegacyInstallPlan(options = {}) {
|
||||
const sourceRoot = options.sourceRoot || getSourceRoot();
|
||||
const projectRoot = options.projectRoot || process.cwd();
|
||||
const homeDir = options.homeDir || process.env.HOME;
|
||||
const homeDir = options.homeDir || process.env.HOME || os.homedir();
|
||||
const target = options.target || 'claude';
|
||||
|
||||
validateLegacyTarget(target);
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const { resolveInstallPlan, loadInstallManifests } = require('./install-manifests');
|
||||
@@ -696,7 +697,7 @@ function buildDiscoveryRecord(adapter, context) {
|
||||
|
||||
function discoverInstalledStates(options = {}) {
|
||||
const context = {
|
||||
homeDir: options.homeDir || process.env.HOME,
|
||||
homeDir: options.homeDir || process.env.HOME || os.homedir(),
|
||||
projectRoot: options.projectRoot || process.cwd(),
|
||||
};
|
||||
const targets = normalizeTargets(options.targets);
|
||||
@@ -904,7 +905,7 @@ function buildDoctorReport(options = {}) {
|
||||
}).filter(record => record.exists);
|
||||
const context = {
|
||||
repoRoot,
|
||||
homeDir: options.homeDir || process.env.HOME,
|
||||
homeDir: options.homeDir || process.env.HOME || os.homedir(),
|
||||
projectRoot: options.projectRoot || process.cwd(),
|
||||
manifestVersion: manifests.modulesVersion,
|
||||
packageVersion: readPackageVersion(repoRoot),
|
||||
@@ -988,7 +989,7 @@ function repairInstalledStates(options = {}) {
|
||||
const manifests = loadInstallManifests({ repoRoot });
|
||||
const context = {
|
||||
repoRoot,
|
||||
homeDir: options.homeDir || process.env.HOME,
|
||||
homeDir: options.homeDir || process.env.HOME || os.homedir(),
|
||||
projectRoot: options.projectRoot || process.cwd(),
|
||||
manifestVersion: manifests.modulesVersion,
|
||||
packageVersion: readPackageVersion(repoRoot),
|
||||
|
||||
@@ -216,6 +216,45 @@ function listInstallComponents(options = {}) {
|
||||
.filter(component => !target || component.targets.includes(target));
|
||||
}
|
||||
|
||||
function getInstallComponent(componentId, options = {}) {
|
||||
const manifests = loadInstallManifests(options);
|
||||
const normalizedComponentId = String(componentId || '').trim();
|
||||
|
||||
if (!normalizedComponentId) {
|
||||
throw new Error('An install component ID is required');
|
||||
}
|
||||
|
||||
const component = manifests.componentsById.get(normalizedComponentId);
|
||||
if (!component) {
|
||||
throw new Error(`Unknown install component: ${normalizedComponentId}`);
|
||||
}
|
||||
|
||||
const moduleIds = dedupeStrings(component.modules);
|
||||
const modules = moduleIds
|
||||
.map(moduleId => manifests.modulesById.get(moduleId))
|
||||
.filter(Boolean)
|
||||
.map(module => ({
|
||||
id: module.id,
|
||||
kind: module.kind,
|
||||
description: module.description,
|
||||
targets: module.targets,
|
||||
defaultInstall: module.defaultInstall,
|
||||
cost: module.cost,
|
||||
stability: module.stability,
|
||||
dependencies: dedupeStrings(module.dependencies),
|
||||
}));
|
||||
|
||||
return {
|
||||
id: component.id,
|
||||
family: component.family,
|
||||
description: component.description,
|
||||
moduleIds,
|
||||
moduleCount: moduleIds.length,
|
||||
targets: intersectTargets(modules),
|
||||
modules,
|
||||
};
|
||||
}
|
||||
|
||||
function expandComponentIdsToModuleIds(componentIds, manifests) {
|
||||
const expandedModuleIds = [];
|
||||
|
||||
@@ -438,6 +477,7 @@ module.exports = {
|
||||
SUPPORTED_INSTALL_TARGETS,
|
||||
getManifestPaths,
|
||||
loadInstallManifests,
|
||||
getInstallComponent,
|
||||
listInstallComponents,
|
||||
listLegacyCompatibilityLanguages,
|
||||
listInstallModules,
|
||||
|
||||
@@ -1,15 +1,109 @@
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const { writeInstallState } = require('../install-state');
|
||||
|
||||
function readJsonObject(filePath, label) {
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to parse ${label} at ${filePath}: ${error.message}`);
|
||||
}
|
||||
|
||||
if (!parsed || typeof parsed !== 'object' || Array.isArray(parsed)) {
|
||||
throw new Error(`Invalid ${label} at ${filePath}: expected a JSON object`);
|
||||
}
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function mergeHookEntries(existingEntries, incomingEntries) {
|
||||
const mergedEntries = [];
|
||||
const seenEntries = new Set();
|
||||
|
||||
for (const entry of [...existingEntries, ...incomingEntries]) {
|
||||
const entryKey = JSON.stringify(entry);
|
||||
if (seenEntries.has(entryKey)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
seenEntries.add(entryKey);
|
||||
mergedEntries.push(entry);
|
||||
}
|
||||
|
||||
return mergedEntries;
|
||||
}
|
||||
|
||||
function findHooksSourcePath(plan, hooksDestinationPath) {
|
||||
const operation = plan.operations.find(item => item.destinationPath === hooksDestinationPath);
|
||||
return operation ? operation.sourcePath : null;
|
||||
}
|
||||
|
||||
function buildMergedSettings(plan) {
|
||||
if (!plan.adapter || plan.adapter.target !== 'claude') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hooksDestinationPath = path.join(plan.targetRoot, 'hooks', 'hooks.json');
|
||||
const hooksSourcePath = findHooksSourcePath(plan, hooksDestinationPath) || hooksDestinationPath;
|
||||
if (!fs.existsSync(hooksSourcePath)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hooksConfig = readJsonObject(hooksSourcePath, 'hooks config');
|
||||
const incomingHooks = hooksConfig.hooks;
|
||||
if (!incomingHooks || typeof incomingHooks !== 'object' || Array.isArray(incomingHooks)) {
|
||||
throw new Error(`Invalid hooks config at ${hooksSourcePath}: expected "hooks" to be a JSON object`);
|
||||
}
|
||||
|
||||
const settingsPath = path.join(plan.targetRoot, 'settings.json');
|
||||
let settings = {};
|
||||
if (fs.existsSync(settingsPath)) {
|
||||
settings = readJsonObject(settingsPath, 'existing settings');
|
||||
}
|
||||
|
||||
const existingHooks = settings.hooks && typeof settings.hooks === 'object' && !Array.isArray(settings.hooks)
|
||||
? settings.hooks
|
||||
: {};
|
||||
const mergedHooks = { ...existingHooks };
|
||||
|
||||
for (const [eventName, incomingEntries] of Object.entries(incomingHooks)) {
|
||||
const currentEntries = Array.isArray(existingHooks[eventName]) ? existingHooks[eventName] : [];
|
||||
const nextEntries = Array.isArray(incomingEntries) ? incomingEntries : [];
|
||||
mergedHooks[eventName] = mergeHookEntries(currentEntries, nextEntries);
|
||||
}
|
||||
|
||||
const mergedSettings = {
|
||||
...settings,
|
||||
hooks: mergedHooks,
|
||||
};
|
||||
|
||||
return {
|
||||
settingsPath,
|
||||
mergedSettings,
|
||||
};
|
||||
}
|
||||
|
||||
function applyInstallPlan(plan) {
|
||||
const mergedSettingsPlan = buildMergedSettings(plan);
|
||||
|
||||
for (const operation of plan.operations) {
|
||||
fs.mkdirSync(require('path').dirname(operation.destinationPath), { recursive: true });
|
||||
fs.mkdirSync(path.dirname(operation.destinationPath), { recursive: true });
|
||||
fs.copyFileSync(operation.sourcePath, operation.destinationPath);
|
||||
}
|
||||
|
||||
if (mergedSettingsPlan) {
|
||||
fs.mkdirSync(path.dirname(mergedSettingsPlan.settingsPath), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
mergedSettingsPlan.settingsPath,
|
||||
JSON.stringify(mergedSettingsPlan.mergedSettings, null, 2) + '\n',
|
||||
'utf8'
|
||||
);
|
||||
}
|
||||
|
||||
writeInstallState(plan.installStatePath, plan.statePreview);
|
||||
|
||||
return {
|
||||
|
||||
@@ -47,6 +47,12 @@ function resolveInstallConfigPath(configPath, options = {}) {
|
||||
: path.normalize(path.join(cwd, configPath));
|
||||
}
|
||||
|
||||
function findDefaultInstallConfigPath(options = {}) {
|
||||
const cwd = options.cwd || process.cwd();
|
||||
const candidatePath = path.join(cwd, DEFAULT_INSTALL_CONFIG);
|
||||
return fs.existsSync(candidatePath) ? candidatePath : null;
|
||||
}
|
||||
|
||||
function loadInstallConfig(configPath, options = {}) {
|
||||
const resolvedPath = resolveInstallConfigPath(configPath, options);
|
||||
|
||||
@@ -77,6 +83,7 @@ function loadInstallConfig(configPath, options = {}) {
|
||||
|
||||
module.exports = {
|
||||
DEFAULT_INSTALL_CONFIG,
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
resolveInstallConfigPath,
|
||||
};
|
||||
|
||||
@@ -10,8 +10,9 @@ const os = require('os');
|
||||
* Tries, in order:
|
||||
* 1. CLAUDE_PLUGIN_ROOT env var (set by Claude Code for hooks, or by user)
|
||||
* 2. Standard install location (~/.claude/) — when scripts exist there
|
||||
* 3. Plugin cache auto-detection — scans ~/.claude/plugins/cache/everything-claude-code/
|
||||
* 4. Fallback to ~/.claude/ (original behaviour)
|
||||
* 3. Exact legacy plugin roots under ~/.claude/plugins/
|
||||
* 4. Plugin cache auto-detection — scans ~/.claude/plugins/cache/everything-claude-code/
|
||||
* 5. Fallback to ~/.claude/ (original behaviour)
|
||||
*
|
||||
* @param {object} [options]
|
||||
* @param {string} [options.homeDir] Override home directory (for testing)
|
||||
@@ -38,6 +39,20 @@ function resolveEccRoot(options = {}) {
|
||||
return claudeDir;
|
||||
}
|
||||
|
||||
// Exact legacy plugin install locations. These preserve backwards
|
||||
// compatibility without scanning arbitrary plugin trees.
|
||||
const legacyPluginRoots = [
|
||||
path.join(claudeDir, 'plugins', 'everything-claude-code'),
|
||||
path.join(claudeDir, 'plugins', 'everything-claude-code@everything-claude-code'),
|
||||
path.join(claudeDir, 'plugins', 'marketplace', 'everything-claude-code')
|
||||
];
|
||||
|
||||
for (const candidate of legacyPluginRoots) {
|
||||
if (fs.existsSync(path.join(candidate, probe))) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
// Plugin cache — Claude Code stores marketplace plugins under
|
||||
// ~/.claude/plugins/cache/<plugin-name>/<org>/<version>/
|
||||
try {
|
||||
@@ -81,7 +96,7 @@ function resolveEccRoot(options = {}) {
|
||||
* const _r = <paste INLINE_RESOLVE>;
|
||||
* const sm = require(_r + '/scripts/lib/session-manager');
|
||||
*/
|
||||
const INLINE_RESOLVE = `(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b))for(var v of f.readdirSync(p.join(b,o))){var c=p.join(b,o,v);if(f.existsSync(p.join(c,q)))return c}}catch(x){}return d})()`;
|
||||
const INLINE_RESOLVE = `(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})()`;
|
||||
|
||||
module.exports = {
|
||||
resolveEccRoot,
|
||||
|
||||
3
scripts/lib/session-manager.d.ts
vendored
3
scripts/lib/session-manager.d.ts
vendored
@@ -1,6 +1,7 @@
|
||||
/**
|
||||
* Session Manager Library for Claude Code.
|
||||
* Provides CRUD operations for session files stored as markdown in ~/.claude/sessions/.
|
||||
* Provides CRUD operations for session files stored as markdown in
|
||||
* ~/.claude/session-data/ with legacy read compatibility for ~/.claude/sessions/.
|
||||
*/
|
||||
|
||||
/** Parsed metadata from a session filename */
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
* Session Manager Library for Claude Code
|
||||
* Provides core session CRUD operations for listing, loading, and managing sessions
|
||||
*
|
||||
* Sessions are stored as markdown files in ~/.claude/sessions/ with format:
|
||||
* Sessions are stored as markdown files in ~/.claude/session-data/ with
|
||||
* legacy read compatibility for ~/.claude/sessions/:
|
||||
* - YYYY-MM-DD-session.tmp (old format)
|
||||
* - YYYY-MM-DD-<short-id>-session.tmp (new format)
|
||||
*/
|
||||
@@ -12,6 +13,7 @@ const path = require('path');
|
||||
|
||||
const {
|
||||
getSessionsDir,
|
||||
getSessionSearchDirs,
|
||||
readFile,
|
||||
log
|
||||
} = require('./utils');
|
||||
@@ -30,6 +32,7 @@ const SESSION_FILENAME_REGEX = /^(\d{4}-\d{2}-\d{2})(?:-([a-zA-Z0-9_][a-zA-Z0-9_
|
||||
* @returns {object|null} Parsed metadata or null if invalid
|
||||
*/
|
||||
function parseSessionFilename(filename) {
|
||||
if (!filename || typeof filename !== 'string') return null;
|
||||
const match = filename.match(SESSION_FILENAME_REGEX);
|
||||
if (!match) return null;
|
||||
|
||||
@@ -66,6 +69,145 @@ function getSessionPath(filename) {
|
||||
return path.join(getSessionsDir(), filename);
|
||||
}
|
||||
|
||||
function getSessionCandidates(options = {}) {
|
||||
const {
|
||||
date = null,
|
||||
search = null
|
||||
} = options;
|
||||
|
||||
const candidates = [];
|
||||
|
||||
for (const sessionsDir of getSessionSearchDirs()) {
|
||||
if (!fs.existsSync(sessionsDir)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let entries;
|
||||
try {
|
||||
entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
|
||||
} catch (error) {
|
||||
log(`[SessionManager] Error reading sessions directory ${sessionsDir}: ${error.message}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
|
||||
|
||||
const filename = entry.name;
|
||||
const metadata = parseSessionFilename(filename);
|
||||
|
||||
if (!metadata) continue;
|
||||
if (date && metadata.date !== date) continue;
|
||||
if (search && !metadata.shortId.includes(search)) continue;
|
||||
|
||||
const sessionPath = path.join(sessionsDir, filename);
|
||||
|
||||
let stats;
|
||||
try {
|
||||
stats = fs.statSync(sessionPath);
|
||||
} catch (error) {
|
||||
log(`[SessionManager] Error stating session ${sessionPath}: ${error.message}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
candidates.push({
|
||||
...metadata,
|
||||
sessionPath,
|
||||
hasContent: stats.size > 0,
|
||||
size: stats.size,
|
||||
modifiedTime: stats.mtime,
|
||||
createdTime: stats.birthtime || stats.ctime
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const deduped = [];
|
||||
const seenFilenames = new Set();
|
||||
|
||||
for (const session of candidates) {
|
||||
if (seenFilenames.has(session.filename)) {
|
||||
continue;
|
||||
}
|
||||
seenFilenames.add(session.filename);
|
||||
deduped.push(session);
|
||||
}
|
||||
|
||||
deduped.sort((a, b) => b.modifiedTime - a.modifiedTime);
|
||||
return deduped;
|
||||
}
|
||||
|
||||
function buildSessionRecord(sessionPath, metadata) {
|
||||
let stats;
|
||||
try {
|
||||
stats = fs.statSync(sessionPath);
|
||||
} catch (error) {
|
||||
log(`[SessionManager] Error stating session ${sessionPath}: ${error.message}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
...metadata,
|
||||
sessionPath,
|
||||
hasContent: stats.size > 0,
|
||||
size: stats.size,
|
||||
modifiedTime: stats.mtime,
|
||||
createdTime: stats.birthtime || stats.ctime
|
||||
};
|
||||
}
|
||||
|
||||
function sessionMatchesId(metadata, normalizedSessionId) {
|
||||
const filename = metadata.filename;
|
||||
const shortIdMatch = metadata.shortId !== 'no-id' && metadata.shortId.startsWith(normalizedSessionId);
|
||||
const filenameMatch = filename === normalizedSessionId || filename === `${normalizedSessionId}.tmp`;
|
||||
const noIdMatch = metadata.shortId === 'no-id' && filename === `${normalizedSessionId}-session.tmp`;
|
||||
|
||||
return shortIdMatch || filenameMatch || noIdMatch;
|
||||
}
|
||||
|
||||
function getMatchingSessionCandidates(normalizedSessionId) {
|
||||
const matches = [];
|
||||
const seenFilenames = new Set();
|
||||
|
||||
for (const sessionsDir of getSessionSearchDirs()) {
|
||||
if (!fs.existsSync(sessionsDir)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let entries;
|
||||
try {
|
||||
entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
|
||||
} catch (error) {
|
||||
log(`[SessionManager] Error reading sessions directory ${sessionsDir}: ${error.message}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
|
||||
|
||||
const metadata = parseSessionFilename(entry.name);
|
||||
if (!metadata || !sessionMatchesId(metadata, normalizedSessionId)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (seenFilenames.has(metadata.filename)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const sessionPath = path.join(sessionsDir, metadata.filename);
|
||||
const sessionRecord = buildSessionRecord(sessionPath, metadata);
|
||||
if (!sessionRecord) {
|
||||
continue;
|
||||
}
|
||||
|
||||
seenFilenames.add(metadata.filename);
|
||||
matches.push(sessionRecord);
|
||||
}
|
||||
}
|
||||
|
||||
matches.sort((a, b) => b.modifiedTime - a.modifiedTime);
|
||||
return matches;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read and parse session markdown content
|
||||
* @param {string} sessionPath - Full path to session file
|
||||
@@ -228,58 +370,12 @@ function getAllSessions(options = {}) {
|
||||
const limitNum = Number(rawLimit);
|
||||
const limit = Number.isNaN(limitNum) ? 50 : Math.max(1, Math.floor(limitNum));
|
||||
|
||||
const sessionsDir = getSessionsDir();
|
||||
const sessions = getSessionCandidates({ date, search });
|
||||
|
||||
if (!fs.existsSync(sessionsDir)) {
|
||||
if (sessions.length === 0) {
|
||||
return { sessions: [], total: 0, offset, limit, hasMore: false };
|
||||
}
|
||||
|
||||
const entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
|
||||
const sessions = [];
|
||||
|
||||
for (const entry of entries) {
|
||||
// Skip non-files (only process .tmp files)
|
||||
if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
|
||||
|
||||
const filename = entry.name;
|
||||
const metadata = parseSessionFilename(filename);
|
||||
|
||||
if (!metadata) continue;
|
||||
|
||||
// Apply date filter
|
||||
if (date && metadata.date !== date) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Apply search filter (search in short ID)
|
||||
if (search && !metadata.shortId.includes(search)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const sessionPath = path.join(sessionsDir, filename);
|
||||
|
||||
// Get file stats (wrapped in try-catch to handle TOCTOU race where
|
||||
// file is deleted between readdirSync and statSync)
|
||||
let stats;
|
||||
try {
|
||||
stats = fs.statSync(sessionPath);
|
||||
} catch {
|
||||
continue; // File was deleted between readdir and stat
|
||||
}
|
||||
|
||||
sessions.push({
|
||||
...metadata,
|
||||
sessionPath,
|
||||
hasContent: stats.size > 0,
|
||||
size: stats.size,
|
||||
modifiedTime: stats.mtime,
|
||||
createdTime: stats.birthtime || stats.ctime
|
||||
});
|
||||
}
|
||||
|
||||
// Sort by modified time (newest first)
|
||||
sessions.sort((a, b) => b.modifiedTime - a.modifiedTime);
|
||||
|
||||
// Apply pagination
|
||||
const paginatedSessions = sessions.slice(offset, offset + limit);
|
||||
|
||||
@@ -299,55 +395,28 @@ function getAllSessions(options = {}) {
|
||||
* @returns {object|null} Session object or null if not found
|
||||
*/
|
||||
function getSessionById(sessionId, includeContent = false) {
|
||||
const sessionsDir = getSessionsDir();
|
||||
|
||||
if (!fs.existsSync(sessionsDir)) {
|
||||
if (typeof sessionId !== 'string') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const entries = fs.readdirSync(sessionsDir, { withFileTypes: true });
|
||||
const normalizedSessionId = sessionId.trim();
|
||||
if (!normalizedSessionId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
if (!entry.isFile() || !entry.name.endsWith('.tmp')) continue;
|
||||
const sessions = getMatchingSessionCandidates(normalizedSessionId);
|
||||
|
||||
const filename = entry.name;
|
||||
const metadata = parseSessionFilename(filename);
|
||||
|
||||
if (!metadata) continue;
|
||||
|
||||
// Check if session ID matches (short ID or full filename without .tmp)
|
||||
const shortIdMatch = sessionId.length > 0 && metadata.shortId !== 'no-id' && metadata.shortId.startsWith(sessionId);
|
||||
const filenameMatch = filename === sessionId || filename === `${sessionId}.tmp`;
|
||||
const noIdMatch = metadata.shortId === 'no-id' && filename === `${sessionId}-session.tmp`;
|
||||
|
||||
if (!shortIdMatch && !filenameMatch && !noIdMatch) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const sessionPath = path.join(sessionsDir, filename);
|
||||
let stats;
|
||||
try {
|
||||
stats = fs.statSync(sessionPath);
|
||||
} catch {
|
||||
return null; // File was deleted between readdir and stat
|
||||
}
|
||||
|
||||
const session = {
|
||||
...metadata,
|
||||
sessionPath,
|
||||
size: stats.size,
|
||||
modifiedTime: stats.mtime,
|
||||
createdTime: stats.birthtime || stats.ctime
|
||||
};
|
||||
for (const session of sessions) {
|
||||
const sessionRecord = { ...session };
|
||||
|
||||
if (includeContent) {
|
||||
session.content = getSessionContent(sessionPath);
|
||||
session.metadata = parseSessionMetadata(session.content);
|
||||
sessionRecord.content = getSessionContent(sessionRecord.sessionPath);
|
||||
sessionRecord.metadata = parseSessionMetadata(sessionRecord.content);
|
||||
// Pass pre-read content to avoid a redundant disk read
|
||||
session.stats = getSessionStats(session.content || '');
|
||||
sessionRecord.stats = getSessionStats(sessionRecord.content || '');
|
||||
}
|
||||
|
||||
return session;
|
||||
return sessionRecord;
|
||||
}
|
||||
|
||||
return null;
|
||||
|
||||
17
scripts/lib/utils.d.ts
vendored
17
scripts/lib/utils.d.ts
vendored
@@ -18,9 +18,15 @@ export function getHomeDir(): string;
|
||||
/** Get the Claude config directory (~/.claude) */
|
||||
export function getClaudeDir(): string;
|
||||
|
||||
/** Get the sessions directory (~/.claude/sessions) */
|
||||
/** Get the canonical ECC sessions directory (~/.claude/session-data) */
|
||||
export function getSessionsDir(): string;
|
||||
|
||||
/** Get the legacy Claude-managed sessions directory (~/.claude/sessions) */
|
||||
export function getLegacySessionsDir(): string;
|
||||
|
||||
/** Get session directories to search, with canonical storage first and legacy fallback second */
|
||||
export function getSessionSearchDirs(): string[];
|
||||
|
||||
/** Get the learned skills directory (~/.claude/skills/learned) */
|
||||
export function getLearnedSkillsDir(): string;
|
||||
|
||||
@@ -47,9 +53,16 @@ export function getDateTimeString(): string;
|
||||
|
||||
// --- Session/Project ---
|
||||
|
||||
/**
|
||||
* Sanitize a string for use as a session filename segment.
|
||||
* Replaces invalid characters, strips leading dots, and returns null when
|
||||
* nothing meaningful remains. Non-ASCII names are hashed for stability.
|
||||
*/
|
||||
export function sanitizeSessionId(raw: string | null | undefined): string | null;
|
||||
|
||||
/**
|
||||
* Get short session ID from CLAUDE_SESSION_ID environment variable.
|
||||
* Returns last 8 characters, falls back to project name then the provided fallback.
|
||||
* Returns last 8 characters, falls back to a sanitized project name then the provided fallback.
|
||||
*/
|
||||
export function getSessionIdShort(fallback?: string): string;
|
||||
|
||||
|
||||
@@ -6,12 +6,20 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const os = require('os');
|
||||
const crypto = require('crypto');
|
||||
const { execSync, spawnSync } = require('child_process');
|
||||
|
||||
// Platform detection
|
||||
const isWindows = process.platform === 'win32';
|
||||
const isMacOS = process.platform === 'darwin';
|
||||
const isLinux = process.platform === 'linux';
|
||||
const SESSION_DATA_DIR_NAME = 'session-data';
|
||||
const LEGACY_SESSIONS_DIR_NAME = 'sessions';
|
||||
const WINDOWS_RESERVED_SESSION_IDS = new Set([
|
||||
'CON', 'PRN', 'AUX', 'NUL',
|
||||
'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
|
||||
'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
|
||||
]);
|
||||
|
||||
/**
|
||||
* Get the user's home directory (cross-platform)
|
||||
@@ -31,7 +39,21 @@ function getClaudeDir() {
|
||||
* Get the sessions directory
|
||||
*/
|
||||
function getSessionsDir() {
|
||||
return path.join(getClaudeDir(), 'sessions');
|
||||
return path.join(getClaudeDir(), SESSION_DATA_DIR_NAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the legacy sessions directory used by older ECC installs
|
||||
*/
|
||||
function getLegacySessionsDir() {
|
||||
return path.join(getClaudeDir(), LEGACY_SESSIONS_DIR_NAME);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all session directories to search, in canonical-first order
|
||||
*/
|
||||
function getSessionSearchDirs() {
|
||||
return Array.from(new Set([getSessionsDir(), getLegacySessionsDir()]));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -107,16 +129,52 @@ function getProjectName() {
|
||||
return path.basename(process.cwd()) || null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sanitize a string for use as a session filename segment.
|
||||
* Replaces invalid characters with hyphens, collapses runs, strips
|
||||
* leading/trailing hyphens, and removes leading dots so hidden-dir names
|
||||
* like ".claude" map cleanly to "claude".
|
||||
*
|
||||
* Pure non-ASCII inputs get a stable 8-char hash so distinct names do not
|
||||
* collapse to the same fallback session id. Mixed-script inputs retain their
|
||||
* ASCII part and gain a short hash suffix for disambiguation.
|
||||
*/
|
||||
function sanitizeSessionId(raw) {
|
||||
if (!raw || typeof raw !== 'string') return null;
|
||||
|
||||
const hasNonAscii = Array.from(raw).some(char => char.codePointAt(0) > 0x7f);
|
||||
const normalized = raw.replace(/^\.+/, '');
|
||||
const sanitized = normalized
|
||||
.replace(/[^a-zA-Z0-9_-]/g, '-')
|
||||
.replace(/-{2,}/g, '-')
|
||||
.replace(/^-+|-+$/g, '');
|
||||
|
||||
if (sanitized.length > 0) {
|
||||
const suffix = crypto.createHash('sha256').update(normalized).digest('hex').slice(0, 6);
|
||||
if (WINDOWS_RESERVED_SESSION_IDS.has(sanitized.toUpperCase())) {
|
||||
return `${sanitized}-${suffix}`;
|
||||
}
|
||||
if (!hasNonAscii) return sanitized;
|
||||
return `${sanitized}-${suffix}`;
|
||||
}
|
||||
|
||||
const meaningful = normalized.replace(/[\s\p{P}]/gu, '');
|
||||
if (meaningful.length === 0) return null;
|
||||
|
||||
return crypto.createHash('sha256').update(normalized).digest('hex').slice(0, 8);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get short session ID from CLAUDE_SESSION_ID environment variable
|
||||
* Returns last 8 characters, falls back to project name then 'default'
|
||||
* Returns last 8 characters, falls back to a sanitized project name then 'default'.
|
||||
*/
|
||||
function getSessionIdShort(fallback = 'default') {
|
||||
const sessionId = process.env.CLAUDE_SESSION_ID;
|
||||
if (sessionId && sessionId.length > 0) {
|
||||
return sessionId.slice(-8);
|
||||
const sanitized = sanitizeSessionId(sessionId.slice(-8));
|
||||
if (sanitized) return sanitized;
|
||||
}
|
||||
return getProjectName() || fallback;
|
||||
return sanitizeSessionId(getProjectName()) || sanitizeSessionId(fallback) || 'default';
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -525,6 +583,8 @@ module.exports = {
|
||||
getHomeDir,
|
||||
getClaudeDir,
|
||||
getSessionsDir,
|
||||
getLegacySessionsDir,
|
||||
getSessionSearchDirs,
|
||||
getLearnedSkillsDir,
|
||||
getTempDir,
|
||||
ensureDir,
|
||||
@@ -535,6 +595,7 @@ module.exports = {
|
||||
getDateTimeString,
|
||||
|
||||
// Session/Project
|
||||
sanitizeSessionId,
|
||||
getSessionIdShort,
|
||||
getGitRepoName,
|
||||
getProjectName,
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { discoverInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -70,7 +71,7 @@ function main() {
|
||||
}
|
||||
|
||||
const records = discoverInstalledStates({
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
}).filter(record => record.exists);
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { repairInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -74,7 +75,7 @@ function main() {
|
||||
|
||||
const result = repairInstalledStates({
|
||||
repoRoot: require('path').join(__dirname, '..'),
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
dryRun: options.dryRun,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env node
|
||||
'use strict';
|
||||
|
||||
const os = require('os');
|
||||
const { createStateStore } = require('./lib/state-store');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
@@ -134,7 +135,7 @@ async function main() {
|
||||
|
||||
store = await createStateStore({
|
||||
dbPath: options.dbPath,
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
});
|
||||
|
||||
if (!options.sessionId) {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#!/usr/bin/env node
|
||||
'use strict';
|
||||
|
||||
const os = require('os');
|
||||
const { createStateStore } = require('./lib/state-store');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
@@ -139,7 +140,7 @@ async function main() {
|
||||
|
||||
store = await createStateStore({
|
||||
dbPath: options.dbPath,
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
});
|
||||
|
||||
const payload = {
|
||||
|
||||
20
scripts/sync-ecc-to-codex.sh
Normal file → Executable file
20
scripts/sync-ecc-to-codex.sh
Normal file → Executable file
@@ -43,9 +43,11 @@ log() { printf '[ecc-sync] %s\n' "$*"; }
|
||||
|
||||
run_or_echo() {
|
||||
if [[ "$MODE" == "dry-run" ]]; then
|
||||
printf '[dry-run] %s\n' "$*"
|
||||
printf '[dry-run]'
|
||||
printf ' %q' "$@"
|
||||
printf '\n'
|
||||
else
|
||||
eval "$@"
|
||||
"$@"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -149,10 +151,10 @@ log "Repo root: $REPO_ROOT"
|
||||
log "Codex home: $CODEX_HOME"
|
||||
|
||||
log "Creating backup folder: $BACKUP_DIR"
|
||||
run_or_echo "mkdir -p \"$BACKUP_DIR\""
|
||||
run_or_echo "cp \"$CONFIG_FILE\" \"$BACKUP_DIR/config.toml\""
|
||||
run_or_echo mkdir -p "$BACKUP_DIR"
|
||||
run_or_echo cp "$CONFIG_FILE" "$BACKUP_DIR/config.toml"
|
||||
if [[ -f "$AGENTS_FILE" ]]; then
|
||||
run_or_echo "cp \"$AGENTS_FILE\" \"$BACKUP_DIR/AGENTS.md\""
|
||||
run_or_echo cp "$AGENTS_FILE" "$BACKUP_DIR/AGENTS.md"
|
||||
fi
|
||||
|
||||
ECC_BEGIN_MARKER="<!-- BEGIN ECC -->"
|
||||
@@ -234,19 +236,19 @@ else
|
||||
fi
|
||||
|
||||
log "Syncing ECC Codex skills"
|
||||
run_or_echo "mkdir -p \"$SKILLS_DEST\""
|
||||
run_or_echo mkdir -p "$SKILLS_DEST"
|
||||
skills_count=0
|
||||
for skill_dir in "$SKILLS_SRC"/*; do
|
||||
[[ -d "$skill_dir" ]] || continue
|
||||
skill_name="$(basename "$skill_dir")"
|
||||
dest="$SKILLS_DEST/$skill_name"
|
||||
run_or_echo "rm -rf \"$dest\""
|
||||
run_or_echo "cp -R \"$skill_dir\" \"$dest\""
|
||||
run_or_echo rm -rf "$dest"
|
||||
run_or_echo cp -R "$skill_dir" "$dest"
|
||||
skills_count=$((skills_count + 1))
|
||||
done
|
||||
|
||||
log "Generating prompt files from ECC commands"
|
||||
run_or_echo "mkdir -p \"$PROMPTS_DEST\""
|
||||
run_or_echo mkdir -p "$PROMPTS_DEST"
|
||||
manifest="$PROMPTS_DEST/ecc-prompts-manifest.txt"
|
||||
if [[ "$MODE" == "dry-run" ]]; then
|
||||
printf '[dry-run] > %s\n' "$manifest"
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const os = require('os');
|
||||
const { uninstallInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
@@ -73,7 +74,7 @@ function main() {
|
||||
}
|
||||
|
||||
const result = uninstallInstalledStates({
|
||||
homeDir: process.env.HOME,
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
dryRun: options.dryRun,
|
||||
|
||||
93
skills/benchmark/SKILL.md
Normal file
93
skills/benchmark/SKILL.md
Normal file
@@ -0,0 +1,93 @@
|
||||
---
|
||||
name: benchmark
|
||||
description: Use this skill to measure performance baselines, detect regressions before/after PRs, and compare stack alternatives.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Benchmark — Performance Baseline & Regression Detection
|
||||
|
||||
## When to Use
|
||||
|
||||
- Before and after a PR to measure performance impact
|
||||
- Setting up performance baselines for a project
|
||||
- When users report "it feels slow"
|
||||
- Before a launch — ensure you meet performance targets
|
||||
- Comparing your stack against alternatives
|
||||
|
||||
## How It Works
|
||||
|
||||
### Mode 1: Page Performance
|
||||
|
||||
Measures real browser metrics via browser MCP:
|
||||
|
||||
```
|
||||
1. Navigate to each target URL
|
||||
2. Measure Core Web Vitals:
|
||||
- LCP (Largest Contentful Paint) — target < 2.5s
|
||||
- CLS (Cumulative Layout Shift) — target < 0.1
|
||||
- INP (Interaction to Next Paint) — target < 200ms
|
||||
- FCP (First Contentful Paint) — target < 1.8s
|
||||
- TTFB (Time to First Byte) — target < 800ms
|
||||
3. Measure resource sizes:
|
||||
- Total page weight (target < 1MB)
|
||||
- JS bundle size (target < 200KB gzipped)
|
||||
- CSS size
|
||||
- Image weight
|
||||
- Third-party script weight
|
||||
4. Count network requests
|
||||
5. Check for render-blocking resources
|
||||
```
|
||||
|
||||
### Mode 2: API Performance
|
||||
|
||||
Benchmarks API endpoints:
|
||||
|
||||
```
|
||||
1. Hit each endpoint 100 times
|
||||
2. Measure: p50, p95, p99 latency
|
||||
3. Track: response size, status codes
|
||||
4. Test under load: 10 concurrent requests
|
||||
5. Compare against SLA targets
|
||||
```
|
||||
|
||||
### Mode 3: Build Performance
|
||||
|
||||
Measures development feedback loop:
|
||||
|
||||
```
|
||||
1. Cold build time
|
||||
2. Hot reload time (HMR)
|
||||
3. Test suite duration
|
||||
4. TypeScript check time
|
||||
5. Lint time
|
||||
6. Docker build time
|
||||
```
|
||||
|
||||
### Mode 4: Before/After Comparison
|
||||
|
||||
Run before and after a change to measure impact:
|
||||
|
||||
```
|
||||
/benchmark baseline # saves current metrics
|
||||
# ... make changes ...
|
||||
/benchmark compare # compares against baseline
|
||||
```
|
||||
|
||||
Output:
|
||||
```
|
||||
| Metric | Before | After | Delta | Verdict |
|
||||
|--------|--------|-------|-------|---------|
|
||||
| LCP | 1.2s | 1.4s | +200ms | ⚠ WARN |
|
||||
| Bundle | 180KB | 175KB | -5KB | ✓ BETTER |
|
||||
| Build | 12s | 14s | +2s | ⚠ WARN |
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
Stores baselines in `.ecc/benchmarks/` as JSON. Git-tracked so the team shares baselines.
|
||||
|
||||
## Integration
|
||||
|
||||
- CI: run `/benchmark compare` on every PR
|
||||
- Pair with `/canary-watch` for post-deploy monitoring
|
||||
- Pair with `/browser-qa` for full pre-ship checklist
|
||||
87
skills/browser-qa/SKILL.md
Normal file
87
skills/browser-qa/SKILL.md
Normal file
@@ -0,0 +1,87 @@
|
||||
---
|
||||
name: browser-qa
|
||||
description: Use this skill to automate visual testing and UI interaction verification using browser automation after deploying features.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Browser QA — Automated Visual Testing & Interaction
|
||||
|
||||
## When to Use
|
||||
|
||||
- After deploying a feature to staging/preview
|
||||
- When you need to verify UI behavior across pages
|
||||
- Before shipping — confirm layouts, forms, interactions actually work
|
||||
- When reviewing PRs that touch frontend code
|
||||
- Accessibility audits and responsive testing
|
||||
|
||||
## How It Works
|
||||
|
||||
Uses the browser automation MCP (claude-in-chrome, Playwright, or Puppeteer) to interact with live pages like a real user.
|
||||
|
||||
### Phase 1: Smoke Test
|
||||
```
|
||||
1. Navigate to target URL
|
||||
2. Check for console errors (filter noise: analytics, third-party)
|
||||
3. Verify no 4xx/5xx in network requests
|
||||
4. Screenshot above-the-fold on desktop + mobile viewport
|
||||
5. Check Core Web Vitals: LCP < 2.5s, CLS < 0.1, INP < 200ms
|
||||
```
|
||||
|
||||
### Phase 2: Interaction Test
|
||||
```
|
||||
1. Click every nav link — verify no dead links
|
||||
2. Submit forms with valid data — verify success state
|
||||
3. Submit forms with invalid data — verify error state
|
||||
4. Test auth flow: login → protected page → logout
|
||||
5. Test critical user journeys (checkout, onboarding, search)
|
||||
```
|
||||
|
||||
### Phase 3: Visual Regression
|
||||
```
|
||||
1. Screenshot key pages at 3 breakpoints (375px, 768px, 1440px)
|
||||
2. Compare against baseline screenshots (if stored)
|
||||
3. Flag layout shifts > 5px, missing elements, overflow
|
||||
4. Check dark mode if applicable
|
||||
```
|
||||
|
||||
### Phase 4: Accessibility
|
||||
```
|
||||
1. Run axe-core or equivalent on each page
|
||||
2. Flag WCAG AA violations (contrast, labels, focus order)
|
||||
3. Verify keyboard navigation works end-to-end
|
||||
4. Check screen reader landmarks
|
||||
```
|
||||
|
||||
## Output Format
|
||||
|
||||
```markdown
|
||||
## QA Report — [URL] — [timestamp]
|
||||
|
||||
### Smoke Test
|
||||
- Console errors: 0 critical, 2 warnings (analytics noise)
|
||||
- Network: all 200/304, no failures
|
||||
- Core Web Vitals: LCP 1.2s ✓, CLS 0.02 ✓, INP 89ms ✓
|
||||
|
||||
### Interactions
|
||||
- [✓] Nav links: 12/12 working
|
||||
- [✗] Contact form: missing error state for invalid email
|
||||
- [✓] Auth flow: login/logout working
|
||||
|
||||
### Visual
|
||||
- [✗] Hero section overflows on 375px viewport
|
||||
- [✓] Dark mode: all pages consistent
|
||||
|
||||
### Accessibility
|
||||
- 2 AA violations: missing alt text on hero image, low contrast on footer links
|
||||
|
||||
### Verdict: SHIP WITH FIXES (2 issues, 0 blockers)
|
||||
```
|
||||
|
||||
## Integration
|
||||
|
||||
Works with any browser MCP:
|
||||
- `mcp__claude-in-chrome__*` tools (preferred — uses your actual Chrome)
|
||||
- Playwright via `mcp__browserbase__*`
|
||||
- Direct Puppeteer scripts
|
||||
|
||||
Pair with `/canary-watch` for post-deploy monitoring.
|
||||
99
skills/canary-watch/SKILL.md
Normal file
99
skills/canary-watch/SKILL.md
Normal file
@@ -0,0 +1,99 @@
|
||||
---
|
||||
name: canary-watch
|
||||
description: Use this skill to monitor a deployed URL for regressions after deploys, merges, or dependency upgrades.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Canary Watch — Post-Deploy Monitoring
|
||||
|
||||
## When to Use
|
||||
|
||||
- After deploying to production or staging
|
||||
- After merging a risky PR
|
||||
- When you want to verify a fix actually fixed it
|
||||
- Continuous monitoring during a launch window
|
||||
- After dependency upgrades
|
||||
|
||||
## How It Works
|
||||
|
||||
Monitors a deployed URL for regressions. Runs in a loop until stopped or until the watch window expires.
|
||||
|
||||
### What It Watches
|
||||
|
||||
```
|
||||
1. HTTP Status — is the page returning 200?
|
||||
2. Console Errors — new errors that weren't there before?
|
||||
3. Network Failures — failed API calls, 5xx responses?
|
||||
4. Performance — LCP/CLS/INP regression vs baseline?
|
||||
5. Content — did key elements disappear? (h1, nav, footer, CTA)
|
||||
6. API Health — are critical endpoints responding within SLA?
|
||||
```
|
||||
|
||||
### Watch Modes
|
||||
|
||||
**Quick check** (default): single pass, report results
|
||||
```
|
||||
/canary-watch https://myapp.com
|
||||
```
|
||||
|
||||
**Sustained watch**: check every N minutes for M hours
|
||||
```
|
||||
/canary-watch https://myapp.com --interval 5m --duration 2h
|
||||
```
|
||||
|
||||
**Diff mode**: compare staging vs production
|
||||
```
|
||||
/canary-watch --compare https://staging.myapp.com https://myapp.com
|
||||
```
|
||||
|
||||
### Alert Thresholds
|
||||
|
||||
```yaml
|
||||
critical: # immediate alert
|
||||
- HTTP status != 200
|
||||
- Console error count > 5 (new errors only)
|
||||
- LCP > 4s
|
||||
- API endpoint returns 5xx
|
||||
|
||||
warning: # flag in report
|
||||
- LCP increased > 500ms from baseline
|
||||
- CLS > 0.1
|
||||
- New console warnings
|
||||
- Response time > 2x baseline
|
||||
|
||||
info: # log only
|
||||
- Minor performance variance
|
||||
- New network requests (third-party scripts added?)
|
||||
```
|
||||
|
||||
### Notifications
|
||||
|
||||
When a critical threshold is crossed:
|
||||
- Desktop notification (macOS/Linux)
|
||||
- Optional: Slack/Discord webhook
|
||||
- Log to `~/.claude/canary-watch.log`
|
||||
|
||||
## Output
|
||||
|
||||
```markdown
|
||||
## Canary Report — myapp.com — 2026-03-23 03:15 PST
|
||||
|
||||
### Status: HEALTHY ✓
|
||||
|
||||
| Check | Result | Baseline | Delta |
|
||||
|-------|--------|----------|-------|
|
||||
| HTTP | 200 ✓ | 200 | — |
|
||||
| Console errors | 0 ✓ | 0 | — |
|
||||
| LCP | 1.8s ✓ | 1.6s | +200ms |
|
||||
| CLS | 0.01 ✓ | 0.01 | — |
|
||||
| API /health | 145ms ✓ | 120ms | +25ms |
|
||||
|
||||
### No regressions detected. Deploy is clean.
|
||||
```
|
||||
|
||||
## Integration
|
||||
|
||||
Pair with:
|
||||
- `/browser-qa` for pre-deploy verification
|
||||
- Hooks: add as a PostToolUse hook on `git push` to auto-check after deploys
|
||||
- CI: run in GitHub Actions after deploy step
|
||||
@@ -55,15 +55,25 @@ analyze_observations() {
|
||||
# Sample recent observations instead of loading the entire file (#521).
|
||||
# This prevents multi-MB payloads from being passed to the LLM.
|
||||
MAX_ANALYSIS_LINES="${ECC_OBSERVER_MAX_ANALYSIS_LINES:-500}"
|
||||
analysis_file="$(mktemp "${TMPDIR:-/tmp}/ecc-observer-analysis.XXXXXX.jsonl")"
|
||||
observer_tmp_dir="${PROJECT_DIR}/.observer-tmp"
|
||||
mkdir -p "$observer_tmp_dir"
|
||||
analysis_file="$(mktemp "${observer_tmp_dir}/ecc-observer-analysis.XXXXXX.jsonl")"
|
||||
tail -n "$MAX_ANALYSIS_LINES" "$OBSERVATIONS_FILE" > "$analysis_file"
|
||||
analysis_count=$(wc -l < "$analysis_file" 2>/dev/null || echo 0)
|
||||
echo "[$(date)] Using last $analysis_count of $obs_count observations for analysis" >> "$LOG_FILE"
|
||||
|
||||
prompt_file="$(mktemp "${TMPDIR:-/tmp}/ecc-observer-prompt.XXXXXX")"
|
||||
# Use relative path from PROJECT_DIR for cross-platform compatibility (#842).
|
||||
# On Windows (Git Bash/MSYS2), absolute paths from mktemp may use MSYS-style
|
||||
# prefixes (e.g. /c/Users/...) that the Claude subprocess cannot resolve.
|
||||
analysis_relpath=".observer-tmp/$(basename "$analysis_file")"
|
||||
|
||||
prompt_file="$(mktemp "${observer_tmp_dir}/ecc-observer-prompt.XXXXXX")"
|
||||
cat > "$prompt_file" <<PROMPT
|
||||
Read ${analysis_file} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
If you find 3+ occurrences of the same pattern, create an instinct file in ${INSTINCTS_DIR}/<id>.md.
|
||||
IMPORTANT: You are running in non-interactive --print mode. You MUST use the Write tool directly to create files. Do NOT ask for permission, do NOT ask for confirmation, do NOT output summaries instead of writing. Just read, analyze, and write.
|
||||
|
||||
Read ${analysis_relpath} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
If you find 3+ occurrences of the same pattern, you MUST write an instinct file directly to ${INSTINCTS_DIR}/<id>.md using the Write tool.
|
||||
Do NOT ask for permission to write files, do NOT describe what you would write, and do NOT stop at analysis when a qualifying pattern exists.
|
||||
|
||||
CRITICAL: Every instinct file MUST use this exact format:
|
||||
|
||||
@@ -92,6 +102,7 @@ Rules:
|
||||
- Be conservative, only clear patterns with 3+ observations
|
||||
- Use narrow, specific triggers
|
||||
- Never include actual code snippets, only describe patterns
|
||||
- When a qualifying pattern exists, write or update the instinct file in this run instead of asking for confirmation
|
||||
- If a similar instinct already exists in ${INSTINCTS_DIR}/, update it instead of creating a duplicate
|
||||
- The YAML frontmatter (between --- markers) with id field is MANDATORY
|
||||
- If a pattern seems universal (not project-specific), set scope to global instead of project
|
||||
@@ -113,11 +124,19 @@ PROMPT
|
||||
max_turns=10
|
||||
fi
|
||||
|
||||
# Prevent observe.sh from recording this automated Haiku session as observations
|
||||
# Ensure CWD is PROJECT_DIR so the relative analysis_relpath resolves correctly
|
||||
# on all platforms, not just when the observer happens to be launched from the project root.
|
||||
cd "$PROJECT_DIR" || { echo "[$(date)] Failed to cd to PROJECT_DIR ($PROJECT_DIR), skipping analysis" >> "$LOG_FILE"; rm -f "$prompt_file" "$analysis_file"; return; }
|
||||
|
||||
# Prevent observe.sh from recording this automated Haiku session as observations.
|
||||
# Pass prompt via -p flag instead of stdin redirect for Windows compatibility (#842).
|
||||
ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print \
|
||||
--allowedTools "Read,Write" \
|
||||
< "$prompt_file" >> "$LOG_FILE" 2>&1 &
|
||||
-p "$(cat "$prompt_file")" >> "$LOG_FILE" 2>&1 &
|
||||
claude_pid=$!
|
||||
# prompt_file content was already expanded by the shell; remove early to avoid
|
||||
# leaving stale temp files during the (potentially long) analysis window.
|
||||
rm -f "$prompt_file"
|
||||
|
||||
(
|
||||
sleep "$timeout_seconds"
|
||||
@@ -131,7 +150,7 @@ PROMPT
|
||||
wait "$claude_pid"
|
||||
exit_code=$?
|
||||
kill "$watchdog_pid" 2>/dev/null || true
|
||||
rm -f "$prompt_file" "$analysis_file"
|
||||
rm -f "$analysis_file"
|
||||
|
||||
if [ "$exit_code" -ne 0 ]; then
|
||||
echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE"
|
||||
|
||||
82
skills/design-system/SKILL.md
Normal file
82
skills/design-system/SKILL.md
Normal file
@@ -0,0 +1,82 @@
|
||||
---
|
||||
name: design-system
|
||||
description: Use this skill to generate or audit design systems, check visual consistency, and review PRs that touch styling.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Design System — Generate & Audit Visual Systems
|
||||
|
||||
## When to Use
|
||||
|
||||
- Starting a new project that needs a design system
|
||||
- Auditing an existing codebase for visual consistency
|
||||
- Before a redesign — understand what you have
|
||||
- When the UI looks "off" but you can't pinpoint why
|
||||
- Reviewing PRs that touch styling
|
||||
|
||||
## How It Works
|
||||
|
||||
### Mode 1: Generate Design System
|
||||
|
||||
Analyzes your codebase and generates a cohesive design system:
|
||||
|
||||
```
|
||||
1. Scan CSS/Tailwind/styled-components for existing patterns
|
||||
2. Extract: colors, typography, spacing, border-radius, shadows, breakpoints
|
||||
3. Research 3 competitor sites for inspiration (via browser MCP)
|
||||
4. Propose a design token set (JSON + CSS custom properties)
|
||||
5. Generate DESIGN.md with rationale for each decision
|
||||
6. Create an interactive HTML preview page (self-contained, no deps)
|
||||
```
|
||||
|
||||
Output: `DESIGN.md` + `design-tokens.json` + `design-preview.html`
|
||||
|
||||
### Mode 2: Visual Audit
|
||||
|
||||
Scores your UI across 10 dimensions (0-10 each):
|
||||
|
||||
```
|
||||
1. Color consistency — are you using your palette or random hex values?
|
||||
2. Typography hierarchy — clear h1 > h2 > h3 > body > caption?
|
||||
3. Spacing rhythm — consistent scale (4px/8px/16px) or arbitrary?
|
||||
4. Component consistency — do similar elements look similar?
|
||||
5. Responsive behavior — fluid or broken at breakpoints?
|
||||
6. Dark mode — complete or half-done?
|
||||
7. Animation — purposeful or gratuitous?
|
||||
8. Accessibility — contrast ratios, focus states, touch targets
|
||||
9. Information density — cluttered or clean?
|
||||
10. Polish — hover states, transitions, loading states, empty states
|
||||
```
|
||||
|
||||
Each dimension gets a score, specific examples, and a fix with exact file:line.
|
||||
|
||||
### Mode 3: AI Slop Detection
|
||||
|
||||
Identifies generic AI-generated design patterns:
|
||||
|
||||
```
|
||||
- Gratuitous gradients on everything
|
||||
- Purple-to-blue defaults
|
||||
- "Glass morphism" cards with no purpose
|
||||
- Rounded corners on things that shouldn't be rounded
|
||||
- Excessive animations on scroll
|
||||
- Generic hero with centered text over stock gradient
|
||||
- Sans-serif font stack with no personality
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
**Generate for a SaaS app:**
|
||||
```
|
||||
/design-system generate --style minimal --palette earth-tones
|
||||
```
|
||||
|
||||
**Audit existing UI:**
|
||||
```
|
||||
/design-system audit --url http://localhost:3000 --pages / /pricing /docs
|
||||
```
|
||||
|
||||
**Check for AI slop:**
|
||||
```
|
||||
/design-system slop-check
|
||||
```
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
name: laravel-verification
|
||||
description: Verification loop for Laravel projects: env checks, linting, static analysis, tests with coverage, security scans, and deployment readiness.
|
||||
description: "Verification loop for Laravel projects: env checks, linting, static analysis, tests with coverage, security scans, and deployment readiness."
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
|
||||
85
skills/product-lens/SKILL.md
Normal file
85
skills/product-lens/SKILL.md
Normal file
@@ -0,0 +1,85 @@
|
||||
---
|
||||
name: product-lens
|
||||
description: Use this skill to validate the "why" before building, run product diagnostics, and convert vague ideas into specs.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Product Lens — Think Before You Build
|
||||
|
||||
## When to Use
|
||||
|
||||
- Before starting any feature — validate the "why"
|
||||
- Weekly product review — are we building the right thing?
|
||||
- When stuck choosing between features
|
||||
- Before a launch — sanity check the user journey
|
||||
- When converting a vague idea into a spec
|
||||
|
||||
## How It Works
|
||||
|
||||
### Mode 1: Product Diagnostic
|
||||
|
||||
Like YC office hours but automated. Asks the hard questions:
|
||||
|
||||
```
|
||||
1. Who is this for? (specific person, not "developers")
|
||||
2. What's the pain? (quantify: how often, how bad, what do they do today?)
|
||||
3. Why now? (what changed that makes this possible/necessary?)
|
||||
4. What's the 10-star version? (if money/time were unlimited)
|
||||
5. What's the MVP? (smallest thing that proves the thesis)
|
||||
6. What's the anti-goal? (what are you explicitly NOT building?)
|
||||
7. How do you know it's working? (metric, not vibes)
|
||||
```
|
||||
|
||||
Output: a `PRODUCT-BRIEF.md` with answers, risks, and a go/no-go recommendation.
|
||||
|
||||
### Mode 2: Founder Review
|
||||
|
||||
Reviews your current project through a founder lens:
|
||||
|
||||
```
|
||||
1. Read README, CLAUDE.md, package.json, recent commits
|
||||
2. Infer: what is this trying to be?
|
||||
3. Score: product-market fit signals (0-10)
|
||||
- Usage growth trajectory
|
||||
- Retention indicators (repeat contributors, return users)
|
||||
- Revenue signals (pricing page, billing code, Stripe integration)
|
||||
- Competitive moat (what's hard to copy?)
|
||||
4. Identify: the one thing that would 10x this
|
||||
5. Flag: things you're building that don't matter
|
||||
```
|
||||
|
||||
### Mode 3: User Journey Audit
|
||||
|
||||
Maps the actual user experience:
|
||||
|
||||
```
|
||||
1. Clone/install the product as a new user
|
||||
2. Document every friction point (confusing steps, errors, missing docs)
|
||||
3. Time each step
|
||||
4. Compare to competitor onboarding
|
||||
5. Score: time-to-value (how long until the user gets their first win?)
|
||||
6. Recommend: top 3 fixes for onboarding
|
||||
```
|
||||
|
||||
### Mode 4: Feature Prioritization
|
||||
|
||||
When you have 10 ideas and need to pick 2:
|
||||
|
||||
```
|
||||
1. List all candidate features
|
||||
2. Score each on: impact (1-5) × confidence (1-5) ÷ effort (1-5)
|
||||
3. Rank by ICE score
|
||||
4. Apply constraints: runway, team size, dependencies
|
||||
5. Output: prioritized roadmap with rationale
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
All modes output actionable docs, not essays. Every recommendation has a specific next step.
|
||||
|
||||
## Integration
|
||||
|
||||
Pair with:
|
||||
- `/browser-qa` to verify the user journey audit findings
|
||||
- `/design-system audit` for visual polish assessment
|
||||
- `/canary-watch` for post-launch monitoring
|
||||
75
skills/safety-guard/SKILL.md
Normal file
75
skills/safety-guard/SKILL.md
Normal file
@@ -0,0 +1,75 @@
|
||||
---
|
||||
name: safety-guard
|
||||
description: Use this skill to prevent destructive operations when working on production systems or running agents autonomously.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Safety Guard — Prevent Destructive Operations
|
||||
|
||||
## When to Use
|
||||
|
||||
- When working on production systems
|
||||
- When agents are running autonomously (full-auto mode)
|
||||
- When you want to restrict edits to a specific directory
|
||||
- During sensitive operations (migrations, deploys, data changes)
|
||||
|
||||
## How It Works
|
||||
|
||||
Three modes of protection:
|
||||
|
||||
### Mode 1: Careful Mode
|
||||
|
||||
Intercepts destructive commands before execution and warns:
|
||||
|
||||
```
|
||||
Watched patterns:
|
||||
- rm -rf (especially /, ~, or project root)
|
||||
- git push --force
|
||||
- git reset --hard
|
||||
- git checkout . (discard all changes)
|
||||
- DROP TABLE / DROP DATABASE
|
||||
- docker system prune
|
||||
- kubectl delete
|
||||
- chmod 777
|
||||
- sudo rm
|
||||
- npm publish (accidental publishes)
|
||||
- Any command with --no-verify
|
||||
```
|
||||
|
||||
When detected: shows what the command does, asks for confirmation, suggests safer alternative.
|
||||
|
||||
### Mode 2: Freeze Mode
|
||||
|
||||
Locks file edits to a specific directory tree:
|
||||
|
||||
```
|
||||
/safety-guard freeze src/components/
|
||||
```
|
||||
|
||||
Any Write/Edit outside `src/components/` is blocked with an explanation. Useful when you want an agent to focus on one area without touching unrelated code.
|
||||
|
||||
### Mode 3: Guard Mode (Careful + Freeze combined)
|
||||
|
||||
Both protections active. Maximum safety for autonomous agents.
|
||||
|
||||
```
|
||||
/safety-guard guard --dir src/api/ --allow-read-all
|
||||
```
|
||||
|
||||
Agents can read anything but only write to `src/api/`. Destructive commands are blocked everywhere.
|
||||
|
||||
### Unlock
|
||||
|
||||
```
|
||||
/safety-guard off
|
||||
```
|
||||
|
||||
## Implementation
|
||||
|
||||
Uses PreToolUse hooks to intercept Bash, Write, Edit, and MultiEdit tool calls. Checks the command/path against the active rules before allowing execution.
|
||||
|
||||
## Integration
|
||||
|
||||
- Enable by default for `codex -a never` sessions
|
||||
- Pair with observability risk scoring in ECC 2.0
|
||||
- Logs all blocked actions to `~/.claude/safety-guard.log`
|
||||
157
tests/hooks/config-protection.test.js
Normal file
157
tests/hooks/config-protection.test.js
Normal file
@@ -0,0 +1,157 @@
|
||||
/**
|
||||
* Tests for scripts/hooks/config-protection.js via run-with-flags.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const runner = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'run-with-flags.js');
|
||||
|
||||
/**
 * Minimal test harness: runs `fn`, prints a pass/fail line, and reports
 * the outcome as a boolean.
 *
 * @param {string} name - Human-readable test name for the log line.
 * @param {Function} fn - Test body; any throw (e.g. a failed assertion) marks the test failed.
 * @returns {boolean} true when `fn` completed without throwing, false otherwise.
 */
function test(name, fn) {
  try {
    fn();
  } catch (error) {
    console.log(`  ✗ ${name}`);
    console.log(`    Error: ${error.message}`);
    return false;
  }
  console.log(`  ✓ ${name}`);
  return true;
}
|
||||
|
||||
/**
 * Executes the config-protection hook through run-with-flags.js and captures its output.
 *
 * @param {object|string} input - Hook payload; objects are serialized to JSON,
 *   strings are forwarded verbatim (lets callers send pre-built or oversized payloads).
 * @param {object} [env] - Extra environment variables merged over the defaults.
 * @returns {{code: number, stdout: string, stderr: string}} Exit code (falls back to 1
 *   when the child produced no integer status, e.g. timeout or signal) plus captured streams.
 */
function runHook(input, env = {}) {
  const payload = typeof input === 'string' ? input : JSON.stringify(input);
  const hookArgs = [runner, 'pre:config-protection', 'scripts/hooks/config-protection.js', 'standard,strict'];

  const child = spawnSync('node', hookArgs, {
    input: payload,
    encoding: 'utf8',
    env: {
      ...process.env,
      ECC_HOOK_PROFILE: 'standard',
      ...env
    },
    timeout: 15000,
    stdio: ['pipe', 'pipe', 'pipe']
  });

  const exitCode = Number.isInteger(child.status) ? child.status : 1;
  return { code: exitCode, stdout: child.stdout || '', stderr: child.stderr || '' };
}
|
||||
|
||||
/**
 * Executes an arbitrary hook script through run-with-flags.js with a caller-supplied
 * plugin root, so tests can exercise ad-hoc (e.g. legacy) hook scripts.
 *
 * @param {string} pluginRoot - Directory exported as CLAUDE_PLUGIN_ROOT to the child.
 * @param {string} hookId - Hook identifier passed to the runner (e.g. 'pre:legacy-block').
 * @param {string} relScriptPath - Hook script path relative to the plugin root.
 * @param {object|string} input - Hook payload; objects are serialized to JSON,
 *   strings are forwarded verbatim.
 * @param {object} [env] - Extra environment variables merged over the defaults.
 * @returns {{code: number, stdout: string, stderr: string}} Exit code (falls back to 1
 *   when the child produced no integer status) plus captured streams.
 */
function runCustomHook(pluginRoot, hookId, relScriptPath, input, env = {}) {
  const payload = typeof input === 'string' ? input : JSON.stringify(input);

  const child = spawnSync('node', [runner, hookId, relScriptPath, 'standard,strict'], {
    input: payload,
    encoding: 'utf8',
    env: {
      ...process.env,
      CLAUDE_PLUGIN_ROOT: pluginRoot,
      ECC_HOOK_PROFILE: 'standard',
      ...env
    },
    timeout: 15000,
    stdio: ['pipe', 'pipe', 'pipe']
  });

  const exitCode = Number.isInteger(child.status) ? child.status : 1;
  return { code: exitCode, stdout: child.stdout || '', stderr: child.stderr || '' };
}
|
||||
|
||||
/**
 * Test suite for scripts/hooks/config-protection.js as executed through
 * run-with-flags.js. Covers: blocking protected-config edits, raw-JSON
 * pass-through for safe edits, fail-closed handling of oversized (truncated)
 * payloads, and stdout suppression for failing legacy hooks.
 * Exits the process with code 1 if any test failed, 0 otherwise.
 */
function runTests() {
  console.log('\n=== Testing config-protection ===\n');

  let passed = 0;
  let failed = 0;

  // Editing a protected config file must be rejected (exit 2) with an
  // explanatory stderr message, and the raw payload must not be echoed.
  if (test('blocks protected config file edits through run-with-flags', () => {
    const input = {
      tool_name: 'Write',
      tool_input: {
        file_path: '.eslintrc.js',
        content: 'module.exports = {};'
      }
    };

    const result = runHook(input);
    assert.strictEqual(result.code, 2, 'Expected protected config edit to be blocked');
    assert.strictEqual(result.stdout, '', 'Blocked hook should not echo raw input');
    assert.ok(result.stderr.includes('BLOCKED: Modifying .eslintrc.js is not allowed.'), `Expected block message, got: ${result.stderr}`);
  })) passed++; else failed++;

  // Non-protected files pass through with the exact raw JSON on stdout and
  // nothing on stderr.
  if (test('passes through safe file edits unchanged', () => {
    const input = {
      tool_name: 'Write',
      tool_input: {
        file_path: 'src/index.js',
        content: 'console.log("ok");'
      }
    };

    const rawInput = JSON.stringify(input);
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'Expected safe file edit to pass');
    assert.strictEqual(result.stdout, rawInput, 'Expected exact raw JSON passthrough');
    assert.strictEqual(result.stderr, '', 'Expected no stderr for safe edits');
  })) passed++; else failed++;

  // A payload over the 1 MiB stdin cap gets truncated before the hook sees it;
  // a truncated protected-config payload must fail closed (blocked), not open.
  if (test('blocks truncated protected config payloads instead of failing open', () => {
    const rawInput = JSON.stringify({
      tool_name: 'Write',
      tool_input: {
        file_path: '.eslintrc.js',
        // 1 MiB + 2 KiB of filler so the serialized payload exceeds the cap.
        content: 'x'.repeat(1024 * 1024 + 2048)
      }
    });

    const result = runHook(rawInput);
    assert.strictEqual(result.code, 2, 'Expected truncated protected payload to be blocked');
    assert.strictEqual(result.stdout, '', 'Blocked truncated payload should not echo raw input');
    assert.ok(result.stderr.includes('Hook input exceeded 1048576 bytes'), `Expected size warning, got: ${result.stderr}`);
    assert.ok(result.stderr.includes('truncated payload'), `Expected truncated payload warning, got: ${result.stderr}`);
  })) passed++; else failed++;

  // When a legacy hook fails (exit 2) without writing stdout, the runner must
  // propagate the exit code and must NOT echo the raw input on its behalf.
  // Builds a throwaway plugin dir with a tiny always-failing hook script.
  if (test('legacy hooks do not echo raw input when they fail without stdout', () => {
    const pluginRoot = path.join(__dirname, '..', `tmp-runner-plugin-${Date.now()}`);
    const scriptDir = path.join(pluginRoot, 'scripts', 'hooks');
    const scriptPath = path.join(scriptDir, 'legacy-block.js');

    try {
      fs.mkdirSync(scriptDir, { recursive: true });
      fs.writeFileSync(
        scriptPath,
        '#!/usr/bin/env node\nprocess.stderr.write("blocked by legacy hook\\n");\nprocess.exit(2);\n'
      );

      const rawInput = JSON.stringify({
        tool_name: 'Write',
        tool_input: {
          file_path: '.eslintrc.js',
          content: 'module.exports = {};'
        }
      });

      const result = runCustomHook(pluginRoot, 'pre:legacy-block', 'scripts/hooks/legacy-block.js', rawInput);
      assert.strictEqual(result.code, 2, 'Expected failing legacy hook exit code to propagate');
      assert.strictEqual(result.stdout, '', 'Expected failing legacy hook to avoid raw passthrough');
      assert.ok(result.stderr.includes('blocked by legacy hook'), `Expected legacy hook stderr, got: ${result.stderr}`);
    } finally {
      try {
        fs.rmSync(pluginRoot, { recursive: true, force: true });
      } catch {
        // best-effort cleanup
      }
    }
  })) passed++; else failed++;

  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
|
||||
|
||||
// Entry point: execute the suite immediately when this file is run with `node`.
runTests();
|
||||
@@ -156,6 +156,35 @@ async function runTests() {
|
||||
assert.strictEqual(approvalEvent.payload.severity, 'high');
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('approval events fingerprint commands instead of storing raw command text', async () => {
|
||||
const command = 'git push origin main --force';
|
||||
const events = analyzeForGovernanceEvents({
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command },
|
||||
});
|
||||
|
||||
const approvalEvent = events.find(e => e.eventType === 'approval_requested');
|
||||
assert.ok(approvalEvent);
|
||||
assert.strictEqual(approvalEvent.payload.commandName, 'git');
|
||||
assert.ok(/^[a-f0-9]{12}$/.test(approvalEvent.payload.commandFingerprint), 'Expected short command fingerprint');
|
||||
assert.ok(!Object.prototype.hasOwnProperty.call(approvalEvent.payload, 'command'), 'Should not store raw command text');
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('security findings fingerprint elevated commands instead of storing raw command text', async () => {
|
||||
const command = 'sudo chmod 600 ~/.ssh/id_rsa';
|
||||
const events = analyzeForGovernanceEvents({
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command },
|
||||
}, {
|
||||
hookPhase: 'post',
|
||||
});
|
||||
|
||||
const securityEvent = events.find(e => e.eventType === 'security_finding');
|
||||
assert.ok(securityEvent);
|
||||
assert.strictEqual(securityEvent.payload.commandName, 'sudo');
|
||||
assert.ok(/^[a-f0-9]{12}$/.test(securityEvent.payload.commandFingerprint), 'Expected short command fingerprint');
|
||||
assert.ok(!Object.prototype.hasOwnProperty.call(securityEvent.payload, 'command'), 'Should not store raw command text');
|
||||
})) passed += 1; else failed += 1;
|
||||
if (await test('analyzeForGovernanceEvents detects sensitive file access', async () => {
|
||||
const events = analyzeForGovernanceEvents({
|
||||
tool_name: 'Edit',
|
||||
@@ -273,6 +302,43 @@ async function runTests() {
|
||||
}
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('run() emits hook_input_truncated event without logging raw command text', async () => {
|
||||
const original = process.env.ECC_GOVERNANCE_CAPTURE;
|
||||
const originalHookEvent = process.env.CLAUDE_HOOK_EVENT_NAME;
|
||||
const originalWrite = process.stderr.write;
|
||||
const stderr = [];
|
||||
process.env.ECC_GOVERNANCE_CAPTURE = '1';
|
||||
process.env.CLAUDE_HOOK_EVENT_NAME = 'PreToolUse';
|
||||
process.stderr.write = (chunk, encoding, callback) => {
|
||||
stderr.push(String(chunk));
|
||||
if (typeof encoding === 'function') encoding();
|
||||
if (typeof callback === 'function') callback();
|
||||
return true;
|
||||
};
|
||||
|
||||
try {
|
||||
const input = JSON.stringify({ tool_name: 'Bash', tool_input: { command: 'rm -rf /tmp/important' } });
|
||||
const result = run(input, { truncated: true, maxStdin: 1024 });
|
||||
assert.strictEqual(result, input);
|
||||
} finally {
|
||||
process.stderr.write = originalWrite;
|
||||
if (original !== undefined) {
|
||||
process.env.ECC_GOVERNANCE_CAPTURE = original;
|
||||
} else {
|
||||
delete process.env.ECC_GOVERNANCE_CAPTURE;
|
||||
}
|
||||
if (originalHookEvent !== undefined) {
|
||||
process.env.CLAUDE_HOOK_EVENT_NAME = originalHookEvent;
|
||||
} else {
|
||||
delete process.env.CLAUDE_HOOK_EVENT_NAME;
|
||||
}
|
||||
}
|
||||
|
||||
const combined = stderr.join('');
|
||||
assert.ok(combined.includes('"eventType":"hook_input_truncated"'), 'Should emit truncation event');
|
||||
assert.ok(combined.includes('"sizeLimitBytes":1024'), 'Should record the truncation limit');
|
||||
assert.ok(!combined.includes('rm -rf /tmp/important'), 'Should not leak raw command text to governance logs');
|
||||
})) passed += 1; else failed += 1;
|
||||
if (await test('run() can detect multiple event types in one input', async () => {
|
||||
// Bash command with force push AND secret in command
|
||||
const events = analyzeForGovernanceEvents({
|
||||
|
||||
@@ -82,6 +82,22 @@ function sleepMs(ms) {
|
||||
Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, ms);
|
||||
}
|
||||
|
||||
function getCanonicalSessionsDir(homeDir) {
|
||||
return path.join(homeDir, '.claude', 'session-data');
|
||||
}
|
||||
|
||||
function getLegacySessionsDir(homeDir) {
|
||||
return path.join(homeDir, '.claude', 'sessions');
|
||||
}
|
||||
|
||||
function getSessionStartAdditionalContext(stdout) {
|
||||
assert.ok(stdout.trim(), 'Expected SessionStart hook to emit stdout payload');
|
||||
const payload = JSON.parse(stdout);
|
||||
assert.strictEqual(payload.hookSpecificOutput?.hookEventName, 'SessionStart', 'Should emit SessionStart hook payload');
|
||||
assert.strictEqual(typeof payload.hookSpecificOutput?.additionalContext, 'string', 'Should include additionalContext text');
|
||||
return payload.hookSpecificOutput.additionalContext;
|
||||
}
|
||||
|
||||
// Test helper
|
||||
function test(name, fn) {
|
||||
try {
|
||||
@@ -336,7 +352,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('exits 0 even with isolated empty HOME', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-iso-start-${Date.now()}`);
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
|
||||
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
try {
|
||||
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
|
||||
@@ -364,7 +380,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('skips template session content', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-tpl-start-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getLegacySessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -378,8 +394,8 @@ async function runTests() {
|
||||
USERPROFILE: isoHome
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
// stdout should NOT contain the template content
|
||||
assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject template session content');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(!additionalContext.includes('Previous session summary'), 'Should not inject template session content');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -391,7 +407,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('injects real session content', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-real-start-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getLegacySessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -405,8 +421,47 @@ async function runTests() {
|
||||
USERPROFILE: isoHome
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Previous session summary'), 'Should inject real session content');
|
||||
assert.ok(result.stdout.includes('authentication refactor'), 'Should include session content text');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(additionalContext.includes('Previous session summary'), 'Should inject real session content');
|
||||
assert.ok(additionalContext.includes('authentication refactor'), 'Should include session content text');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
await asyncTest('prefers canonical session-data content over legacy duplicates', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-canonical-start-${Date.now()}`);
|
||||
const canonicalDir = getCanonicalSessionsDir(isoHome);
|
||||
const legacyDir = getLegacySessionsDir(isoHome);
|
||||
const now = new Date();
|
||||
const filename = `${now.toISOString().slice(0, 10)}-dupe1234-session.tmp`;
|
||||
const canonicalFile = path.join(canonicalDir, filename);
|
||||
const legacyFile = path.join(legacyDir, filename);
|
||||
const canonicalTime = new Date(now.getTime() - 60 * 1000);
|
||||
const legacyTime = new Date(canonicalTime.getTime());
|
||||
|
||||
fs.mkdirSync(canonicalDir, { recursive: true });
|
||||
fs.mkdirSync(legacyDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
fs.writeFileSync(canonicalFile, '# Canonical Session\n\nUse the canonical session-data copy.\n');
|
||||
fs.writeFileSync(legacyFile, '# Legacy Session\n\nDo not prefer the legacy duplicate.\n');
|
||||
fs.utimesSync(canonicalFile, canonicalTime, canonicalTime);
|
||||
fs.utimesSync(legacyFile, legacyTime, legacyTime);
|
||||
|
||||
try {
|
||||
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
|
||||
HOME: isoHome,
|
||||
USERPROFILE: isoHome
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(additionalContext.includes('canonical session-data copy'));
|
||||
assert.ok(!additionalContext.includes('legacy duplicate'));
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -418,7 +473,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('strips ANSI escape codes from injected session content', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-ansi-start-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getLegacySessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -434,9 +489,10 @@ async function runTests() {
|
||||
USERPROFILE: isoHome
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stdout.includes('Previous session summary'), 'Should inject real session content');
|
||||
assert.ok(result.stdout.includes('Windows terminal handling'), 'Should preserve sanitized session text');
|
||||
assert.ok(!result.stdout.includes('\x1b['), 'Should not emit ANSI escape codes');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(additionalContext.includes('Previous session summary'), 'Should inject real session content');
|
||||
assert.ok(additionalContext.includes('Windows terminal handling'), 'Should preserve sanitized session text');
|
||||
assert.ok(!additionalContext.includes('\x1b['), 'Should not emit ANSI escape codes');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -450,7 +506,7 @@ async function runTests() {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-skills-start-${Date.now()}`);
|
||||
const learnedDir = path.join(isoHome, '.claude', 'skills', 'learned');
|
||||
fs.mkdirSync(learnedDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
|
||||
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
|
||||
|
||||
// Create learned skill files
|
||||
fs.writeFileSync(path.join(learnedDir, 'testing-patterns.md'), '# Testing');
|
||||
@@ -548,7 +604,7 @@ async function runTests() {
|
||||
// Check if session file was created
|
||||
// Note: Without CLAUDE_SESSION_ID, falls back to project/worktree name (not 'default')
|
||||
// Use local time to match the script's getDateString() function
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
const now = new Date();
|
||||
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
|
||||
|
||||
@@ -581,7 +637,7 @@ async function runTests() {
|
||||
|
||||
// Check if session file was created with session ID
|
||||
// Use local time to match the script's getDateString() function
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
const now = new Date();
|
||||
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
|
||||
const sessionFile = path.join(sessionsDir, `${today}-${expectedShortId}-session.tmp`);
|
||||
@@ -614,7 +670,7 @@ async function runTests() {
|
||||
|
||||
const now = new Date();
|
||||
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
|
||||
const sessionFile = path.join(isoHome, '.claude', 'sessions', `${today}-${expectedShortId}-session.tmp`);
|
||||
const sessionFile = path.join(getCanonicalSessionsDir(isoHome), `${today}-${expectedShortId}-session.tmp`);
|
||||
const content = fs.readFileSync(sessionFile, 'utf8');
|
||||
|
||||
assert.ok(content.includes(`**Project:** ${project}`), 'Should persist project metadata');
|
||||
@@ -652,7 +708,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('creates compaction log', async () => {
|
||||
await runScript(path.join(scriptsDir, 'pre-compact.js'));
|
||||
const logFile = path.join(os.homedir(), '.claude', 'sessions', 'compaction-log.txt');
|
||||
const logFile = path.join(getCanonicalSessionsDir(os.homedir()), 'compaction-log.txt');
|
||||
assert.ok(fs.existsSync(logFile), 'Compaction log should exist');
|
||||
})
|
||||
)
|
||||
@@ -662,7 +718,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('annotates active session file with compaction marker', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-annotate-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Create an active .tmp session file
|
||||
@@ -688,7 +744,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('compaction log contains timestamp', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-ts-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
try {
|
||||
@@ -1544,7 +1600,7 @@ async function runTests() {
|
||||
assert.strictEqual(result.code, 0, 'Should handle backticks without crash');
|
||||
|
||||
// Find the session file in the temp HOME
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1579,7 +1635,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1613,7 +1669,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1648,7 +1704,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1686,7 +1742,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1723,7 +1779,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1757,7 +1813,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1800,7 +1856,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -1873,9 +1929,8 @@ async function runTests() {
|
||||
const isNpx = hook.command.startsWith('npx ');
|
||||
const isSkillScript = hook.command.includes('/skills/') && (/^(bash|sh)\s/.test(hook.command) || hook.command.startsWith('${CLAUDE_PLUGIN_ROOT}/skills/'));
|
||||
const isHookShellWrapper = /^(bash|sh)\s+["']?\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/hooks\/run-with-flags-shell\.sh/.test(hook.command);
|
||||
const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js');
|
||||
assert.ok(
|
||||
isNode || isNpx || isSkillScript || isHookShellWrapper || isSessionStartFallback,
|
||||
isNode || isNpx || isSkillScript || isHookShellWrapper,
|
||||
`Hook command should use node or approved shell wrapper: ${hook.command.substring(0, 100)}...`
|
||||
);
|
||||
}
|
||||
@@ -1892,7 +1947,25 @@ async function runTests() {
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('script references use CLAUDE_PLUGIN_ROOT variable (except SessionStart fallback)', () => {
|
||||
test('SessionStart hook uses safe inline resolver without plugin-tree scanning', () => {
|
||||
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
|
||||
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
|
||||
const sessionStartHook = hooks.hooks.SessionStart?.[0]?.hooks?.[0];
|
||||
|
||||
assert.ok(sessionStartHook, 'Should define a SessionStart hook');
|
||||
assert.ok(sessionStartHook.command.startsWith('node -e "'), 'SessionStart should use inline node resolver');
|
||||
assert.ok(sessionStartHook.command.includes('session:start'), 'SessionStart should invoke the session:start profile');
|
||||
assert.ok(sessionStartHook.command.includes('run-with-flags.js'), 'SessionStart should resolve the runner script');
|
||||
assert.ok(sessionStartHook.command.includes('CLAUDE_PLUGIN_ROOT'), 'SessionStart should consult CLAUDE_PLUGIN_ROOT');
|
||||
assert.ok(sessionStartHook.command.includes('plugins'), 'SessionStart should probe known plugin roots');
|
||||
assert.ok(!sessionStartHook.command.includes('find '), 'Should not scan arbitrary plugin paths with find');
|
||||
assert.ok(!sessionStartHook.command.includes('head -n 1'), 'Should not pick the first matching plugin path');
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
if (
|
||||
test('script references use CLAUDE_PLUGIN_ROOT variable or safe SessionStart inline resolver', () => {
|
||||
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
|
||||
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
|
||||
|
||||
@@ -1901,8 +1974,8 @@ async function runTests() {
|
||||
for (const hook of entry.hooks) {
|
||||
if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) {
|
||||
// Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command
|
||||
const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js');
|
||||
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartFallback;
|
||||
const isSessionStartInlineResolver = hook.command.startsWith('node -e') && hook.command.includes('session:start') && hook.command.includes('run-with-flags.js');
|
||||
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartInlineResolver;
|
||||
assert.ok(hasPluginRoot, `Script paths should use CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...`);
|
||||
}
|
||||
}
|
||||
@@ -2766,7 +2839,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('updates Last Updated timestamp in existing session file', async () => {
|
||||
const testDir = createTestDir();
|
||||
const sessionsDir = path.join(testDir, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(testDir);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Get the expected filename
|
||||
@@ -2798,7 +2871,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('normalizes existing session headers with project, branch, and worktree metadata', async () => {
|
||||
const testDir = createTestDir();
|
||||
const sessionsDir = path.join(testDir, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(testDir);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
@@ -2831,7 +2904,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('replaces blank template with summary when updating existing file', async () => {
|
||||
const testDir = createTestDir();
|
||||
const sessionsDir = path.join(testDir, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(testDir);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
@@ -2869,7 +2942,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('always updates session summary content on session end', async () => {
|
||||
const testDir = createTestDir();
|
||||
const sessionsDir = path.join(testDir, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(testDir);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
@@ -2906,7 +2979,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('only annotates *-session.tmp files, not other .tmp files', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-glob-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Create a session .tmp file and a non-session .tmp file
|
||||
@@ -2937,7 +3010,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('handles no active session files gracefully', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-nosession-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
try {
|
||||
@@ -2976,7 +3049,7 @@ async function runTests() {
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
// With no user messages, extractSessionSummary returns null → blank template
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -3016,7 +3089,7 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -3192,7 +3265,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('exits 0 with empty sessions directory (no recent sessions)', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-empty-${Date.now()}`);
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
|
||||
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
try {
|
||||
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
|
||||
@@ -3201,7 +3274,8 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 with no sessions');
|
||||
// Should NOT inject any previous session data (stdout should be empty or minimal)
|
||||
assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject when no sessions');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(!additionalContext.includes('Previous session summary'), 'Should not inject when no sessions');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -3213,7 +3287,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('does not inject blank template session into context', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-blank-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -3229,7 +3303,8 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
// Should NOT inject blank template
|
||||
assert.ok(!result.stdout.includes('Previous session summary'), 'Should skip blank template sessions');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(!additionalContext.includes('Previous session summary'), 'Should skip blank template sessions');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -3825,7 +3900,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('annotates only the newest session file when multiple exist', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-multi-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Create two session files with different mtimes
|
||||
@@ -3877,7 +3952,7 @@ async function runTests() {
|
||||
assert.strictEqual(result.code, 0);
|
||||
|
||||
// Find the session file and verify newlines were collapsed
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -3903,7 +3978,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('does not inject empty session file content into context', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-empty-file-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -3919,7 +3994,8 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 with empty session file');
|
||||
// readFile returns '' (falsy) → the if (content && ...) guard skips injection
|
||||
assert.ok(!result.stdout.includes('Previous session summary'), 'Should NOT inject empty string into context');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(!additionalContext.includes('Previous session summary'), 'Should NOT inject empty string into context');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -3963,7 +4039,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('summary omits Files Modified and Tools Used when none found', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-notools-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const testDir = createTestDir();
|
||||
@@ -4001,7 +4077,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('reports available session aliases on startup', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-alias-${Date.now()}`);
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
|
||||
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
// Pre-populate the aliases file
|
||||
@@ -4038,7 +4114,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('parallel compaction runs all append to log without loss', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-par-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
try {
|
||||
@@ -4073,7 +4149,7 @@ async function runTests() {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-blocked-${Date.now()}`);
|
||||
fs.mkdirSync(path.join(isoHome, '.claude'), { recursive: true });
|
||||
// Block sessions dir creation by placing a file at that path
|
||||
fs.writeFileSync(path.join(isoHome, '.claude', 'sessions'), 'blocked');
|
||||
fs.writeFileSync(getCanonicalSessionsDir(isoHome), 'blocked');
|
||||
|
||||
try {
|
||||
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
|
||||
@@ -4136,7 +4212,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('excludes session files older than 7 days', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-7day-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -4159,8 +4235,9 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stderr.includes('1 recent session'), `Should find 1 recent session (6.9-day included, 8-day excluded), stderr: ${result.stderr}`);
|
||||
assert.ok(result.stdout.includes('RECENT CONTENT HERE'), 'Should inject the 6.9-day-old session content');
|
||||
assert.ok(!result.stdout.includes('OLD CONTENT SHOULD NOT APPEAR'), 'Should NOT inject the 8-day-old session content');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(additionalContext.includes('RECENT CONTENT HERE'), 'Should inject the 6.9-day-old session content');
|
||||
assert.ok(!additionalContext.includes('OLD CONTENT SHOULD NOT APPEAR'), 'Should NOT inject the 8-day-old session content');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -4174,7 +4251,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('injects newest session when multiple recent sessions exist', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-multi-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
|
||||
@@ -4198,7 +4275,8 @@ async function runTests() {
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.ok(result.stderr.includes('2 recent session'), `Should find 2 recent sessions, stderr: ${result.stderr}`);
|
||||
// Should inject the NEWER session, not the older one
|
||||
assert.ok(result.stdout.includes('NEWER_CONTEXT_MARKER'), 'Should inject the newest session content');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(additionalContext.includes('NEWER_CONTEXT_MARKER'), 'Should inject the newest session content');
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
@@ -4305,7 +4383,7 @@ async function runTests() {
|
||||
return;
|
||||
}
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-start-unreadable-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Create a session file with real content, then make it unreadable
|
||||
@@ -4320,7 +4398,8 @@ async function runTests() {
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0 even with unreadable session file');
|
||||
// readFile returns null for unreadable files → content is null → no injection
|
||||
assert.ok(!result.stdout.includes('Sensitive session content'), 'Should NOT inject content from unreadable file');
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(!additionalContext.includes('Sensitive session content'), 'Should NOT inject content from unreadable file');
|
||||
} finally {
|
||||
try {
|
||||
fs.chmodSync(sessionFile, 0o644);
|
||||
@@ -4366,7 +4445,7 @@ async function runTests() {
|
||||
return;
|
||||
}
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-compact-ro-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Create a session file then make it read-only
|
||||
@@ -4407,7 +4486,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('logs warning when existing session file lacks Last Updated field', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-end-nots-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
// Create transcript with a user message so a summary is produced
|
||||
@@ -4498,7 +4577,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('extracts user messages from role-only format (no type field)', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-role-only-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const testDir = createTestDir();
|
||||
@@ -4534,7 +4613,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('logs "Transcript not found" for nonexistent transcript_path', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-notfound-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const stdinJson = JSON.stringify({ transcript_path: '/tmp/nonexistent-transcript-99999.jsonl' });
|
||||
@@ -4563,7 +4642,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('extracts tool name and file path from entry.name/entry.input (not tool_name/tool_input)', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-r70-entryname-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
const transcriptPath = path.join(isoHome, 'transcript.jsonl');
|
||||
|
||||
@@ -4611,7 +4690,7 @@ async function runTests() {
|
||||
await asyncTest('shows selection prompt when no package manager preference found (default source)', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-r71-ss-default-${Date.now()}`);
|
||||
const isoProject = path.join(isoHome, 'project');
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true });
|
||||
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
|
||||
fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true });
|
||||
fs.mkdirSync(isoProject, { recursive: true });
|
||||
// No package.json, no lock files, no package-manager.json — forces default source
|
||||
@@ -4758,7 +4837,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('extracts user messages from entries where only message.role is user (not type or role)', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-msgrole-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const testDir = createTestDir();
|
||||
@@ -4825,7 +4904,7 @@ async function runTests() {
|
||||
// session-end.js line 50-55: rawContent is checked for string, then array, else ''
|
||||
// When content is a number (42), neither branch matches, text = '', message is skipped.
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-r81-numcontent-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
const transcriptPath = path.join(isoHome, 'transcript.jsonl');
|
||||
|
||||
@@ -4874,7 +4953,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('collects tool name from entry with tool_name but non-tool_use type', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-r82-toolname-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const transcriptPath = path.join(isoHome, 'transcript.jsonl');
|
||||
@@ -4912,7 +4991,7 @@ async function runTests() {
|
||||
if (
|
||||
await asyncTest('preserves file when marker present but regex does not match corrupted template', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-r82-tmpl-${Date.now()}`);
|
||||
const sessionsDir = path.join(isoHome, '.claude', 'sessions');
|
||||
const sessionsDir = getCanonicalSessionsDir(isoHome);
|
||||
fs.mkdirSync(sessionsDir, { recursive: true });
|
||||
|
||||
const today = new Date().toISOString().split('T')[0];
|
||||
@@ -5072,7 +5151,7 @@ Some random content without the expected ### Context to Load section
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
|
||||
// Read the session file to verify tool names and file paths were extracted
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
@@ -5193,7 +5272,7 @@ Some random content without the expected ### Context to Load section
|
||||
});
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
|
||||
const claudeDir = path.join(testDir, '.claude', 'sessions');
|
||||
const claudeDir = getCanonicalSessionsDir(testDir);
|
||||
if (fs.existsSync(claudeDir)) {
|
||||
const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp'));
|
||||
if (files.length > 0) {
|
||||
|
||||
@@ -79,6 +79,25 @@ function runHook(input, env = {}) {
|
||||
};
|
||||
}
|
||||
|
||||
function runRawHook(rawInput, env = {}) {
|
||||
const result = spawnSync('node', [script], {
|
||||
input: rawInput,
|
||||
encoding: 'utf8',
|
||||
env: {
|
||||
...process.env,
|
||||
ECC_HOOK_PROFILE: 'standard',
|
||||
...env
|
||||
},
|
||||
timeout: 15000,
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
return {
|
||||
code: result.status || 0,
|
||||
stdout: result.stdout || '',
|
||||
stderr: result.stderr || ''
|
||||
};
|
||||
}
|
||||
async function runTests() {
|
||||
console.log('\n=== Testing mcp-health-check.js ===\n');
|
||||
|
||||
@@ -95,6 +114,19 @@ async function runTests() {
|
||||
assert.strictEqual(result.stderr, '', 'Expected no stderr for non-MCP tool');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('blocks truncated MCP hook input by default', () => {
|
||||
const rawInput = JSON.stringify({ tool_name: 'mcp__flaky__search', tool_input: {} });
|
||||
const result = runRawHook(rawInput, {
|
||||
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
|
||||
ECC_HOOK_INPUT_TRUNCATED: '1',
|
||||
ECC_HOOK_INPUT_MAX_BYTES: '512'
|
||||
});
|
||||
|
||||
assert.strictEqual(result.code, 2, 'Expected truncated MCP input to block by default');
|
||||
assert.strictEqual(result.stdout, rawInput, 'Expected raw input passthrough on stdout');
|
||||
assert.ok(result.stderr.includes('Hook input exceeded 512 bytes'), `Expected size warning, got: ${result.stderr}`);
|
||||
assert.ok(/blocking search/i.test(result.stderr), `Expected blocking message, got: ${result.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
if (await asyncTest('marks healthy command MCP servers and allows the tool call', async () => {
|
||||
const tempDir = createTempDir();
|
||||
const configPath = path.join(tempDir, 'claude.json');
|
||||
|
||||
@@ -148,6 +148,24 @@ test('analysis temp file is created and cleaned up', () => {
|
||||
assert.ok(content.includes('rm -f "$prompt_file" "$analysis_file"'), 'Should clean up both prompt and analysis temp files');
|
||||
});
|
||||
|
||||
test('observer-loop uses project-local temp directory for analysis artifacts', () => {
|
||||
const content = fs.readFileSync(observerLoopPath, 'utf8');
|
||||
assert.ok(content.includes('observer_tmp_dir="${PROJECT_DIR}/.observer-tmp"'), 'Should keep observer temp files inside the project');
|
||||
assert.ok(content.includes('mktemp "${observer_tmp_dir}/ecc-observer-analysis.'), 'Analysis temp file should use the project temp dir');
|
||||
assert.ok(content.includes('mktemp "${observer_tmp_dir}/ecc-observer-prompt.'), 'Prompt temp file should use the project temp dir');
|
||||
});
|
||||
|
||||
test('observer-loop prompt requires direct instinct writes without asking permission', () => {
|
||||
const content = fs.readFileSync(observerLoopPath, 'utf8');
|
||||
const heredocStart = content.indexOf('cat > "$prompt_file" <<PROMPT');
|
||||
const heredocEnd = content.indexOf('\nPROMPT', heredocStart + 1);
|
||||
assert.ok(heredocStart > 0, 'Should find prompt heredoc start');
|
||||
assert.ok(heredocEnd > heredocStart, 'Should find prompt heredoc end');
|
||||
const promptSection = content.substring(heredocStart, heredocEnd);
|
||||
assert.ok(promptSection.includes('MUST write an instinct file directly'), 'Prompt should require direct file creation');
|
||||
assert.ok(promptSection.includes('Do NOT ask for permission'), 'Prompt should forbid permission-seeking');
|
||||
assert.ok(promptSection.includes('write or update the instinct file in this run'), 'Prompt should require same-run writes');
|
||||
});
|
||||
test('prompt references analysis_file not full OBSERVATIONS_FILE', () => {
|
||||
const content = fs.readFileSync(observerLoopPath, 'utf8');
|
||||
// The prompt heredoc should reference analysis_file for the Read instruction.
|
||||
@@ -157,7 +175,7 @@ test('prompt references analysis_file not full OBSERVATIONS_FILE', () => {
|
||||
assert.ok(heredocStart > 0, 'Should find prompt heredoc start');
|
||||
assert.ok(heredocEnd > heredocStart, 'Should find prompt heredoc end');
|
||||
const promptSection = content.substring(heredocStart, heredocEnd);
|
||||
assert.ok(promptSection.includes('${analysis_file}'), 'Prompt should point Claude at the sampled analysis file, not the full observations file');
|
||||
assert.ok(promptSection.includes('${analysis_relpath}'), 'Prompt should point Claude at the sampled analysis file (via relative path), not the full observations file');
|
||||
});
|
||||
|
||||
// ──────────────────────────────────────────────────────
|
||||
|
||||
@@ -54,47 +54,56 @@ function getCounterFilePath(sessionId) {
|
||||
return path.join(os.tmpdir(), `claude-tool-count-${sessionId}`);
|
||||
}
|
||||
|
||||
let counterContextSeq = 0;
|
||||
|
||||
function createCounterContext(prefix = 'test-compact') {
|
||||
counterContextSeq += 1;
|
||||
const sessionId = `${prefix}-${Date.now()}-${counterContextSeq}`;
|
||||
const counterFile = getCounterFilePath(sessionId);
|
||||
|
||||
return {
|
||||
sessionId,
|
||||
counterFile,
|
||||
cleanup() {
|
||||
try {
|
||||
fs.unlinkSync(counterFile);
|
||||
} catch (_err) {
|
||||
// Ignore missing temp files between runs
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing suggest-compact.js ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// Use a unique session ID per test run to avoid collisions
|
||||
const testSession = `test-compact-${Date.now()}`;
|
||||
const counterFile = getCounterFilePath(testSession);
|
||||
|
||||
// Cleanup helper
|
||||
function cleanupCounter() {
|
||||
try {
|
||||
fs.unlinkSync(counterFile);
|
||||
} catch (_err) {
|
||||
// Ignore error
|
||||
}
|
||||
}
|
||||
|
||||
// Basic functionality
|
||||
console.log('Basic counter functionality:');
|
||||
|
||||
if (test('creates counter file on first run', () => {
|
||||
cleanupCounter();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
assert.ok(fs.existsSync(counterFile), 'Counter file should be created');
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter should be 1 after first run');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('increments counter on subsequent runs', () => {
|
||||
cleanupCounter();
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 3, 'Counter should be 3 after three runs');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
@@ -102,28 +111,30 @@ function runTests() {
|
||||
console.log('\nThreshold suggestion:');
|
||||
|
||||
if (test('suggests compact at threshold (COMPACT_THRESHOLD=3)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
// Run 3 times with threshold=3
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
|
||||
assert.ok(
|
||||
result.stderr.includes('3 tool calls reached') || result.stderr.includes('consider /compact'),
|
||||
`Should suggest compact at threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('does NOT suggest compact before threshold', () => {
|
||||
cleanupCounter();
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '5' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '5' });
|
||||
const { sessionId, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '5' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '5' });
|
||||
assert.ok(
|
||||
!result.stderr.includes('StrategicCompact'),
|
||||
'Should NOT suggest compact before threshold'
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
@@ -131,18 +142,19 @@ function runTests() {
|
||||
console.log('\nInterval suggestion:');
|
||||
|
||||
if (test('suggests at threshold + 25 interval', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
// Set counter to threshold+24 (so next run = threshold+25)
|
||||
// threshold=3, so we need count=28 → 25 calls past threshold
|
||||
// Write 27 to the counter file, next run will be 28 = 3 + 25
|
||||
fs.writeFileSync(counterFile, '27');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
|
||||
// count=28, threshold=3, 28-3=25, 25 % 25 === 0 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('28 tool calls') || result.stderr.includes('checkpoint'),
|
||||
`Should suggest at threshold+25 interval. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
@@ -150,42 +162,45 @@ function runTests() {
|
||||
console.log('\nEnvironment variable handling:');
|
||||
|
||||
if (test('uses default threshold (50) when COMPACT_THRESHOLD is not set', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
// Write counter to 49, next run will be 50 = default threshold
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
// Remove COMPACT_THRESHOLD from env
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should use default threshold of 50. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('ignores invalid COMPACT_THRESHOLD (negative)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '-5' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '-5' });
|
||||
// Invalid threshold falls back to 50
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for negative threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('ignores non-numeric COMPACT_THRESHOLD', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: 'abc' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: 'abc' });
|
||||
// NaN falls back to 50
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for non-numeric threshold. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
@@ -193,38 +208,41 @@ function runTests() {
|
||||
console.log('\nCorrupted counter file:');
|
||||
|
||||
if (test('resets counter on corrupted file content', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, 'not-a-number');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// Corrupted file → parsed is NaN → falls back to count=1
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should reset to 1 on corrupted file');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('resets counter on extremely large value', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
// Value > 1000000 should be clamped
|
||||
fs.writeFileSync(counterFile, '9999999');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.strictEqual(result.code, 0);
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should reset to 1 for value > 1000000');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('handles empty counter file', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.strictEqual(result.code, 0);
|
||||
// Empty file → bytesRead=0 → count starts at 1
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Should start at 1 for empty file');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
@@ -255,10 +273,11 @@ function runTests() {
|
||||
console.log('\nExit code:');
|
||||
|
||||
if (test('always exits 0 (never blocks Claude)', () => {
|
||||
cleanupCounter();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
const { sessionId, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.strictEqual(result.code, 0, 'Should always exit 0');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
@@ -266,48 +285,52 @@ function runTests() {
|
||||
console.log('\nThreshold boundary values:');
|
||||
|
||||
if (test('rejects COMPACT_THRESHOLD=0 (falls back to 50)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '0' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '0' });
|
||||
// 0 is invalid (must be > 0), falls back to 50, count becomes 50 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for threshold=0. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('accepts COMPACT_THRESHOLD=10000 (boundary max)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '9999');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '10000' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '10000' });
|
||||
// count becomes 10000, threshold=10000 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('10000 tool calls reached'),
|
||||
`Should accept threshold=10000. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('rejects COMPACT_THRESHOLD=10001 (falls back to 50)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '10001' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '10001' });
|
||||
// 10001 > 10000, invalid, falls back to 50, count becomes 50 → should suggest
|
||||
assert.ok(
|
||||
result.stderr.includes('50 tool calls reached'),
|
||||
`Should fallback to 50 for threshold=10001. Got stderr: ${result.stderr}`
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('rejects float COMPACT_THRESHOLD (e.g. 3.5)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3.5' });
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3.5' });
|
||||
// parseInt('3.5') = 3, which is valid (> 0 && <= 10000)
|
||||
// count becomes 50, threshold=3, 50-3=47, 47%25≠0 and 50≠3 → no suggestion
|
||||
assert.strictEqual(result.code, 0);
|
||||
@@ -316,28 +339,30 @@ function runTests() {
|
||||
!result.stderr.includes('StrategicCompact'),
|
||||
'Float threshold should be parseInt-ed to 3, no suggestion at count=50'
|
||||
);
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('counter value at exact boundary 1000000 is valid', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '999999');
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
|
||||
// 999999 is valid (> 0, <= 1000000), count becomes 1000000
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1000000, 'Counter at 1000000 boundary should be valid');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('counter value at 1000001 is clamped (reset to 1)', () => {
|
||||
cleanupCounter();
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '1000001');
|
||||
runCompact({ CLAUDE_SESSION_ID: testSession });
|
||||
runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
|
||||
assert.strictEqual(count, 1, 'Counter > 1000000 should be reset to 1');
|
||||
cleanupCounter();
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
|
||||
@@ -90,6 +90,14 @@ function runHookWithInput(scriptPath, input = {}, env = {}, timeoutMs = 10000) {
|
||||
});
|
||||
}
|
||||
|
||||
function getSessionStartPayload(stdout) {
|
||||
assert.ok(stdout.trim(), 'Expected SessionStart hook to emit stdout payload');
|
||||
const payload = JSON.parse(stdout);
|
||||
assert.strictEqual(payload.hookSpecificOutput?.hookEventName, 'SessionStart');
|
||||
assert.strictEqual(typeof payload.hookSpecificOutput?.additionalContext, 'string');
|
||||
return payload;
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a hook command string exactly as declared in hooks.json.
|
||||
* Supports wrapped node script commands and shell wrappers.
|
||||
@@ -249,11 +257,14 @@ async function runTests() {
|
||||
// ==========================================
|
||||
console.log('\nHook Output Format:');
|
||||
|
||||
if (await asyncTest('hooks output messages to stderr (not stdout)', async () => {
|
||||
if (await asyncTest('session-start logs diagnostics to stderr and emits structured stdout when context exists', async () => {
|
||||
const result = await runHookWithInput(path.join(scriptsDir, 'session-start.js'), {});
|
||||
// Session-start should write info to stderr
|
||||
assert.ok(result.stderr.length > 0, 'Should have stderr output');
|
||||
assert.ok(result.stderr.includes('[SessionStart]'), 'Should have [SessionStart] prefix');
|
||||
const payload = getSessionStartPayload(result.stdout);
|
||||
assert.ok(payload.hookSpecificOutput, 'Should include hookSpecificOutput');
|
||||
assert.strictEqual(payload.hookSpecificOutput.hookEventName, 'SessionStart');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('PreCompact hook logs to stderr', async () => {
|
||||
|
||||
@@ -8,6 +8,7 @@ const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const {
|
||||
findDefaultInstallConfigPath,
|
||||
loadInstallConfig,
|
||||
resolveInstallConfigPath,
|
||||
} = require('../../scripts/lib/install/config');
|
||||
@@ -49,6 +50,32 @@ function runTests() {
|
||||
assert.strictEqual(resolved, path.join(cwd, 'configs', 'ecc-install.json'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('finds the default project install config in the provided cwd', () => {
|
||||
const cwd = createTempDir('install-config-');
|
||||
|
||||
try {
|
||||
const configPath = path.join(cwd, 'ecc-install.json');
|
||||
writeJson(configPath, {
|
||||
version: 1,
|
||||
profile: 'core',
|
||||
});
|
||||
|
||||
assert.strictEqual(findDefaultInstallConfigPath({ cwd }), configPath);
|
||||
} finally {
|
||||
cleanup(cwd);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('returns null when no default project install config exists', () => {
|
||||
const cwd = createTempDir('install-config-');
|
||||
|
||||
try {
|
||||
assert.strictEqual(findDefaultInstallConfigPath({ cwd }), null);
|
||||
} finally {
|
||||
cleanup(cwd);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('loads and normalizes a valid install config', () => {
|
||||
const cwd = createTempDir('install-config-');
|
||||
|
||||
|
||||
@@ -112,14 +112,17 @@ function runTests() {
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('resolves antigravity profiles by skipping incompatible dependency trees', () => {
|
||||
if (test('resolves antigravity profiles while skipping only unsupported modules', () => {
|
||||
const projectRoot = '/workspace/app';
|
||||
const plan = resolveInstallPlan({ profileId: 'core', target: 'antigravity', projectRoot });
|
||||
|
||||
assert.deepStrictEqual(plan.selectedModuleIds, ['rules-core', 'agents-core', 'commands-core']);
|
||||
assert.deepStrictEqual(
|
||||
plan.selectedModuleIds,
|
||||
['rules-core', 'agents-core', 'commands-core', 'platform-configs', 'workflow-quality']
|
||||
);
|
||||
assert.ok(plan.skippedModuleIds.includes('hooks-runtime'));
|
||||
assert.ok(plan.skippedModuleIds.includes('platform-configs'));
|
||||
assert.ok(plan.skippedModuleIds.includes('workflow-quality'));
|
||||
assert.ok(!plan.skippedModuleIds.includes('platform-configs'));
|
||||
assert.ok(!plan.skippedModuleIds.includes('workflow-quality'));
|
||||
assert.strictEqual(plan.targetAdapterId, 'antigravity-project');
|
||||
assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.agent'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
@@ -4,8 +4,9 @@
|
||||
* Covers the ECC root resolution fallback chain:
|
||||
* 1. CLAUDE_PLUGIN_ROOT env var
|
||||
* 2. Standard install (~/.claude/)
|
||||
* 3. Plugin cache auto-detection
|
||||
* 4. Fallback to ~/.claude/
|
||||
* 3. Exact legacy plugin roots under ~/.claude/plugins/
|
||||
* 4. Plugin cache auto-detection
|
||||
* 5. Fallback to ~/.claude/
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
@@ -39,6 +40,13 @@ function setupStandardInstall(homeDir) {
|
||||
return claudeDir;
|
||||
}
|
||||
|
||||
function setupLegacyPluginInstall(homeDir, segments) {
|
||||
const legacyDir = path.join(homeDir, '.claude', 'plugins', ...segments);
|
||||
const scriptDir = path.join(legacyDir, 'scripts', 'lib');
|
||||
fs.mkdirSync(scriptDir, { recursive: true });
|
||||
fs.writeFileSync(path.join(scriptDir, 'utils.js'), '// stub');
|
||||
return legacyDir;
|
||||
}
|
||||
function setupPluginCache(homeDir, orgName, version) {
|
||||
const cacheDir = path.join(
|
||||
homeDir, '.claude', 'plugins', 'cache',
|
||||
@@ -103,6 +111,50 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('finds exact legacy plugin install at ~/.claude/plugins/everything-claude-code', () => {
|
||||
const homeDir = createTempDir();
|
||||
try {
|
||||
const expected = setupLegacyPluginInstall(homeDir, ['everything-claude-code']);
|
||||
const result = resolveEccRoot({ envRoot: '', homeDir });
|
||||
assert.strictEqual(result, expected);
|
||||
} finally {
|
||||
fs.rmSync(homeDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('finds exact legacy plugin install at ~/.claude/plugins/everything-claude-code@everything-claude-code', () => {
|
||||
const homeDir = createTempDir();
|
||||
try {
|
||||
const expected = setupLegacyPluginInstall(homeDir, ['everything-claude-code@everything-claude-code']);
|
||||
const result = resolveEccRoot({ envRoot: '', homeDir });
|
||||
assert.strictEqual(result, expected);
|
||||
} finally {
|
||||
fs.rmSync(homeDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('finds marketplace legacy plugin install at ~/.claude/plugins/marketplace/everything-claude-code', () => {
|
||||
const homeDir = createTempDir();
|
||||
try {
|
||||
const expected = setupLegacyPluginInstall(homeDir, ['marketplace', 'everything-claude-code']);
|
||||
const result = resolveEccRoot({ envRoot: '', homeDir });
|
||||
assert.strictEqual(result, expected);
|
||||
} finally {
|
||||
fs.rmSync(homeDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('prefers exact legacy plugin install over plugin cache', () => {
|
||||
const homeDir = createTempDir();
|
||||
try {
|
||||
const expected = setupLegacyPluginInstall(homeDir, ['marketplace', 'everything-claude-code']);
|
||||
setupPluginCache(homeDir, 'everything-claude-code', '1.8.0');
|
||||
const result = resolveEccRoot({ envRoot: '', homeDir });
|
||||
assert.strictEqual(result, expected);
|
||||
} finally {
|
||||
fs.rmSync(homeDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
// ─── Plugin Cache Auto-Detection ───
|
||||
|
||||
if (test('discovers plugin root from cache directory', () => {
|
||||
@@ -207,6 +259,22 @@ function runTests() {
|
||||
assert.strictEqual(result, '/inline/test/root');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('INLINE_RESOLVE discovers exact legacy plugin root when env var is unset', () => {
|
||||
const homeDir = createTempDir();
|
||||
try {
|
||||
const expected = setupLegacyPluginInstall(homeDir, ['marketplace', 'everything-claude-code']);
|
||||
const { execFileSync } = require('child_process');
|
||||
const result = execFileSync('node', [
|
||||
'-e', `console.log(${INLINE_RESOLVE})`,
|
||||
], {
|
||||
env: { PATH: process.env.PATH, HOME: homeDir, USERPROFILE: homeDir },
|
||||
encoding: 'utf8',
|
||||
}).trim();
|
||||
assert.strictEqual(result, expected);
|
||||
} finally {
|
||||
fs.rmSync(homeDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
if (test('INLINE_RESOLVE discovers plugin cache when env var is unset', () => {
|
||||
const homeDir = createTempDir();
|
||||
try {
|
||||
|
||||
@@ -341,8 +341,10 @@ src/main.ts
|
||||
// Override HOME to a temp dir for isolated getAllSessions/getSessionById tests
|
||||
// On Windows, os.homedir() uses USERPROFILE, not HOME — set both for cross-platform
|
||||
const tmpHome = path.join(os.tmpdir(), `ecc-session-mgr-test-${Date.now()}`);
|
||||
const tmpSessionsDir = path.join(tmpHome, '.claude', 'sessions');
|
||||
fs.mkdirSync(tmpSessionsDir, { recursive: true });
|
||||
const tmpCanonicalSessionsDir = path.join(tmpHome, '.claude', 'session-data');
|
||||
const tmpLegacySessionsDir = path.join(tmpHome, '.claude', 'sessions');
|
||||
fs.mkdirSync(tmpCanonicalSessionsDir, { recursive: true });
|
||||
fs.mkdirSync(tmpLegacySessionsDir, { recursive: true });
|
||||
const origHome = process.env.HOME;
|
||||
const origUserProfile = process.env.USERPROFILE;
|
||||
|
||||
@@ -355,7 +357,10 @@ src/main.ts
|
||||
{ name: '2026-02-10-session.tmp', content: '# Old format session' },
|
||||
];
|
||||
for (let i = 0; i < testSessions.length; i++) {
|
||||
const filePath = path.join(tmpSessionsDir, testSessions[i].name);
|
||||
const targetDir = testSessions[i].name === '2026-02-10-session.tmp'
|
||||
? tmpLegacySessionsDir
|
||||
: tmpCanonicalSessionsDir;
|
||||
const filePath = path.join(targetDir, testSessions[i].name);
|
||||
fs.writeFileSync(filePath, testSessions[i].content);
|
||||
// Stagger modification times so sort order is deterministic
|
||||
const mtime = new Date(Date.now() - (testSessions.length - i) * 60000);
|
||||
@@ -399,6 +404,23 @@ src/main.ts
|
||||
assert.strictEqual(result.sessions[0].shortId, 'abcd1234');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions prefers canonical session-data duplicates over newer legacy copies', () => {
|
||||
const duplicateName = '2026-01-15-abcd1234-session.tmp';
|
||||
const legacyDuplicatePath = path.join(tmpLegacySessionsDir, duplicateName);
|
||||
const legacyMtime = new Date(Date.now() + 60000);
|
||||
|
||||
try {
|
||||
fs.writeFileSync(legacyDuplicatePath, '# Legacy duplicate');
|
||||
fs.utimesSync(legacyDuplicatePath, legacyMtime, legacyMtime);
|
||||
|
||||
const result = sessionManager.getAllSessions({ search: 'abcd', limit: 100 });
|
||||
assert.strictEqual(result.total, 1, 'Duplicate filenames should be deduped');
|
||||
assert.ok(result.sessions[0].sessionPath.includes('session-data'), 'Canonical session-data copy should win');
|
||||
} finally {
|
||||
fs.rmSync(legacyDuplicatePath, { force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions returns sorted by newest first', () => {
|
||||
const result = sessionManager.getAllSessions({ limit: 100 });
|
||||
for (let i = 1; i < result.sessions.length; i++) {
|
||||
@@ -423,8 +445,8 @@ src/main.ts
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getAllSessions ignores non-.tmp files', () => {
|
||||
fs.writeFileSync(path.join(tmpSessionsDir, 'notes.txt'), 'not a session');
|
||||
fs.writeFileSync(path.join(tmpSessionsDir, 'compaction-log.txt'), 'log');
|
||||
fs.writeFileSync(path.join(tmpCanonicalSessionsDir, 'notes.txt'), 'not a session');
|
||||
fs.writeFileSync(path.join(tmpCanonicalSessionsDir, 'compaction-log.txt'), 'log');
|
||||
const result = sessionManager.getAllSessions({ limit: 100 });
|
||||
assert.strictEqual(result.total, 5, 'Should only count .tmp session files');
|
||||
})) passed++; else failed++;
|
||||
@@ -444,6 +466,23 @@ src/main.ts
|
||||
assert.strictEqual(result.shortId, 'abcd1234');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionById prefers canonical session-data duplicates over newer legacy copies', () => {
|
||||
const duplicateName = '2026-01-15-abcd1234-session.tmp';
|
||||
const legacyDuplicatePath = path.join(tmpLegacySessionsDir, duplicateName);
|
||||
const legacyMtime = new Date(Date.now() + 120000);
|
||||
|
||||
try {
|
||||
fs.writeFileSync(legacyDuplicatePath, '# Legacy duplicate');
|
||||
fs.utimesSync(legacyDuplicatePath, legacyMtime, legacyMtime);
|
||||
|
||||
const result = sessionManager.getSessionById('abcd1234');
|
||||
assert.ok(result, 'Should still resolve the duplicate session');
|
||||
assert.ok(result.sessionPath.includes('session-data'), 'Canonical session-data copy should win');
|
||||
} finally {
|
||||
fs.rmSync(legacyDuplicatePath, { force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionById finds by full filename', () => {
|
||||
const result = sessionManager.getSessionById('2026-01-15-abcd1234-session.tmp');
|
||||
assert.ok(result, 'Should find session by full filename');
|
||||
@@ -477,6 +516,12 @@ src/main.ts
|
||||
assert.strictEqual(result, null, 'Empty string should not match any session');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionById returns null for non-string IDs', () => {
|
||||
assert.strictEqual(sessionManager.getSessionById(null), null);
|
||||
assert.strictEqual(sessionManager.getSessionById(undefined), null);
|
||||
assert.strictEqual(sessionManager.getSessionById(42), null);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionById metadata and stats populated when includeContent=true', () => {
|
||||
const result = sessionManager.getSessionById('abcd1234', true);
|
||||
assert.ok(result, 'Should find session');
|
||||
@@ -990,7 +1035,7 @@ src/main.ts
|
||||
assert.ok(result.endsWith(filename), `Path should end with filename, got: ${result}`);
|
||||
// Since HOME is overridden, sessions dir should be under tmpHome
|
||||
assert.ok(result.includes('.claude'), 'Path should include .claude directory');
|
||||
assert.ok(result.includes('sessions'), 'Path should include sessions directory');
|
||||
assert.ok(result.includes('session-data'), 'Path should use canonical session-data directory');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 66: getSessionById noIdMatch path (date-only string for old format) ──
|
||||
@@ -1601,18 +1646,13 @@ src/main.ts
|
||||
'Null search should return sessions (confirming they exist but space filtered them)');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 98: getSessionById with null sessionId throws TypeError ──
|
||||
console.log('\nRound 98: getSessionById (null sessionId — crashes at line 297):');
|
||||
// ── Round 98: getSessionById with null sessionId returns null ──
|
||||
console.log('\nRound 98: getSessionById (null sessionId — guarded null return):');
|
||||
|
||||
if (test('getSessionById(null) throws TypeError when session files exist', () => {
|
||||
// session-manager.js line 297: `sessionId.length > 0` — calling .length on null
|
||||
// throws TypeError because there's no early guard for null/undefined input.
|
||||
// This only surfaces when valid .tmp files exist in the sessions directory.
|
||||
assert.throws(
|
||||
() => sessionManager.getSessionById(null),
|
||||
{ name: 'TypeError' },
|
||||
'null.length should throw TypeError (no input guard at function entry)'
|
||||
);
|
||||
if (test('getSessionById(null) returns null when session files exist', () => {
|
||||
// Keep a populated sessions directory so the early input guard is exercised even when
|
||||
// candidate files are present.
|
||||
assert.strictEqual(sessionManager.getSessionById(null), null);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Cleanup test environment for Rounds 95-98 that needed sessions
|
||||
@@ -1629,18 +1669,13 @@ src/main.ts
|
||||
// best-effort
|
||||
}
|
||||
|
||||
// ── Round 98: parseSessionFilename with null input throws TypeError ──
|
||||
console.log('\nRound 98: parseSessionFilename (null input — crashes at line 30):');
|
||||
// ── Round 98: parseSessionFilename with null input returns null ──
|
||||
console.log('\nRound 98: parseSessionFilename (null input is safely rejected):');
|
||||
|
||||
if (test('parseSessionFilename(null) throws TypeError because null has no .match()', () => {
|
||||
// session-manager.js line 30: `filename.match(SESSION_FILENAME_REGEX)`
|
||||
// When filename is null, null.match() throws TypeError.
|
||||
// Function lacks a type guard like `if (!filename || typeof filename !== 'string')`.
|
||||
assert.throws(
|
||||
() => sessionManager.parseSessionFilename(null),
|
||||
{ name: 'TypeError' },
|
||||
'null.match() should throw TypeError (no type guard on filename parameter)'
|
||||
);
|
||||
if (test('parseSessionFilename(null) returns null instead of throwing', () => {
|
||||
assert.strictEqual(sessionManager.parseSessionFilename(null), null);
|
||||
assert.strictEqual(sessionManager.parseSessionFilename(undefined), null);
|
||||
assert.strictEqual(sessionManager.parseSessionFilename(123), null);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 99: writeSessionContent with null path returns false (error caught) ──
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
// Import the module
|
||||
const utils = require('../../scripts/lib/utils');
|
||||
@@ -68,7 +69,13 @@ function runTests() {
|
||||
const sessionsDir = utils.getSessionsDir();
|
||||
const claudeDir = utils.getClaudeDir();
|
||||
assert.ok(sessionsDir.startsWith(claudeDir), 'Sessions should be under Claude dir');
|
||||
assert.ok(sessionsDir.includes('sessions'), 'Should contain sessions');
|
||||
assert.ok(sessionsDir.endsWith(path.join('.claude', 'session-data')) || sessionsDir.endsWith('/.claude/session-data'), 'Should use canonical session-data directory');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionSearchDirs includes canonical and legacy paths', () => {
|
||||
const searchDirs = utils.getSessionSearchDirs();
|
||||
assert.strictEqual(searchDirs[0], utils.getSessionsDir(), 'Canonical session dir should be searched first');
|
||||
assert.strictEqual(searchDirs[1], utils.getLegacySessionsDir(), 'Legacy session dir should be searched second');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getTempDir returns valid temp directory', () => {
|
||||
@@ -118,17 +125,94 @@ function runTests() {
|
||||
assert.ok(name && name.length > 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// sanitizeSessionId tests
|
||||
console.log('\nsanitizeSessionId:');
|
||||
|
||||
if (test('sanitizeSessionId strips leading dots', () => {
|
||||
assert.strictEqual(utils.sanitizeSessionId('.claude'), 'claude');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId replaces dots and spaces', () => {
|
||||
assert.strictEqual(utils.sanitizeSessionId('my.project'), 'my-project');
|
||||
assert.strictEqual(utils.sanitizeSessionId('my project'), 'my-project');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId replaces special chars and collapses runs', () => {
|
||||
assert.strictEqual(utils.sanitizeSessionId('project@v2'), 'project-v2');
|
||||
assert.strictEqual(utils.sanitizeSessionId('a...b'), 'a-b');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId preserves valid chars', () => {
|
||||
assert.strictEqual(utils.sanitizeSessionId('my-project_123'), 'my-project_123');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId appends hash suffix for all Windows reserved device names', () => {
|
||||
for (const reservedName of ['CON', 'prn', 'Aux', 'nul', 'COM1', 'lpt9']) {
|
||||
const sanitized = utils.sanitizeSessionId(reservedName);
|
||||
assert.ok(sanitized, `Expected sanitized output for ${reservedName}`);
|
||||
assert.notStrictEqual(sanitized.toUpperCase(), reservedName.toUpperCase());
|
||||
assert.ok(/-[a-f0-9]{6}$/i.test(sanitized), `Expected deterministic hash suffix for ${reservedName}, got ${sanitized}`);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId returns null for empty or punctuation-only values', () => {
|
||||
assert.strictEqual(utils.sanitizeSessionId(''), null);
|
||||
assert.strictEqual(utils.sanitizeSessionId(null), null);
|
||||
assert.strictEqual(utils.sanitizeSessionId(undefined), null);
|
||||
assert.strictEqual(utils.sanitizeSessionId('...'), null);
|
||||
assert.strictEqual(utils.sanitizeSessionId('…'), null);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId returns stable hashes for non-ASCII values', () => {
|
||||
const chinese = utils.sanitizeSessionId('我的项目');
|
||||
const cyrillic = utils.sanitizeSessionId('проект');
|
||||
const emoji = utils.sanitizeSessionId('🚀🎉');
|
||||
assert.ok(/^[a-f0-9]{8}$/.test(chinese), `Expected 8-char hash, got: ${chinese}`);
|
||||
assert.ok(/^[a-f0-9]{8}$/.test(cyrillic), `Expected 8-char hash, got: ${cyrillic}`);
|
||||
assert.ok(/^[a-f0-9]{8}$/.test(emoji), `Expected 8-char hash, got: ${emoji}`);
|
||||
assert.notStrictEqual(chinese, cyrillic);
|
||||
assert.notStrictEqual(chinese, emoji);
|
||||
assert.strictEqual(utils.sanitizeSessionId('日本語プロジェクト'), utils.sanitizeSessionId('日本語プロジェクト'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId disambiguates mixed-script names from pure ASCII', () => {
|
||||
const mixed = utils.sanitizeSessionId('我的app');
|
||||
const mixedTwo = utils.sanitizeSessionId('他的app');
|
||||
const pure = utils.sanitizeSessionId('app');
|
||||
assert.strictEqual(pure, 'app');
|
||||
assert.ok(mixed.startsWith('app-'), `Expected mixed-script prefix, got: ${mixed}`);
|
||||
assert.notStrictEqual(mixed, pure);
|
||||
assert.notStrictEqual(mixed, mixedTwo);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId is idempotent', () => {
|
||||
for (const input of ['.claude', 'my.project', 'project@v2', 'a...b', 'my-project_123']) {
|
||||
const once = utils.sanitizeSessionId(input);
|
||||
const twice = utils.sanitizeSessionId(once);
|
||||
assert.strictEqual(once, twice, `Expected idempotent result for ${input}`);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('sanitizeSessionId preserves readable prefixes for Windows reserved device names', () => {
|
||||
const con = utils.sanitizeSessionId('CON');
|
||||
const aux = utils.sanitizeSessionId('aux');
|
||||
assert.ok(con.startsWith('CON-'), `Expected CON to get a suffix, got: ${con}`);
|
||||
assert.ok(aux.startsWith('aux-'), `Expected aux to get a suffix, got: ${aux}`);
|
||||
assert.notStrictEqual(utils.sanitizeSessionId('COM1'), 'COM1');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Session ID tests
|
||||
console.log('\nSession ID Functions:');
|
||||
|
||||
if (test('getSessionIdShort falls back to project name', () => {
|
||||
if (test('getSessionIdShort falls back to sanitized project name', () => {
|
||||
const original = process.env.CLAUDE_SESSION_ID;
|
||||
delete process.env.CLAUDE_SESSION_ID;
|
||||
try {
|
||||
const shortId = utils.getSessionIdShort();
|
||||
assert.strictEqual(shortId, utils.getProjectName());
|
||||
assert.strictEqual(shortId, utils.sanitizeSessionId(utils.getProjectName()));
|
||||
} finally {
|
||||
if (original) process.env.CLAUDE_SESSION_ID = original;
|
||||
if (original !== undefined) process.env.CLAUDE_SESSION_ID = original;
|
||||
else delete process.env.CLAUDE_SESSION_ID;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
@@ -154,6 +238,28 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('getSessionIdShort sanitizes explicit fallback parameter', () => {
|
||||
if (process.platform === 'win32') {
|
||||
console.log(' (skipped — root CWD differs on Windows)');
|
||||
return true;
|
||||
}
|
||||
|
||||
const utilsPath = path.join(__dirname, '..', '..', 'scripts', 'lib', 'utils.js');
|
||||
const script = `
|
||||
const utils = require('${utilsPath.replace(/'/g, "\\'")}');
|
||||
process.stdout.write(utils.getSessionIdShort('my.fallback'));
|
||||
`;
|
||||
const result = spawnSync('node', ['-e', script], {
|
||||
encoding: 'utf8',
|
||||
cwd: '/',
|
||||
env: { ...process.env, CLAUDE_SESSION_ID: '' },
|
||||
timeout: 10000
|
||||
});
|
||||
|
||||
assert.strictEqual(result.status, 0, `Expected exit 0, got ${result.status}. stderr: ${result.stderr}`);
|
||||
assert.strictEqual(result.stdout, 'my-fallback');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// File operations tests
|
||||
console.log('\nFile Operations:');
|
||||
|
||||
@@ -1415,25 +1521,26 @@ function runTests() {
|
||||
// ── Round 97: getSessionIdShort with whitespace-only CLAUDE_SESSION_ID ──
|
||||
console.log('\nRound 97: getSessionIdShort (whitespace-only session ID):');
|
||||
|
||||
if (test('getSessionIdShort returns whitespace when CLAUDE_SESSION_ID is all spaces', () => {
|
||||
// utils.js line 116: if (sessionId && sessionId.length > 0) — ' ' is truthy
|
||||
// and has length > 0, so it passes the check instead of falling back.
|
||||
const original = process.env.CLAUDE_SESSION_ID;
|
||||
try {
|
||||
process.env.CLAUDE_SESSION_ID = ' '; // 10 spaces
|
||||
const result = utils.getSessionIdShort('fallback');
|
||||
// slice(-8) on 10 spaces returns 8 spaces — not the expected fallback
|
||||
assert.strictEqual(result, ' ',
|
||||
'Whitespace-only ID should return 8 trailing spaces (no trim check)');
|
||||
assert.strictEqual(result.trim().length, 0,
|
||||
'Result should be entirely whitespace (demonstrating the missing trim)');
|
||||
} finally {
|
||||
if (original !== undefined) {
|
||||
process.env.CLAUDE_SESSION_ID = original;
|
||||
} else {
|
||||
delete process.env.CLAUDE_SESSION_ID;
|
||||
}
|
||||
if (test('getSessionIdShort sanitizes whitespace-only CLAUDE_SESSION_ID to fallback', () => {
|
||||
if (process.platform === 'win32') {
|
||||
console.log(' (skipped — root CWD differs on Windows)');
|
||||
return true;
|
||||
}
|
||||
|
||||
const utilsPath = path.join(__dirname, '..', '..', 'scripts', 'lib', 'utils.js');
|
||||
const script = `
|
||||
const utils = require('${utilsPath.replace(/'/g, "\\'")}');
|
||||
process.stdout.write(utils.getSessionIdShort('fallback'));
|
||||
`;
|
||||
const result = spawnSync('node', ['-e', script], {
|
||||
encoding: 'utf8',
|
||||
cwd: '/',
|
||||
env: { ...process.env, CLAUDE_SESSION_ID: ' ' },
|
||||
timeout: 10000
|
||||
});
|
||||
|
||||
assert.strictEqual(result.status, 0, `Expected exit 0, got ${result.status}. stderr: ${result.stderr}`);
|
||||
assert.strictEqual(result.stdout, 'fallback');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Round 97: countInFile with same RegExp object called twice (lastIndex reuse) ──
|
||||
|
||||
277
tests/plugin-manifest.test.js
Normal file
277
tests/plugin-manifest.test.js
Normal file
@@ -0,0 +1,277 @@
|
||||
/**
|
||||
* Tests for plugin manifests:
|
||||
* - .claude-plugin/plugin.json (Claude Code plugin)
|
||||
* - .codex-plugin/plugin.json (Codex native plugin)
|
||||
* - .mcp.json (MCP server config at plugin root)
|
||||
* - .agents/plugins/marketplace.json (Codex marketplace discovery)
|
||||
*
|
||||
* Enforces rules from:
|
||||
* - .claude-plugin/PLUGIN_SCHEMA_NOTES.md (Claude Code validator rules)
|
||||
* - https://platform.openai.com/docs/codex/plugins (Codex official docs)
|
||||
*
|
||||
* Run with: node tests/run-all.js
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const repoRoot = path.resolve(__dirname, '..');
|
||||
const repoRootWithSep = `${repoRoot}${path.sep}`;
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` ✓ ${name}`);
|
||||
passed++;
|
||||
} catch (err) {
|
||||
console.log(` ✗ ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
|
||||
function loadJsonObject(filePath, label) {
|
||||
assert.ok(fs.existsSync(filePath), `Expected ${label} to exist`);
|
||||
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
} catch (error) {
|
||||
assert.fail(`Expected ${label} to contain valid JSON: ${error.message}`);
|
||||
}
|
||||
|
||||
assert.ok(
|
||||
parsed && typeof parsed === 'object' && !Array.isArray(parsed),
|
||||
`Expected ${label} to contain a JSON object`,
|
||||
);
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function assertSafeRepoRelativePath(relativePath, label) {
|
||||
const normalized = path.posix.normalize(relativePath.replace(/\\/g, '/'));
|
||||
|
||||
assert.ok(!path.isAbsolute(relativePath), `${label} must not be absolute: ${relativePath}`);
|
||||
assert.ok(
|
||||
!normalized.startsWith('../') && !normalized.includes('/../'),
|
||||
`${label} must not traverse directories: ${relativePath}`,
|
||||
);
|
||||
}
|
||||
|
||||
// ── Claude plugin manifest ────────────────────────────────────────────────────
|
||||
console.log('\n=== .claude-plugin/plugin.json ===\n');
|
||||
|
||||
const claudePluginPath = path.join(repoRoot, '.claude-plugin', 'plugin.json');
|
||||
|
||||
test('claude plugin.json exists', () => {
|
||||
assert.ok(fs.existsSync(claudePluginPath), 'Expected .claude-plugin/plugin.json to exist');
|
||||
});
|
||||
|
||||
const claudePlugin = loadJsonObject(claudePluginPath, '.claude-plugin/plugin.json');
|
||||
|
||||
test('claude plugin.json has version field', () => {
|
||||
assert.ok(claudePlugin.version, 'Expected version field');
|
||||
});
|
||||
|
||||
test('claude plugin.json agents is an array', () => {
|
||||
assert.ok(Array.isArray(claudePlugin.agents), 'Expected agents to be an array (not a string/directory)');
|
||||
});
|
||||
|
||||
test('claude plugin.json agents uses explicit file paths (not directories)', () => {
|
||||
for (const agentPath of claudePlugin.agents) {
|
||||
assertSafeRepoRelativePath(agentPath, 'Agent path');
|
||||
assert.ok(
|
||||
agentPath.endsWith('.md'),
|
||||
`Expected explicit .md file path, got: ${agentPath}`,
|
||||
);
|
||||
assert.ok(
|
||||
!agentPath.endsWith('/'),
|
||||
`Expected explicit file path, not directory, got: ${agentPath}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
test('claude plugin.json all agent files exist', () => {
|
||||
for (const agentRelPath of claudePlugin.agents) {
|
||||
assertSafeRepoRelativePath(agentRelPath, 'Agent path');
|
||||
const absolute = path.resolve(repoRoot, agentRelPath);
|
||||
assert.ok(
|
||||
absolute === repoRoot || absolute.startsWith(repoRootWithSep),
|
||||
`Agent path resolves outside repo root: ${agentRelPath}`,
|
||||
);
|
||||
assert.ok(
|
||||
fs.existsSync(absolute),
|
||||
`Agent file missing: ${agentRelPath}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
test('claude plugin.json skills is an array', () => {
|
||||
assert.ok(Array.isArray(claudePlugin.skills), 'Expected skills to be an array');
|
||||
});
|
||||
|
||||
test('claude plugin.json commands is an array', () => {
|
||||
assert.ok(Array.isArray(claudePlugin.commands), 'Expected commands to be an array');
|
||||
});
|
||||
|
||||
test('claude plugin.json does NOT have explicit hooks declaration', () => {
|
||||
assert.ok(
|
||||
!('hooks' in claudePlugin),
|
||||
'hooks field must NOT be declared — Claude Code v2.1+ auto-loads hooks/hooks.json by convention',
|
||||
);
|
||||
});
|
||||
|
||||
// ── Codex plugin manifest ─────────────────────────────────────────────────────
|
||||
// Per official docs: https://platform.openai.com/docs/codex/plugins
|
||||
// - .codex-plugin/plugin.json is the required manifest
|
||||
// - skills, mcpServers, apps are STRING paths relative to plugin root (not arrays)
|
||||
// - .mcp.json must be at plugin root (NOT inside .codex-plugin/)
|
||||
console.log('\n=== .codex-plugin/plugin.json ===\n');
|
||||
|
||||
const codexPluginPath = path.join(repoRoot, '.codex-plugin', 'plugin.json');
|
||||
|
||||
test('codex plugin.json exists', () => {
|
||||
assert.ok(fs.existsSync(codexPluginPath), 'Expected .codex-plugin/plugin.json to exist');
|
||||
});
|
||||
|
||||
const codexPlugin = loadJsonObject(codexPluginPath, '.codex-plugin/plugin.json');
|
||||
|
||||
test('codex plugin.json has name field', () => {
|
||||
assert.ok(codexPlugin.name, 'Expected name field');
|
||||
});
|
||||
|
||||
test('codex plugin.json has version field', () => {
|
||||
assert.ok(codexPlugin.version, 'Expected version field');
|
||||
});
|
||||
|
||||
test('codex plugin.json skills is a string (not array) per official spec', () => {
|
||||
assert.strictEqual(
|
||||
typeof codexPlugin.skills,
|
||||
'string',
|
||||
'skills must be a string path per Codex official docs, not an array',
|
||||
);
|
||||
});
|
||||
|
||||
test('codex plugin.json mcpServers is a string path (not array) per official spec', () => {
|
||||
assert.strictEqual(
|
||||
typeof codexPlugin.mcpServers,
|
||||
'string',
|
||||
'mcpServers must be a string path per Codex official docs',
|
||||
);
|
||||
});
|
||||
|
||||
test('codex plugin.json mcpServers exactly matches "./.mcp.json"', () => {
|
||||
assert.strictEqual(
|
||||
codexPlugin.mcpServers,
|
||||
'./.mcp.json',
|
||||
'mcpServers must point exactly to "./.mcp.json" per official docs',
|
||||
);
|
||||
const mcpPath = path.join(repoRoot, codexPlugin.mcpServers.replace(/^\.\//, ''));
|
||||
assert.ok(
|
||||
fs.existsSync(mcpPath),
|
||||
`mcpServers file missing at plugin root: ${codexPlugin.mcpServers}`,
|
||||
);
|
||||
});
|
||||
|
||||
test('codex plugin.json has interface.displayName', () => {
|
||||
assert.ok(
|
||||
codexPlugin.interface && codexPlugin.interface.displayName,
|
||||
'Expected interface.displayName for plugin directory presentation',
|
||||
);
|
||||
});
|
||||
|
||||
// ── .mcp.json at plugin root ──────────────────────────────────────────────────
|
||||
// Per official docs: keep .mcp.json at plugin root, NOT inside .codex-plugin/
|
||||
console.log('\n=== .mcp.json (plugin root) ===\n');
|
||||
|
||||
const mcpJsonPath = path.join(repoRoot, '.mcp.json');
|
||||
|
||||
test('.mcp.json exists at plugin root (not inside .codex-plugin/)', () => {
|
||||
assert.ok(fs.existsSync(mcpJsonPath), 'Expected .mcp.json at repo root (plugin root)');
|
||||
assert.ok(
|
||||
!fs.existsSync(path.join(repoRoot, '.codex-plugin', '.mcp.json')),
|
||||
'.mcp.json must NOT be inside .codex-plugin/ — only plugin.json belongs there',
|
||||
);
|
||||
});
|
||||
|
||||
const mcpConfig = loadJsonObject(mcpJsonPath, '.mcp.json');
|
||||
|
||||
test('.mcp.json has mcpServers object', () => {
|
||||
assert.ok(
|
||||
mcpConfig.mcpServers && typeof mcpConfig.mcpServers === 'object',
|
||||
'Expected mcpServers object',
|
||||
);
|
||||
});
|
||||
|
||||
test('.mcp.json includes at least github, context7, and exa servers', () => {
|
||||
const servers = Object.keys(mcpConfig.mcpServers);
|
||||
assert.ok(servers.includes('github'), 'Expected github MCP server');
|
||||
assert.ok(servers.includes('context7'), 'Expected context7 MCP server');
|
||||
assert.ok(servers.includes('exa'), 'Expected exa MCP server');
|
||||
});
|
||||
|
||||
// ── Codex marketplace file ────────────────────────────────────────────────────
|
||||
// Per official docs: repo marketplace lives at $REPO_ROOT/.agents/plugins/marketplace.json
|
||||
console.log('\n=== .agents/plugins/marketplace.json ===\n');
|
||||
|
||||
const marketplacePath = path.join(repoRoot, '.agents', 'plugins', 'marketplace.json');
|
||||
|
||||
test('marketplace.json exists at .agents/plugins/', () => {
|
||||
assert.ok(
|
||||
fs.existsSync(marketplacePath),
|
||||
'Expected .agents/plugins/marketplace.json for Codex repo marketplace discovery',
|
||||
);
|
||||
});
|
||||
|
||||
const marketplace = loadJsonObject(marketplacePath, '.agents/plugins/marketplace.json');
|
||||
|
||||
test('marketplace.json has name field', () => {
|
||||
assert.ok(marketplace.name, 'Expected name field');
|
||||
});
|
||||
|
||||
test('marketplace.json has plugins array with at least one entry', () => {
|
||||
assert.ok(Array.isArray(marketplace.plugins) && marketplace.plugins.length > 0, 'Expected plugins array');
|
||||
});
|
||||
|
||||
test('marketplace.json plugin entries have required fields', () => {
|
||||
for (const plugin of marketplace.plugins) {
|
||||
assert.ok(plugin.name, `Plugin entry missing name`);
|
||||
assert.ok(plugin.source && plugin.source.source, `Plugin "${plugin.name}" missing source.source`);
|
||||
assert.ok(plugin.policy && plugin.policy.installation, `Plugin "${plugin.name}" missing policy.installation`);
|
||||
assert.ok(plugin.category, `Plugin "${plugin.name}" missing category`);
|
||||
}
|
||||
});
|
||||
|
||||
test('marketplace local plugin path resolves to the repo-root Codex bundle', () => {
|
||||
for (const plugin of marketplace.plugins) {
|
||||
if (!plugin.source || plugin.source.source !== 'local') {
|
||||
continue;
|
||||
}
|
||||
|
||||
const resolvedRoot = path.resolve(path.dirname(marketplacePath), plugin.source.path);
|
||||
assert.strictEqual(
|
||||
resolvedRoot,
|
||||
repoRoot,
|
||||
`Expected local marketplace path to resolve to repo root, got: ${plugin.source.path}`,
|
||||
);
|
||||
assert.ok(
|
||||
fs.existsSync(path.join(resolvedRoot, '.codex-plugin', 'plugin.json')),
|
||||
`Codex plugin manifest missing under resolved marketplace root: ${plugin.source.path}`,
|
||||
);
|
||||
assert.ok(
|
||||
fs.existsSync(path.join(resolvedRoot, '.mcp.json')),
|
||||
`Root MCP config missing under resolved marketplace root: ${plugin.source.path}`,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
// ── Summary ───────────────────────────────────────────────────────────────────
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
104
tests/scripts/catalog.test.js
Normal file
104
tests/scripts/catalog.test.js
Normal file
@@ -0,0 +1,104 @@
|
||||
/**
|
||||
* Tests for scripts/catalog.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const path = require('path');
|
||||
const { execFileSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'catalog.js');
|
||||
|
||||
function run(args = []) {
|
||||
try {
|
||||
const stdout = execFileSync('node', [SCRIPT, ...args], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 10000,
|
||||
});
|
||||
return { code: 0, stdout, stderr: '' };
|
||||
} catch (error) {
|
||||
return {
|
||||
code: error.status || 1,
|
||||
stdout: error.stdout || '',
|
||||
stderr: error.stderr || '',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` \u2713 ${name}`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Runs every catalog.js CLI test case, prints a summary, and exits the
 * process with 1 when any case failed (0 otherwise).
 */
function runTests() {
  console.log('\n=== Testing catalog.js ===\n');

  // Each entry is [description, body]; bodies throw (via assert) on failure.
  const cases = [
    [
      'shows help with no arguments',
      () => {
        const result = run();
        assert.strictEqual(result.code, 0);
        assert.ok(result.stdout.includes('Discover ECC install components and profiles'));
      }
    ],
    [
      'shows help with an explicit help flag',
      () => {
        const result = run(['--help']);
        assert.strictEqual(result.code, 0);
        assert.ok(result.stdout.includes('Usage:'));
        assert.ok(result.stdout.includes('node scripts/catalog.js show <component-id>'));
      }
    ],
    [
      'lists install profiles',
      () => {
        const result = run(['profiles']);
        assert.strictEqual(result.code, 0);
        assert.ok(result.stdout.includes('Install profiles'));
        assert.ok(result.stdout.includes('core'));
      }
    ],
    [
      'filters components by family and emits JSON',
      () => {
        const result = run(['components', '--family', 'language', '--json']);
        assert.strictEqual(result.code, 0, result.stderr);
        const parsed = JSON.parse(result.stdout);
        assert.ok(Array.isArray(parsed.components));
        assert.ok(parsed.components.length > 0);
        assert.ok(parsed.components.every(component => component.family === 'language'));
        assert.ok(parsed.components.some(component => component.id === 'lang:typescript'));
        assert.ok(parsed.components.every(component => component.id !== 'framework:nextjs'));
      }
    ],
    [
      'shows a resolved component payload',
      () => {
        const result = run(['show', 'framework:nextjs', '--json']);
        assert.strictEqual(result.code, 0, result.stderr);
        const parsed = JSON.parse(result.stdout);
        assert.strictEqual(parsed.id, 'framework:nextjs');
        assert.strictEqual(parsed.family, 'framework');
        assert.deepStrictEqual(parsed.moduleIds, ['framework-language']);
        assert.ok(Array.isArray(parsed.modules));
        assert.strictEqual(parsed.modules[0].id, 'framework-language');
      }
    ],
    [
      'fails on unknown subcommands',
      () => {
        const result = run(['bogus']);
        assert.strictEqual(result.code, 1);
        assert.ok(result.stderr.includes('Unknown catalog command'));
      }
    ],
    [
      'fails on unknown component ids',
      () => {
        const result = run(['show', 'framework:not-real']);
        assert.strictEqual(result.code, 1);
        assert.ok(result.stderr.includes('Unknown install component'));
      }
    ]
  ];

  let passed = 0;
  let failed = 0;
  for (const [name, body] of cases) {
    if (test(name, body)) {
      passed += 1;
    } else {
      failed += 1;
    }
  }

  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}

runTests();
|
||||
94
tests/scripts/codex-hooks.test.js
Normal file
94
tests/scripts/codex-hooks.test.js
Normal file
@@ -0,0 +1,94 @@
|
||||
/**
|
||||
* Tests for Codex shell helpers.
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const repoRoot = path.join(__dirname, '..', '..');
|
||||
const installScript = path.join(repoRoot, 'scripts', 'codex', 'install-global-git-hooks.sh');
|
||||
const installSource = fs.readFileSync(installScript, 'utf8');
|
||||
|
||||
/**
 * Runs a single test case and reports the result to the console.
 *
 * @param {string} name - Human-readable test description.
 * @param {Function} fn - Test body; throwing marks the case as failed.
 * @returns {boolean} True when fn completed without throwing.
 */
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  ✗ ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  ✓ ${name}`);
  }
  return ok;
}
|
||||
|
||||
/**
 * Creates a fresh, uniquely named temporary directory under the OS temp root.
 *
 * @param {string} prefix - Prefix for the generated directory name.
 * @returns {string} Absolute path of the newly created directory.
 */
function createTempDir(prefix) {
  const template = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(template);
}
|
||||
|
||||
/**
 * Recursively deletes a directory tree; silently ignores missing paths.
 *
 * @param {string} dirPath - Directory to remove.
 */
function cleanup(dirPath) {
  const removalOptions = { recursive: true, force: true };
  fs.rmSync(dirPath, removalOptions);
}
|
||||
|
||||
/**
 * Converts a Windows-style path (e.g. "C:\foo\bar") into the POSIX form that
 * Git Bash expects ("/c/foo/bar"). Non-Windows platforms get the path back
 * untouched.
 *
 * @param {string} filePath - Path to convert.
 * @returns {string} Bash-compatible path.
 */
function toBashPath(filePath) {
  if (process.platform !== 'win32') {
    return filePath;
  }

  const text = String(filePath);
  // Rewrite the drive prefix ("C:" -> "/c"), then flip the separators.
  const withPosixDrive = text.replace(/^([A-Za-z]):/, (_, driveLetter) => `/${driveLetter.toLowerCase()}`);
  return withPosixDrive.replace(/\\/g, '/');
}

/**
 * Synchronously runs a shell script through bash and captures the result.
 *
 * @param {string} scriptPath - Script to execute (converted for bash on Windows).
 * @param {string[]} [args] - Arguments passed through to the script.
 * @param {Object} [env] - Extra environment variables layered over process.env.
 * @param {string} [cwd] - Working directory; defaults to the repo root.
 * @returns {Object} The spawnSync result (status, stdout, stderr, ...).
 */
function runBash(scriptPath, args = [], env = {}, cwd = repoRoot) {
  const mergedEnv = { ...process.env, ...env };
  return spawnSync('bash', [toBashPath(scriptPath), ...args], {
    cwd,
    env: mergedEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe']
  });
}
|
||||
|
||||
// Test cases for the global git-hooks installer: [description, body] pairs,
// executed in order; bodies throw (via assert) on failure.
const cases = [
  [
    'install-global-git-hooks.sh does not use eval and executes argv directly',
    () => {
      assert.ok(!installSource.includes('eval "$*"'), 'Expected installer to avoid eval');
      assert.ok(installSource.includes(' "$@"'), 'Expected installer to execute argv directly');
      assert.ok(installSource.includes(`printf ' %q' "$@"`), 'Expected dry-run logging to shell-escape argv');
    }
  ],
  [
    'install-global-git-hooks.sh handles shell-sensitive hook paths without shell injection',
    () => {
      const homeDir = createTempDir('codex-hooks-home-');
      // Quotes, an ampersand, and spaces in the hooks dir would break any
      // shell-interpolated command; the installer must survive them.
      const weirdHooksDir = path.join(homeDir, "git-hooks 'quoted' & spaced");

      try {
        const result = runBash(installScript, [], {
          HOME: toBashPath(homeDir),
          ECC_GLOBAL_HOOKS_DIR: toBashPath(weirdHooksDir)
        });

        assert.strictEqual(result.status, 0, result.stderr || result.stdout);
        assert.ok(fs.existsSync(path.join(weirdHooksDir, 'pre-commit')));
        assert.ok(fs.existsSync(path.join(weirdHooksDir, 'pre-push')));
      } finally {
        cleanup(homeDir);
      }
    }
  ]
];

let passed = 0;
let failed = 0;
for (const [name, body] of cases) {
  if (test(name, body)) {
    passed += 1;
  } else {
    failed += 1;
  }
}

console.log(`\nPassed: ${passed}`);
console.log(`Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user