mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
Compare commits
138 Commits
f07797533d
...
fix/codex-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0ebcfc368e | ||
|
|
ab49c9adf5 | ||
|
|
b7a82cf240 | ||
|
|
9a55fd069b | ||
|
|
d9e8305aa1 | ||
|
|
f2bf72c005 | ||
|
|
3ae0df781f | ||
|
|
a346a304b0 | ||
|
|
81acf0c928 | ||
|
|
06a77911e6 | ||
|
|
9406f35fab | ||
|
|
c5e3658ba6 | ||
|
|
eeeea506a6 | ||
|
|
fc1ea4fbea | ||
|
|
00787d68e4 | ||
|
|
1e3572becf | ||
|
|
7462168377 | ||
|
|
3c3781ca43 | ||
|
|
27d71c9548 | ||
|
|
6f16e75f9d | ||
|
|
0d30da1fc7 | ||
|
|
e686bcbc82 | ||
|
|
25c8a5de08 | ||
|
|
ec104c94c5 | ||
|
|
14a51404c0 | ||
|
|
666c639206 | ||
|
|
a8e088a54e | ||
|
|
eac0228f88 | ||
|
|
b6e3434ff4 | ||
|
|
4eaee83448 | ||
|
|
1e43639cc7 | ||
|
|
766f846478 | ||
|
|
dd38518afe | ||
|
|
c1d98b071e | ||
|
|
70b98f3178 | ||
|
|
dcc4d914d2 | ||
|
|
71219ff656 | ||
|
|
e815f0d05c | ||
|
|
b3a43f34e6 | ||
|
|
0d26f5295d | ||
|
|
9181382065 | ||
|
|
9434e07749 | ||
|
|
9cde3427e2 | ||
|
|
c6b4c719b2 | ||
|
|
f98207feea | ||
|
|
52e9bd58f1 | ||
|
|
4257c093ca | ||
|
|
23d743b92c | ||
|
|
414ea90e11 | ||
|
|
d473cf87e6 | ||
|
|
64847d0a21 | ||
|
|
c865d4c676 | ||
|
|
72de19effd | ||
|
|
56076edd48 | ||
|
|
04d7eeb16f | ||
|
|
4e7773c2ce | ||
|
|
a3fc90f7ac | ||
|
|
55efeb7f20 | ||
|
|
1e7c299706 | ||
|
|
47aa415b06 | ||
|
|
d7e6bb242a | ||
|
|
9f37a5d8c7 | ||
|
|
d9ec51c9e9 | ||
|
|
9033f2a997 | ||
|
|
67660540ac | ||
|
|
432788d0b5 | ||
|
|
6a7a115e18 | ||
|
|
1181d93498 | ||
|
|
80d6a89f12 | ||
|
|
28a1fbc3f2 | ||
|
|
4fcaaf8a89 | ||
|
|
7a4cb8c570 | ||
|
|
4b4f077d18 | ||
|
|
78c98dd4fd | ||
|
|
9ad4351f53 | ||
|
|
451732164f | ||
|
|
ebd14cde7d | ||
|
|
c14765e701 | ||
|
|
194bc0000b | ||
|
|
1e44475458 | ||
|
|
31af1adcc8 | ||
|
|
c80631fc1d | ||
|
|
00f8628b83 | ||
|
|
ba09a34432 | ||
|
|
27e0d53f6d | ||
|
|
17f6f95090 | ||
|
|
1e226ba556 | ||
|
|
9b24bedf85 | ||
|
|
e3f2bda9fc | ||
|
|
63737544a1 | ||
|
|
dafc9bcd60 | ||
|
|
2d0fddf174 | ||
|
|
f471f27658 | ||
|
|
925d830c53 | ||
|
|
9348751b8e | ||
|
|
da74f85c10 | ||
|
|
c146fae2ce | ||
|
|
3f5e042b40 | ||
|
|
b5148f184a | ||
|
|
b44ba7096f | ||
|
|
45baaa1ea5 | ||
|
|
4da1fb388c | ||
|
|
917c35bb6f | ||
|
|
ee3f348dcb | ||
|
|
e6eb99271f | ||
|
|
7cabf77142 | ||
|
|
9cfcfac665 | ||
|
|
0284f60871 | ||
|
|
7a17ec9b14 | ||
|
|
243fae8476 | ||
|
|
dc92b5c62b | ||
|
|
3fbfd7f7ff | ||
|
|
a6a81490f6 | ||
|
|
d170cdd175 | ||
|
|
57e9983c88 | ||
|
|
d952a07c73 | ||
|
|
369f66297a | ||
|
|
9cc5d085e1 | ||
|
|
7229e09df1 | ||
|
|
bf7ed1fce2 | ||
|
|
fee93f2dab | ||
|
|
a61947bb5c | ||
|
|
3c59d8dc60 | ||
|
|
46f6e3644b | ||
|
|
39a34e46db | ||
|
|
95a1435f61 | ||
|
|
e57ad5c33d | ||
|
|
f7d589ce21 | ||
|
|
9c381b4469 | ||
|
|
e3510f62a8 | ||
|
|
6af7ca1afc | ||
|
|
d6061cf937 | ||
|
|
ec921e5202 | ||
|
|
d016e68cee | ||
|
|
aed18eb571 | ||
|
|
f3cf808814 | ||
|
|
e22cb57718 | ||
|
|
4811e8c73b |
20
.agents/plugins/marketplace.json
Normal file
20
.agents/plugins/marketplace.json
Normal file
@@ -0,0 +1,20 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code"
|
||||
},
|
||||
"plugins": [
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"source": {
|
||||
"source": "local",
|
||||
"path": "../.."
|
||||
},
|
||||
"policy": {
|
||||
"installation": "AVAILABLE",
|
||||
"authentication": "ON_INSTALL"
|
||||
},
|
||||
"category": "Productivity"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -21,5 +21,37 @@
|
||||
"workflow",
|
||||
"automation",
|
||||
"best-practices"
|
||||
]
|
||||
],
|
||||
"agents": [
|
||||
"./agents/architect.md",
|
||||
"./agents/build-error-resolver.md",
|
||||
"./agents/chief-of-staff.md",
|
||||
"./agents/code-reviewer.md",
|
||||
"./agents/cpp-build-resolver.md",
|
||||
"./agents/cpp-reviewer.md",
|
||||
"./agents/database-reviewer.md",
|
||||
"./agents/doc-updater.md",
|
||||
"./agents/docs-lookup.md",
|
||||
"./agents/e2e-runner.md",
|
||||
"./agents/flutter-reviewer.md",
|
||||
"./agents/go-build-resolver.md",
|
||||
"./agents/go-reviewer.md",
|
||||
"./agents/harness-optimizer.md",
|
||||
"./agents/java-build-resolver.md",
|
||||
"./agents/java-reviewer.md",
|
||||
"./agents/kotlin-build-resolver.md",
|
||||
"./agents/kotlin-reviewer.md",
|
||||
"./agents/loop-operator.md",
|
||||
"./agents/planner.md",
|
||||
"./agents/python-reviewer.md",
|
||||
"./agents/pytorch-build-resolver.md",
|
||||
"./agents/refactor-cleaner.md",
|
||||
"./agents/rust-build-resolver.md",
|
||||
"./agents/rust-reviewer.md",
|
||||
"./agents/security-reviewer.md",
|
||||
"./agents/tdd-guide.md",
|
||||
"./agents/typescript-reviewer.md"
|
||||
],
|
||||
"skills": ["./skills/"],
|
||||
"commands": ["./commands/"]
|
||||
}
|
||||
|
||||
47
.claude/rules/node.md
Normal file
47
.claude/rules/node.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# Node.js Rules for everything-claude-code
|
||||
|
||||
> Project-specific rules for the ECC codebase. Extends common rules.
|
||||
|
||||
## Stack
|
||||
|
||||
- **Runtime**: Node.js >=18 (no transpilation, plain CommonJS)
|
||||
- **Test runner**: `node tests/run-all.js` — individual files via `node tests/**/*.test.js`
|
||||
- **Linter**: ESLint (`@eslint/js`, flat config)
|
||||
- **Coverage**: c8
|
||||
- **Lint**: markdownlint-cli for `.md` files
|
||||
|
||||
## File Conventions
|
||||
|
||||
- `scripts/` — Node.js utilities, hooks. CommonJS (`require`/`module.exports`)
|
||||
- `agents/`, `commands/`, `skills/`, `rules/` — Markdown with YAML frontmatter
|
||||
- `tests/` — Mirror the `scripts/` structure. Test files named `*.test.js`
|
||||
- File naming: **lowercase with hyphens** (e.g. `session-start.js`, `post-edit-format.js`)
|
||||
|
||||
## Code Style
|
||||
|
||||
- CommonJS only — no ESM (`import`/`export`) unless file ends in `.mjs`
|
||||
- No TypeScript — plain `.js` throughout
|
||||
- Prefer `const` over `let`; never `var`
|
||||
- Keep hook scripts under 200 lines — extract helpers to `scripts/lib/`
|
||||
- All hooks must `exit 0` on non-critical errors (never block tool execution unexpectedly)
|
||||
|
||||
## Hook Development
|
||||
|
||||
- Hook scripts normally receive JSON on stdin, but hooks routed through `scripts/hooks/run-with-flags.js` can export `run(rawInput)` and let the wrapper handle parsing/gating
|
||||
- Async hooks: mark `"async": true` in `settings.json` with a timeout ≤30s
|
||||
- Blocking hooks (PreToolUse, stop): keep fast (<200ms) — no network calls
|
||||
- Use `run-with-flags.js` wrapper for all hooks so `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` runtime gating works
|
||||
- Always exit 0 on parse errors; log to stderr with `[HookName]` prefix
|
||||
|
||||
## Testing Requirements
|
||||
|
||||
- Run `node tests/run-all.js` before committing
|
||||
- New scripts in `scripts/lib/` require a matching test in `tests/lib/`
|
||||
- New hooks require at least one integration test in `tests/hooks/`
|
||||
|
||||
## Markdown / Agent Files
|
||||
|
||||
- Agents: YAML frontmatter with `name`, `description`, `tools`, `model`
|
||||
- Skills: sections — When to Use, How It Works, Examples
|
||||
- Commands: `description:` frontmatter line required
|
||||
- Run `npx markdownlint-cli '**/*.md' --ignore node_modules` before committing
|
||||
49
.codex-plugin/README.md
Normal file
49
.codex-plugin/README.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# .codex-plugin — Codex Native Plugin for ECC
|
||||
|
||||
This directory contains the **Codex plugin manifest** for Everything Claude Code.
|
||||
|
||||
## Structure
|
||||
|
||||
```
|
||||
.codex-plugin/
|
||||
└── plugin.json — Codex plugin manifest (name, version, skills ref, MCP ref)
|
||||
.mcp.json — MCP server configurations at plugin root (NOT inside .codex-plugin/)
|
||||
```
|
||||
|
||||
## What This Provides
|
||||
|
||||
- **125 skills** from `./skills/` — reusable Codex workflows for TDD, security,
|
||||
code review, architecture, and more
|
||||
- **6 MCP servers** — GitHub, Context7, Exa, Memory, Playwright, Sequential Thinking
|
||||
|
||||
## Installation
|
||||
|
||||
Codex plugin support is currently in preview. Once generally available:
|
||||
|
||||
```bash
|
||||
# Install from Codex CLI
|
||||
codex plugin install affaan-m/everything-claude-code
|
||||
|
||||
# Or reference locally during development
|
||||
codex plugin install ./
|
||||
|
||||
Run this from the repository root so `./` points to the repo root and `.mcp.json` resolves correctly.
|
||||
```
|
||||
|
||||
## MCP Servers Included
|
||||
|
||||
| Server | Purpose |
|
||||
|---|---|
|
||||
| `github` | GitHub API access |
|
||||
| `context7` | Live documentation lookup |
|
||||
| `exa` | Neural web search |
|
||||
| `memory` | Persistent memory across sessions |
|
||||
| `playwright` | Browser automation & E2E testing |
|
||||
| `sequential-thinking` | Step-by-step reasoning |
|
||||
|
||||
## Notes
|
||||
|
||||
- The `skills/` directory at the repo root is shared between Claude Code (`.claude-plugin/`)
|
||||
and Codex (`.codex-plugin/`) — same source of truth, no duplication
|
||||
- MCP server credentials are inherited from the launching environment (env vars)
|
||||
- This manifest does **not** override `~/.codex/config.toml` settings
|
||||
30
.codex-plugin/plugin.json
Normal file
30
.codex-plugin/plugin.json
Normal file
@@ -0,0 +1,30 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"version": "1.9.0",
|
||||
"description": "Battle-tested Codex workflows — 125 skills, production-ready MCP configs, and agent definitions for TDD, security scanning, code review, and autonomous development.",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"email": "me@affaanmustafa.com",
|
||||
"url": "https://x.com/affaanmustafa"
|
||||
},
|
||||
"homepage": "https://github.com/affaan-m/everything-claude-code",
|
||||
"repository": "https://github.com/affaan-m/everything-claude-code",
|
||||
"license": "MIT",
|
||||
"keywords": ["codex", "agents", "skills", "tdd", "code-review", "security", "workflow", "automation"],
|
||||
"skills": "./skills/",
|
||||
"mcpServers": "./.mcp.json",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code",
|
||||
"shortDescription": "125 battle-tested skills for TDD, security, code review, and autonomous development.",
|
||||
"longDescription": "Everything Claude Code (ECC) is a community-maintained collection of Codex skills and MCP configs evolved over 10+ months of intensive daily use. It covers TDD workflows, security scanning, code review, architecture decisions, and more — all in one installable plugin.",
|
||||
"developerName": "Affaan Mustafa",
|
||||
"category": "Productivity",
|
||||
"capabilities": ["Read", "Write"],
|
||||
"websiteURL": "https://github.com/affaan-m/everything-claude-code",
|
||||
"defaultPrompt": [
|
||||
"Use the tdd-workflow skill to write tests before implementation.",
|
||||
"Use the security-review skill to scan for OWASP Top 10 vulnerabilities.",
|
||||
"Use the code-review skill to review this PR for correctness and security."
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -46,12 +46,15 @@ Available skills:
|
||||
|
||||
Treat the project-local `.codex/config.toml` as the default Codex baseline for ECC. The current ECC baseline enables GitHub, Context7, Exa, Memory, Playwright, and Sequential Thinking; add heavier extras in `~/.codex/config.toml` only when a task actually needs them.
|
||||
|
||||
ECC's canonical Codex section name is `[mcp_servers.context7]`. The launcher package remains `@upstash/context7-mcp`; only the TOML section name is normalized for consistency with `codex mcp list` and the reference config.
|
||||
|
||||
### Automatic config.toml merging
|
||||
|
||||
The sync script (`scripts/sync-ecc-to-codex.sh`) uses a Node-based TOML parser to safely merge ECC MCP servers into `~/.codex/config.toml`:
|
||||
|
||||
- **Add-only by default** — missing ECC servers are appended; existing servers are never modified or removed.
|
||||
- **7 managed servers** — Supabase, Playwright, Context7, Exa, GitHub, Memory, Sequential Thinking.
|
||||
- **Canonical naming** — ECC manages Context7 as `[mcp_servers.context7]`; legacy `[mcp_servers.context7-mcp]` entries are treated as aliases during updates.
|
||||
- **Package-manager aware** — uses the project's configured package manager (npm/pnpm/yarn/bun) instead of hardcoding `pnpm`.
|
||||
- **Drift warnings** — if an existing server's config differs from the ECC recommendation, the script logs a warning.
|
||||
- **`--update-mcp`** — explicitly replaces all ECC-managed servers with the latest recommended config (safely removes subtables like `[mcp_servers.supabase.env]`).
|
||||
|
||||
@@ -27,7 +27,10 @@ notify = [
|
||||
"-sound", "default",
|
||||
]
|
||||
|
||||
# Prefer AGENTS.md and project-local .codex/AGENTS.md for instructions.
|
||||
# Persistent instructions are appended to every prompt (additive, unlike
|
||||
# model_instructions_file which replaces AGENTS.md).
|
||||
persistent_instructions = "Follow project AGENTS.md guidelines. Use available MCP servers when they can help."
|
||||
|
||||
# model_instructions_file replaces built-in instructions instead of AGENTS.md,
|
||||
# so leave it unset unless you intentionally want a single override file.
|
||||
# model_instructions_file = "/absolute/path/to/instructions.md"
|
||||
@@ -38,10 +41,14 @@ notify = [
|
||||
[mcp_servers.github]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-github"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.context7]
|
||||
command = "npx"
|
||||
# Canonical Codex section name is `context7`; the package itself remains
|
||||
# `@upstash/context7-mcp`.
|
||||
args = ["-y", "@upstash/context7-mcp@latest"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.exa]
|
||||
url = "https://mcp.exa.ai/mcp"
|
||||
@@ -49,14 +56,17 @@ url = "https://mcp.exa.ai/mcp"
|
||||
[mcp_servers.memory]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-memory"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.playwright]
|
||||
command = "npx"
|
||||
args = ["-y", "@playwright/mcp@latest", "--extension"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
[mcp_servers.sequential-thinking]
|
||||
command = "npx"
|
||||
args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
startup_timeout_sec = 30
|
||||
|
||||
# Additional MCP servers (uncomment as needed):
|
||||
# [mcp_servers.supabase]
|
||||
@@ -76,7 +86,8 @@ args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
# args = ["-y", "@cloudflare/mcp-server-cloudflare"]
|
||||
|
||||
[features]
|
||||
# Codex multi-agent support is experimental as of March 2026.
|
||||
# Codex multi-agent collaboration is stable and on by default in current builds.
|
||||
# Keep the explicit toggle here so the repo documents its expectation clearly.
|
||||
multi_agent = true
|
||||
|
||||
# Profiles — switch with `codex -p <name>`
|
||||
@@ -91,6 +102,9 @@ sandbox_mode = "workspace-write"
|
||||
web_search = "live"
|
||||
|
||||
[agents]
|
||||
[agents]
|
||||
# Multi-agent role limits and local role definitions.
|
||||
# These map to `.codex/agents/*.toml` and mirror the repo's explorer/reviewer/docs workflow.
|
||||
max_threads = 6
|
||||
max_depth = 1
|
||||
|
||||
|
||||
4
.github/workflows/ci.yml
vendored
4
.github/workflows/ci.yml
vendored
@@ -44,7 +44,7 @@ jobs:
|
||||
# Package manager setup
|
||||
- name: Setup pnpm
|
||||
if: matrix.pm == 'pnpm'
|
||||
uses: pnpm/action-setup@v4
|
||||
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
@@ -57,7 +57,7 @@ jobs:
|
||||
|
||||
- name: Setup Bun
|
||||
if: matrix.pm == 'bun'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
|
||||
|
||||
# Cache configuration
|
||||
- name: Get npm cache directory
|
||||
|
||||
6
.github/workflows/release.yml
vendored
6
.github/workflows/release.yml
vendored
@@ -20,11 +20,13 @@ jobs:
|
||||
|
||||
- name: Validate version tag
|
||||
run: |
|
||||
if ! [[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
if ! [[ "${REF_NAME}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
|
||||
echo "Invalid version tag format. Expected vX.Y.Z"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
env:
|
||||
REF_NAME: ${{ github.ref_name }}
|
||||
- name: Verify plugin.json version matches tag
|
||||
env:
|
||||
TAG_NAME: ${{ github.ref_name }}
|
||||
@@ -61,7 +63,7 @@ jobs:
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
|
||||
with:
|
||||
body_path: release_body.md
|
||||
generate_release_notes: true
|
||||
|
||||
2
.github/workflows/reusable-release.yml
vendored
2
.github/workflows/reusable-release.yml
vendored
@@ -49,7 +49,7 @@ jobs:
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
|
||||
with:
|
||||
tag_name: ${{ inputs.tag }}
|
||||
body_path: release_body.md
|
||||
|
||||
4
.github/workflows/reusable-test.yml
vendored
4
.github/workflows/reusable-test.yml
vendored
@@ -36,7 +36,7 @@ jobs:
|
||||
|
||||
- name: Setup pnpm
|
||||
if: inputs.package-manager == 'pnpm'
|
||||
uses: pnpm/action-setup@v4
|
||||
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
|
||||
with:
|
||||
version: latest
|
||||
|
||||
@@ -49,7 +49,7 @@ jobs:
|
||||
|
||||
- name: Setup Bun
|
||||
if: inputs.package-manager == 'bun'
|
||||
uses: oven-sh/setup-bun@v2
|
||||
uses: oven-sh/setup-bun@0c5077e51419868618aeaa5fe8019c62421857d6 # v2
|
||||
|
||||
- name: Get npm cache directory
|
||||
if: inputs.package-manager == 'npm'
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -83,6 +83,9 @@ temp/
|
||||
*.bak
|
||||
*.backup
|
||||
|
||||
# Observer temp files (continuous-learning-v2)
|
||||
.observer-tmp/
|
||||
|
||||
# Rust build artifacts
|
||||
ecc2/target/
|
||||
|
||||
|
||||
27
.mcp.json
Normal file
27
.mcp.json
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"mcpServers": {
|
||||
"github": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-github"]
|
||||
},
|
||||
"context7": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@upstash/context7-mcp@2.1.4"]
|
||||
},
|
||||
"exa": {
|
||||
"url": "https://mcp.exa.ai/mcp"
|
||||
},
|
||||
"memory": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-memory"]
|
||||
},
|
||||
"playwright": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@playwright/mcp@0.0.68", "--extension"]
|
||||
},
|
||||
"sequential-thinking": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
|
||||
}
|
||||
}
|
||||
}
|
||||
184
.trae/README.md
Normal file
184
.trae/README.md
Normal file
@@ -0,0 +1,184 @@
|
||||
# Everything Claude Code for Trae
|
||||
|
||||
Bring Everything Claude Code (ECC) workflows to Trae IDE. This repository provides custom commands, agents, skills, and rules that can be installed into any Trae project with a single command.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Option 1: Local Installation (Current Project Only)
|
||||
|
||||
```bash
|
||||
# Install to current project
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
This creates `.trae-cn/` in your project directory.
|
||||
|
||||
### Option 2: Global Installation (All Projects)
|
||||
|
||||
```bash
|
||||
# Install globally to ~/.trae-cn/
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
|
||||
# Or from the .trae folder directly
|
||||
cd /path/to/your/project/.trae
|
||||
TRAE_ENV=cn ./install.sh ~
|
||||
```
|
||||
|
||||
This creates `~/.trae-cn/` which applies to all Trae projects.
|
||||
|
||||
### Option 3: Quick Install to Current Directory
|
||||
|
||||
```bash
|
||||
# If already in project directory with .trae folder
|
||||
cd .trae
|
||||
./install.sh
|
||||
```
|
||||
|
||||
The installer uses non-destructive copy - it will not overwrite your existing files.
|
||||
|
||||
## Installation Modes
|
||||
|
||||
### Local Installation
|
||||
|
||||
Install to the current project's `.trae-cn` directory:
|
||||
|
||||
```bash
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
This creates `/path/to/your/project/.trae-cn/` with all ECC components.
|
||||
|
||||
### Global Installation
|
||||
|
||||
Install to your home directory's `.trae-cn` directory (applies to all Trae projects):
|
||||
|
||||
```bash
|
||||
# From project directory
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
|
||||
# Or directly from .trae folder
|
||||
cd .trae
|
||||
TRAE_ENV=cn ./install.sh ~
|
||||
```
|
||||
|
||||
This creates `~/.trae-cn/` with all ECC components. All Trae projects will use these global installations.
|
||||
|
||||
**Note**: Global installation is useful when you want to maintain a single copy of ECC across all your projects.
|
||||
|
||||
## Environment Support
|
||||
|
||||
- **Default**: Uses `.trae` directory
|
||||
- **CN Environment**: Uses `.trae-cn` directory (set via `TRAE_ENV=cn`)
|
||||
|
||||
### Force Environment
|
||||
|
||||
```bash
|
||||
# From project root, force the CN environment
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
|
||||
# From inside the .trae folder
|
||||
cd .trae
|
||||
TRAE_ENV=cn ./install.sh
|
||||
```
|
||||
|
||||
**Note**: `TRAE_ENV` is a global environment variable that applies to the entire installation session.
|
||||
|
||||
## Uninstall
|
||||
|
||||
The uninstaller uses a manifest file (`.ecc-manifest`) to track installed files, ensuring safe removal:
|
||||
|
||||
```bash
|
||||
# Uninstall from current directory (if already inside .trae or .trae-cn)
|
||||
cd .trae-cn
|
||||
./uninstall.sh
|
||||
|
||||
# Or uninstall from project root
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/uninstall.sh
|
||||
|
||||
# Uninstall globally from home directory
|
||||
TRAE_ENV=cn .trae/uninstall.sh ~
|
||||
|
||||
# Will ask for confirmation before uninstalling
|
||||
```
|
||||
|
||||
### Uninstall Behavior
|
||||
|
||||
- **Safe removal**: Only removes files tracked in the manifest (installed by ECC)
|
||||
- **User files preserved**: Any files you added manually are kept
|
||||
- **Non-empty directories**: Directories containing user-added files are skipped
|
||||
- **Manifest-based**: Requires `.ecc-manifest` file (created during install)
|
||||
|
||||
### Environment Support
|
||||
|
||||
Uninstall respects the same `TRAE_ENV` environment variable as install:
|
||||
|
||||
```bash
|
||||
# Uninstall from .trae-cn (CN environment)
|
||||
TRAE_ENV=cn ./uninstall.sh
|
||||
|
||||
# Uninstall from .trae (default environment)
|
||||
./uninstall.sh
|
||||
```
|
||||
|
||||
**Note**: If no manifest file is found (old installation), the uninstaller will ask whether to remove the entire directory.
|
||||
|
||||
## What's Included
|
||||
|
||||
### Commands
|
||||
|
||||
Commands are on-demand workflows invocable via the `/` menu in Trae chat. All commands are reused directly from the project root's `commands/` folder.
|
||||
|
||||
### Agents
|
||||
|
||||
Agents are specialized AI assistants with specific tool configurations. All agents are reused directly from the project root's `agents/` folder.
|
||||
|
||||
### Skills
|
||||
|
||||
Skills are on-demand workflows invocable via the `/` menu in chat. All skills are reused directly from the project's `skills/` folder.
|
||||
|
||||
### Rules
|
||||
|
||||
Rules provide always-on rules and context that shape how the agent works with your code. All rules are reused directly from the project root's `rules/` folder.
|
||||
|
||||
## Usage
|
||||
|
||||
1. Type `/` in chat to open the commands menu
|
||||
2. Select a command or skill
|
||||
3. The agent will guide you through the workflow with specific instructions and checklists
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
.trae/ (or .trae-cn/)
|
||||
├── commands/ # Command files (reused from project root)
|
||||
├── agents/ # Agent files (reused from project root)
|
||||
├── skills/ # Skill files (reused from skills/)
|
||||
├── rules/ # Rule files (reused from project root)
|
||||
├── install.sh # Install script
|
||||
├── uninstall.sh # Uninstall script
|
||||
└── README.md # This file
|
||||
```
|
||||
|
||||
## Customization
|
||||
|
||||
All files are yours to modify after installation. The installer never overwrites existing files, so your customizations are safe across re-installs.
|
||||
|
||||
**Note**: The `install.sh` and `uninstall.sh` scripts are automatically copied to the target directory during installation, so you can run these commands directly from your project.
|
||||
|
||||
## Recommended Workflow
|
||||
|
||||
1. **Start with planning**: Use `/plan` command to break down complex features
|
||||
2. **Write tests first**: Invoke `/tdd` command before implementing
|
||||
3. **Review your code**: Use `/code-review` after writing code
|
||||
4. **Check security**: Use `/code-review` again for auth, API endpoints, or sensitive data handling
|
||||
5. **Fix build errors**: Use `/build-fix` if there are build errors
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Open your project in Trae
|
||||
- Type `/` to see available commands
|
||||
- Enjoy the ECC workflows!
|
||||
192
.trae/README.zh-CN.md
Normal file
192
.trae/README.zh-CN.md
Normal file
@@ -0,0 +1,192 @@
|
||||
# Everything Claude Code for Trae
|
||||
|
||||
为 Trae IDE 带来 Everything Claude Code (ECC) 工作流。此仓库提供自定义命令、智能体、技能和规则,可以通过单个命令安装到任何 Trae 项目中。
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 方式一:本地安装到 `.trae` 目录(默认环境)
|
||||
|
||||
```bash
|
||||
# 安装到当前项目的 .trae 目录
|
||||
cd /path/to/your/project
|
||||
.trae/install.sh
|
||||
```
|
||||
|
||||
这将在您的项目目录中创建 `.trae/`。
|
||||
|
||||
### 方式二:本地安装到 `.trae-cn` 目录(CN 环境)
|
||||
|
||||
```bash
|
||||
# 安装到当前项目的 .trae-cn 目录
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
这将在您的项目目录中创建 `.trae-cn/`。
|
||||
|
||||
### 方式三:全局安装到 `~/.trae` 目录(默认环境)
|
||||
|
||||
```bash
|
||||
# 全局安装到 ~/.trae/
|
||||
cd /path/to/your/project
|
||||
.trae/install.sh ~
|
||||
```
|
||||
|
||||
这将创建 `~/.trae/`,适用于所有 Trae 项目。
|
||||
|
||||
### 方式四:全局安装到 `~/.trae-cn` 目录(CN 环境)
|
||||
|
||||
```bash
|
||||
# 全局安装到 ~/.trae-cn/
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
```
|
||||
|
||||
这将创建 `~/.trae-cn/`,适用于所有 Trae 项目。
|
||||
|
||||
安装程序使用非破坏性复制 - 它不会覆盖您现有的文件。
|
||||
|
||||
## 安装模式
|
||||
|
||||
### 本地安装
|
||||
|
||||
安装到当前项目的 `.trae` 或 `.trae-cn` 目录:
|
||||
|
||||
```bash
|
||||
# 安装到当前项目的 .trae 目录(默认)
|
||||
cd /path/to/your/project
|
||||
.trae/install.sh
|
||||
|
||||
# 安装到当前项目的 .trae-cn 目录(CN 环境)
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
### 全局安装
|
||||
|
||||
安装到您主目录的 `.trae` 或 `.trae-cn` 目录(适用于所有 Trae 项目):
|
||||
|
||||
```bash
|
||||
# 全局安装到 ~/.trae/(默认)
|
||||
.trae/install.sh ~
|
||||
|
||||
# 全局安装到 ~/.trae-cn/(CN 环境)
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
```
|
||||
|
||||
**注意**:全局安装适用于希望在所有项目之间维护单个 ECC 副本的场景。
|
||||
|
||||
## 环境支持
|
||||
|
||||
- **默认**:使用 `.trae` 目录
|
||||
- **CN 环境**:使用 `.trae-cn` 目录(通过 `TRAE_ENV=cn` 设置)
|
||||
|
||||
### 强制指定环境
|
||||
|
||||
```bash
|
||||
# 从项目根目录强制使用 CN 环境
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
|
||||
# 进入 .trae 目录后使用默认环境
|
||||
cd .trae
|
||||
./install.sh
|
||||
```
|
||||
|
||||
**注意**:`TRAE_ENV` 是一个全局环境变量,适用于整个安装会话。
|
||||
|
||||
## 卸载
|
||||
|
||||
卸载程序使用清单文件(`.ecc-manifest`)跟踪已安装的文件,确保安全删除:
|
||||
|
||||
```bash
|
||||
# 从当前目录卸载(如果已经在 .trae 或 .trae-cn 目录中)
|
||||
cd .trae-cn
|
||||
./uninstall.sh
|
||||
|
||||
# 或者从项目根目录卸载
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/uninstall.sh
|
||||
|
||||
# 从主目录全局卸载
|
||||
TRAE_ENV=cn .trae/uninstall.sh ~
|
||||
|
||||
# 卸载前会询问确认
|
||||
```
|
||||
|
||||
### 卸载行为
|
||||
|
||||
- **安全删除**:仅删除清单中跟踪的文件(由 ECC 安装的文件)
|
||||
- **保留用户文件**:您手动添加的任何文件都会被保留
|
||||
- **非空目录**:包含用户添加文件的目录会被跳过
|
||||
- **基于清单**:需要 `.ecc-manifest` 文件(在安装时创建)
|
||||
|
||||
### 环境支持
|
||||
|
||||
卸载程序遵循与安装程序相同的 `TRAE_ENV` 环境变量:
|
||||
|
||||
```bash
|
||||
# 从 .trae-cn 卸载(CN 环境)
|
||||
TRAE_ENV=cn ./uninstall.sh
|
||||
|
||||
# 从 .trae 卸载(默认环境)
|
||||
./uninstall.sh
|
||||
```
|
||||
|
||||
**注意**:如果找不到清单文件(旧版本安装),卸载程序将询问是否删除整个目录。
|
||||
|
||||
## 包含的内容
|
||||
|
||||
### 命令
|
||||
|
||||
命令是通过 Trae 聊天中的 `/` 菜单调用的按需工作流。所有命令都直接复用自项目根目录的 `commands/` 文件夹。
|
||||
|
||||
### 智能体
|
||||
|
||||
智能体是具有特定工具配置的专门 AI 助手。所有智能体都直接复用自项目根目录的 `agents/` 文件夹。
|
||||
|
||||
### 技能
|
||||
|
||||
技能是通过聊天中的 `/` 菜单调用的按需工作流。所有技能都直接复用自项目的 `skills/` 文件夹。
|
||||
|
||||
### 规则
|
||||
|
||||
规则提供始终适用的规则和上下文,塑造智能体处理代码的方式。所有规则都直接复用自项目根目录的 `rules/` 文件夹。
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 在聊天中输入 `/` 以打开命令菜单
|
||||
2. 选择一个命令或技能
|
||||
3. 智能体将通过具体说明和检查清单指导您完成工作流
|
||||
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
.trae/ (或 .trae-cn/)
|
||||
├── commands/ # 命令文件(复用自项目根目录)
|
||||
├── agents/ # 智能体文件(复用自项目根目录)
|
||||
├── skills/ # 技能文件(复用自 skills/)
|
||||
├── rules/ # 规则文件(复用自项目根目录)
|
||||
├── install.sh # 安装脚本
|
||||
├── uninstall.sh # 卸载脚本
|
||||
└── README.md # 此文件
|
||||
```
|
||||
|
||||
## 自定义
|
||||
|
||||
安装后,所有文件都归您修改。安装程序永远不会覆盖现有文件,因此您的自定义在重新安装时是安全的。
|
||||
|
||||
**注意**:安装时会自动将 `install.sh` 和 `uninstall.sh` 脚本复制到目标目录,这样您可以在项目本地直接运行这些命令。
|
||||
|
||||
## 推荐的工作流
|
||||
|
||||
1. **从计划开始**:使用 `/plan` 命令分解复杂功能
|
||||
2. **先写测试**:在实现之前调用 `/tdd` 命令
|
||||
3. **审查您的代码**:编写代码后使用 `/code-review`
|
||||
4. **检查安全性**:对于身份验证、API 端点或敏感数据处理,再次使用 `/code-review`
|
||||
5. **修复构建错误**:如果有构建错误,使用 `/build-fix`
|
||||
|
||||
## 下一步
|
||||
|
||||
- 在 Trae 中打开您的项目
|
||||
- 输入 `/` 以查看可用命令
|
||||
- 享受 ECC 工作流!
|
||||
221
.trae/install.sh
Executable file
221
.trae/install.sh
Executable file
@@ -0,0 +1,221 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# ECC Trae Installer
|
||||
# Installs Everything Claude Code workflows into a Trae project.
|
||||
#
|
||||
# Usage:
|
||||
# ./install.sh # Install to current directory
|
||||
# ./install.sh ~ # Install globally to ~/.trae/ or ~/.trae-cn/
|
||||
#
|
||||
# Environment:
|
||||
# TRAE_ENV=cn # Force use .trae-cn directory
|
||||
#
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# When globs match nothing, expand to empty list instead of the literal pattern
|
||||
shopt -s nullglob
|
||||
|
||||
# Resolve the directory where this script lives (the repo root)
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
REPO_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||
|
||||
# Get the trae directory name (.trae or .trae-cn)
|
||||
get_trae_dir() {
|
||||
if [ "${TRAE_ENV:-}" = "cn" ]; then
|
||||
echo ".trae-cn"
|
||||
else
|
||||
echo ".trae"
|
||||
fi
|
||||
}
|
||||
|
||||
ensure_manifest_entry() {
|
||||
local manifest="$1"
|
||||
local entry="$2"
|
||||
|
||||
touch "$manifest"
|
||||
if ! grep -Fqx "$entry" "$manifest"; then
|
||||
echo "$entry" >> "$manifest"
|
||||
fi
|
||||
}
|
||||
|
||||
# Install function
|
||||
do_install() {
|
||||
local target_dir="$PWD"
|
||||
local trae_dir="$(get_trae_dir)"
|
||||
|
||||
# Check if ~ was specified (or expanded to $HOME)
|
||||
if [ "$#" -ge 1 ]; then
|
||||
if [ "$1" = "~" ] || [ "$1" = "$HOME" ]; then
|
||||
target_dir="$HOME"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if we're already inside a .trae or .trae-cn directory
|
||||
local current_dir_name="$(basename "$target_dir")"
|
||||
local trae_full_path
|
||||
|
||||
if [ "$current_dir_name" = ".trae" ] || [ "$current_dir_name" = ".trae-cn" ]; then
|
||||
# Already inside the trae directory, use it directly
|
||||
trae_full_path="$target_dir"
|
||||
else
|
||||
# Normal case: append trae_dir to target_dir
|
||||
trae_full_path="$target_dir/$trae_dir"
|
||||
fi
|
||||
|
||||
echo "ECC Trae Installer"
|
||||
echo "=================="
|
||||
echo ""
|
||||
echo "Source: $REPO_ROOT"
|
||||
echo "Target: $trae_full_path/"
|
||||
echo ""
|
||||
|
||||
# Subdirectories to create
|
||||
SUBDIRS="commands agents skills rules"
|
||||
|
||||
# Create all required trae subdirectories
|
||||
for dir in $SUBDIRS; do
|
||||
mkdir -p "$trae_full_path/$dir"
|
||||
done
|
||||
|
||||
# Manifest file to track installed files
|
||||
MANIFEST="$trae_full_path/.ecc-manifest"
|
||||
touch "$MANIFEST"
|
||||
|
||||
# Counters for summary
|
||||
commands=0
|
||||
agents=0
|
||||
skills=0
|
||||
rules=0
|
||||
other=0
|
||||
|
||||
# Copy commands from repo root
|
||||
if [ -d "$REPO_ROOT/commands" ]; then
|
||||
for f in "$REPO_ROOT/commands"/*.md; do
|
||||
[ -f "$f" ] || continue
|
||||
local_name=$(basename "$f")
|
||||
target_path="$trae_full_path/commands/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$f" "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "commands/$local_name"
|
||||
commands=$((commands + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "commands/$local_name"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Copy agents from repo root
|
||||
if [ -d "$REPO_ROOT/agents" ]; then
|
||||
for f in "$REPO_ROOT/agents"/*.md; do
|
||||
[ -f "$f" ] || continue
|
||||
local_name=$(basename "$f")
|
||||
target_path="$trae_full_path/agents/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$f" "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "agents/$local_name"
|
||||
agents=$((agents + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "agents/$local_name"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Copy skills from repo root (if available)
|
||||
if [ -d "$REPO_ROOT/skills" ]; then
|
||||
for d in "$REPO_ROOT/skills"/*/; do
|
||||
[ -d "$d" ] || continue
|
||||
skill_name="$(basename "$d")"
|
||||
target_skill_dir="$trae_full_path/skills/$skill_name"
|
||||
skill_copied=0
|
||||
|
||||
while IFS= read -r source_file; do
|
||||
relative_path="${source_file#$d}"
|
||||
target_path="$target_skill_dir/$relative_path"
|
||||
|
||||
mkdir -p "$(dirname "$target_path")"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$source_file" "$target_path"
|
||||
skill_copied=1
|
||||
fi
|
||||
ensure_manifest_entry "$MANIFEST" "skills/$skill_name/$relative_path"
|
||||
done < <(find "$d" -type f | sort)
|
||||
|
||||
if [ "$skill_copied" -eq 1 ]; then
|
||||
skills=$((skills + 1))
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Copy rules from repo root
|
||||
if [ -d "$REPO_ROOT/rules" ]; then
|
||||
while IFS= read -r rule_file; do
|
||||
relative_path="${rule_file#$REPO_ROOT/rules/}"
|
||||
target_path="$trae_full_path/rules/$relative_path"
|
||||
|
||||
mkdir -p "$(dirname "$target_path")"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$rule_file" "$target_path"
|
||||
rules=$((rules + 1))
|
||||
fi
|
||||
ensure_manifest_entry "$MANIFEST" "rules/$relative_path"
|
||||
done < <(find "$REPO_ROOT/rules" -type f | sort)
|
||||
fi
|
||||
|
||||
# Copy README files from this directory
|
||||
for readme_file in "$SCRIPT_DIR/README.md" "$SCRIPT_DIR/README.zh-CN.md"; do
|
||||
if [ -f "$readme_file" ]; then
|
||||
local_name=$(basename "$readme_file")
|
||||
target_path="$trae_full_path/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$readme_file" "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
other=$((other + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# Copy install and uninstall scripts
|
||||
for script_file in "$SCRIPT_DIR/install.sh" "$SCRIPT_DIR/uninstall.sh"; do
|
||||
if [ -f "$script_file" ]; then
|
||||
local_name=$(basename "$script_file")
|
||||
target_path="$trae_full_path/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$script_file" "$target_path"
|
||||
chmod +x "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
other=$((other + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# Add manifest file itself to manifest
|
||||
ensure_manifest_entry "$MANIFEST" ".ecc-manifest"
|
||||
|
||||
# Installation summary
|
||||
echo "Installation complete!"
|
||||
echo ""
|
||||
echo "Components installed:"
|
||||
echo " Commands: $commands"
|
||||
echo " Agents: $agents"
|
||||
echo " Skills: $skills"
|
||||
echo " Rules: $rules"
|
||||
echo ""
|
||||
echo "Directory: $(basename "$trae_full_path")"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo " 1. Open your project in Trae"
|
||||
echo " 2. Type / to see available commands"
|
||||
echo " 3. Enjoy the ECC workflows!"
|
||||
echo ""
|
||||
echo "To uninstall later:"
|
||||
echo " cd $trae_full_path"
|
||||
echo " ./uninstall.sh"
|
||||
}
|
||||
|
||||
# Main logic
|
||||
do_install "$@"
|
||||
194
.trae/uninstall.sh
Executable file
194
.trae/uninstall.sh
Executable file
@@ -0,0 +1,194 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# ECC Trae Uninstaller
|
||||
# Uninstalls Everything Claude Code workflows from a Trae project.
|
||||
#
|
||||
# Usage:
|
||||
# ./uninstall.sh # Uninstall from current directory
|
||||
# ./uninstall.sh ~ # Uninstall globally from ~/.trae/
|
||||
#
|
||||
# Environment:
|
||||
# TRAE_ENV=cn # Force use .trae-cn directory
|
||||
#
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Resolve the directory where this script lives
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
# Get the trae directory name (.trae or .trae-cn)
|
||||
get_trae_dir() {
|
||||
# Check environment variable first
|
||||
if [ "${TRAE_ENV:-}" = "cn" ]; then
|
||||
echo ".trae-cn"
|
||||
else
|
||||
echo ".trae"
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_path() {
|
||||
python3 -c 'import os, sys; print(os.path.realpath(sys.argv[1]))' "$1"
|
||||
}
|
||||
|
||||
is_valid_manifest_entry() {
|
||||
local file_path="$1"
|
||||
|
||||
case "$file_path" in
|
||||
""|/*|~*|*/../*|../*|*/..|..)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Main uninstall function
|
||||
do_uninstall() {
|
||||
local target_dir="$PWD"
|
||||
local trae_dir="$(get_trae_dir)"
|
||||
|
||||
# Check if ~ was specified (or expanded to $HOME)
|
||||
if [ "$#" -ge 1 ]; then
|
||||
if [ "$1" = "~" ] || [ "$1" = "$HOME" ]; then
|
||||
target_dir="$HOME"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if we're already inside a .trae or .trae-cn directory
|
||||
local current_dir_name="$(basename "$target_dir")"
|
||||
local trae_full_path
|
||||
|
||||
if [ "$current_dir_name" = ".trae" ] || [ "$current_dir_name" = ".trae-cn" ]; then
|
||||
# Already inside the trae directory, use it directly
|
||||
trae_full_path="$target_dir"
|
||||
else
|
||||
# Normal case: append trae_dir to target_dir
|
||||
trae_full_path="$target_dir/$trae_dir"
|
||||
fi
|
||||
|
||||
echo "ECC Trae Uninstaller"
|
||||
echo "===================="
|
||||
echo ""
|
||||
echo "Target: $trae_full_path/"
|
||||
echo ""
|
||||
|
||||
if [ ! -d "$trae_full_path" ]; then
|
||||
echo "Error: $trae_dir directory not found at $target_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
trae_root_resolved="$(resolve_path "$trae_full_path")"
|
||||
|
||||
# Manifest file path
|
||||
MANIFEST="$trae_full_path/.ecc-manifest"
|
||||
|
||||
if [ ! -f "$MANIFEST" ]; then
|
||||
echo "Warning: No manifest file found (.ecc-manifest)"
|
||||
echo ""
|
||||
echo "This could mean:"
|
||||
echo " 1. ECC was installed with an older version without manifest support"
|
||||
echo " 2. The manifest file was manually deleted"
|
||||
echo ""
|
||||
read -p "Do you want to remove the entire $trae_dir directory? (y/N) " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Uninstall cancelled."
|
||||
exit 0
|
||||
fi
|
||||
rm -rf "$trae_full_path"
|
||||
echo "Uninstall complete!"
|
||||
echo ""
|
||||
echo "Removed: $trae_full_path/"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Found manifest file - will only remove files installed by ECC"
|
||||
echo ""
|
||||
read -p "Are you sure you want to uninstall ECC from $trae_dir? (y/N) " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Uninstall cancelled."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Counters
|
||||
removed=0
|
||||
skipped=0
|
||||
|
||||
# Read manifest and remove files
|
||||
while IFS= read -r file_path; do
|
||||
[ -z "$file_path" ] && continue
|
||||
|
||||
if ! is_valid_manifest_entry "$file_path"; then
|
||||
echo "Skipped: $file_path (invalid manifest entry)"
|
||||
skipped=$((skipped + 1))
|
||||
continue
|
||||
fi
|
||||
|
||||
full_path="$trae_full_path/$file_path"
|
||||
resolved_full="$(resolve_path "$full_path")"
|
||||
|
||||
case "$resolved_full" in
|
||||
"$trae_root_resolved"|"$trae_root_resolved"/*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipped: $file_path (invalid manifest entry)"
|
||||
skipped=$((skipped + 1))
|
||||
continue
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -f "$resolved_full" ]; then
|
||||
rm -f "$resolved_full"
|
||||
echo "Removed: $file_path"
|
||||
removed=$((removed + 1))
|
||||
elif [ -d "$resolved_full" ]; then
|
||||
# Only remove directory if it's empty
|
||||
if [ -z "$(ls -A "$resolved_full" 2>/dev/null)" ]; then
|
||||
rmdir "$resolved_full" 2>/dev/null || true
|
||||
if [ ! -d "$resolved_full" ]; then
|
||||
echo "Removed: $file_path/"
|
||||
removed=$((removed + 1))
|
||||
fi
|
||||
else
|
||||
echo "Skipped: $file_path/ (not empty - contains user files)"
|
||||
skipped=$((skipped + 1))
|
||||
fi
|
||||
else
|
||||
skipped=$((skipped + 1))
|
||||
fi
|
||||
done < "$MANIFEST"
|
||||
|
||||
while IFS= read -r empty_dir; do
|
||||
[ "$empty_dir" = "$trae_full_path" ] && continue
|
||||
relative_dir="${empty_dir#$trae_full_path/}"
|
||||
rmdir "$empty_dir" 2>/dev/null || true
|
||||
if [ ! -d "$empty_dir" ]; then
|
||||
echo "Removed: $relative_dir/"
|
||||
removed=$((removed + 1))
|
||||
fi
|
||||
done < <(find "$trae_full_path" -depth -type d -empty 2>/dev/null | sort -r)
|
||||
|
||||
# Try to remove the main trae directory if it's empty
|
||||
if [ -d "$trae_full_path" ] && [ -z "$(ls -A "$trae_full_path" 2>/dev/null)" ]; then
|
||||
rmdir "$trae_full_path" 2>/dev/null || true
|
||||
if [ ! -d "$trae_full_path" ]; then
|
||||
echo "Removed: $trae_dir/"
|
||||
removed=$((removed + 1))
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Uninstall complete!"
|
||||
echo ""
|
||||
echo "Summary:"
|
||||
echo " Removed: $removed items"
|
||||
echo " Skipped: $skipped items (not found or user-modified)"
|
||||
echo ""
|
||||
if [ -d "$trae_full_path" ]; then
|
||||
echo "Note: $trae_dir directory still exists (contains user-added files)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Execute uninstall
|
||||
do_uninstall "$@"
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — Agent Instructions
|
||||
|
||||
This is a **production-ready AI coding plugin** providing 28 specialized agents, 125 skills, 60 commands, and automated hook workflows for software development.
|
||||
This is a **production-ready AI coding plugin** providing 30 specialized agents, 135 skills, 60 commands, and automated hook workflows for software development.
|
||||
|
||||
**Version:** 1.9.0
|
||||
|
||||
@@ -141,8 +141,8 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
agents/ — 28 specialized subagents
|
||||
skills/ — 125 workflow skills and domain knowledge
|
||||
agents/ — 30 specialized subagents
|
||||
skills/ — 135 workflow skills and domain knowledge
|
||||
commands/ — 60 slash commands
|
||||
hooks/ — Trigger-based automations
|
||||
rules/ — Always-follow guidelines (common + per-language)
|
||||
|
||||
159
COMMANDS-QUICK-REF.md
Normal file
159
COMMANDS-QUICK-REF.md
Normal file
@@ -0,0 +1,159 @@
|
||||
# Commands Quick Reference
|
||||
|
||||
> 59 slash commands installed globally. Type `/` in any Claude Code session to invoke.
|
||||
|
||||
---
|
||||
|
||||
## Core Workflow
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/plan` | Restate requirements, assess risks, write step-by-step implementation plan — **waits for your confirm before touching code** |
|
||||
| `/tdd` | Enforce test-driven development: scaffold interface → write failing test → implement → verify 80%+ coverage |
|
||||
| `/code-review` | Full code quality, security, and maintainability review of changed files |
|
||||
| `/build-fix` | Detect and fix build errors — delegates to the right build-resolver agent automatically |
|
||||
| `/verify` | Run the full verification loop: build → lint → test → type-check |
|
||||
| `/quality-gate` | Quality gate check against project standards |
|
||||
|
||||
---
|
||||
|
||||
## Testing
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/tdd` | Universal TDD workflow (any language) |
|
||||
| `/e2e` | Generate + run Playwright end-to-end tests, capture screenshots/videos/traces |
|
||||
| `/test-coverage` | Report test coverage, identify gaps |
|
||||
| `/go-test` | TDD workflow for Go (table-driven, 80%+ coverage with `go test -cover`) |
|
||||
| `/kotlin-test` | TDD for Kotlin (Kotest + Kover) |
|
||||
| `/rust-test` | TDD for Rust (cargo test, integration tests) |
|
||||
| `/cpp-test` | TDD for C++ (GoogleTest + gcov/lcov) |
|
||||
|
||||
---
|
||||
|
||||
## Code Review
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/code-review` | Universal code review |
|
||||
| `/python-review` | Python — PEP 8, type hints, security, idiomatic patterns |
|
||||
| `/go-review` | Go — idiomatic patterns, concurrency safety, error handling |
|
||||
| `/kotlin-review` | Kotlin — null safety, coroutine safety, clean architecture |
|
||||
| `/rust-review` | Rust — ownership, lifetimes, unsafe usage |
|
||||
| `/cpp-review` | C++ — memory safety, modern idioms, concurrency |
|
||||
|
||||
---
|
||||
|
||||
## Build Fixers
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/build-fix` | Auto-detect language and fix build errors |
|
||||
| `/go-build` | Fix Go build errors and `go vet` warnings |
|
||||
| `/kotlin-build` | Fix Kotlin/Gradle compiler errors |
|
||||
| `/rust-build` | Fix Rust build + borrow checker issues |
|
||||
| `/cpp-build` | Fix C++ CMake and linker problems |
|
||||
| `/gradle-build` | Fix Gradle errors for Android / KMP |
|
||||
|
||||
---
|
||||
|
||||
## Planning & Architecture
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/plan` | Implementation plan with risk assessment |
|
||||
| `/multi-plan` | Multi-model collaborative planning |
|
||||
| `/multi-workflow` | Multi-model collaborative development |
|
||||
| `/multi-backend` | Backend-focused multi-model development |
|
||||
| `/multi-frontend` | Frontend-focused multi-model development |
|
||||
| `/multi-execute` | Multi-model collaborative execution |
|
||||
| `/orchestrate` | Guide for tmux/worktree multi-agent orchestration |
|
||||
| `/devfleet` | Orchestrate parallel Claude Code agents via DevFleet |
|
||||
|
||||
---
|
||||
|
||||
## Session Management
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/save-session` | Save current session state to `~/.claude/session-data/` |
|
||||
| `/resume-session` | Load the most recent saved session from the canonical session store and resume from where you left off |
|
||||
| `/sessions` | Browse, search, and manage session history with aliases from `~/.claude/session-data/` (with legacy reads from `~/.claude/sessions/`) |
|
||||
| `/checkpoint` | Mark a checkpoint in the current session |
|
||||
| `/aside` | Answer a quick side question without losing current task context |
|
||||
| `/context-budget` | Analyse context window usage — find token overhead, optimise |
|
||||
|
||||
---
|
||||
|
||||
## Learning & Improvement
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/learn` | Extract reusable patterns from the current session |
|
||||
| `/learn-eval` | Extract patterns + self-evaluate quality before saving |
|
||||
| `/evolve` | Analyse learned instincts, suggest evolved skill structures |
|
||||
| `/promote` | Promote project-scoped instincts to global scope |
|
||||
| `/instinct-status` | Show all learned instincts (project + global) with confidence scores |
|
||||
| `/instinct-export` | Export instincts to a file |
|
||||
| `/instinct-import` | Import instincts from a file or URL |
|
||||
| `/skill-create` | Analyse local git history → generate a reusable skill |
|
||||
| `/skill-health` | Skill portfolio health dashboard with analytics |
|
||||
| `/rules-distill` | Scan skills, extract cross-cutting principles, distill into rules |
|
||||
|
||||
---
|
||||
|
||||
## Refactoring & Cleanup
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/refactor-clean` | Remove dead code, consolidate duplicates, clean up structure |
|
||||
| `/prompt-optimize` | Analyse a draft prompt and output an optimised ECC-enriched version |
|
||||
|
||||
---
|
||||
|
||||
## Docs & Research
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/docs` | Look up current library/API documentation via Context7 |
|
||||
| `/update-docs` | Update project documentation |
|
||||
| `/update-codemaps` | Regenerate codemaps for the codebase |
|
||||
|
||||
---
|
||||
|
||||
## Loops & Automation
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/loop-start` | Start a recurring agent loop on an interval |
|
||||
| `/loop-status` | Check status of running loops |
|
||||
| `/claw` | Start NanoClaw v2 — persistent REPL with model routing, skill hot-load, branching, and metrics |
|
||||
|
||||
---
|
||||
|
||||
## Project & Infrastructure
|
||||
|
||||
| Command | What it does |
|
||||
|---------|-------------|
|
||||
| `/projects` | List known projects and their instinct statistics |
|
||||
| `/harness-audit` | Audit the agent harness configuration for reliability and cost |
|
||||
| `/eval` | Run the evaluation harness |
|
||||
| `/model-route` | Route a task to the right model (Haiku / Sonnet / Opus) |
|
||||
| `/pm2` | PM2 process manager initialisation |
|
||||
| `/setup-pm` | Configure package manager (npm / pnpm / yarn / bun) |
|
||||
|
||||
---
|
||||
|
||||
## Quick Decision Guide
|
||||
|
||||
```
|
||||
Starting a new feature? → /plan first, then /tdd
|
||||
Code just written? → /code-review
|
||||
Build broken? → /build-fix
|
||||
Need live docs? → /docs <library>
|
||||
Session about to end? → /save-session or /learn-eval
|
||||
Resuming next day? → /resume-session
|
||||
Context getting heavy? → /context-budget then /checkpoint
|
||||
Want to extract what you learned? → /learn-eval then /evolve
|
||||
Running repeated tasks? → /loop-start
|
||||
```
|
||||
@@ -73,6 +73,13 @@ git add . && git commit -m "feat: add my-skill" && git push -u origin feat/my-co
|
||||
|
||||
Skills are knowledge modules that Claude Code loads based on context.
|
||||
|
||||
> **📚 Comprehensive Guide:** For detailed guidance on creating effective skills, see [Skill Development Guide](docs/SKILL-DEVELOPMENT-GUIDE.md). It covers:
|
||||
> - Skill architecture and categories
|
||||
> - Writing effective content with examples
|
||||
> - Best practices and common patterns
|
||||
> - Testing and validation
|
||||
> - Complete examples gallery
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
@@ -86,7 +93,7 @@ skills/
|
||||
```markdown
|
||||
---
|
||||
name: your-skill-name
|
||||
description: Brief description shown in skill list
|
||||
description: Brief description shown in skill list and used for auto-activation
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
@@ -94,6 +101,10 @@ origin: ECC
|
||||
|
||||
Brief overview of what this skill covers.
|
||||
|
||||
## When to Activate
|
||||
|
||||
Describe scenarios where Claude should use this skill. This is critical for auto-activation.
|
||||
|
||||
## Core Concepts
|
||||
|
||||
Explain key patterns and guidelines.
|
||||
@@ -107,33 +118,54 @@ function example() {
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
Show what NOT to do with examples.
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Actionable guidelines
|
||||
- Do's and don'ts
|
||||
- Common pitfalls to avoid
|
||||
|
||||
## When to Use
|
||||
## Related Skills
|
||||
|
||||
Describe scenarios where this skill applies.
|
||||
Link to complementary skills (e.g., `related-skill-1`, `related-skill-2`).
|
||||
```
|
||||
|
||||
### Skill Categories
|
||||
|
||||
| Category | Purpose | Examples |
|
||||
|----------|---------|----------|
|
||||
| **Language Standards** | Idioms, conventions, best practices | `python-patterns`, `golang-patterns` |
|
||||
| **Framework Patterns** | Framework-specific guidance | `django-patterns`, `nextjs-patterns` |
|
||||
| **Workflow** | Step-by-step processes | `tdd-workflow`, `refactoring-workflow` |
|
||||
| **Domain Knowledge** | Specialized domains | `security-review`, `api-design` |
|
||||
| **Tool Integration** | Tool/library usage | `docker-patterns`, `supabase-patterns` |
|
||||
| **Template** | Project-specific skill templates | `project-guidelines-example` |
|
||||
|
||||
### Skill Checklist
|
||||
|
||||
- [ ] Focused on one domain/technology
|
||||
- [ ] Includes practical code examples
|
||||
- [ ] Under 500 lines
|
||||
- [ ] Focused on one domain/technology (not too broad)
|
||||
- [ ] Includes "When to Activate" section for auto-activation
|
||||
- [ ] Includes practical, copy-pasteable code examples
|
||||
- [ ] Shows anti-patterns (what NOT to do)
|
||||
- [ ] Under 500 lines (800 max)
|
||||
- [ ] Uses clear section headers
|
||||
- [ ] Tested with Claude Code
|
||||
- [ ] Links to related skills
|
||||
- [ ] No sensitive data (API keys, tokens, paths)
|
||||
|
||||
### Example Skills
|
||||
|
||||
| Skill | Purpose |
|
||||
|-------|---------|
|
||||
| `coding-standards/` | TypeScript/JavaScript patterns |
|
||||
| `frontend-patterns/` | React and Next.js best practices |
|
||||
| `backend-patterns/` | API and database patterns |
|
||||
| `security-review/` | Security checklist |
|
||||
| Skill | Category | Purpose |
|
||||
|-------|----------|---------|
|
||||
| `coding-standards/` | Language Standards | TypeScript/JavaScript patterns |
|
||||
| `frontend-patterns/` | Framework Patterns | React and Next.js best practices |
|
||||
| `backend-patterns/` | Framework Patterns | API and database patterns |
|
||||
| `security-review/` | Domain Knowledge | Security checklist |
|
||||
| `tdd-workflow/` | Workflow | Test-driven development process |
|
||||
| `project-guidelines-example/` | Template | Project-specific skill template |
|
||||
|
||||
---
|
||||
|
||||
|
||||
122
EVALUATION.md
Normal file
122
EVALUATION.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# Repo Evaluation vs Current Setup
|
||||
|
||||
**Date:** 2026-03-21
|
||||
**Branch:** `claude/evaluate-repo-comparison-ASZ9Y`
|
||||
|
||||
---
|
||||
|
||||
## Current Setup (`~/.claude/`)
|
||||
|
||||
The active Claude Code installation is near-minimal:
|
||||
|
||||
| Component | Current |
|
||||
|-----------|---------|
|
||||
| Agents | 0 |
|
||||
| Skills | 0 installed |
|
||||
| Commands | 0 |
|
||||
| Hooks | 1 (Stop: git check) |
|
||||
| Rules | 0 |
|
||||
| MCP configs | 0 |
|
||||
|
||||
**Installed hooks:**
|
||||
- `Stop` → `stop-hook-git-check.sh` — blocks session end if there are uncommitted changes or unpushed commits
|
||||
|
||||
**Installed permissions:**
|
||||
- `Skill` — allows skill invocations
|
||||
|
||||
**Plugins:** Only `blocklist.json` (no active plugins installed)
|
||||
|
||||
---
|
||||
|
||||
## This Repo (`everything-claude-code` v1.9.0)
|
||||
|
||||
| Component | Repo |
|
||||
|-----------|------|
|
||||
| Agents | 28 |
|
||||
| Skills | 116 |
|
||||
| Commands | 59 |
|
||||
| Rules sets | 12 languages + common (60+ rule files) |
|
||||
| Hooks | Comprehensive system (PreToolUse, PostToolUse, SessionStart, Stop) |
|
||||
| MCP configs | 1 (Context7 + others) |
|
||||
| Schemas | 9 JSON validators |
|
||||
| Scripts/CLI | 46+ Node.js modules + multiple CLIs |
|
||||
| Tests | 58 test files |
|
||||
| Install profiles | core, developer, security, research, full |
|
||||
| Supported harnesses | Claude Code, Codex, Cursor, OpenCode |
|
||||
|
||||
---
|
||||
|
||||
## Gap Analysis
|
||||
|
||||
### Hooks
|
||||
- **Current:** 1 Stop hook (git hygiene check)
|
||||
- **Repo:** Full hook matrix covering:
|
||||
- Dangerous command blocking (`rm -rf`, force pushes)
|
||||
- Auto-formatting on file edits
|
||||
- Dev server tmux enforcement
|
||||
- Cost tracking
|
||||
- Session evaluation and governance capture
|
||||
- MCP health monitoring
|
||||
|
||||
### Agents (28 missing)
|
||||
The repo provides specialized agents for every major workflow:
|
||||
- Language reviewers: TypeScript, Python, Go, Java, Kotlin, Rust, C++, Flutter
|
||||
- Build resolvers: Go, Java, Kotlin, Rust, C++, PyTorch
|
||||
- Workflow agents: planner, tdd-guide, code-reviewer, security-reviewer, architect
|
||||
- Automation: loop-operator, doc-updater, refactor-cleaner, harness-optimizer
|
||||
|
||||
### Skills (116 missing)
|
||||
Domain knowledge modules covering:
|
||||
- Language patterns (Python, Go, Kotlin, Rust, C++, Java, Swift, Perl, Laravel, Django)
|
||||
- Testing strategies (TDD, E2E, coverage)
|
||||
- Architecture patterns (backend, frontend, API design, database migrations)
|
||||
- AI/ML workflows (Claude API, eval harness, agent loops, cost-aware pipelines)
|
||||
- Business workflows (investor materials, market research, content engine)
|
||||
|
||||
### Commands (59 missing)
|
||||
- `/tdd`, `/plan`, `/e2e`, `/code-review` — core dev workflows
|
||||
- `/sessions`, `/save-session`, `/resume-session` — session persistence
|
||||
- `/orchestrate`, `/multi-plan`, `/multi-execute` — multi-agent coordination
|
||||
- `/learn`, `/skill-create`, `/evolve` — continuous improvement
|
||||
- `/build-fix`, `/verify`, `/quality-gate` — build/quality automation
|
||||
|
||||
### Rules (60+ files missing)
|
||||
Language-specific coding style, patterns, testing, and security guidelines for:
|
||||
TypeScript, Python, Go, Java, Kotlin, Rust, C++, C#, Swift, Perl, PHP, and common/cross-language rules.
|
||||
|
||||
---
|
||||
|
||||
## Recommendations
|
||||
|
||||
### Immediate value (core install)
|
||||
Run `ecc install --profile core` to get:
|
||||
- Core agents (code-reviewer, planner, tdd-guide, security-reviewer)
|
||||
- Essential skills (tdd-workflow, coding-standards, security-review)
|
||||
- Key commands (/tdd, /plan, /code-review, /build-fix)
|
||||
|
||||
### Full install
|
||||
Run `ecc install --profile full` to get all 28 agents, 116 skills, and 59 commands.
|
||||
|
||||
### Hooks upgrade
|
||||
The current Stop hook is solid. The repo's `hooks.json` adds:
|
||||
- Dangerous command blocking (safety)
|
||||
- Auto-formatting (quality)
|
||||
- Cost tracking (observability)
|
||||
- Session evaluation (learning)
|
||||
|
||||
### Rules
|
||||
Adding language rules (e.g., TypeScript, Python) provides always-on coding guidelines without relying on per-session prompts.
|
||||
|
||||
---
|
||||
|
||||
## What the Current Setup Does Well
|
||||
|
||||
- The `stop-hook-git-check.sh` Stop hook is production-quality and already enforces good git hygiene
|
||||
- The `Skill` permission is correctly configured
|
||||
- The setup is clean with no conflicts or cruft
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
The current setup is essentially a blank slate with one well-implemented git hygiene hook. This repo provides a complete, production-tested enhancement layer covering agents, skills, commands, hooks, and rules — with a selective install system so you can add exactly what you need without bloating the configuration.
|
||||
46
README.md
46
README.md
@@ -1,6 +1,4 @@
|
||||
**Language:** English | [Português (Brasil)](docs/pt-BR/README.md) | [简体中文](README.zh-CN.md) | [繁體中文](docs/zh-TW/README.md) | [日本語](docs/ja-JP/README.md) | [한국어](docs/ko-KR/README.md)
|
||||
[Türkçe](docs/tr/README.md)
|
||||
|
||||
**Language:** English | [Português (Brasil)](docs/pt-BR/README.md) | [简体中文](README.zh-CN.md) | [繁體中文](docs/zh-TW/README.md) | [日本語](docs/ja-JP/README.md) | [한국어](docs/ko-KR/README.md) | [Türkçe](docs/tr/README.md)
|
||||
|
||||
# Everything Claude Code
|
||||
|
||||
@@ -207,7 +205,7 @@ npm install # or: pnpm install | yarn install | bun install
|
||||
npx ecc-install typescript
|
||||
```
|
||||
|
||||
For manual install instructions see the README in the `rules/` folder.
|
||||
For manual install instructions see the README in the `rules/` folder. When copying rules manually, copy the whole language directory (for example `rules/common` or `rules/golang`), not the files inside it, so relative references keep working and filenames do not collide.
|
||||
|
||||
### Step 3: Start Using
|
||||
|
||||
@@ -222,7 +220,7 @@ For manual install instructions see the README in the `rules/` folder.
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **That's it!** You now have access to 28 agents, 125 skills, and 60 commands.
|
||||
✨ **That's it!** You now have access to 30 agents, 135 skills, and 60 commands.
|
||||
|
||||
### Multi-model commands require additional setup
|
||||
|
||||
@@ -297,7 +295,7 @@ everything-claude-code/
|
||||
| |-- plugin.json # Plugin metadata and component paths
|
||||
| |-- marketplace.json # Marketplace catalog for /plugin marketplace add
|
||||
|
|
||||
|-- agents/ # 28 specialized subagents for delegation
|
||||
|-- agents/ # 29 specialized subagents for delegation
|
||||
| |-- planner.md # Feature implementation planning
|
||||
| |-- architect.md # System design decisions
|
||||
| |-- tdd-guide.md # Test-driven development
|
||||
@@ -638,16 +636,16 @@ This gives you instant access to all commands, agents, skills, and hooks.
|
||||
>
|
||||
> # Option A: User-level rules (applies to all projects)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php ~/.claude/rules/
|
||||
>
|
||||
> # Option B: Project-level rules (applies to current project only)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # pick your stack
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/ # pick your stack
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -663,12 +661,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# Copy agents to your Claude config
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Copy rules (common + language-specific)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
# Copy rules directories (common + language-specific)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php ~/.claude/rules/
|
||||
|
||||
# Copy commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -874,7 +873,8 @@ Yes. Use Option 2 (manual installation) and copy only what you need:
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Just rules
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
mkdir -p ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
```
|
||||
|
||||
Each component is fully independent.
|
||||
@@ -1023,6 +1023,8 @@ cp .codex/config.toml ~/.codex/config.toml
|
||||
|
||||
The sync script safely merges ECC MCP servers into your existing `~/.codex/config.toml` using an **add-only** strategy — it never removes or modifies your existing servers. Run with `--dry-run` to preview changes, or `--update-mcp` to force-refresh ECC servers to the latest recommended config.
|
||||
|
||||
For Context7, ECC uses the canonical Codex section name `[mcp_servers.context7]` while still launching the `@upstash/context7-mcp` package. If you already have a legacy `[mcp_servers.context7-mcp]` entry, `--update-mcp` migrates it to the canonical section name.
|
||||
|
||||
Codex macOS app:
|
||||
- Open this repository as your workspace.
|
||||
- The root `AGENTS.md` is auto-detected.
|
||||
@@ -1107,9 +1109,9 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
|
||||
| Feature | Claude Code | OpenCode | Status |
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | ✅ 28 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Agents | ✅ 30 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Commands | ✅ 60 commands | ✅ 31 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 125 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Skills | ✅ 135 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** |
|
||||
| Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** |
|
||||
| MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** |
|
||||
|
||||
@@ -82,14 +82,17 @@
|
||||
# 首先克隆仓库
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
# 复制规则目录(通用 + 语言特定)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
```
|
||||
|
||||
复制规则时,请复制整个目录(例如 `rules/common`、`rules/golang`),而不是复制目录内的文件;这样可以保留相对引用,并避免不同规则集中的同名文件互相覆盖。
|
||||
|
||||
### 第三步:开始使用
|
||||
|
||||
```bash
|
||||
@@ -366,11 +369,20 @@ everything-claude-code/
|
||||
> git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
>
|
||||
> # 选项 A:用户级规则(应用于所有项目)
|
||||
> cp -r everything-claude-code/rules/* ~/.claude/rules/
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
>
|
||||
> # 选项 B:项目级规则(仅应用于当前项目)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/
|
||||
> cp -r everything-claude-code/rules/python .claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang .claude/rules/
|
||||
> cp -r everything-claude-code/rules/perl .claude/rules/
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -386,12 +398,13 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
# 将代理复制到你的 Claude 配置
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# 复制规则(通用 + 语言特定)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl/* ~/.claude/rules/
|
||||
# 复制规则目录(通用 + 语言特定)
|
||||
mkdir -p ~/.claude/rules
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/ # 选择你的技术栈
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/perl ~/.claude/rules/
|
||||
|
||||
# 复制命令
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
196
REPO-ASSESSMENT.md
Normal file
196
REPO-ASSESSMENT.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# Repo & Fork Assessment + Setup Recommendations
|
||||
|
||||
**Date:** 2026-03-21
|
||||
|
||||
---
|
||||
|
||||
## What's Available
|
||||
|
||||
### Repo: `Infiniteyieldai/everything-claude-code`
|
||||
|
||||
This is a **fork of `affaan-m/everything-claude-code`** (the upstream project with 50K+ stars, 6K+ forks).
|
||||
|
||||
| Attribute | Value |
|
||||
|-----------|-------|
|
||||
| Version | 1.9.0 (current) |
|
||||
| Status | Clean fork — 1 commit ahead of upstream `main` (the EVALUATION.md doc added in this session) |
|
||||
| Remote branches | `main`, `claude/evaluate-repo-comparison-ASZ9Y` |
|
||||
| Upstream sync | Fully synced — last upstream commit merged was the zh-CN docs PR (#728) |
|
||||
| License | MIT |
|
||||
|
||||
**This is the right repo to work from.** It's the latest upstream version with no divergence or merge conflicts.
|
||||
|
||||
---
|
||||
|
||||
### Current `~/.claude/` Installation
|
||||
|
||||
| Component | Installed | Available in Repo |
|
||||
|-----------|-----------|-------------------|
|
||||
| Agents | 0 | 28 |
|
||||
| Skills | 0 | 116 |
|
||||
| Commands | 0 | 59 |
|
||||
| Rules | 0 | 60+ files (12 languages) |
|
||||
| Hooks | 1 (git Stop check) | Full PreToolUse/PostToolUse matrix |
|
||||
| MCP configs | 0 | 1 (Context7) |
|
||||
|
||||
The existing Stop hook (`stop-hook-git-check.sh`) is solid — blocks session end on uncommitted/unpushed work. Keep it.
|
||||
|
||||
---
|
||||
|
||||
## Install Profile Recommendations
|
||||
|
||||
The repo ships 5 install profiles. Choose based on your primary use case:
|
||||
|
||||
### Profile: `core` (Minimum viable setup)
|
||||
> Fastest to install. Gets you commands, core agents, hooks runtime, and quality workflow.
|
||||
|
||||
**Best for:** Trying ECC out, minimal footprint, or a constrained environment.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile core
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
**Installs:** rules-core, agents-core, commands-core, hooks-runtime, platform-configs, workflow-quality
|
||||
|
||||
---
|
||||
|
||||
### Profile: `developer` (Recommended for daily dev work)
|
||||
> The default engineering profile for most ECC users.
|
||||
|
||||
**Best for:** General software development across app codebases.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile developer
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
**Adds over core:** framework-language skills, database patterns, orchestration commands
|
||||
|
||||
---
|
||||
|
||||
### Profile: `security`
|
||||
> Baseline runtime + security-specific agents and rules.
|
||||
|
||||
**Best for:** Security-focused workflows, code audits, vulnerability reviews.
|
||||
|
||||
---
|
||||
|
||||
### Profile: `research`
|
||||
> Investigation, synthesis, and publishing workflows.
|
||||
|
||||
**Best for:** Content creation, investor materials, market research, cross-posting.
|
||||
|
||||
---
|
||||
|
||||
### Profile: `full`
|
||||
> Everything — all 18 modules.
|
||||
|
||||
**Best for:** Power users who want the complete toolkit.
|
||||
|
||||
```bash
|
||||
node scripts/install-plan.js --profile full
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Priority Additions (High Value, Low Risk)
|
||||
|
||||
Regardless of profile, these components add immediate value:
|
||||
|
||||
### 1. Core Agents (highest ROI)
|
||||
|
||||
| Agent | Why it matters |
|
||||
|-------|----------------|
|
||||
| `planner.md` | Breaks complex tasks into implementation plans |
|
||||
| `code-reviewer.md` | Quality and maintainability review |
|
||||
| `tdd-guide.md` | TDD workflow (RED→GREEN→IMPROVE) |
|
||||
| `security-reviewer.md` | Vulnerability detection |
|
||||
| `architect.md` | System design & scalability decisions |
|
||||
|
||||
### 2. Key Commands
|
||||
|
||||
| Command | Why it matters |
|
||||
|---------|----------------|
|
||||
| `/plan` | Implementation planning before coding |
|
||||
| `/tdd` | Test-driven workflow |
|
||||
| `/code-review` | On-demand review |
|
||||
| `/build-fix` | Automated build error resolution |
|
||||
| `/learn` | Extract patterns from current session |
|
||||
|
||||
### 3. Hook Upgrades (from `hooks/hooks.json`)
|
||||
The repo's hook system adds these over the current single Stop hook:
|
||||
|
||||
| Hook | Trigger | Value |
|
||||
|------|---------|-------|
|
||||
| `block-no-verify` | PreToolUse: Bash | Blocks `--no-verify` git flag abuse |
|
||||
| `pre-bash-git-push-reminder` | PreToolUse: Bash | Pre-push review reminder |
|
||||
| `doc-file-warning` | PreToolUse: Write | Warns on non-standard doc files |
|
||||
| `suggest-compact` | PreToolUse: Edit/Write | Suggests compaction at logical intervals |
|
||||
| Continuous learning observer | PreToolUse: * | Captures tool use patterns for skill improvement |
|
||||
|
||||
### 4. Rules (Always-on guidelines)
|
||||
The `rules/common/` directory provides baseline guidelines that fire on every session:
|
||||
- `security.md` — Security guardrails
|
||||
- `testing.md` — 80%+ coverage requirement
|
||||
- `git-workflow.md` — Conventional commits, branch strategy
|
||||
- `coding-style.md` — Cross-language style standards
|
||||
|
||||
---
|
||||
|
||||
## What to Do With the Fork
|
||||
|
||||
### Option A: Use as upstream tracker (current state)
|
||||
Keep the fork synced with `affaan-m/everything-claude-code` upstream. Periodically merge upstream changes:
|
||||
```bash
|
||||
git fetch upstream
|
||||
git merge upstream/main
|
||||
```
|
||||
Install from the local clone. This is clean and maintainable.
|
||||
|
||||
### Option B: Customize the fork
|
||||
Add personal skills, agents, or commands to the fork. Good for:
|
||||
- Business-specific domain skills (your vertical)
|
||||
- Team-specific coding conventions
|
||||
- Custom hooks for your stack
|
||||
|
||||
The fork already has the EVALUATION.md and REPO-ASSESSMENT.md docs — that's fine for a working fork.
|
||||
|
||||
### Option C: Install from npm (simplest for fresh machines)
|
||||
```bash
|
||||
npx ecc-universal install --profile developer
|
||||
```
|
||||
No need to clone the repo. This is the recommended install method for most users.
|
||||
|
||||
---
|
||||
|
||||
## Recommended Setup Steps
|
||||
|
||||
1. **Keep the existing Stop hook** — it's doing its job
|
||||
2. **Run the developer profile install** from the local fork:
|
||||
```bash
|
||||
cd /path/to/everything-claude-code
|
||||
node scripts/install-plan.js --profile developer
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
3. **Add language rules** for your primary stack (TypeScript, Python, Go, etc.):
|
||||
```bash
|
||||
node scripts/install-plan.js --add rules/typescript
|
||||
node scripts/install-apply.js
|
||||
```
|
||||
4. **Enable MCP Context7** for live documentation lookup:
|
||||
- Copy `mcp-configs/mcp-servers.json` into your project's `.claude/` dir
|
||||
5. **Review hooks** — enable the `hooks/hooks.json` additions selectively, starting with `block-no-verify` and `pre-bash-git-push-reminder`
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| Question | Answer |
|
||||
|----------|--------|
|
||||
| Is the fork healthy? | Yes — fully synced with upstream v1.9.0 |
|
||||
| Other forks to consider? | None visible in this environment; upstream `affaan-m/everything-claude-code` is the source of truth |
|
||||
| Best install profile? | `developer` for day-to-day dev work |
|
||||
| Biggest gap in current setup? | 0 agents installed — add at minimum: planner, code-reviewer, tdd-guide, security-reviewer |
|
||||
| Quickest win? | Run `node scripts/install-plan.js --profile core && node scripts/install-apply.js` |
|
||||
83
agents/healthcare-reviewer.md
Normal file
83
agents/healthcare-reviewer.md
Normal file
@@ -0,0 +1,83 @@
|
||||
---
|
||||
name: healthcare-reviewer
|
||||
description: Reviews healthcare application code for clinical safety, CDSS accuracy, PHI compliance, and medical data integrity. Specialized for EMR/EHR, clinical decision support, and health information systems.
|
||||
tools: ["Read", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
# Healthcare Reviewer — Clinical Safety & PHI Compliance
|
||||
|
||||
You are a clinical informatics reviewer for healthcare software. Patient safety is your top priority. You review code for clinical accuracy, data protection, and regulatory compliance.
|
||||
|
||||
## Your Responsibilities
|
||||
|
||||
1. **CDSS accuracy** — Verify drug interaction logic, dose validation rules, and clinical scoring implementations match published medical standards
|
||||
2. **PHI/PII protection** — Scan for patient data exposure in logs, errors, responses, URLs, and client storage
|
||||
3. **Clinical data integrity** — Ensure audit trails, locked records, and cascade protection
|
||||
4. **Medical data correctness** — Verify ICD-10/SNOMED mappings, lab reference ranges, and drug database entries
|
||||
5. **Integration compliance** — Validate HL7/FHIR message handling and error recovery
|
||||
|
||||
## Critical Checks
|
||||
|
||||
### CDSS Engine
|
||||
|
||||
- [ ] All drug interaction pairs produce correct alerts (both directions)
|
||||
- [ ] Dose validation rules fire on out-of-range values
|
||||
- [ ] Clinical scoring matches published specification (NEWS2 = Royal College of Physicians, qSOFA = Sepsis-3)
|
||||
- [ ] No false negatives (missed interaction = patient safety event)
|
||||
- [ ] Malformed inputs produce errors, NOT silent passes
|
||||
|
||||
### PHI Protection
|
||||
|
||||
- [ ] No patient data in `console.log`, `console.error`, or error messages
|
||||
- [ ] No PHI in URL parameters or query strings
|
||||
- [ ] No PHI in browser localStorage/sessionStorage
|
||||
- [ ] No `service_role` key in client-side code
|
||||
- [ ] RLS enabled on all tables with patient data
|
||||
- [ ] Cross-facility data isolation verified
|
||||
|
||||
### Clinical Workflow
|
||||
|
||||
- [ ] Encounter lock prevents edits (addendum only)
|
||||
- [ ] Audit trail entry on every create/read/update/delete of clinical data
|
||||
- [ ] Critical alerts are non-dismissable (not toast notifications)
|
||||
- [ ] Override reasons logged when clinician proceeds past critical alert
|
||||
- [ ] Red flag symptoms trigger visible alerts
|
||||
|
||||
### Data Integrity
|
||||
|
||||
- [ ] No CASCADE DELETE on patient records
|
||||
- [ ] Concurrent edit detection (optimistic locking or conflict resolution)
|
||||
- [ ] No orphaned records across clinical tables
|
||||
- [ ] Timestamps use consistent timezone
|
||||
|
||||
## Output Format
|
||||
|
||||
```
|
||||
## Healthcare Review: [module/feature]
|
||||
|
||||
### Patient Safety Impact: [CRITICAL / HIGH / MEDIUM / LOW / NONE]
|
||||
|
||||
### Clinical Accuracy
|
||||
- CDSS: [checks passed/failed]
|
||||
- Drug DB: [verified/issues]
|
||||
- Scoring: [matches spec/deviates]
|
||||
|
||||
### PHI Compliance
|
||||
- Exposure vectors checked: [list]
|
||||
- Issues found: [list or none]
|
||||
|
||||
### Issues
|
||||
1. [PATIENT SAFETY / CLINICAL / PHI / TECHNICAL] Description
|
||||
- Impact: [potential harm or exposure]
|
||||
- Fix: [required change]
|
||||
|
||||
### Verdict: [SAFE TO DEPLOY / NEEDS FIXES / BLOCK — PATIENT SAFETY RISK]
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- When in doubt about clinical accuracy, flag as NEEDS REVIEW — never approve uncertain clinical logic
|
||||
- A single missed drug interaction is worse than a hundred false alarms
|
||||
- PHI exposure is always CRITICAL severity, regardless of how small the leak
|
||||
- Never approve code that silently catches CDSS errors
|
||||
446
agents/performance-optimizer.md
Normal file
446
agents/performance-optimizer.md
Normal file
@@ -0,0 +1,446 @@
|
||||
---
|
||||
name: performance-optimizer
|
||||
description: Performance analysis and optimization specialist. Use PROACTIVELY for identifying bottlenecks, optimizing slow code, reducing bundle sizes, and improving runtime performance. Profiling, memory leaks, render optimization, and algorithmic improvements.
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: sonnet
|
||||
---
|
||||
|
||||
# Performance Optimizer
|
||||
|
||||
You are an expert performance specialist focused on identifying bottlenecks and optimizing application speed, memory usage, and efficiency. Your mission is to make code faster, lighter, and more responsive.
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
1. **Performance Profiling** — Identify slow code paths, memory leaks, and bottlenecks
|
||||
2. **Bundle Optimization** — Reduce JavaScript bundle sizes, lazy loading, code splitting
|
||||
3. **Runtime Optimization** — Improve algorithmic efficiency, reduce unnecessary computations
|
||||
4. **React/Rendering Optimization** — Prevent unnecessary re-renders, optimize component trees
|
||||
5. **Database & Network** — Optimize queries, reduce API calls, implement caching
|
||||
6. **Memory Management** — Detect leaks, optimize memory usage, cleanup resources
|
||||
|
||||
## Analysis Commands
|
||||
|
||||
```bash
|
||||
# Bundle analysis
|
||||
npx webpack-bundle-analyzer
|
||||
npx source-map-explorer build/static/js/*.js
|
||||
|
||||
# Lighthouse performance audit
|
||||
npx lighthouse https://your-app.com --view
|
||||
|
||||
# Node.js profiling
|
||||
node --prof your-app.js
|
||||
node --prof-process isolate-*.log
|
||||
|
||||
# Memory analysis
|
||||
node --inspect your-app.js # Then use Chrome DevTools
|
||||
|
||||
# React profiling (in browser)
|
||||
# React DevTools > Profiler tab
|
||||
|
||||
# Network analysis
|
||||
npx webpack-bundle-analyzer
|
||||
```
|
||||
|
||||
## Performance Review Workflow
|
||||
|
||||
### 1. Identify Performance Issues
|
||||
|
||||
**Critical Performance Indicators:**
|
||||
|
||||
| Metric | Target | Action if Exceeded |
|
||||
|--------|--------|-------------------|
|
||||
| First Contentful Paint | < 1.8s | Optimize critical path, inline critical CSS |
|
||||
| Largest Contentful Paint | < 2.5s | Lazy load images, optimize server response |
|
||||
| Time to Interactive | < 3.8s | Code splitting, reduce JavaScript |
|
||||
| Cumulative Layout Shift | < 0.1 | Reserve space for images, avoid layout thrashing |
|
||||
| Total Blocking Time | < 200ms | Break up long tasks, use web workers |
|
||||
| Bundle Size (gzipped) | < 200KB | Tree shaking, lazy loading, code splitting |
|
||||
|
||||
### 2. Algorithmic Analysis
|
||||
|
||||
Check for inefficient algorithms:
|
||||
|
||||
| Pattern | Complexity | Better Alternative |
|
||||
|---------|------------|-------------------|
|
||||
| Nested loops on same data | O(n²) | Use Map/Set for O(1) lookups |
|
||||
| Repeated array searches | O(n) per search | Convert to Map for O(1) |
|
||||
| Sorting inside loop | O(n² log n) | Sort once outside loop |
|
||||
| String concatenation in loop | O(n²) | Use array.join() |
|
||||
| Deep cloning large objects | O(n) each time | Use shallow copy or immer |
|
||||
| Recursion without memoization | O(2^n) | Add memoization |
|
||||
|
||||
```typescript
|
||||
// BAD: O(n²) - searching array in loop
|
||||
for (const user of users) {
|
||||
const posts = allPosts.filter(p => p.userId === user.id); // O(n) per user
|
||||
}
|
||||
|
||||
// GOOD: O(n) - group once with Map
|
||||
const postsByUser = new Map<number, Post[]>();
|
||||
for (const post of allPosts) {
|
||||
const userPosts = postsByUser.get(post.userId) || [];
|
||||
userPosts.push(post);
|
||||
postsByUser.set(post.userId, userPosts);
|
||||
}
|
||||
// Now O(1) lookup per user
|
||||
```
|
||||
|
||||
### 3. React Performance Optimization
|
||||
|
||||
**Common React Anti-patterns:**
|
||||
|
||||
```tsx
|
||||
// BAD: Inline function creation in render
|
||||
<Button onClick={() => handleClick(id)}>Submit</Button>
|
||||
|
||||
// GOOD: Stable callback with useCallback
|
||||
const handleButtonClick = useCallback(() => handleClick(id), [handleClick, id]);
|
||||
<Button onClick={handleButtonClick}>Submit</Button>
|
||||
|
||||
// BAD: Object creation in render
|
||||
<Child style={{ color: 'red' }} />
|
||||
|
||||
// GOOD: Stable object reference
|
||||
const style = useMemo(() => ({ color: 'red' }), []);
|
||||
<Child style={style} />
|
||||
|
||||
// BAD: Expensive computation on every render
|
||||
const sortedItems = items.sort((a, b) => a.name.localeCompare(b.name));
|
||||
|
||||
// GOOD: Memoize expensive computations
|
||||
const sortedItems = useMemo(
|
||||
() => [...items].sort((a, b) => a.name.localeCompare(b.name)),
|
||||
[items]
|
||||
);
|
||||
|
||||
// BAD: List without keys or with index
|
||||
{items.map((item, index) => <Item key={index} />)}
|
||||
|
||||
// GOOD: Stable unique keys
|
||||
{items.map(item => <Item key={item.id} item={item} />)}
|
||||
```
|
||||
|
||||
**React Performance Checklist:**
|
||||
|
||||
- [ ] `useMemo` for expensive computations
|
||||
- [ ] `useCallback` for functions passed to children
|
||||
- [ ] `React.memo` for frequently re-rendered components
|
||||
- [ ] Proper dependency arrays in hooks
|
||||
- [ ] Virtualization for long lists (react-window, react-virtualized)
|
||||
- [ ] Lazy loading for heavy components (`React.lazy`)
|
||||
- [ ] Code splitting at route level
|
||||
|
||||
### 4. Bundle Size Optimization
|
||||
|
||||
**Bundle Analysis Checklist:**
|
||||
|
||||
```bash
|
||||
# Analyze bundle composition
|
||||
npx webpack-bundle-analyzer build/static/js/*.js
|
||||
|
||||
# Check for duplicate dependencies
|
||||
npx duplicate-package-checker-analyzer
|
||||
|
||||
# Find largest files
|
||||
du -sh node_modules/* | sort -hr | head -20
|
||||
```
|
||||
|
||||
**Optimization Strategies:**
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Large vendor bundle | Tree shaking, smaller alternatives |
|
||||
| Duplicate code | Extract to shared module |
|
||||
| Unused exports | Remove dead code with knip |
|
||||
| Moment.js | Use date-fns or dayjs (smaller) |
|
||||
| Lodash | Use lodash-es or native methods |
|
||||
| Large icons library | Import only needed icons |
|
||||
|
||||
```javascript
|
||||
// BAD: Import entire library
|
||||
import _ from 'lodash';
|
||||
import moment from 'moment';
|
||||
|
||||
// GOOD: Import only what you need
|
||||
import debounce from 'lodash/debounce';
|
||||
import { format, addDays } from 'date-fns';
|
||||
|
||||
// Or use lodash-es with tree shaking
|
||||
import { debounce, throttle } from 'lodash-es';
|
||||
```
|
||||
|
||||
### 5. Database & Query Optimization
|
||||
|
||||
**Query Optimization Patterns:**
|
||||
|
||||
```sql
|
||||
-- BAD: Select all columns
|
||||
SELECT * FROM users WHERE active = true;
|
||||
|
||||
-- GOOD: Select only needed columns
|
||||
SELECT id, name, email FROM users WHERE active = true;
|
||||
|
||||
-- BAD: N+1 queries (in application loop)
|
||||
-- 1 query for users, then N queries for each user's orders
|
||||
|
||||
-- GOOD: Single query with JOIN or batch fetch
|
||||
SELECT u.*, o.id as order_id, o.total
|
||||
FROM users u
|
||||
LEFT JOIN orders o ON u.id = o.user_id
|
||||
WHERE u.active = true;
|
||||
|
||||
-- Add index for frequently queried columns
|
||||
CREATE INDEX idx_users_active ON users(active);
|
||||
CREATE INDEX idx_orders_user_id ON orders(user_id);
|
||||
```
|
||||
|
||||
**Database Performance Checklist:**
|
||||
|
||||
- [ ] Indexes on frequently queried columns
|
||||
- [ ] Composite indexes for multi-column queries
|
||||
- [ ] Avoid SELECT * in production code
|
||||
- [ ] Use connection pooling
|
||||
- [ ] Implement query result caching
|
||||
- [ ] Use pagination for large result sets
|
||||
- [ ] Monitor slow query logs
|
||||
|
||||
### 6. Network & API Optimization
|
||||
|
||||
**Network Optimization Strategies:**
|
||||
|
||||
```typescript
|
||||
// BAD: Multiple sequential requests
|
||||
const user = await fetchUser(id);
|
||||
const posts = await fetchPosts(user.id);
|
||||
const comments = await fetchComments(posts[0].id);
|
||||
|
||||
// GOOD: Parallel requests when independent
|
||||
const [user, posts] = await Promise.all([
|
||||
fetchUser(id),
|
||||
fetchPosts(id)
|
||||
]);
|
||||
|
||||
// GOOD: Batch requests when possible
|
||||
const results = await batchFetch(['user1', 'user2', 'user3']);
|
||||
|
||||
// Implement request caching
|
||||
const fetchWithCache = async (url: string, ttl = 300000) => {
|
||||
const cached = cache.get(url);
|
||||
if (cached) return cached;
|
||||
|
||||
const data = await fetch(url).then(r => r.json());
|
||||
cache.set(url, data, ttl);
|
||||
return data;
|
||||
};
|
||||
|
||||
// Debounce rapid API calls
|
||||
const debouncedSearch = debounce(async (query: string) => {
|
||||
const results = await searchAPI(query);
|
||||
setResults(results);
|
||||
}, 300);
|
||||
```
|
||||
|
||||
**Network Optimization Checklist:**
|
||||
|
||||
- [ ] Parallel independent requests with `Promise.all`
|
||||
- [ ] Implement request caching
|
||||
- [ ] Debounce rapid-fire requests
|
||||
- [ ] Use streaming for large responses
|
||||
- [ ] Implement pagination for large datasets
|
||||
- [ ] Use GraphQL or API batching to reduce requests
|
||||
- [ ] Enable compression (gzip/brotli) on server
|
||||
|
||||
### 7. Memory Leak Detection
|
||||
|
||||
**Common Memory Leak Patterns:**
|
||||
|
||||
```typescript
|
||||
// BAD: Event listener without cleanup
|
||||
useEffect(() => {
|
||||
window.addEventListener('resize', handleResize);
|
||||
// Missing cleanup!
|
||||
}, []);
|
||||
|
||||
// GOOD: Clean up event listeners
|
||||
useEffect(() => {
|
||||
window.addEventListener('resize', handleResize);
|
||||
return () => window.removeEventListener('resize', handleResize);
|
||||
}, []);
|
||||
|
||||
// BAD: Timer without cleanup
|
||||
useEffect(() => {
|
||||
setInterval(() => pollData(), 1000);
|
||||
// Missing cleanup!
|
||||
}, []);
|
||||
|
||||
// GOOD: Clean up timers
|
||||
useEffect(() => {
|
||||
const interval = setInterval(() => pollData(), 1000);
|
||||
return () => clearInterval(interval);
|
||||
}, []);
|
||||
|
||||
// BAD: Holding references in closures
|
||||
const Component = () => {
|
||||
const largeData = useLargeData();
|
||||
useEffect(() => {
|
||||
eventEmitter.on('update', () => {
|
||||
console.log(largeData); // Closure keeps reference
|
||||
});
|
||||
}, [largeData]);
|
||||
};
|
||||
|
||||
// GOOD: Use refs or proper dependencies
|
||||
const largeDataRef = useRef(largeData);
|
||||
useEffect(() => {
|
||||
largeDataRef.current = largeData;
|
||||
}, [largeData]);
|
||||
|
||||
useEffect(() => {
|
||||
const handleUpdate = () => {
|
||||
console.log(largeDataRef.current);
|
||||
};
|
||||
eventEmitter.on('update', handleUpdate);
|
||||
return () => eventEmitter.off('update', handleUpdate);
|
||||
}, []);
|
||||
```
|
||||
|
||||
**Memory Leak Detection:**
|
||||
|
||||
```bash
|
||||
# Chrome DevTools Memory tab:
|
||||
# 1. Take heap snapshot
|
||||
# 2. Perform action
|
||||
# 3. Take another snapshot
|
||||
# 4. Compare to find objects that shouldn't exist
|
||||
# 5. Look for detached DOM nodes, event listeners, closures
|
||||
|
||||
# Node.js memory debugging
|
||||
node --inspect app.js
|
||||
# Open chrome://inspect
|
||||
# Take heap snapshots and compare
|
||||
```
|
||||
|
||||
## Performance Testing
|
||||
|
||||
### Lighthouse Audits
|
||||
|
||||
```bash
|
||||
# Run full lighthouse audit
|
||||
npx lighthouse https://your-app.com --view --preset=desktop
|
||||
|
||||
# CI mode for automated checks
|
||||
npx lighthouse https://your-app.com --output=json --output-path=./lighthouse.json
|
||||
|
||||
# Check specific metrics
|
||||
npx lighthouse https://your-app.com --only-categories=performance
|
||||
```
|
||||
|
||||
### Performance Budgets
|
||||
|
||||
```json
|
||||
// package.json
|
||||
{
|
||||
"bundlesize": [
|
||||
{
|
||||
"path": "./build/static/js/*.js",
|
||||
"maxSize": "200 kB"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Web Vitals Monitoring
|
||||
|
||||
```typescript
|
||||
// Track Core Web Vitals
|
||||
import { getCLS, getFID, getLCP, getFCP, getTTFB } from 'web-vitals';
|
||||
|
||||
getCLS(console.log); // Cumulative Layout Shift
|
||||
getFID(console.log); // First Input Delay
|
||||
getLCP(console.log); // Largest Contentful Paint
|
||||
getFCP(console.log); // First Contentful Paint
|
||||
getTTFB(console.log); // Time to First Byte
|
||||
```
|
||||
|
||||
## Performance Report Template
|
||||
|
||||
````markdown
|
||||
# Performance Audit Report
|
||||
|
||||
## Executive Summary
|
||||
- **Overall Score**: X/100
|
||||
- **Critical Issues**: X
|
||||
- **Recommendations**: X
|
||||
|
||||
## Bundle Analysis
|
||||
| Metric | Current | Target | Status |
|
||||
|--------|---------|--------|--------|
|
||||
| Total Size (gzip) | XXX KB | < 200 KB | ⚠️ |
|
||||
| Main Bundle | XXX KB | < 100 KB | ✅ |
|
||||
| Vendor Bundle | XXX KB | < 150 KB | ⚠️ |
|
||||
|
||||
## Web Vitals
|
||||
| Metric | Current | Target | Status |
|
||||
|--------|---------|--------|--------|
|
||||
| LCP | X.Xs | < 2.5s | ✅ |
|
||||
| FID | XXms | < 100ms | ✅ |
|
||||
| CLS | X.XX | < 0.1 | ⚠️ |
|
||||
|
||||
## Critical Issues
|
||||
|
||||
### 1. [Issue Title]
|
||||
**File**: path/to/file.ts:42
|
||||
**Impact**: High - Causes XXXms delay
|
||||
**Fix**: [Description of fix]
|
||||
|
||||
```typescript
|
||||
// Before (slow)
|
||||
const slowCode = ...;
|
||||
|
||||
// After (optimized)
|
||||
const fastCode = ...;
|
||||
```
|
||||
|
||||
### 2. [Issue Title]
|
||||
...
|
||||
|
||||
## Recommendations
|
||||
1. [Priority recommendation]
|
||||
2. [Priority recommendation]
|
||||
3. [Priority recommendation]
|
||||
|
||||
## Estimated Impact
|
||||
- Bundle size reduction: XX KB (XX%)
|
||||
- LCP improvement: XXms
|
||||
- Time to Interactive improvement: XXms
|
||||
````
|
||||
|
||||
## When to Run
|
||||
|
||||
**ALWAYS:** Before major releases, after adding new features, when users report slowness, during performance regression testing.
|
||||
|
||||
**IMMEDIATELY:** Lighthouse score drops, bundle size increases >10%, memory usage grows, slow page loads.
|
||||
|
||||
## Red Flags - Act Immediately
|
||||
|
||||
| Issue | Action |
|
||||
|-------|--------|
|
||||
| Bundle > 500KB gzip | Code split, lazy load, tree shake |
|
||||
| LCP > 4s | Optimize critical path, preload resources |
|
||||
| Memory usage growing | Check for leaks, review useEffect cleanup |
|
||||
| CPU spikes | Profile with Chrome DevTools |
|
||||
| Database query > 1s | Add index, optimize query, cache results |
|
||||
|
||||
## Success Metrics
|
||||
|
||||
- Lighthouse performance score > 90
|
||||
- All Core Web Vitals in "good" range
|
||||
- Bundle size under budget
|
||||
- No memory leaks detected
|
||||
- Test suite still passing
|
||||
- No performance regressions
|
||||
|
||||
---
|
||||
|
||||
**Remember**: Performance is a feature. Users notice speed. Every 100ms of improvement matters. Optimize for the 90th percentile, not the average.
|
||||
919
docs/SKILL-DEVELOPMENT-GUIDE.md
Normal file
919
docs/SKILL-DEVELOPMENT-GUIDE.md
Normal file
@@ -0,0 +1,919 @@
|
||||
# Skill Development Guide
|
||||
|
||||
A comprehensive guide to creating effective skills for Everything Claude Code (ECC).
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [What Are Skills?](#what-are-skills)
|
||||
- [Skill Architecture](#skill-architecture)
|
||||
- [Creating Your First Skill](#creating-your-first-skill)
|
||||
- [Skill Categories](#skill-categories)
|
||||
- [Writing Effective Skill Content](#writing-effective-skill-content)
|
||||
- [Best Practices](#best-practices)
|
||||
- [Common Patterns](#common-patterns)
|
||||
- [Testing Your Skill](#testing-your-skill)
|
||||
- [Submitting Your Skill](#submitting-your-skill)
|
||||
- [Examples Gallery](#examples-gallery)
|
||||
|
||||
---
|
||||
|
||||
## What Are Skills?
|
||||
|
||||
Skills are **knowledge modules** that Claude Code loads based on context. They provide:
|
||||
|
||||
- **Domain expertise**: Framework patterns, language idioms, best practices
|
||||
- **Workflow definitions**: Step-by-step processes for common tasks
|
||||
- **Reference material**: Code snippets, checklists, decision trees
|
||||
- **Context injection**: Activate when specific conditions are met
|
||||
|
||||
Unlike **agents** (specialized sub-assistants) or **commands** (user-triggered actions), skills are passive knowledge that Claude Code references when relevant.
|
||||
|
||||
### When Skills Activate
|
||||
|
||||
Skills activate when:
|
||||
- The user's task matches the skill's domain
|
||||
- Claude Code detects relevant context
|
||||
- A command references a skill
|
||||
- An agent needs domain knowledge
|
||||
|
||||
### Skill vs Agent vs Command
|
||||
|
||||
| Component | Purpose | Activation |
|
||||
|-----------|---------|------------|
|
||||
| **Skill** | Knowledge repository | Context-based (automatic) |
|
||||
| **Agent** | Task executor | Explicit delegation |
|
||||
| **Command** | User action | User-invoked (`/command`) |
|
||||
| **Hook** | Automation | Event-triggered |
|
||||
| **Rule** | Always-on guidelines | Always active |
|
||||
|
||||
---
|
||||
|
||||
## Skill Architecture
|
||||
|
||||
### File Structure
|
||||
|
||||
```
|
||||
skills/
|
||||
└── your-skill-name/
|
||||
├── SKILL.md # Required: Main skill definition
|
||||
├── examples/ # Optional: Code examples
|
||||
│ ├── basic.ts
|
||||
│ └── advanced.ts
|
||||
└── references/ # Optional: External references
|
||||
└── links.md
|
||||
```
|
||||
|
||||
### SKILL.md Format
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: skill-name
|
||||
description: Brief description shown in skill list and used for auto-activation
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Skill Title
|
||||
|
||||
Brief overview of what this skill covers.
|
||||
|
||||
## When to Activate
|
||||
|
||||
Describe scenarios where Claude should use this skill.
|
||||
|
||||
## Core Concepts
|
||||
|
||||
Main patterns and guidelines.
|
||||
|
||||
## Code Examples
|
||||
|
||||
\`\`\`typescript
|
||||
// Practical, tested examples
|
||||
\`\`\`
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
Show what NOT to do with concrete examples.
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Actionable guidelines
|
||||
- Do's and don'ts
|
||||
|
||||
## Related Skills
|
||||
|
||||
Link to complementary skills.
|
||||
```
|
||||
|
||||
### YAML Frontmatter Fields
|
||||
|
||||
| Field | Required | Description |
|
||||
|-------|----------|-------------|
|
||||
| `name` | Yes | Lowercase, hyphenated identifier (e.g., `react-patterns`) |
|
||||
| `description` | Yes | One-line description for skill list and auto-activation |
|
||||
| `origin` | No | Source identifier (e.g., `ECC`, `community`, project name) |
|
||||
| `tags` | No | Array of tags for categorization |
|
||||
| `version` | No | Skill version for tracking updates |
|
||||
|
||||
---
|
||||
|
||||
## Creating Your First Skill
|
||||
|
||||
### Step 1: Choose a Focus
|
||||
|
||||
Good skills are **focused and actionable**:
|
||||
|
||||
| ✅ Good Focus | ❌ Too Broad |
|
||||
|---------------|--------------|
|
||||
| `react-hook-patterns` | `react` |
|
||||
| `postgresql-indexing` | `databases` |
|
||||
| `pytest-fixtures` | `python-testing` |
|
||||
| `nextjs-app-router` | `nextjs` |
|
||||
|
||||
### Step 2: Create the Directory
|
||||
|
||||
```bash
|
||||
mkdir -p skills/your-skill-name
|
||||
```
|
||||
|
||||
### Step 3: Write SKILL.md
|
||||
|
||||
Here's a minimal template:
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: your-skill-name
|
||||
description: Brief description of when to use this skill
|
||||
---
|
||||
|
||||
# Your Skill Title
|
||||
|
||||
Brief overview (1-2 sentences).
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Scenario 1
|
||||
- Scenario 2
|
||||
- Scenario 3
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### Concept 1
|
||||
|
||||
Explanation with examples.
|
||||
|
||||
### Concept 2
|
||||
|
||||
Another pattern with code.
|
||||
|
||||
## Code Examples
|
||||
|
||||
\`\`\`typescript
|
||||
// Practical example
|
||||
\`\`\`
|
||||
|
||||
## Best Practices
|
||||
|
||||
- Do this
|
||||
- Avoid that
|
||||
|
||||
## Related Skills
|
||||
|
||||
- `related-skill-1`
|
||||
- `related-skill-2`
|
||||
```
|
||||
|
||||
### Step 4: Add Content
|
||||
|
||||
Write content that Claude can **immediately use**:
|
||||
|
||||
- ✅ Copy-pasteable code examples
|
||||
- ✅ Clear decision trees
|
||||
- ✅ Checklists for verification
|
||||
- ❌ Vague explanations without examples
|
||||
- ❌ Long prose without actionable guidance
|
||||
|
||||
---
|
||||
|
||||
## Skill Categories
|
||||
|
||||
### Language Standards
|
||||
|
||||
Focus on idiomatic code, naming conventions, and language-specific patterns.
|
||||
|
||||
**Examples:** `python-patterns`, `golang-patterns`, `typescript-standards`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: python-patterns
|
||||
description: Python idioms, best practices, and patterns for clean, idiomatic code.
|
||||
---
|
||||
|
||||
# Python Patterns
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing Python code
|
||||
- Refactoring Python modules
|
||||
- Python code review
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### Context Managers
|
||||
|
||||
\`\`\`python
|
||||
# Always use context managers for resources
|
||||
with open('file.txt') as f:
|
||||
content = f.read()
|
||||
\`\`\`
|
||||
```
|
||||
|
||||
### Framework Patterns
|
||||
|
||||
Focus on framework-specific conventions, common patterns, and anti-patterns.
|
||||
|
||||
**Examples:** `django-patterns`, `nextjs-patterns`, `springboot-patterns`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: django-patterns
|
||||
description: Django best practices for models, views, URLs, and templates.
|
||||
---
|
||||
|
||||
# Django Patterns
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building Django applications
|
||||
- Creating models and views
|
||||
- Django URL configuration
|
||||
```
|
||||
|
||||
### Workflow Skills
|
||||
|
||||
Define step-by-step processes for common development tasks.
|
||||
|
||||
**Examples:** `tdd-workflow`, `code-review-workflow`, `deployment-checklist`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: code-review-workflow
|
||||
description: Systematic code review process for quality and security.
|
||||
---
|
||||
|
||||
# Code Review Workflow
|
||||
|
||||
## Steps
|
||||
|
||||
1. **Understand Context** - Read PR description and linked issues
|
||||
2. **Check Tests** - Verify test coverage and quality
|
||||
3. **Review Logic** - Analyze implementation for correctness
|
||||
4. **Check Security** - Look for vulnerabilities
|
||||
5. **Verify Style** - Ensure code follows conventions
|
||||
```
|
||||
|
||||
### Domain Knowledge
|
||||
|
||||
Specialized knowledge for specific domains (security, performance, etc.).
|
||||
|
||||
**Examples:** `security-review`, `performance-optimization`, `api-design`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: api-design
|
||||
description: REST and GraphQL API design patterns, versioning, and best practices.
|
||||
---
|
||||
|
||||
# API Design Patterns
|
||||
|
||||
## RESTful Conventions
|
||||
|
||||
| Method | Endpoint | Purpose |
|
||||
|--------|----------|---------|
|
||||
| GET | /resources | List all |
|
||||
| GET | /resources/:id | Get one |
|
||||
| POST | /resources | Create |
|
||||
```
|
||||
|
||||
### Tool Integration
|
||||
|
||||
Guidance for using specific tools, libraries, or services.
|
||||
|
||||
**Examples:** `supabase-patterns`, `docker-patterns`, `mcp-server-patterns`
|
||||
|
||||
---
|
||||
|
||||
## Writing Effective Skill Content
|
||||
|
||||
### 1. Start with "When to Activate"
|
||||
|
||||
This section is **critical** for auto-activation. Be specific:
|
||||
|
||||
```markdown
|
||||
## When to Activate
|
||||
|
||||
- Creating new React components
|
||||
- Refactoring existing components
|
||||
- Debugging React state issues
|
||||
- Reviewing React code for best practices
|
||||
```
|
||||
|
||||
### 2. Use "Show, Don't Tell"
|
||||
|
||||
Bad:
|
||||
```markdown
|
||||
## Error Handling
|
||||
|
||||
Always handle errors properly in async functions.
|
||||
```
|
||||
|
||||
Good:
|
||||
```markdown
|
||||
## Error Handling
|
||||
|
||||
\`\`\`typescript
|
||||
async function fetchData(url: string) {
|
||||
try {
|
||||
const response = await fetch(url)
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(\`HTTP \${response.status}: \${response.statusText}\`)
|
||||
}
|
||||
|
||||
return await response.json()
|
||||
} catch (error) {
|
||||
console.error('Fetch failed:', error)
|
||||
throw new Error('Failed to fetch data')
|
||||
}
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
### Key Points
|
||||
|
||||
- Check \`response.ok\` before parsing
|
||||
- Log errors for debugging
|
||||
- Re-throw with user-friendly message
|
||||
```
|
||||
|
||||
### 3. Include Anti-Patterns
|
||||
|
||||
Show what NOT to do:
|
||||
|
||||
```markdown
|
||||
## Anti-Patterns
|
||||
|
||||
### ❌ Direct State Mutation
|
||||
|
||||
\`\`\`typescript
|
||||
// NEVER do this
|
||||
user.name = 'New Name'
|
||||
items.push(newItem)
|
||||
\`\`\`
|
||||
|
||||
### ✅ Immutable Updates
|
||||
|
||||
\`\`\`typescript
|
||||
// ALWAYS do this
|
||||
const updatedUser = { ...user, name: 'New Name' }
|
||||
const updatedItems = [...items, newItem]
|
||||
\`\`\`
|
||||
```
|
||||
|
||||
### 4. Provide Checklists
|
||||
|
||||
Checklists are actionable and easy to follow:
|
||||
|
||||
```markdown
|
||||
## Pre-Deployment Checklist
|
||||
|
||||
- [ ] All tests passing
|
||||
- [ ] No console.log in production code
|
||||
- [ ] Environment variables documented
|
||||
- [ ] Secrets not hardcoded
|
||||
- [ ] Error handling complete
|
||||
- [ ] Input validation in place
|
||||
```
|
||||
|
||||
### 5. Use Decision Trees
|
||||
|
||||
For complex decisions:
|
||||
|
||||
```markdown
|
||||
## Choosing the Right Approach
|
||||
|
||||
\`\`\`
|
||||
Need to fetch data?
|
||||
├── Single request → use fetch directly
|
||||
├── Multiple independent → Promise.all()
|
||||
├── Multiple dependent → await sequentially
|
||||
└── With caching → use SWR or React Query
|
||||
\`\`\`
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Best Practices
|
||||
|
||||
### DO
|
||||
|
||||
| Practice | Example |
|
||||
|----------|---------|
|
||||
| **Be specific** | "Use \`useCallback\` for event handlers passed to child components" |
|
||||
| **Show examples** | Include copy-pasteable code |
|
||||
| **Explain WHY** | "Immutability prevents unexpected side effects in React state" |
|
||||
| **Link related skills** | "See also: \`react-performance\`" |
|
||||
| **Keep focused** | One skill = one domain/concept |
|
||||
| **Use sections** | Clear headers for easy scanning |
|
||||
|
||||
### DON'T
|
||||
|
||||
| Practice | Why It's Bad |
|
||||
|----------|--------------|
|
||||
| **Be vague** | "Write good code" - not actionable |
|
||||
| **Long prose** | Hard to parse, better as code |
|
||||
| **Cover too much** | "Python, Django, and Flask patterns" - too broad |
|
||||
| **Skip examples** | Theory without practice is less useful |
|
||||
| **Ignore anti-patterns** | Learning what NOT to do is valuable |
|
||||
|
||||
### Content Guidelines
|
||||
|
||||
1. **Length**: 200-500 lines typical, 800 lines maximum
|
||||
2. **Code blocks**: Include language identifier
|
||||
3. **Headers**: Use `##` and `###` hierarchy
|
||||
4. **Lists**: Use `-` for unordered, `1.` for ordered
|
||||
5. **Tables**: For comparisons and references
|
||||
|
||||
---
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Pattern 1: Standards Skill
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: language-standards
|
||||
description: Coding standards and best practices for [language].
|
||||
---
|
||||
|
||||
# [Language] Coding Standards
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing [language] code
|
||||
- Code review
|
||||
- Setting up linting
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
| Element | Convention | Example |
|
||||
|---------|------------|---------|
|
||||
| Variables | camelCase | userName |
|
||||
| Constants | SCREAMING_SNAKE | MAX_RETRY |
|
||||
| Functions | camelCase | fetchUser |
|
||||
| Classes | PascalCase | UserService |
|
||||
|
||||
## Code Examples
|
||||
|
||||
[Include practical examples]
|
||||
|
||||
## Linting Setup
|
||||
|
||||
[Include configuration]
|
||||
|
||||
## Related Skills
|
||||
|
||||
- `language-testing`
|
||||
- `language-security`
|
||||
```
|
||||
|
||||
### Pattern 2: Workflow Skill
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: task-workflow
|
||||
description: Step-by-step workflow for [task].
|
||||
---
|
||||
|
||||
# [Task] Workflow
|
||||
|
||||
## When to Activate
|
||||
|
||||
- [Trigger 1]
|
||||
- [Trigger 2]
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Requirement 1]
|
||||
- [Requirement 2]
|
||||
|
||||
## Steps
|
||||
|
||||
### Step 1: [Name]
|
||||
|
||||
[Description]
|
||||
|
||||
\`\`\`bash
|
||||
[Commands]
|
||||
\`\`\`
|
||||
|
||||
### Step 2: [Name]
|
||||
|
||||
[Description]
|
||||
|
||||
## Verification
|
||||
|
||||
- [ ] [Check 1]
|
||||
- [ ] [Check 2]
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
| Problem | Solution |
|
||||
|---------|----------|
|
||||
| [Issue] | [Fix] |
|
||||
```
|
||||
|
||||
### Pattern 3: Reference Skill
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: api-reference
|
||||
description: Quick reference for [API/Library].
|
||||
---
|
||||
|
||||
# [API/Library] Reference
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Using [API/Library]
|
||||
- Looking up [API/Library] syntax
|
||||
|
||||
## Common Operations
|
||||
|
||||
### Operation 1
|
||||
|
||||
\`\`\`typescript
|
||||
// Basic usage
|
||||
\`\`\`
|
||||
|
||||
### Operation 2
|
||||
|
||||
\`\`\`typescript
|
||||
// Advanced usage
|
||||
\`\`\`
|
||||
|
||||
## Configuration
|
||||
|
||||
[Include config examples]
|
||||
|
||||
## Error Handling
|
||||
|
||||
[Include error patterns]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Your Skill
|
||||
|
||||
### Local Testing
|
||||
|
||||
1. **Copy to Claude Code skills directory**:
|
||||
```bash
|
||||
cp -r skills/your-skill-name ~/.claude/skills/
|
||||
```
|
||||
|
||||
2. **Test with Claude Code**:
|
||||
```
|
||||
You: "I need to [task that should trigger your skill]"
|
||||
|
||||
Claude should reference your skill's patterns.
|
||||
```
|
||||
|
||||
3. **Verify activation**:
|
||||
- Ask Claude to explain a concept from your skill
|
||||
- Check if it uses your examples and patterns
|
||||
- Ensure it follows your guidelines
|
||||
|
||||
### Validation Checklist
|
||||
|
||||
- [ ] **YAML frontmatter valid** - No syntax errors
|
||||
- [ ] **Name follows convention** - lowercase-with-hyphens
|
||||
- [ ] **Description is clear** - Tells when to use
|
||||
- [ ] **Examples work** - Code compiles and runs
|
||||
- [ ] **Links valid** - Related skills exist
|
||||
- [ ] **No sensitive data** - No API keys, tokens, paths
|
||||
|
||||
### Code Example Testing
|
||||
|
||||
Test all code examples:
|
||||
|
||||
```bash
|
||||
# From the repo root
|
||||
npx tsc --noEmit skills/your-skill-name/examples/*.ts
|
||||
|
||||
# Or from inside the skill directory
|
||||
npx tsc --noEmit examples/*.ts
|
||||
|
||||
# From the repo root
|
||||
python -m py_compile skills/your-skill-name/examples/*.py
|
||||
|
||||
# Or from inside the skill directory
|
||||
python -m py_compile examples/*.py
|
||||
|
||||
# From the repo root
|
||||
go build ./skills/your-skill-name/examples/...
|
||||
|
||||
# Or from inside the skill directory
|
||||
go build ./examples/...
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Submitting Your Skill
|
||||
|
||||
### 1. Fork and Clone
|
||||
|
||||
```bash
|
||||
gh repo fork affaan-m/everything-claude-code --clone
|
||||
cd everything-claude-code
|
||||
```
|
||||
|
||||
### 2. Create Branch
|
||||
|
||||
```bash
|
||||
git checkout -b feat/skill-your-skill-name
|
||||
```
|
||||
|
||||
### 3. Add Your Skill
|
||||
|
||||
```bash
|
||||
mkdir -p skills/your-skill-name
|
||||
# Create SKILL.md
|
||||
```
|
||||
|
||||
### 4. Validate
|
||||
|
||||
```bash
|
||||
# Check YAML frontmatter
|
||||
head -10 skills/your-skill-name/SKILL.md
|
||||
|
||||
# Verify structure
|
||||
ls -la skills/your-skill-name/
|
||||
|
||||
# Run tests if available
|
||||
npm test
|
||||
```
|
||||
|
||||
### 5. Commit and Push
|
||||
|
||||
```bash
|
||||
git add skills/your-skill-name/
|
||||
git commit -m "feat(skills): add your-skill-name skill"
|
||||
git push -u origin feat/skill-your-skill-name
|
||||
```
|
||||
|
||||
### 6. Create Pull Request
|
||||
|
||||
Use this PR template:
|
||||
|
||||
```markdown
|
||||
## Summary
|
||||
|
||||
Brief description of the skill and why it's valuable.
|
||||
|
||||
## Skill Type
|
||||
|
||||
- [ ] Language standards
|
||||
- [ ] Framework patterns
|
||||
- [ ] Workflow
|
||||
- [ ] Domain knowledge
|
||||
- [ ] Tool integration
|
||||
|
||||
## Testing
|
||||
|
||||
How I tested this skill locally.
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] YAML frontmatter valid
|
||||
- [ ] Code examples tested
|
||||
- [ ] Follows skill guidelines
|
||||
- [ ] No sensitive data
|
||||
- [ ] Clear activation triggers
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Examples Gallery
|
||||
|
||||
### Example 1: Language Standards
|
||||
|
||||
**File:** `skills/rust-patterns/SKILL.md`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: rust-patterns
|
||||
description: Rust idioms, ownership patterns, and best practices for safe, idiomatic code.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Rust Patterns
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Writing Rust code
|
||||
- Handling ownership and borrowing
|
||||
- Error handling with Result/Option
|
||||
- Implementing traits
|
||||
|
||||
## Ownership Patterns
|
||||
|
||||
### Borrowing Rules
|
||||
|
||||
\`\`\`rust
|
||||
// ✅ CORRECT: Borrow when you don't need ownership
|
||||
fn process_data(data: &str) -> usize {
|
||||
data.len()
|
||||
}
|
||||
|
||||
// ✅ CORRECT: Take ownership when you need to modify or consume
|
||||
fn consume_data(data: Vec<u8>) -> String {
|
||||
String::from_utf8(data).unwrap()
|
||||
}
|
||||
\`\`\`
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Result Pattern
|
||||
|
||||
\`\`\`rust
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AppError {
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
#[error("Parse error: {0}")]
|
||||
Parse(#[from] std::num::ParseIntError),
|
||||
}
|
||||
|
||||
pub type AppResult<T> = Result<T, AppError>;
|
||||
\`\`\`
|
||||
|
||||
## Related Skills
|
||||
|
||||
- `rust-testing`
|
||||
- `rust-security`
|
||||
```
|
||||
|
||||
### Example 2: Framework Patterns
|
||||
|
||||
**File:** `skills/fastapi-patterns/SKILL.md`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: fastapi-patterns
|
||||
description: FastAPI patterns for routing, dependency injection, validation, and async operations.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# FastAPI Patterns
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building FastAPI applications
|
||||
- Creating API endpoints
|
||||
- Implementing dependency injection
|
||||
- Handling async database operations
|
||||
|
||||
## Project Structure
|
||||
|
||||
\`\`\`
|
||||
app/
|
||||
├── main.py # FastAPI app entry point
|
||||
├── routers/ # Route handlers
|
||||
│ ├── users.py
|
||||
│ └── items.py
|
||||
├── models/ # Pydantic models
|
||||
│ ├── user.py
|
||||
│ └── item.py
|
||||
├── services/ # Business logic
|
||||
│ └── user_service.py
|
||||
└── dependencies.py # Shared dependencies
|
||||
\`\`\`
|
||||
|
||||
## Dependency Injection
|
||||
|
||||
\`\`\`python
|
||||
from fastapi import Depends
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
async def get_db() -> AsyncSession:
|
||||
async with AsyncSessionLocal() as session:
|
||||
yield session
|
||||
|
||||
@router.get("/users/{user_id}")
|
||||
async def get_user(
|
||||
user_id: int,
|
||||
db: AsyncSession = Depends(get_db)
|
||||
):
|
||||
# Use db session
|
||||
pass
|
||||
\`\`\`
|
||||
|
||||
## Related Skills
|
||||
|
||||
- `python-patterns`
|
||||
- `pydantic-validation`
|
||||
```
|
||||
|
||||
### Example 3: Workflow Skill
|
||||
|
||||
**File:** `skills/refactoring-workflow/SKILL.md`
|
||||
|
||||
```markdown
|
||||
---
|
||||
name: refactoring-workflow
|
||||
description: Systematic refactoring workflow for improving code quality without changing behavior.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Refactoring Workflow
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Improving code structure
|
||||
- Reducing technical debt
|
||||
- Simplifying complex code
|
||||
- Extracting reusable components
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- All tests passing
|
||||
- Git working directory clean
|
||||
- Feature branch created
|
||||
|
||||
## Workflow Steps
|
||||
|
||||
### Step 1: Identify Refactoring Target
|
||||
|
||||
- Look for code smells (long methods, duplicate code, large classes)
|
||||
- Check test coverage for target area
|
||||
- Document current behavior
|
||||
|
||||
### Step 2: Ensure Tests Exist
|
||||
|
||||
\`\`\`bash
|
||||
# Run tests to verify current behavior
|
||||
npm test
|
||||
|
||||
# Check coverage for target files
|
||||
npm run test:coverage
|
||||
\`\`\`
|
||||
|
||||
### Step 3: Make Small Changes
|
||||
|
||||
- One refactoring at a time
|
||||
- Run tests after each change
|
||||
- Commit frequently
|
||||
|
||||
### Step 4: Verify Behavior Unchanged
|
||||
|
||||
\`\`\`bash
|
||||
# Run full test suite
|
||||
npm test
|
||||
|
||||
# Run E2E tests
|
||||
npm run test:e2e
|
||||
\`\`\`
|
||||
|
||||
## Common Refactorings
|
||||
|
||||
| Smell | Refactoring |
|
||||
|-------|-------------|
|
||||
| Long method | Extract method |
|
||||
| Duplicate code | Extract to shared function |
|
||||
| Large class | Extract class |
|
||||
| Long parameter list | Introduce parameter object |
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] Tests exist for target code
|
||||
- [ ] Made small, focused changes
|
||||
- [ ] Tests pass after each change
|
||||
- [ ] Behavior unchanged
|
||||
- [ ] Committed with clear message
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- [CONTRIBUTING.md](../CONTRIBUTING.md) - General contribution guidelines
|
||||
- [project-guidelines-example](../skills/project-guidelines-example/SKILL.md) - Project-specific skill template
|
||||
- [coding-standards](../skills/coding-standards/SKILL.md) - Example of standards skill
|
||||
- [tdd-workflow](../skills/tdd-workflow/SKILL.md) - Example of workflow skill
|
||||
- [security-review](../skills/security-review/SKILL.md) - Example of domain knowledge skill
|
||||
|
||||
---
|
||||
|
||||
**Remember**: A good skill is focused, actionable, and immediately useful. Write skills you'd want to use yourself.
|
||||
@@ -409,7 +409,7 @@ claude --version
|
||||
Claude Code v2.1+は、インストール済みプラグインの`hooks/hooks.json`(規約)を自動読み込みします。`plugin.json`で明示的に宣言するとエラーが発生します:
|
||||
|
||||
```
|
||||
Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded file
|
||||
Duplicate hook file detected: ./hooks/hooks.json is already resolved to a loaded file
|
||||
```
|
||||
|
||||
**背景:** これは本リポジトリで複数の修正/リバート循環を引き起こしました([#29](https://github.com/affaan-m/everything-claude-code/issues/29), [#52](https://github.com/affaan-m/everything-claude-code/issues/52), [#103](https://github.com/affaan-m/everything-claude-code/issues/103))。Claude Codeバージョン間で動作が変わったため混乱がありました。今後の再発を防ぐため回帰テストがあります。
|
||||
|
||||
@@ -77,9 +77,9 @@ model: opus
|
||||
各問題について:
|
||||
```
|
||||
[CRITICAL] ハードコードされたAPIキー
|
||||
File: src/api/client.ts:42
|
||||
Issue: APIキーがソースコードに公開されている
|
||||
Fix: 環境変数に移動
|
||||
ファイル: src/api/client.ts:42
|
||||
問題: APIキーがソースコードに公開されている
|
||||
修正: 環境変数に移動
|
||||
|
||||
const apiKey = "sk-abc123"; // ❌ Bad
|
||||
const apiKey = process.env.API_KEY; // ✓ Good
|
||||
|
||||
@@ -341,20 +341,20 @@ x = x // 無意味な代入を削除
|
||||
各修正試行後:
|
||||
|
||||
```text
|
||||
[FIXED] internal/handler/user.go:42
|
||||
Error: undefined: UserService
|
||||
Fix: Added import "project/internal/service"
|
||||
[修正済] internal/handler/user.go:42
|
||||
エラー: undefined: UserService
|
||||
修正: import を追加 "project/internal/service"
|
||||
|
||||
Remaining errors: 3
|
||||
残りのエラー: 3
|
||||
```
|
||||
|
||||
最終サマリー:
|
||||
```text
|
||||
Build Status: SUCCESS/FAILED
|
||||
Errors Fixed: N
|
||||
Vet Warnings Fixed: N
|
||||
Files Modified: list
|
||||
Remaining Issues: list (if any)
|
||||
ビルドステータス: SUCCESS/FAILED
|
||||
修正済みエラー: N
|
||||
Vet 警告修正済み: N
|
||||
変更ファイル: list
|
||||
残りの問題: list (ある場合)
|
||||
```
|
||||
|
||||
## 重要な注意事項
|
||||
|
||||
@@ -228,9 +228,9 @@ model: opus
|
||||
各問題について:
|
||||
```text
|
||||
[CRITICAL] SQLインジェクション脆弱性
|
||||
File: internal/repository/user.go:42
|
||||
Issue: ユーザー入力がSQLクエリに直接連結されている
|
||||
Fix: パラメータ化クエリを使用
|
||||
ファイル: internal/repository/user.go:42
|
||||
問題: ユーザー入力がSQLクエリに直接連結されている
|
||||
修正: パラメータ化クエリを使用
|
||||
|
||||
query := "SELECT * FROM users WHERE id = " + userID // Bad
|
||||
query := "SELECT * FROM users WHERE id = $1" // Good
|
||||
|
||||
@@ -399,9 +399,9 @@ model: opus
|
||||
各問題について:
|
||||
```text
|
||||
[CRITICAL] SQLインジェクション脆弱性
|
||||
File: app/routes/user.py:42
|
||||
Issue: ユーザー入力がSQLクエリに直接補間されている
|
||||
Fix: パラメータ化クエリを使用
|
||||
ファイル: app/routes/user.py:42
|
||||
問題: ユーザー入力がSQLクエリに直接補間されている
|
||||
修正: パラメータ化クエリを使用
|
||||
|
||||
query = f"SELECT * FROM users WHERE id = {user_id}" # Bad
|
||||
query = "SELECT * FROM users WHERE id = %s" # Good
|
||||
|
||||
@@ -35,12 +35,12 @@ echo "$(date +%Y-%m-%d-%H:%M) | $CHECKPOINT_NAME | $(git rev-parse --short HEAD)
|
||||
3. レポート:
|
||||
|
||||
```
|
||||
CHECKPOINT COMPARISON: $NAME
|
||||
チェックポイント比較: $NAME
|
||||
============================
|
||||
Files changed: X
|
||||
Tests: +Y passed / -Z failed
|
||||
Coverage: +X% / -Y%
|
||||
Build: [PASS/FAIL]
|
||||
変更されたファイル: X
|
||||
テスト: +Y 合格 / -Z 失敗
|
||||
カバレッジ: +X% / -Y%
|
||||
ビルド: [PASS/FAIL]
|
||||
```
|
||||
|
||||
## チェックポイント一覧表示
|
||||
@@ -57,13 +57,13 @@ Build: [PASS/FAIL]
|
||||
一般的なチェックポイントフロー:
|
||||
|
||||
```
|
||||
[Start] --> /checkpoint create "feature-start"
|
||||
[開始] --> /checkpoint create "feature-start"
|
||||
|
|
||||
[Implement] --> /checkpoint create "core-done"
|
||||
[実装] --> /checkpoint create "core-done"
|
||||
|
|
||||
[Test] --> /checkpoint verify "core-done"
|
||||
[テスト] --> /checkpoint verify "core-done"
|
||||
|
|
||||
[Refactor] --> /checkpoint create "refactor-done"
|
||||
[リファクタリング] --> /checkpoint create "refactor-done"
|
||||
|
|
||||
[PR] --> /checkpoint verify "feature-start"
|
||||
```
|
||||
|
||||
@@ -38,24 +38,24 @@ e2e-runner エージェントは:
|
||||
## 使用例
|
||||
|
||||
````
|
||||
User: /e2e Test the market search and view flow
|
||||
User: /e2e マーケット検索と表示フローをテスト
|
||||
|
||||
Agent (e2e-runner):
|
||||
# E2E Test Generation: Market Search and View Flow
|
||||
# E2Eテスト生成: マーケット検索と表示フロー
|
||||
|
||||
## Test Scenario Identified
|
||||
## 特定されたテストシナリオ
|
||||
|
||||
**User Journey:** Market Search → View Results → Click Market → View Details
|
||||
**ユーザージャーニー:** マーケット検索 → 結果表示 → マーケットクリック → 詳細表示
|
||||
|
||||
**Test Coverage:**
|
||||
1. Navigate to markets page
|
||||
2. Perform semantic search
|
||||
3. Verify search results
|
||||
4. Click on first result
|
||||
5. Verify market details page loads
|
||||
6. Verify chart renders
|
||||
**テストカバレッジ:**
|
||||
1. マーケットページに遷移
|
||||
2. セマンティック検索を実行
|
||||
3. 検索結果を検証
|
||||
4. 最初の結果をクリック
|
||||
5. マーケット詳細ページの読み込みを検証
|
||||
6. チャートの描画を検証
|
||||
|
||||
## Generated Test Code
|
||||
## 生成されたテストコード
|
||||
|
||||
```typescript
|
||||
// tests/e2e/markets/search-and-view.spec.ts
|
||||
@@ -160,7 +160,7 @@ test.describe('Market Search and View Flow', () => {
|
||||
## テスト実行
|
||||
|
||||
```bash
|
||||
# Run the generated test
|
||||
# 生成されたテストを実行
|
||||
npx playwright test tests/e2e/markets/search-and-view.spec.ts
|
||||
|
||||
Running 3 tests using 3 workers
|
||||
@@ -181,54 +181,54 @@ Artifacts generated:
|
||||
|
||||
```
|
||||
╔══════════════════════════════════════════════════════════════╗
|
||||
║ E2E Test Results ║
|
||||
║ E2Eテスト結果 ║
|
||||
╠══════════════════════════════════════════════════════════════╣
|
||||
║ Status: ✅ ALL TESTS PASSED ║
|
||||
║ Total: 3 tests ║
|
||||
║ Passed: 3 (100%) ║
|
||||
║ Failed: 0 ║
|
||||
║ Flaky: 0 ║
|
||||
║ Duration: 9.1s ║
|
||||
║ ステータス: ✅ 全テスト合格 ║
|
||||
║ 合計: 3テスト ║
|
||||
║ 合格: 3 (100%) ║
|
||||
║ 失敗: 0 ║
|
||||
║ 不安定: 0 ║
|
||||
║ 所要時間: 9.1s ║
|
||||
╚══════════════════════════════════════════════════════════════╝
|
||||
|
||||
Artifacts:
|
||||
📸 Screenshots: 2 files
|
||||
📹 Videos: 0 files (only on failure)
|
||||
🔍 Traces: 0 files (only on failure)
|
||||
📊 HTML Report: playwright-report/index.html
|
||||
アーティファクト:
|
||||
📸 スクリーンショット: 2ファイル
|
||||
📹 ビデオ: 0ファイル (失敗時のみ)
|
||||
🔍 トレース: 0ファイル (失敗時のみ)
|
||||
📊 HTMLレポート: playwright-report/index.html
|
||||
|
||||
View report: npx playwright show-report
|
||||
レポート表示: npx playwright show-report
|
||||
```
|
||||
|
||||
✅ E2E テストスイートは CI/CD 統合の準備ができました!
|
||||
|
||||
````
|
||||
|
||||
## Test Artifacts
|
||||
## テストアーティファクト
|
||||
|
||||
When tests run, the following artifacts are captured:
|
||||
テスト実行時、以下のアーティファクトがキャプチャされます:
|
||||
|
||||
**On All Tests:**
|
||||
- HTML Report with timeline and results
|
||||
- JUnit XML for CI integration
|
||||
**全テスト共通:**
|
||||
- タイムラインと結果を含むHTMLレポート
|
||||
- CI統合用のJUnit XML
|
||||
|
||||
**On Failure Only:**
|
||||
- Screenshot of the failing state
|
||||
- Video recording of the test
|
||||
- Trace file for debugging (step-by-step replay)
|
||||
- Network logs
|
||||
- Console logs
|
||||
**失敗時のみ:**
|
||||
- 失敗状態のスクリーンショット
|
||||
- テストのビデオ録画
|
||||
- デバッグ用トレースファイル (ステップバイステップ再生)
|
||||
- ネットワークログ
|
||||
- コンソールログ
|
||||
|
||||
## Viewing Artifacts
|
||||
## アーティファクトの確認
|
||||
|
||||
```bash
|
||||
# View HTML report in browser
|
||||
# ブラウザでHTMLレポートを表示
|
||||
npx playwright show-report
|
||||
|
||||
# View specific trace file
|
||||
# 特定のトレースファイルを表示
|
||||
npx playwright show-trace artifacts/trace-abc123.zip
|
||||
|
||||
# Screenshots are saved in artifacts/ directory
|
||||
# スクリーンショットはartifacts/ディレクトリに保存
|
||||
open artifacts/search-results.png
|
||||
````
|
||||
|
||||
@@ -239,18 +239,18 @@ open artifacts/search-results.png
|
||||
```
|
||||
⚠️ FLAKY TEST DETECTED: tests/e2e/markets/trade.spec.ts
|
||||
|
||||
Test passed 7/10 runs (70% pass rate)
|
||||
テストは10回中7回合格 (合格率70%)
|
||||
|
||||
Common failure:
|
||||
よくある失敗:
|
||||
"Timeout waiting for element '[data-testid="confirm-btn"]'"
|
||||
|
||||
Recommended fixes:
|
||||
1. Add explicit wait: await page.waitForSelector('[data-testid="confirm-btn"]')
|
||||
2. Increase timeout: { timeout: 10000 }
|
||||
3. Check for race conditions in component
|
||||
4. Verify element is not hidden by animation
|
||||
推奨修正:
|
||||
1. 明示的な待機を追加: await page.waitForSelector('[data-testid="confirm-btn"]')
|
||||
2. タイムアウトを増加: { timeout: 10000 }
|
||||
3. コンポーネントの競合状態を確認
|
||||
4. 要素がアニメーションで隠れていないか確認
|
||||
|
||||
Quarantine recommendation: Mark as test.fixme() until fixed
|
||||
隔離推奨: 修正されるまでtest.fixme()としてマーク
|
||||
```
|
||||
|
||||
## ブラウザ設定
|
||||
@@ -350,21 +350,21 @@ PMX の場合、以下の E2E テストを優先:
|
||||
## 快速命令
|
||||
|
||||
```bash
|
||||
# Run all E2E tests
|
||||
# 全E2Eテストを実行
|
||||
npx playwright test
|
||||
|
||||
# Run specific test file
|
||||
# 特定のテストファイルを実行
|
||||
npx playwright test tests/e2e/markets/search.spec.ts
|
||||
|
||||
# Run in headed mode (see browser)
|
||||
# ヘッドモードで実行 (ブラウザ表示)
|
||||
npx playwright test --headed
|
||||
|
||||
# Debug test
|
||||
# テストをデバッグ
|
||||
npx playwright test --debug
|
||||
|
||||
# Generate test code
|
||||
# テストコードを生成
|
||||
npx playwright codegen http://localhost:3000
|
||||
|
||||
# View report
|
||||
# レポートを表示
|
||||
npx playwright show-report
|
||||
```
|
||||
|
||||
@@ -92,36 +92,36 @@ instinctsが分離の恩恵を受ける複雑な複数ステップのプロセ
|
||||
## 出力フォーマット
|
||||
|
||||
```
|
||||
🧬 Evolve Analysis
|
||||
🧬 進化分析
|
||||
==================
|
||||
|
||||
進化の準備ができた3つのクラスターを発見:
|
||||
|
||||
## クラスター1: データベースマイグレーションワークフロー
|
||||
Instincts: new-table-migration, update-schema, regenerate-types
|
||||
Type: Command
|
||||
Confidence: 85%(12件の観測に基づく)
|
||||
タイプ: Command
|
||||
信頼度: 85%(12件の観測に基づく)
|
||||
|
||||
作成: /new-tableコマンド
|
||||
Files:
|
||||
ファイル:
|
||||
- ~/.claude/homunculus/evolved/commands/new-table.md
|
||||
|
||||
## クラスター2: 関数型コードスタイル
|
||||
Instincts: prefer-functional, use-immutable, avoid-classes, pure-functions
|
||||
Type: Skill
|
||||
Confidence: 78%(8件の観測に基づく)
|
||||
タイプ: Skill
|
||||
信頼度: 78%(8件の観測に基づく)
|
||||
|
||||
作成: functional-patternsスキル
|
||||
Files:
|
||||
ファイル:
|
||||
- ~/.claude/homunculus/evolved/skills/functional-patterns.md
|
||||
|
||||
## クラスター3: デバッグプロセス
|
||||
Instincts: debug-check-logs, debug-isolate, debug-reproduce, debug-verify
|
||||
Type: Agent
|
||||
Confidence: 72%(6件の観測に基づく)
|
||||
タイプ: Agent
|
||||
信頼度: 72%(6件の観測に基づく)
|
||||
|
||||
作成: debuggerエージェント
|
||||
Files:
|
||||
ファイル:
|
||||
- ~/.claude/homunculus/evolved/agents/debugger.md
|
||||
|
||||
---
|
||||
|
||||
@@ -62,9 +62,9 @@ internal/handler/api.go:58:2: missing return at end of function
|
||||
|
||||
## 修正1: 未定義の識別子
|
||||
|
||||
File: internal/service/user.go:25
|
||||
Error: undefined: UserRepository
|
||||
Cause: インポート欠落
|
||||
ファイル: internal/service/user.go:25
|
||||
エラー: undefined: UserRepository
|
||||
原因: インポート欠落
|
||||
|
||||
```go
|
||||
// インポートを追加
|
||||
@@ -83,8 +83,8 @@ $ go build ./...
|
||||
|
||||
## 修正2: 型の不一致
|
||||
|
||||
File: internal/handler/api.go:42
|
||||
Error: cannot use x (type string) as type int
|
||||
ファイル: internal/handler/api.go:42
|
||||
エラー: cannot use x (type string) as type int
|
||||
|
||||
```go
|
||||
// 変更前
|
||||
@@ -101,8 +101,8 @@ $ go build ./...
|
||||
|
||||
## 修正3: 戻り値の欠落
|
||||
|
||||
File: internal/handler/api.go:58
|
||||
Error: missing return at end of function
|
||||
ファイル: internal/handler/api.go:58
|
||||
エラー: missing return at end of function
|
||||
|
||||
```go
|
||||
func GetUser(id string) (*User, error) {
|
||||
|
||||
@@ -85,8 +85,8 @@ Agent:
|
||||
## 発見された問題
|
||||
|
||||
[CRITICAL] 競合状態
|
||||
File: internal/service/auth.go:45
|
||||
Issue: 同期化なしで共有マップにアクセス
|
||||
ファイル: internal/service/auth.go:45
|
||||
問題: 同期化なしで共有マップにアクセス
|
||||
```go
|
||||
var cache = map[string]*Session{} // 並行アクセス!
|
||||
|
||||
@@ -94,7 +94,7 @@ func GetSession(id string) *Session {
|
||||
return cache[id] // 競合状態
|
||||
}
|
||||
```
|
||||
Fix: sync.RWMutexまたはsync.Mapを使用
|
||||
修正: sync.RWMutexまたはsync.Mapを使用
|
||||
```go
|
||||
var (
|
||||
cache = map[string]*Session{}
|
||||
@@ -109,12 +109,12 @@ func GetSession(id string) *Session {
|
||||
```
|
||||
|
||||
[HIGH] エラーコンテキストの欠落
|
||||
File: internal/handler/user.go:28
|
||||
Issue: コンテキストなしでエラーを返す
|
||||
ファイル: internal/handler/user.go:28
|
||||
問題: コンテキストなしでエラーを返す
|
||||
```go
|
||||
return err // コンテキストなし
|
||||
```
|
||||
Fix: コンテキストでラップ
|
||||
修正: コンテキストでラップ
|
||||
```go
|
||||
return fmt.Errorf("get user %s: %w", userID, err)
|
||||
```
|
||||
|
||||
@@ -45,40 +45,40 @@ python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py import <
|
||||
## インポートプロセス
|
||||
|
||||
```
|
||||
📥 Importing instincts from: team-instincts.yaml
|
||||
📥 instinctsをインポート中: team-instincts.yaml
|
||||
================================================
|
||||
|
||||
Found 12 instincts to import.
|
||||
12件のinstinctsが見つかりました。
|
||||
|
||||
Analyzing conflicts...
|
||||
競合を分析中...
|
||||
|
||||
## New Instincts (8)
|
||||
These will be added:
|
||||
## 新規instincts (8)
|
||||
以下が追加されます:
|
||||
✓ use-zod-validation (confidence: 0.7)
|
||||
✓ prefer-named-exports (confidence: 0.65)
|
||||
✓ test-async-functions (confidence: 0.8)
|
||||
...
|
||||
|
||||
## Duplicate Instincts (3)
|
||||
Already have similar instincts:
|
||||
## 重複instincts (3)
|
||||
類似のinstinctsが既に存在:
|
||||
⚠️ prefer-functional-style
|
||||
Local: 0.8 confidence, 12 observations
|
||||
Import: 0.7 confidence
|
||||
→ Keep local (higher confidence)
|
||||
ローカル: 信頼度0.8, 12回の観測
|
||||
インポート: 信頼度0.7
|
||||
→ ローカルを保持 (信頼度が高い)
|
||||
|
||||
⚠️ test-first-workflow
|
||||
Local: 0.75 confidence
|
||||
Import: 0.9 confidence
|
||||
→ Update to import (higher confidence)
|
||||
ローカル: 信頼度0.75
|
||||
インポート: 信頼度0.9
|
||||
→ インポートに更新 (信頼度が高い)
|
||||
|
||||
## Conflicting Instincts (1)
|
||||
These contradict local instincts:
|
||||
## 競合instincts (1)
|
||||
ローカルのinstinctsと矛盾:
|
||||
❌ use-classes-for-services
|
||||
Conflicts with: avoid-classes
|
||||
→ Skip (requires manual resolution)
|
||||
競合: avoid-classes
|
||||
→ スキップ (手動解決が必要)
|
||||
|
||||
---
|
||||
Import 8 new, update 1, skip 3?
|
||||
8件を新規追加、1件を更新、3件をスキップしますか?
|
||||
```
|
||||
|
||||
## マージ戦略
|
||||
@@ -130,13 +130,13 @@ Skill Creatorからインポートする場合:
|
||||
|
||||
インポート後:
|
||||
```
|
||||
✅ Import complete!
|
||||
✅ インポート完了!
|
||||
|
||||
Added: 8 instincts
|
||||
Updated: 1 instinct
|
||||
Skipped: 3 instincts (2 duplicates, 1 conflict)
|
||||
追加: 8件のinstincts
|
||||
更新: 1件のinstinct
|
||||
スキップ: 3件のinstincts (2件の重複, 1件の競合)
|
||||
|
||||
New instincts saved to: ~/.claude/homunculus/instincts/inherited/
|
||||
新規instinctsの保存先: ~/.claude/homunculus/instincts/inherited/
|
||||
|
||||
Run /instinct-status to see all instincts.
|
||||
/instinct-statusを実行してすべてのinstinctsを確認できます。
|
||||
```
|
||||
|
||||
@@ -39,42 +39,42 @@ python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py status
|
||||
## 出力形式
|
||||
|
||||
```
|
||||
📊 Instinct Status
|
||||
📊 instinctステータス
|
||||
==================
|
||||
|
||||
## Code Style (4 instincts)
|
||||
## コードスタイル (4 instincts)
|
||||
|
||||
### prefer-functional-style
|
||||
Trigger: when writing new functions
|
||||
Action: Use functional patterns over classes
|
||||
Confidence: ████████░░ 80%
|
||||
Source: session-observation | Last updated: 2025-01-22
|
||||
トリガー: 新しい関数を書くとき
|
||||
アクション: クラスより関数型パターンを使用
|
||||
信頼度: ████████░░ 80%
|
||||
ソース: session-observation | 最終更新: 2025-01-22
|
||||
|
||||
### use-path-aliases
|
||||
Trigger: when importing modules
|
||||
Action: Use @/ path aliases instead of relative imports
|
||||
Confidence: ██████░░░░ 60%
|
||||
Source: repo-analysis (github.com/acme/webapp)
|
||||
トリガー: モジュールをインポートするとき
|
||||
アクション: 相対インポートの代わりに@/パスエイリアスを使用
|
||||
信頼度: ██████░░░░ 60%
|
||||
ソース: repo-analysis (github.com/acme/webapp)
|
||||
|
||||
## Testing (2 instincts)
|
||||
## テスト (2 instincts)
|
||||
|
||||
### test-first-workflow
|
||||
Trigger: when adding new functionality
|
||||
Action: Write test first, then implementation
|
||||
Confidence: █████████░ 90%
|
||||
Source: session-observation
|
||||
トリガー: 新しい機能を追加するとき
|
||||
アクション: テストを先に書き、次に実装
|
||||
信頼度: █████████░ 90%
|
||||
ソース: session-observation
|
||||
|
||||
## Workflow (3 instincts)
|
||||
## ワークフロー (3 instincts)
|
||||
|
||||
### grep-before-edit
|
||||
Trigger: when modifying code
|
||||
Action: Search with Grep, confirm with Read, then Edit
|
||||
Confidence: ███████░░░ 70%
|
||||
Source: session-observation
|
||||
トリガー: コードを変更するとき
|
||||
アクション: Grepで検索、Readで確認、次にEdit
|
||||
信頼度: ███████░░░ 70%
|
||||
ソース: session-observation
|
||||
|
||||
---
|
||||
Total: 9 instincts (4 personal, 5 inherited)
|
||||
Observer: Running (last analysis: 5 min ago)
|
||||
合計: 9 instincts (4個人, 5継承)
|
||||
オブザーバー: 実行中 (最終分析: 5分前)
|
||||
```
|
||||
|
||||
## フラグ
|
||||
|
||||
@@ -99,36 +99,36 @@ security-reviewer -> code-reviewer -> architect
|
||||
## 最終レポート形式
|
||||
|
||||
```
|
||||
ORCHESTRATION REPORT
|
||||
オーケストレーションレポート
|
||||
====================
|
||||
Workflow: feature
|
||||
Task: Add user authentication
|
||||
Agents: planner -> tdd-guide -> code-reviewer -> security-reviewer
|
||||
ワークフロー: feature
|
||||
タスク: ユーザー認証の追加
|
||||
エージェント: planner -> tdd-guide -> code-reviewer -> security-reviewer
|
||||
|
||||
SUMMARY
|
||||
サマリー
|
||||
-------
|
||||
[1段落の要約]
|
||||
|
||||
AGENT OUTPUTS
|
||||
エージェント出力
|
||||
-------------
|
||||
Planner: [要約]
|
||||
TDD Guide: [要約]
|
||||
Code Reviewer: [要約]
|
||||
Security Reviewer: [要約]
|
||||
|
||||
FILES CHANGED
|
||||
変更ファイル
|
||||
-------------
|
||||
[変更されたすべてのファイルをリスト]
|
||||
|
||||
TEST RESULTS
|
||||
テスト結果
|
||||
------------
|
||||
[テスト合格/不合格の要約]
|
||||
|
||||
SECURITY STATUS
|
||||
セキュリティステータス
|
||||
---------------
|
||||
[セキュリティの発見事項]
|
||||
|
||||
RECOMMENDATION
|
||||
推奨事項
|
||||
--------------
|
||||
[リリース可 / 要修正 / ブロック中]
|
||||
```
|
||||
|
||||
@@ -95,26 +95,26 @@ Agent:
|
||||
## 発見された問題
|
||||
|
||||
[CRITICAL] SQLインジェクション脆弱性
|
||||
File: app/routes/user.py:42
|
||||
Issue: ユーザー入力が直接SQLクエリに挿入されている
|
||||
ファイル: app/routes/user.py:42
|
||||
問題: ユーザー入力が直接SQLクエリに挿入されている
|
||||
```python
|
||||
query = f"SELECT * FROM users WHERE id = {user_id}" # 悪い
|
||||
```
|
||||
Fix: パラメータ化クエリを使用
|
||||
修正: パラメータ化クエリを使用
|
||||
```python
|
||||
query = "SELECT * FROM users WHERE id = %s" # 良い
|
||||
cursor.execute(query, (user_id,))
|
||||
```
|
||||
|
||||
[HIGH] 可変デフォルト引数
|
||||
File: app/services/auth.py:18
|
||||
Issue: 可変デフォルト引数が共有状態を引き起こす
|
||||
ファイル: app/services/auth.py:18
|
||||
問題: 可変デフォルト引数が共有状態を引き起こす
|
||||
```python
|
||||
def process_items(items=[]): # 悪い
|
||||
items.append("new")
|
||||
return items
|
||||
```
|
||||
Fix: デフォルトにNoneを使用
|
||||
修正: デフォルトにNoneを使用
|
||||
```python
|
||||
def process_items(items=None): # 良い
|
||||
if items is None:
|
||||
@@ -124,27 +124,27 @@ def process_items(items=None): # 良い
|
||||
```
|
||||
|
||||
[MEDIUM] 型ヒントの欠落
|
||||
File: app/services/auth.py:25
|
||||
Issue: 型アノテーションのない公開関数
|
||||
ファイル: app/services/auth.py:25
|
||||
問題: 型アノテーションのない公開関数
|
||||
```python
|
||||
def get_user(user_id): # 悪い
|
||||
return db.find(user_id)
|
||||
```
|
||||
Fix: 型ヒントを追加
|
||||
修正: 型ヒントを追加
|
||||
```python
|
||||
def get_user(user_id: str) -> Optional[User]: # 良い
|
||||
return db.find(user_id)
|
||||
```
|
||||
|
||||
[MEDIUM] コンテキストマネージャーを使用していない
|
||||
File: app/routes/user.py:55
|
||||
Issue: 例外時にファイルがクローズされない
|
||||
ファイル: app/routes/user.py:55
|
||||
問題: 例外時にファイルがクローズされない
|
||||
```python
|
||||
f = open("config.json") # 悪い
|
||||
data = f.read()
|
||||
f.close()
|
||||
```
|
||||
Fix: コンテキストマネージャーを使用
|
||||
修正: コンテキストマネージャーを使用
|
||||
```python
|
||||
with open("config.json") as f: # 良い
|
||||
data = f.read()
|
||||
|
||||
@@ -36,16 +36,16 @@
|
||||
簡潔な検証レポートを生成します:
|
||||
|
||||
```
|
||||
VERIFICATION: [PASS/FAIL]
|
||||
検証結果: [PASS/FAIL]
|
||||
|
||||
Build: [OK/FAIL]
|
||||
Types: [OK/X errors]
|
||||
Lint: [OK/X issues]
|
||||
Tests: [X/Y passed, Z% coverage]
|
||||
Secrets: [OK/X found]
|
||||
Logs: [OK/X console.logs]
|
||||
ビルド: [OK/FAIL]
|
||||
型: [OK/Xエラー]
|
||||
Lint: [OK/X件の問題]
|
||||
テスト: [X/Y合格, Z%カバレッジ]
|
||||
シークレット: [OK/X件発見]
|
||||
ログ: [OK/X件のconsole.log]
|
||||
|
||||
Ready for PR: [YES/NO]
|
||||
PR準備完了: [YES/NO]
|
||||
```
|
||||
|
||||
重大な問題がある場合は、修正案とともにリストアップします。
|
||||
|
||||
@@ -43,7 +43,7 @@
|
||||
|
||||
```
|
||||
src/
|
||||
|-- app/ # Next.js app router
|
||||
|-- app/ # Next.js App Router
|
||||
|-- components/ # 再利用可能なUIコンポーネント
|
||||
|-- hooks/ # カスタムReactフック
|
||||
|-- lib/ # ユーティリティライブラリ
|
||||
|
||||
@@ -234,14 +234,14 @@ setCount(count + 1) // Can be stale in async scenarios
|
||||
### REST API規約
|
||||
|
||||
```
|
||||
GET /api/markets # List all markets
|
||||
GET /api/markets/:id # Get specific market
|
||||
POST /api/markets # Create new market
|
||||
PUT /api/markets/:id # Update market (full)
|
||||
PATCH /api/markets/:id # Update market (partial)
|
||||
DELETE /api/markets/:id # Delete market
|
||||
GET /api/markets # すべてのマーケットを一覧
|
||||
GET /api/markets/:id # 特定のマーケットを取得
|
||||
POST /api/markets # 新しいマーケットを作成
|
||||
PUT /api/markets/:id # マーケットを更新(全体)
|
||||
PATCH /api/markets/:id # マーケットを更新(部分)
|
||||
DELETE /api/markets/:id # マーケットを削除
|
||||
|
||||
# Query parameters for filtering
|
||||
# フィルタリング用クエリパラメータ
|
||||
GET /api/markets?status=active&limit=10&offset=0
|
||||
```
|
||||
|
||||
@@ -312,29 +312,29 @@ export async function POST(request: Request) {
|
||||
```
|
||||
src/
|
||||
├── app/ # Next.js App Router
|
||||
│ ├── api/ # API routes
|
||||
│ ├── markets/ # Market pages
|
||||
│ └── (auth)/ # Auth pages (route groups)
|
||||
├── components/ # React components
|
||||
│ ├── ui/ # Generic UI components
|
||||
│ ├── forms/ # Form components
|
||||
│ └── layouts/ # Layout components
|
||||
├── hooks/ # Custom React hooks
|
||||
├── lib/ # Utilities and configs
|
||||
│ ├── api/ # API clients
|
||||
│ ├── utils/ # Helper functions
|
||||
│ └── constants/ # Constants
|
||||
├── types/ # TypeScript types
|
||||
└── styles/ # Global styles
|
||||
│ ├── api/ # API ルート
|
||||
│ ├── markets/ # マーケットページ
|
||||
│ └── (auth)/ # 認証ページ(ルートグループ)
|
||||
├── components/ # React コンポーネント
|
||||
│ ├── ui/ # 汎用 UI コンポーネント
|
||||
│ ├── forms/ # フォームコンポーネント
|
||||
│ └── layouts/ # レイアウトコンポーネント
|
||||
├── hooks/ # カスタム React フック
|
||||
├── lib/ # ユーティリティと設定
|
||||
│ ├── api/ # API クライアント
|
||||
│ ├── utils/ # ヘルパー関数
|
||||
│ └── constants/ # 定数
|
||||
├── types/ # TypeScript 型定義
|
||||
└── styles/ # グローバルスタイル
|
||||
```
|
||||
|
||||
### ファイル命名
|
||||
|
||||
```
|
||||
components/Button.tsx # PascalCase for components
|
||||
hooks/useAuth.ts # camelCase with 'use' prefix
|
||||
lib/formatDate.ts # camelCase for utilities
|
||||
types/market.types.ts # camelCase with .types suffix
|
||||
components/Button.tsx # コンポーネントは PascalCase
|
||||
hooks/useAuth.ts # フックは 'use' プレフィックス付き camelCase
|
||||
lib/formatDate.ts # ユーティリティは camelCase
|
||||
types/market.types.ts # 型定義は .types サフィックス付き camelCase
|
||||
```
|
||||
|
||||
## コメントとドキュメント
|
||||
|
||||
@@ -51,13 +51,13 @@ source: "session-observation"
|
||||
## 仕組み
|
||||
|
||||
```
|
||||
Session Activity
|
||||
セッションアクティビティ
|
||||
│
|
||||
│ フックがプロンプト + ツール使用をキャプチャ(100%信頼性)
|
||||
▼
|
||||
┌─────────────────────────────────────────┐
|
||||
│ observations.jsonl │
|
||||
│ (prompts, tool calls, outcomes) │
|
||||
│ (プロンプト、ツール呼び出し、結果) │
|
||||
└─────────────────────────────────────────┘
|
||||
│
|
||||
│ Observerエージェントが読み取り(バックグラウンド、Haiku)
|
||||
|
||||
@@ -22,24 +22,24 @@ Claude Codeセッションの正式な評価フレームワークで、評価駆
|
||||
Claudeが以前できなかったことができるようになったかをテスト:
|
||||
```markdown
|
||||
[CAPABILITY EVAL: feature-name]
|
||||
Task: Claudeが達成すべきことの説明
|
||||
Success Criteria:
|
||||
タスク: Claudeが達成すべきことの説明
|
||||
成功基準:
|
||||
- [ ] 基準1
|
||||
- [ ] 基準2
|
||||
- [ ] 基準3
|
||||
Expected Output: 期待される結果の説明
|
||||
期待される出力: 期待される結果の説明
|
||||
```
|
||||
|
||||
### リグレッション評価
|
||||
変更が既存の機能を破壊しないことを確認:
|
||||
```markdown
|
||||
[REGRESSION EVAL: feature-name]
|
||||
Baseline: SHAまたはチェックポイント名
|
||||
Tests:
|
||||
ベースライン: SHAまたはチェックポイント名
|
||||
テスト:
|
||||
- existing-test-1: PASS/FAIL
|
||||
- existing-test-2: PASS/FAIL
|
||||
- existing-test-3: PASS/FAIL
|
||||
Result: X/Y passed (previously Y/Y)
|
||||
結果: X/Y 成功(以前は Y/Y)
|
||||
```
|
||||
|
||||
## 評価者タイプ
|
||||
@@ -67,17 +67,17 @@ Claudeを使用して自由形式の出力を評価:
|
||||
3. エッジケースは処理されていますか?
|
||||
4. エラー処理は適切ですか?
|
||||
|
||||
Score: 1-5 (1=poor, 5=excellent)
|
||||
Reasoning: [説明]
|
||||
スコア: 1-5(1=不良、5=優秀)
|
||||
理由: [説明]
|
||||
```
|
||||
|
||||
### 3. 人間評価者
|
||||
手動レビューのためにフラグを立てる:
|
||||
```markdown
|
||||
[HUMAN REVIEW REQUIRED]
|
||||
Change: 何が変更されたかの説明
|
||||
Reason: 人間のレビューが必要な理由
|
||||
Risk Level: LOW/MEDIUM/HIGH
|
||||
変更内容: 何が変更されたかの説明
|
||||
理由: 人間のレビューが必要な理由
|
||||
リスクレベル: LOW/MEDIUM/HIGH
|
||||
```
|
||||
|
||||
## メトリクス
|
||||
@@ -98,21 +98,21 @@ Risk Level: LOW/MEDIUM/HIGH
|
||||
|
||||
### 1. 定義(コーディング前)
|
||||
```markdown
|
||||
## EVAL DEFINITION: feature-xyz
|
||||
## 評価定義: feature-xyz
|
||||
|
||||
### Capability Evals
|
||||
### 能力評価
|
||||
1. 新しいユーザーアカウントを作成できる
|
||||
2. メール形式を検証できる
|
||||
3. パスワードを安全にハッシュ化できる
|
||||
|
||||
### Regression Evals
|
||||
### リグレッション評価
|
||||
1. 既存のログインが引き続き機能する
|
||||
2. セッション管理が変更されていない
|
||||
3. ログアウトフローが維持されている
|
||||
|
||||
### Success Metrics
|
||||
- pass@3 > 90% for capability evals
|
||||
- pass^3 = 100% for regression evals
|
||||
### 成功メトリクス
|
||||
- 能力評価で pass@3 > 90%
|
||||
- リグレッション評価で pass^3 = 100%
|
||||
```
|
||||
|
||||
### 2. 実装
|
||||
@@ -131,26 +131,26 @@ npm test -- --testPathPattern="existing"
|
||||
|
||||
### 4. レポート
|
||||
```markdown
|
||||
EVAL REPORT: feature-xyz
|
||||
評価レポート: feature-xyz
|
||||
========================
|
||||
|
||||
Capability Evals:
|
||||
能力評価:
|
||||
create-user: PASS (pass@1)
|
||||
validate-email: PASS (pass@2)
|
||||
hash-password: PASS (pass@1)
|
||||
Overall: 3/3 passed
|
||||
全体: 3/3 成功
|
||||
|
||||
Regression Evals:
|
||||
リグレッション評価:
|
||||
login-flow: PASS
|
||||
session-mgmt: PASS
|
||||
logout-flow: PASS
|
||||
Overall: 3/3 passed
|
||||
全体: 3/3 成功
|
||||
|
||||
Metrics:
|
||||
メトリクス:
|
||||
pass@1: 67% (2/3)
|
||||
pass@3: 100% (3/3)
|
||||
|
||||
Status: READY FOR REVIEW
|
||||
ステータス: レビュー準備完了
|
||||
```
|
||||
|
||||
## 統合パターン
|
||||
@@ -199,29 +199,29 @@ Status: READY FOR REVIEW
|
||||
```markdown
|
||||
## EVAL: add-authentication
|
||||
|
||||
### Phase 1: Define (10 min)
|
||||
Capability Evals:
|
||||
### フェーズ 1: 定義(10分)
|
||||
能力評価:
|
||||
- [ ] ユーザーはメール/パスワードで登録できる
|
||||
- [ ] ユーザーは有効な資格情報でログインできる
|
||||
- [ ] 無効な資格情報は適切なエラーで拒否される
|
||||
- [ ] セッションはページリロード後も持続する
|
||||
- [ ] ログアウトはセッションをクリアする
|
||||
|
||||
Regression Evals:
|
||||
リグレッション評価:
|
||||
- [ ] 公開ルートは引き続きアクセス可能
|
||||
- [ ] APIレスポンスは変更されていない
|
||||
- [ ] データベーススキーマは互換性がある
|
||||
|
||||
### Phase 2: Implement (varies)
|
||||
### フェーズ 2: 実装(可変)
|
||||
[コードを書く]
|
||||
|
||||
### Phase 3: Evaluate
|
||||
### フェーズ 3: 評価
|
||||
Run: /eval check add-authentication
|
||||
|
||||
### Phase 4: Report
|
||||
EVAL REPORT: add-authentication
|
||||
### フェーズ 4: レポート
|
||||
評価レポート: add-authentication
|
||||
==============================
|
||||
Capability: 5/5 passed (pass@3: 100%)
|
||||
Regression: 3/3 passed (pass^3: 100%)
|
||||
Status: SHIP IT
|
||||
能力: 5/5 成功(pass@3: 100%)
|
||||
リグレッション: 3/3 成功(pass^3: 100%)
|
||||
ステータス: 出荷可能
|
||||
```
|
||||
|
||||
@@ -368,17 +368,17 @@ func WriteAndFlush(w io.Writer, data []byte) error {
|
||||
myproject/
|
||||
├── cmd/
|
||||
│ └── myapp/
|
||||
│ └── main.go # Entry point
|
||||
│ └── main.go # エントリポイント
|
||||
├── internal/
|
||||
│ ├── handler/ # HTTP handlers
|
||||
│ ├── service/ # Business logic
|
||||
│ ├── repository/ # Data access
|
||||
│ └── config/ # Configuration
|
||||
│ ├── handler/ # HTTP ハンドラー
|
||||
│ ├── service/ # ビジネスロジック
|
||||
│ ├── repository/ # データアクセス
|
||||
│ └── config/ # 設定
|
||||
├── pkg/
|
||||
│ └── client/ # Public API client
|
||||
│ └── client/ # 公開 API クライアント
|
||||
├── api/
|
||||
│ └── v1/ # API definitions (proto, OpenAPI)
|
||||
├── testdata/ # Test fixtures
|
||||
│ └── v1/ # API 定義(proto、OpenAPI)
|
||||
├── testdata/ # テストフィクスチャ
|
||||
├── go.mod
|
||||
├── go.sum
|
||||
└── Makefile
|
||||
|
||||
@@ -113,7 +113,7 @@ mypackage/
|
||||
├── testdata/ # テストフィクスチャ
|
||||
│ ├── valid_user.json
|
||||
│ └── invalid_user.json
|
||||
└── export_test.go # 内部のテストのための非公開のエクスポート
|
||||
└── export_test.go # 内部テスト用の非公開エクスポート
|
||||
```
|
||||
|
||||
### テストパッケージ
|
||||
|
||||
@@ -594,18 +594,18 @@ def test_with_tmpdir(tmpdir):
|
||||
|
||||
```
|
||||
tests/
|
||||
├── conftest.py # Shared fixtures
|
||||
├── conftest.py # 共有フィクスチャ
|
||||
├── __init__.py
|
||||
├── unit/ # Unit tests
|
||||
├── unit/ # ユニットテスト
|
||||
│ ├── __init__.py
|
||||
│ ├── test_models.py
|
||||
│ ├── test_utils.py
|
||||
│ └── test_services.py
|
||||
├── integration/ # Integration tests
|
||||
├── integration/ # 統合テスト
|
||||
│ ├── __init__.py
|
||||
│ ├── test_api.py
|
||||
│ └── test_database.py
|
||||
└── e2e/ # End-to-end tests
|
||||
└── e2e/ # エンドツーエンドテスト
|
||||
├── __init__.py
|
||||
└── test_user_flow.py
|
||||
```
|
||||
|
||||
31
docs/zh-CN/commands/prune.md
Normal file
31
docs/zh-CN/commands/prune.md
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
name: prune
|
||||
description: 删除超过 30 天且从未被提升的待处理本能
|
||||
command: true
|
||||
---
|
||||
|
||||
# 清理待处理本能
|
||||
|
||||
删除那些由系统自动生成、但从未经过审查或提升的过期待处理本能。
|
||||
|
||||
## 实现
|
||||
|
||||
使用插件根目录路径运行本能 CLI:
|
||||
|
||||
```bash
|
||||
python3 "${CLAUDE_PLUGIN_ROOT}/skills/continuous-learning-v2/scripts/instinct-cli.py" prune
|
||||
```
|
||||
|
||||
或者如果 `CLAUDE_PLUGIN_ROOT` 未设置(手动安装):
|
||||
|
||||
```bash
|
||||
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py prune
|
||||
```
|
||||
|
||||
## 用法
|
||||
|
||||
```
|
||||
/prune # 删除超过 30 天的本能
|
||||
/prune --max-age 60 # 自定义年龄阈值(天)
|
||||
/prune --dry-run # 仅预览,不实际删除
|
||||
```
|
||||
@@ -1,6 +1,7 @@
|
||||
---
|
||||
name: prompt-optimizer
|
||||
description: 分析原始提示,识别意图和差距,匹配ECC组件(技能/命令/代理/钩子),并输出一个可直接粘贴的优化提示。仅提供咨询角色——绝不自行执行任务。触发时机:当用户说“优化提示”、“改进我的提示”、“如何编写提示”、“帮我优化这个指令”或明确要求提高提示质量时。中文等效表达同样触发:“优化prompt”、“改进prompt”、“怎么写prompt”、“帮我优化这个指令”。不触发时机:当用户希望直接执行任务,或说“直接做”时。不触发时机:当用户说“优化代码”、“优化性能”、“optimize performance”、“optimize this code”时——这些是重构/性能优化任务,而非提示优化。origin: community
|
||||
description: 分析原始提示,识别意图和差距,匹配ECC组件(技能/命令/代理/钩子),并输出一个可直接粘贴的优化提示。仅提供咨询角色——绝不自行执行任务。触发时机:当用户说“优化提示”、“改进我的提示”、“如何编写提示”、“帮我优化这个指令”或明确要求提高提示质量时。中文等效表达同样触发:“优化prompt”、“改进prompt”、“怎么写prompt”、“帮我优化这个指令”。不触发时机:当用户希望直接执行任务,或说“直接做”时。不触发时机:当用户说“优化代码”、“优化性能”、“optimize performance”、“optimize this code”时——这些是重构/性能优化任务,而非提示优化。
|
||||
origin: community
|
||||
metadata:
|
||||
author: YannJY02
|
||||
version: "1.0.0"
|
||||
|
||||
@@ -23,6 +23,7 @@ User request → Claude picks a tool → PreToolUse hook runs → Tool executes
|
||||
| **Dev server blocker** | `Bash` | Blocks `npm run dev` etc. outside tmux — ensures log access | 2 (blocks) |
|
||||
| **Tmux reminder** | `Bash` | Suggests tmux for long-running commands (npm test, cargo build, docker) | 0 (warns) |
|
||||
| **Git push reminder** | `Bash` | Reminds to review changes before `git push` | 0 (warns) |
|
||||
| **Pre-commit quality check** | `Bash` | Runs quality checks before `git commit`: lints staged files, validates commit message format when provided via `-m/--message`, detects console.log/debugger/secrets | 2 (blocks critical) / 0 (warns) |
|
||||
| **Doc file warning** | `Write` | Warns about non-standard `.md`/`.txt` files (allows README, CLAUDE, CONTRIBUTING, CHANGELOG, LICENSE, SKILL, docs/, skills/); cross-platform path handling | 0 (warns) |
|
||||
| **Strategic compact** | `Edit\|Write` | Suggests manual `/compact` at logical intervals (every ~50 tool calls) | 0 (warns) |
|
||||
| **InsAIts security monitor (opt-in)** | `Bash\|Write\|Edit\|MultiEdit` | Optional security scan for high-signal tool inputs. Disabled unless `ECC_ENABLE_INSAITS=1`. Blocks on critical findings, warns on non-critical, and writes audit log to `.insaits_audit_session.jsonl`. Requires `pip install insa-its`. [Details](../scripts/hooks/insaits-security-monitor.py) | 2 (blocks critical) / 0 (warns) |
|
||||
|
||||
@@ -42,6 +42,16 @@
|
||||
],
|
||||
"description": "Reminder before git push to review changes"
|
||||
},
|
||||
{
|
||||
"matcher": "Bash",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:bash:commit-quality\" \"scripts/hooks/pre-bash-commit-quality.js\" \"strict\""
|
||||
}
|
||||
],
|
||||
"description": "Pre-commit quality check: lint staged files, validate commit message format, detect console.log/debugger/secrets before committing"
|
||||
},
|
||||
{
|
||||
"matcher": "Write",
|
||||
"hooks": [
|
||||
|
||||
15
install.ps1
15
install.ps1
@@ -34,5 +34,20 @@ while ($true) {
|
||||
$scriptDir = Split-Path -Parent $scriptPath
|
||||
$installerScript = Join-Path -Path (Join-Path -Path $scriptDir -ChildPath 'scripts') -ChildPath 'install-apply.js'
|
||||
|
||||
# Auto-install Node dependencies when running from a git clone
|
||||
$nodeModules = Join-Path -Path $scriptDir -ChildPath 'node_modules'
|
||||
if (-not (Test-Path -LiteralPath $nodeModules)) {
|
||||
Write-Host '[ECC] Installing dependencies...'
|
||||
Push-Location $scriptDir
|
||||
try {
|
||||
& npm install --no-audit --no-fund --loglevel=error
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Error "npm install failed with exit code $LASTEXITCODE"
|
||||
exit $LASTEXITCODE
|
||||
}
|
||||
}
|
||||
finally { Pop-Location }
|
||||
}
|
||||
|
||||
& node $installerScript @args
|
||||
exit $LASTEXITCODE
|
||||
|
||||
@@ -14,4 +14,10 @@ while [ -L "$SCRIPT_PATH" ]; do
|
||||
done
|
||||
SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)"
|
||||
|
||||
# Auto-install Node dependencies when running from a git clone
|
||||
if [ ! -d "$SCRIPT_DIR/node_modules" ]; then
|
||||
echo "[ECC] Installing dependencies..."
|
||||
(cd "$SCRIPT_DIR" && npm install --no-audit --no-fund --loglevel=error)
|
||||
fi
|
||||
|
||||
exec node "$SCRIPT_DIR/scripts/install-apply.js" "$@"
|
||||
|
||||
@@ -125,6 +125,7 @@
|
||||
"skills/kotlin-ktor-patterns",
|
||||
"skills/kotlin-patterns",
|
||||
"skills/kotlin-testing",
|
||||
"skills/laravel-plugin-discovery",
|
||||
"skills/laravel-patterns",
|
||||
"skills/laravel-tdd",
|
||||
"skills/laravel-verification",
|
||||
@@ -407,6 +408,7 @@
|
||||
"skills/ralphinho-rfc-pipeline",
|
||||
"skills/regex-vs-llm-structured-text",
|
||||
"skills/search-first",
|
||||
"skills/token-budget-advisor",
|
||||
"skills/team-builder"
|
||||
],
|
||||
"targets": [
|
||||
|
||||
@@ -133,6 +133,11 @@
|
||||
"args": ["-y", "token-optimizer-mcp"],
|
||||
"description": "Token optimization for 95%+ context reduction via content deduplication and compression"
|
||||
},
|
||||
"laraplugins": {
|
||||
"type": "http",
|
||||
"url": "https://laraplugins.io/mcp/plugins",
|
||||
"description": "Laravel plugin discovery — search packages by keyword, health score, Laravel/PHP version compatibility. Use with laravel-plugin-discovery skill."
|
||||
},
|
||||
"confluence": {
|
||||
"command": "npx",
|
||||
"args": ["-y", "confluence-mcp-server"],
|
||||
|
||||
7
package-lock.json
generated
7
package-lock.json
generated
@@ -11,6 +11,7 @@
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.5",
|
||||
"ajv": "^8.18.0",
|
||||
"sql.js": "^1.14.1"
|
||||
},
|
||||
"bin": {
|
||||
@@ -19,7 +20,6 @@
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
"ajv": "^8.18.0",
|
||||
"c8": "^10.1.2",
|
||||
"eslint": "^9.39.2",
|
||||
"globals": "^17.1.0",
|
||||
@@ -449,7 +449,6 @@
|
||||
"version": "8.18.0",
|
||||
"resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz",
|
||||
"integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"fast-deep-equal": "^3.1.3",
|
||||
@@ -1043,7 +1042,6 @@
|
||||
"version": "3.1.3",
|
||||
"resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
|
||||
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/fast-json-stable-stringify": {
|
||||
@@ -1064,7 +1062,6 @@
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz",
|
||||
"integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==",
|
||||
"dev": true,
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
@@ -1491,7 +1488,6 @@
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
|
||||
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/json-stable-stringify-without-jsonify": {
|
||||
@@ -2512,7 +2508,6 @@
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
|
||||
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.10.0"
|
||||
|
||||
@@ -89,6 +89,9 @@
|
||||
"AGENTS.md",
|
||||
".claude-plugin/plugin.json",
|
||||
".claude-plugin/README.md",
|
||||
".codex-plugin/plugin.json",
|
||||
".codex-plugin/README.md",
|
||||
".mcp.json",
|
||||
"install.sh",
|
||||
"install.ps1",
|
||||
"llms.txt"
|
||||
|
||||
172
research/ecc2-codebase-analysis.md
Normal file
172
research/ecc2-codebase-analysis.md
Normal file
@@ -0,0 +1,172 @@
|
||||
# ECC2 Codebase Research Report
|
||||
|
||||
**Date:** 2026-03-26
|
||||
**Subject:** `ecc-tui` v0.1.0 — Agentic IDE Control Plane
|
||||
**Total Lines:** 4,417 across 15 `.rs` files
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
ECC2 is a Rust TUI application that orchestrates AI coding agent sessions. It uses:
|
||||
- **ratatui 0.29** + **crossterm 0.28** for terminal UI
|
||||
- **rusqlite 0.32** (bundled) for local state persistence
|
||||
- **tokio 1** (full) for async runtime
|
||||
- **clap 4** (derive) for CLI
|
||||
|
||||
### Module Breakdown
|
||||
|
||||
| Module | Lines | Purpose |
|
||||
|--------|------:|---------|
|
||||
| `session/` | 1,974 | Session lifecycle, persistence, runtime, output |
|
||||
| `tui/` | 1,613 | Dashboard, app loop, custom widgets |
|
||||
| `observability/` | 409 | Tool call risk scoring and logging |
|
||||
| `config/` | 144 | Configuration (TOML file) |
|
||||
| `main.rs` | 142 | CLI entry point |
|
||||
| `worktree/` | 99 | Git worktree management |
|
||||
| `comms/` | 36 | Inter-agent messaging (send only) |
|
||||
|
||||
### Key Architectural Patterns
|
||||
|
||||
- **DbWriter thread** in `session/runtime.rs` — dedicated OS thread for SQLite writes from async context via `mpsc::unbounded_channel` with oneshot acknowledgements. Clean solution to the "SQLite from async" problem.
|
||||
- **Session state machine** with enforced transitions: `Pending → {Running, Failed, Stopped}`, `Running → {Idle, Completed, Failed, Stopped}`, etc.
|
||||
- **Ring buffer** for session output — `OUTPUT_BUFFER_LIMIT = 1000` lines per session with automatic eviction.
|
||||
- **Risk scoring** on tool calls — 4-axis analysis (base tool risk, file sensitivity, blast radius, irreversibility) producing composite 0.0–1.0 scores with suggested actions (Allow/Review/RequireConfirmation/Block).
|
||||
|
||||
## 2. Code Quality Metrics
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Total lines | 4,417 |
|
||||
| Test functions | 34 |
|
||||
| `unwrap()` calls | 3 |
|
||||
| `unsafe` blocks | 0 |
|
||||
| TODO/FIXME comments | 0 |
|
||||
| Max file size | 1,273 lines (`dashboard.rs`) |
|
||||
|
||||
**Assessment:** The codebase is clean. Only 3 `unwrap()` calls (2 in tests, 1 in config `default()`), zero `unsafe`, and all modules use proper `anyhow::Result` error propagation. The `dashboard.rs` file at 1,273 lines exceeds the repo's 800-line max-file guideline, but it is still manageable at the current scope.
|
||||
|
||||
## 3. Identified Gaps
|
||||
|
||||
### 3.1 Comms Module — Send Without Receive
|
||||
|
||||
`comms/mod.rs` (36 lines) has `send()` but no `receive()`, `poll()`, `inbox()`, or `subscribe()`. The `messages` table exists in SQLite, but nothing reads from it. The inter-agent messaging story is half-built.
|
||||
|
||||
**Impact:** Agents cannot coordinate. The `TaskHandoff`, `Query`, `Response`, and `Conflict` message types are defined but unusable.
|
||||
|
||||
### 3.2 New Session Dialog — Stub
|
||||
|
||||
`dashboard.rs:495` — `new_session()` logs `"New session dialog requested"` but does nothing. Users must use the CLI (`ecc start --task "..."`) to create sessions; the TUI dashboard cannot.
|
||||
|
||||
### 3.3 Single Agent Support
|
||||
|
||||
`session/manager.rs` — `agent_program()` only supports `"claude"`. The CLI accepts `--agent` but anything other than `"claude"` fails. No codex, opencode, or custom agent support.
|
||||
|
||||
### 3.4 Config — File-Only
|
||||
|
||||
`Config::load()` reads `~/.claude/ecc2.toml` only. The implementation lacks environment variable overrides (e.g., `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`) and CLI flags for configuration.
|
||||
|
||||
### 3.5 Legacy Dependency Candidate: `git2`
|
||||
|
||||
`git2 = "0.20"` is still declared in `Cargo.toml`, but the `worktree` module shells out to the `git` CLI instead. That makes `git2` a strong removal candidate rather than an already-completed cleanup.
|
||||
|
||||
### 3.6 No Metrics Aggregation
|
||||
|
||||
`SessionMetrics` tracks tokens, cost, duration, tool_calls, files_changed per session. But there's no aggregate view: total cost across sessions, average duration, top tools by usage, etc. The Metrics pane in the dashboard shows per-session detail only.
|
||||
|
||||
### 3.7 Daemon — No Health Reporting
|
||||
|
||||
`session/daemon.rs` runs an infinite loop checking session timeouts. No health endpoint, no log rotation, no PID file, no signal handling for graceful shutdown. `Ctrl+C` during daemon mode kills the process uncleanly.
|
||||
|
||||
## 4. Test Coverage Analysis
|
||||
|
||||
34 test functions across 10 source modules:
|
||||
|
||||
| Module | Tests | Coverage Focus |
|
||||
|--------|------:|----------------|
|
||||
| `main.rs` | 1 | CLI parsing |
|
||||
| `config/mod.rs` | 5 | Defaults, deserialization, legacy fallback |
|
||||
| `observability/mod.rs` | 5 | Risk scoring, persistence, pagination |
|
||||
| `session/daemon.rs` | 2 | Crash recovery / liveness handling |
|
||||
| `session/manager.rs` | 4 | Session lifecycle, resume, stop, latest status |
|
||||
| `session/output.rs` | 2 | Ring buffer, broadcast |
|
||||
| `session/runtime.rs` | 1 | Output capture persistence/events |
|
||||
| `session/store.rs` | 3 | Buffer window, migration, state transitions |
|
||||
| `tui/dashboard.rs` | 8 | Rendering, selection, pane navigation, scrolling |
|
||||
| `tui/widgets.rs` | 3 | Token meter rendering and thresholds |
|
||||
|
||||
**Direct coverage gaps:**
|
||||
- `comms/mod.rs` — 0 tests
|
||||
- `worktree/mod.rs` — 0 tests
|
||||
|
||||
The core I/O-heavy paths are no longer completely untested: `manager.rs`, `runtime.rs`, and `daemon.rs` each have targeted tests. The remaining gap is breadth rather than total absence, especially around `comms/`, `worktree/`, and more adversarial process/worktree failure cases.
|
||||
|
||||
## 5. Security Observations
|
||||
|
||||
- **No secrets in code.** Config reads from TOML file, no hardcoded credentials.
|
||||
- **Process spawning** uses `tokio::process::Command` with explicit `Stdio::piped()` — no shell injection vectors.
|
||||
- **Risk scoring** is a strong feature — catches `rm -rf`, `git push --force origin main`, file access to `.env`/secrets.
|
||||
- **No input sanitization on session task strings.** The task string is passed directly to `claude --print`. If the task contains shell metacharacters, it could be exploited depending on how `Command` handles argument quoting. Currently safe (arguments are not shell-interpreted), but worth auditing.
|
||||
|
||||
## 6. Dependency Health
|
||||
|
||||
| Crate | Version | Latest | Notes |
|
||||
|-------|---------|--------|-------|
|
||||
| ratatui | 0.29 | **0.30.0** | Update available |
|
||||
| crossterm | 0.28 | **0.29.0** | Update available |
|
||||
| rusqlite | 0.32 | **0.39.0** | Update available |
|
||||
| tokio | 1 | **1.50.0** | Update available |
|
||||
| serde | 1 | **1.0.228** | Update available |
|
||||
| clap | 4 | **4.6.0** | Update available |
|
||||
| chrono | 0.4 | **0.4.44** | Update available |
|
||||
| uuid | 1 | **1.22.0** | Update available |
|
||||
|
||||
`git2` is still present in `Cargo.toml` even though the `worktree` module shells out to the `git` CLI. Several other dependencies are outdated; either remove `git2` or start using it before the next release.
|
||||
|
||||
## 7. Recommendations (Prioritized)
|
||||
|
||||
### P0 — Quick Wins
|
||||
|
||||
1. **Add environment variable support to `Config::load()`** — `ECC_DB_PATH`, `ECC_WORKTREE_ROOT`, `ECC_DEFAULT_AGENT`. Standard practice for CLI tools.
|
||||
|
||||
### P1 — Feature Completions
|
||||
|
||||
2. **Implement `comms::receive()` / `comms::poll()`** — read unread messages from the `messages` table, optionally with a `broadcast` channel for real-time delivery. Wire it into the dashboard.
|
||||
3. **Build the new-session dialog in the TUI** — modal form with task input, agent selector, worktree toggle. Should call `session::manager::create_session()`.
|
||||
4. **Add aggregate metrics** — total cost, average session duration, tool call frequency, cost per session. Show in the Metrics pane.
|
||||
|
||||
### P2 — Robustness
|
||||
|
||||
5. **Expand integration coverage for `manager.rs`, `runtime.rs`, and `daemon.rs`** — the repo now has baseline tests here, but it still needs failure-path coverage around process crashes, timeouts, and cleanup edge cases.
|
||||
6. **Add first-party tests for `worktree/mod.rs` and `comms/mod.rs`** — these are still uncovered and back important orchestration features.
|
||||
7. **Add daemon health reporting** — PID file, structured logging, graceful shutdown via signal handler.
|
||||
8. **Task string security audit** — The session task uses `claude --print` via `tokio::process::Command`. Verify arguments are never shell-interpreted. Checklist: confirm `Command` arg usage, threat-model metacharacter injection, input validation/escaping strategy, logging of raw inputs, and automated tests. Re-audit if invocation code changes.
|
||||
9. **Break up `dashboard.rs`** — extract SessionsPane, OutputPane, MetricsPane, LogPane into separate files under `tui/panes/`.
|
||||
|
||||
### P3 — Extensibility
|
||||
|
||||
10. **Multi-agent support** — make `agent_program()` pluggable. Add `codex`, `opencode`, `custom` agent types.
|
||||
11. **Config validation** — validate risk thresholds sum correctly, budget values are positive, paths exist.
|
||||
|
||||
## 8. Comparison with Ratatui 0.29 Best Practices
|
||||
|
||||
The codebase follows ratatui conventions well:
|
||||
- Uses `TableState` for stateful selection (correct pattern)
|
||||
- Custom `Widget` trait implementation for `TokenMeter` (idiomatic)
|
||||
- `tick()` method for periodic state sync (standard)
|
||||
- `broadcast::channel` for real-time output events (appropriate)
|
||||
|
||||
**Minor deviations:**
|
||||
- The `Dashboard` struct directly holds `StateStore` (SQLite connection). Ratatui best practice is to keep the state store behind an `Arc<Mutex<>>` to allow background updates. Currently the TUI owns the DB exclusively, which blocks adding a background metrics refresh task.
|
||||
- No `Clear` widget usage when rendering the help overlay — could cause rendering artifacts on some terminals.
|
||||
|
||||
## 9. Risk Assessment
|
||||
|
||||
| Risk | Likelihood | Impact | Mitigation |
|
||||
|------|-----------|--------|------------|
|
||||
| Dashboard file exceeds 1500 lines (projected) | High | Medium | At 1,273 lines currently (Section 2); extract panes into modules before it grows further |
|
||||
| SQLite lock contention | Low | High | DbWriter pattern already handles this |
|
||||
| No agent diversity | Medium | Medium | Pluggable agent support |
|
||||
| Task-string handling assumptions drift over time | Medium | Medium | Keep `Command` argument handling shell-free, document the threat model, and add regression tests for metacharacter-heavy task input |
|
||||
|
||||
---
|
||||
|
||||
**Bottom line:** ECC2 is a well-structured Rust project with clean error handling, good separation of concerns, and strong security features (risk scoring). The main gaps are incomplete features (comms, new-session dialog, single agent) rather than architectural problems. The codebase is ready for feature work on top of the solid foundation.
|
||||
124
rules/common/code-review.md
Normal file
124
rules/common/code-review.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Code Review Standards
|
||||
|
||||
## Purpose
|
||||
|
||||
Code review ensures quality, security, and maintainability before code is merged. This rule defines when and how to conduct code reviews.
|
||||
|
||||
## When to Review
|
||||
|
||||
**MANDATORY review triggers:**
|
||||
|
||||
- After writing or modifying code
|
||||
- Before any commit to shared branches
|
||||
- When security-sensitive code is changed (auth, payments, user data)
|
||||
- When architectural changes are made
|
||||
- Before merging pull requests
|
||||
|
||||
**Pre-Review Requirements:**
|
||||
|
||||
Before requesting review, ensure:
|
||||
|
||||
- All automated checks (CI/CD) are passing
|
||||
- Merge conflicts are resolved
|
||||
- Branch is up to date with target branch
|
||||
|
||||
## Review Checklist
|
||||
|
||||
Before marking code complete:
|
||||
|
||||
- [ ] Code is readable and well-named
|
||||
- [ ] Functions are focused (<50 lines)
|
||||
- [ ] Files are cohesive (<800 lines)
|
||||
- [ ] No deep nesting (>4 levels)
|
||||
- [ ] Errors are handled explicitly
|
||||
- [ ] No hardcoded secrets or credentials
|
||||
- [ ] No console.log or debug statements
|
||||
- [ ] Tests exist for new functionality
|
||||
- [ ] Test coverage meets 80% minimum
|
||||
|
||||
## Security Review Triggers
|
||||
|
||||
**STOP and use security-reviewer agent when:**
|
||||
|
||||
- Authentication or authorization code
|
||||
- User input handling
|
||||
- Database queries
|
||||
- File system operations
|
||||
- External API calls
|
||||
- Cryptographic operations
|
||||
- Payment or financial code
|
||||
|
||||
## Review Severity Levels
|
||||
|
||||
| Level | Meaning | Action |
|
||||
|-------|---------|--------|
|
||||
| CRITICAL | Security vulnerability or data loss risk | **BLOCK** - Must fix before merge |
|
||||
| HIGH | Bug or significant quality issue | **WARN** - Should fix before merge |
|
||||
| MEDIUM | Maintainability concern | **INFO** - Consider fixing |
|
||||
| LOW | Style or minor suggestion | **NOTE** - Optional |
|
||||
|
||||
## Agent Usage
|
||||
|
||||
Use these agents for code review:
|
||||
|
||||
| Agent | Purpose |
|
||||
|-------|---------|
|
||||
| **code-reviewer** | General code quality, patterns, best practices |
|
||||
| **security-reviewer** | Security vulnerabilities, OWASP Top 10 |
|
||||
| **typescript-reviewer** | TypeScript/JavaScript specific issues |
|
||||
| **python-reviewer** | Python specific issues |
|
||||
| **go-reviewer** | Go specific issues |
|
||||
| **rust-reviewer** | Rust specific issues |
|
||||
|
||||
## Review Workflow
|
||||
|
||||
```
|
||||
1. Run git diff to understand changes
|
||||
2. Check security checklist first
|
||||
3. Review code quality checklist
|
||||
4. Run relevant tests
|
||||
5. Verify coverage >= 80%
|
||||
6. Use appropriate agent for detailed review
|
||||
```
|
||||
|
||||
## Common Issues to Catch
|
||||
|
||||
### Security
|
||||
|
||||
- Hardcoded credentials (API keys, passwords, tokens)
|
||||
- SQL injection (string concatenation in queries)
|
||||
- XSS vulnerabilities (unescaped user input)
|
||||
- Path traversal (unsanitized file paths)
|
||||
- CSRF protection missing
|
||||
- Authentication bypasses
|
||||
|
||||
### Code Quality
|
||||
|
||||
- Large functions (>50 lines) - split into smaller
|
||||
- Large files (>800 lines) - extract modules
|
||||
- Deep nesting (>4 levels) - use early returns
|
||||
- Missing error handling - handle explicitly
|
||||
- Mutation patterns - prefer immutable operations
|
||||
- Missing tests - add test coverage
|
||||
|
||||
### Performance
|
||||
|
||||
- N+1 queries - use JOINs or batching
|
||||
- Missing pagination - add LIMIT to queries
|
||||
- Unbounded queries - add constraints
|
||||
- Missing caching - cache expensive operations
|
||||
|
||||
## Approval Criteria
|
||||
|
||||
- **Approve**: No CRITICAL or HIGH issues
|
||||
- **Warning**: Only HIGH issues (merge with caution)
|
||||
- **Block**: CRITICAL issues found
|
||||
|
||||
## Integration with Other Rules
|
||||
|
||||
This rule works with:
|
||||
|
||||
- [testing.md](testing.md) - Test coverage requirements
|
||||
- [security.md](security.md) - Security checklist
|
||||
- [git-workflow.md](git-workflow.md) - Commit standards
|
||||
- [agents.md](agents.md) - Agent delegation
|
||||
@@ -36,3 +36,9 @@ The Feature Implementation Workflow describes the development pipeline: research
|
||||
- Detailed commit messages
|
||||
- Follow conventional commits format
|
||||
- See [git-workflow.md](./git-workflow.md) for commit message format and PR process
|
||||
|
||||
5. **Pre-Review Checks**
|
||||
- Verify all automated checks (CI/CD) are passing
|
||||
- Resolve any merge conflicts
|
||||
- Ensure branch is up to date with target branch
|
||||
- Only request review after these checks pass
|
||||
|
||||
108
rules/zh/README.md
Normal file
108
rules/zh/README.md
Normal file
@@ -0,0 +1,108 @@
|
||||
# 规则
|
||||
|
||||
## 结构
|
||||
|
||||
规则按**通用**层和**语言特定**目录组织:
|
||||
|
||||
```
|
||||
rules/
|
||||
├── common/ # 语言无关的原则(始终安装)
|
||||
│ ├── coding-style.md
|
||||
│ ├── git-workflow.md
|
||||
│ ├── testing.md
|
||||
│ ├── performance.md
|
||||
│ ├── patterns.md
|
||||
│ ├── hooks.md
|
||||
│ ├── agents.md
|
||||
│ ├── security.md
|
||||
│ ├── code-review.md
|
||||
│ └── development-workflow.md
|
||||
├── zh/ # 中文翻译版本
|
||||
│ ├── coding-style.md
|
||||
│ ├── git-workflow.md
|
||||
│ ├── testing.md
|
||||
│ ├── performance.md
|
||||
│ ├── patterns.md
|
||||
│ ├── hooks.md
|
||||
│ ├── agents.md
|
||||
│ ├── security.md
|
||||
│ ├── code-review.md
|
||||
│ └── development-workflow.md
|
||||
├── typescript/ # TypeScript/JavaScript 特定
|
||||
├── python/ # Python 特定
|
||||
├── golang/ # Go 特定
|
||||
├── swift/ # Swift 特定
|
||||
└── php/ # PHP 特定
|
||||
```
|
||||
|
||||
- **common/** 包含通用原则 — 无语言特定的代码示例。
|
||||
- **zh/** 包含 common 目录的中文翻译版本。
|
||||
- **语言目录** 扩展通用规则,包含框架特定的模式、工具和代码示例。每个文件引用其对应的通用版本。
|
||||
|
||||
## 安装
|
||||
|
||||
### 选项 1:安装脚本(推荐)
|
||||
|
||||
```bash
|
||||
# 安装通用 + 一个或多个语言特定的规则集
|
||||
./install.sh typescript
|
||||
./install.sh python
|
||||
./install.sh golang
|
||||
./install.sh swift
|
||||
./install.sh php
|
||||
|
||||
# 同时安装多种语言
|
||||
./install.sh typescript python
|
||||
```
|
||||
|
||||
### 选项 2:手动安装
|
||||
|
||||
> **重要提示:** 复制整个目录 — 不要使用 `/*` 展开。
|
||||
> 通用和语言特定目录包含同名文件。
|
||||
> 将它们展开到一个目录会导致语言特定文件覆盖通用规则,
|
||||
> 并破坏语言特定文件使用的 `../common/` 相对引用。
|
||||
|
||||
```bash
|
||||
# 创建目标目录
|
||||
mkdir -p ~/.claude/rules
|
||||
|
||||
# 安装通用规则(所有项目必需)
|
||||
cp -r rules/common ~/.claude/rules/common
|
||||
|
||||
# 安装中文翻译版本(可选)
|
||||
cp -r rules/zh ~/.claude/rules/zh
|
||||
|
||||
# 根据项目技术栈安装语言特定规则
|
||||
cp -r rules/typescript ~/.claude/rules/typescript
|
||||
cp -r rules/python ~/.claude/rules/python
|
||||
cp -r rules/golang ~/.claude/rules/golang
|
||||
cp -r rules/swift ~/.claude/rules/swift
|
||||
cp -r rules/php ~/.claude/rules/php
|
||||
```
|
||||
|
||||
## 规则 vs 技能
|
||||
|
||||
- **规则** 定义广泛适用的标准、约定和检查清单(如"80% 测试覆盖率"、"禁止硬编码密钥")。
|
||||
- **技能**(`skills/` 目录)为特定任务提供深入、可操作的参考材料(如 `python-patterns`、`golang-testing`)。
|
||||
|
||||
语言特定的规则文件在适当的地方引用相关技能。规则告诉你*做什么*;技能告诉你*怎么做*。
|
||||
|
||||
## 规则优先级
|
||||
|
||||
当语言特定规则与通用规则冲突时,**语言特定规则优先**(特定覆盖通用)。这遵循标准的分层配置模式(类似于 CSS 特异性或 `.gitignore` 优先级)。
|
||||
|
||||
- `rules/common/` 定义适用于所有项目的通用默认值。
|
||||
- `rules/golang/`、`rules/python/`、`rules/swift/`、`rules/php/`、`rules/typescript/` 等在语言习惯不同时覆盖这些默认值。
|
||||
- `rules/zh/` 是通用规则的中文翻译,与英文版本内容一致。
|
||||
|
||||
### 示例
|
||||
|
||||
`common/coding-style.md` 推荐不可变性作为默认原则。语言特定的 `golang/coding-style.md` 可以覆盖这一点:
|
||||
|
||||
> 惯用的 Go 使用指针接收器进行结构体变更 — 参见 [common/coding-style.md](../common/coding-style.md) 了解通用原则,但这里首选符合 Go 习惯的变更方式。
|
||||
|
||||
### 带覆盖说明的通用规则
|
||||
|
||||
`rules/common/` 中可能被语言特定文件覆盖的规则会被标记:
|
||||
|
||||
> **语言说明**:此规则可能会被语言特定规则覆盖;对于某些语言,该模式可能并不符合惯用写法。
|
||||
50
rules/zh/agents.md
Normal file
50
rules/zh/agents.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# 代理编排
|
||||
|
||||
## 可用代理
|
||||
|
||||
位于 `~/.claude/agents/`:
|
||||
|
||||
| 代理 | 用途 | 何时使用 |
|
||||
|-------|---------|------------|
|
||||
| planner | 实现规划 | 复杂功能、重构 |
|
||||
| architect | 系统设计 | 架构决策 |
|
||||
| tdd-guide | 测试驱动开发 | 新功能、bug 修复 |
|
||||
| code-reviewer | 代码审查 | 编写代码后 |
|
||||
| security-reviewer | 安全分析 | 提交前 |
|
||||
| build-error-resolver | 修复构建错误 | 构建失败时 |
|
||||
| e2e-runner | E2E 测试 | 关键用户流程 |
|
||||
| refactor-cleaner | 死代码清理 | 代码维护 |
|
||||
| doc-updater | 文档 | 更新文档 |
|
||||
| rust-reviewer | Rust 代码审查 | Rust 项目 |
|
||||
|
||||
## 立即使用代理
|
||||
|
||||
无需用户提示:
|
||||
1. 复杂功能请求 - 使用 **planner** 代理
|
||||
2. 刚编写/修改的代码 - 使用 **code-reviewer** 代理
|
||||
3. Bug 修复或新功能 - 使用 **tdd-guide** 代理
|
||||
4. 架构决策 - 使用 **architect** 代理
|
||||
|
||||
## 并行任务执行
|
||||
|
||||
对独立操作始终使用并行 Task 执行:
|
||||
|
||||
```markdown
|
||||
# 好:并行执行
|
||||
同时启动 3 个代理:
|
||||
1. 代理 1:认证模块安全分析
|
||||
2. 代理 2:缓存系统性能审查
|
||||
3. 代理 3:工具类型检查
|
||||
|
||||
# 坏:不必要的顺序
|
||||
先代理 1,然后代理 2,然后代理 3
|
||||
```
|
||||
|
||||
## 多视角分析
|
||||
|
||||
对于复杂问题,使用分角色子代理:
|
||||
- 事实审查者
|
||||
- 高级工程师
|
||||
- 安全专家
|
||||
- 一致性审查者
|
||||
- 冗余检查者
|
||||
124
rules/zh/code-review.md
Normal file
124
rules/zh/code-review.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# 代码审查标准
|
||||
|
||||
## 目的
|
||||
|
||||
代码审查确保代码合并前的质量、安全性和可维护性。此规则定义何时以及如何进行代码审查。
|
||||
|
||||
## 何时审查
|
||||
|
||||
**强制审查触发条件:**
|
||||
|
||||
- 编写或修改代码后
|
||||
- 提交到共享分支之前
|
||||
- 更改安全敏感代码时(认证、支付、用户数据)
|
||||
- 进行架构更改时
|
||||
- 合并 pull request 之前
|
||||
|
||||
**审查前要求:**
|
||||
|
||||
在请求审查之前,确保:
|
||||
|
||||
- 所有自动化检查(CI/CD)已通过
|
||||
- 合并冲突已解决
|
||||
- 分支已与目标分支同步
|
||||
|
||||
## 审查检查清单
|
||||
|
||||
在标记代码完成之前:
|
||||
|
||||
- [ ] 代码可读且命名良好
|
||||
- [ ] 函数聚焦(<50 行)
|
||||
- [ ] 文件内聚(<800 行)
|
||||
- [ ] 无深层嵌套(>4 层)
|
||||
- [ ] 错误显式处理
|
||||
- [ ] 无硬编码密钥或凭据
|
||||
- [ ] 无 console.log 或调试语句
|
||||
- [ ] 新功能有测试
|
||||
- [ ] 测试覆盖率满足 80% 最低要求
|
||||
|
||||
## 安全审查触发条件
|
||||
|
||||
**停止并使用 security-reviewer 代理当:**
|
||||
|
||||
- 认证或授权代码
|
||||
- 用户输入处理
|
||||
- 数据库查询
|
||||
- 文件系统操作
|
||||
- 外部 API 调用
|
||||
- 加密操作
|
||||
- 支付或金融代码
|
||||
|
||||
## 审查严重级别
|
||||
|
||||
| 级别 | 含义 | 行动 |
|
||||
|-------|---------|--------|
|
||||
| CRITICAL(关键) | 安全漏洞或数据丢失风险 | **阻止** - 合并前必须修复 |
|
||||
| HIGH(高) | Bug 或重大质量问题 | **警告** - 合并前应修复 |
|
||||
| MEDIUM(中) | 可维护性问题 | **信息** - 考虑修复 |
|
||||
| LOW(低) | 风格或次要建议 | **注意** - 可选 |
|
||||
|
||||
## 代理使用
|
||||
|
||||
使用这些代理进行代码审查:
|
||||
|
||||
| 代理 | 用途 |
|
||||
|-------|--------|
|
||||
| **code-reviewer** | 通用代码质量、模式、最佳实践 |
|
||||
| **security-reviewer** | 安全漏洞、OWASP Top 10 |
|
||||
| **typescript-reviewer** | TypeScript/JavaScript 特定问题 |
|
||||
| **python-reviewer** | Python 特定问题 |
|
||||
| **go-reviewer** | Go 特定问题 |
|
||||
| **rust-reviewer** | Rust 特定问题 |
|
||||
|
||||
## 审查工作流
|
||||
|
||||
```
|
||||
1. 运行 git diff 了解更改
|
||||
2. 先检查安全检查清单
|
||||
3. 审查代码质量检查清单
|
||||
4. 运行相关测试
|
||||
5. 验证覆盖率 >= 80%
|
||||
6. 使用适当的代理进行详细审查
|
||||
```
|
||||
|
||||
## 常见问题捕获
|
||||
|
||||
### 安全
|
||||
|
||||
- 硬编码凭据(API 密钥、密码、令牌)
|
||||
- SQL 注入(查询中的字符串拼接)
|
||||
- XSS 漏洞(未转义的用户输入)
|
||||
- 路径遍历(未净化的文件路径)
|
||||
- CSRF 保护缺失
|
||||
- 认证绕过
|
||||
|
||||
### 代码质量
|
||||
|
||||
- 大函数(>50 行)- 拆分为更小的
|
||||
- 大文件(>800 行)- 提取模块
|
||||
- 深层嵌套(>4 层)- 使用提前返回
|
||||
- 缺少错误处理 - 显式处理
|
||||
- 变更模式 - 优先使用不可变操作
|
||||
- 缺少测试 - 添加测试覆盖
|
||||
|
||||
### 性能
|
||||
|
||||
- N+1 查询 - 使用 JOIN 或批处理
|
||||
- 缺少分页 - 给查询添加 LIMIT
|
||||
- 无界查询 - 添加约束
|
||||
- 缺少缓存 - 缓存昂贵操作
|
||||
|
||||
## 批准标准
|
||||
|
||||
- **批准**:无关键或高优先级问题
|
||||
- **警告**:仅有高优先级问题(谨慎合并)
|
||||
- **阻止**:发现关键问题
|
||||
|
||||
## 与其他规则的集成
|
||||
|
||||
此规则与以下规则配合:
|
||||
|
||||
- [testing.md](testing.md) - 测试覆盖率要求
|
||||
- [security.md](security.md) - 安全检查清单
|
||||
- [git-workflow.md](git-workflow.md) - 提交标准
|
||||
- [agents.md](agents.md) - 代理委托
|
||||
48
rules/zh/coding-style.md
Normal file
48
rules/zh/coding-style.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# 编码风格
|
||||
|
||||
## 不可变性(关键)
|
||||
|
||||
始终创建新对象,永远不要修改现有对象:
|
||||
|
||||
```
|
||||
// 伪代码
|
||||
错误: modify(original, field, value) → 就地修改 original
|
||||
正确: update(original, field, value) → 返回带有更改的新副本
|
||||
```
|
||||
|
||||
原理:不可变数据防止隐藏的副作用,使调试更容易,并启用安全的并发。
|
||||
|
||||
## 文件组织
|
||||
|
||||
多个小文件 > 少量大文件:
|
||||
- 高内聚,低耦合
|
||||
- 典型 200-400 行,最多 800 行
|
||||
- 从大模块中提取工具函数
|
||||
- 按功能/领域组织,而非按类型
|
||||
|
||||
## 错误处理
|
||||
|
||||
始终全面处理错误:
|
||||
- 在每一层显式处理错误
|
||||
- 在面向 UI 的代码中提供用户友好的错误消息
|
||||
- 在服务器端记录详细的错误上下文
|
||||
- 永远不要静默吞掉错误
|
||||
|
||||
## 输入验证
|
||||
|
||||
始终在系统边界验证:
|
||||
- 处理前验证所有用户输入
|
||||
- 在可用的情况下使用基于模式的验证
|
||||
- 快速失败并给出清晰的错误消息
|
||||
- 永远不要信任外部数据(API 响应、用户输入、文件内容)
|
||||
|
||||
## 代码质量检查清单
|
||||
|
||||
在标记工作完成前:
|
||||
- [ ] 代码可读且命名良好
|
||||
- [ ] 函数很小(<50 行)
|
||||
- [ ] 文件聚焦(<800 行)
|
||||
- [ ] 没有深层嵌套(>4 层)
|
||||
- [ ] 正确的错误处理
|
||||
- [ ] 没有硬编码值(使用常量或配置)
|
||||
- [ ] 没有变更(使用不可变模式)
|
||||
44
rules/zh/development-workflow.md
Normal file
44
rules/zh/development-workflow.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# 开发工作流
|
||||
|
||||
> 此文件扩展 [common/git-workflow.md](./git-workflow.md),包含 git 操作之前的完整功能开发流程。
|
||||
|
||||
功能实现工作流描述了开发管道:研究、规划、TDD、代码审查,然后提交到 git。
|
||||
|
||||
## 功能实现工作流
|
||||
|
||||
0. **研究与重用** _(任何新实现前必需)_
|
||||
- **GitHub 代码搜索优先:** 在编写任何新代码之前,运行 `gh search repos` 和 `gh search code` 查找现有实现、模板和模式。
|
||||
- **库文档其次:** 使用 Context7 或主要供应商文档确认 API 行为、包使用和版本特定细节。
|
||||
- **仅当前两者不足时使用 Exa:** 在 GitHub 搜索和主要文档之后,使用 Exa 进行更广泛的网络研究或发现。
|
||||
- **检查包注册表:** 在编写工具代码之前搜索 npm、PyPI、crates.io 和其他注册表。首选久经考验的库而非手工编写的解决方案。
|
||||
- **搜索可适配的实现:** 寻找解决问题 80%+ 且可以分支、移植或包装的开源项目。
|
||||
- 当满足需求时,优先采用或移植经验证的方法而非从头编写新代码。
|
||||
|
||||
1. **先规划**
|
||||
- 使用 **planner** 代理创建实现计划
|
||||
- 编码前生成规划文档:PRD、架构、系统设计、技术文档、任务列表
|
||||
- 识别依赖和风险
|
||||
- 分解为阶段
|
||||
|
||||
2. **TDD 方法**
|
||||
- 使用 **tdd-guide** 代理
|
||||
- 先写测试(RED)
|
||||
- 实现以通过测试(GREEN)
|
||||
- 重构(IMPROVE)
|
||||
- 验证 80%+ 覆盖率
|
||||
|
||||
3. **代码审查**
|
||||
- 编写代码后立即使用 **code-reviewer** 代理
|
||||
- 解决关键和高优先级问题
|
||||
- 尽可能修复中优先级问题
|
||||
|
||||
4. **提交与推送**
|
||||
- 详细的提交消息
|
||||
- 遵循约定式提交格式
|
||||
- 参见 [git-workflow.md](./git-workflow.md) 了解提交消息格式和 PR 流程
|
||||
|
||||
5. **审查前检查**
|
||||
- 验证所有自动化检查(CI/CD)已通过
|
||||
- 解决任何合并冲突
|
||||
- 确保分支已与目标分支同步
|
||||
- 仅在这些检查通过后请求审查
|
||||
24
rules/zh/git-workflow.md
Normal file
24
rules/zh/git-workflow.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Git 工作流
|
||||
|
||||
## 提交消息格式
|
||||
```
|
||||
<类型>: <描述>
|
||||
|
||||
<可选正文>
|
||||
```
|
||||
|
||||
类型:feat, fix, refactor, docs, test, chore, perf, ci
|
||||
|
||||
注意:通过 ~/.claude/settings.json 全局禁用归属。
|
||||
|
||||
## Pull Request 工作流
|
||||
|
||||
创建 PR 时:
|
||||
1. 分析完整提交历史(不仅是最新提交)
|
||||
2. 使用 `git diff [base-branch]...HEAD` 查看所有更改
|
||||
3. 起草全面的 PR 摘要
|
||||
4. 包含带有 TODO 的测试计划
|
||||
5. 如果是新分支,使用 `-u` 标志推送
|
||||
|
||||
> 对于 git 操作之前的完整开发流程(规划、TDD、代码审查),
|
||||
> 参见 [development-workflow.md](./development-workflow.md)。
|
||||
30
rules/zh/hooks.md
Normal file
30
rules/zh/hooks.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# 钩子系统
|
||||
|
||||
## 钩子类型
|
||||
|
||||
- **PreToolUse**:工具执行前(验证、参数修改)
|
||||
- **PostToolUse**:工具执行后(自动格式化、检查)
|
||||
- **Stop**:会话结束时(最终验证)
|
||||
|
||||
## 自动接受权限
|
||||
|
||||
谨慎使用:
|
||||
- 为可信、定义明确的计划启用
|
||||
- 探索性工作时禁用
|
||||
- 永远不要使用 dangerously-skip-permissions 标志
|
||||
- 改为在 `~/.claude.json` 中配置 `allowedTools`
|
||||
|
||||
## TodoWrite 最佳实践
|
||||
|
||||
使用 TodoWrite 工具:
|
||||
- 跟踪多步骤任务的进度
|
||||
- 验证对指令的理解
|
||||
- 启用实时引导
|
||||
- 显示细粒度的实现步骤
|
||||
|
||||
待办列表揭示:
|
||||
- 顺序错误的步骤
|
||||
- 缺失的项目
|
||||
- 多余的不必要项目
|
||||
- 错误的粒度
|
||||
- 误解的需求
|
||||
31
rules/zh/patterns.md
Normal file
31
rules/zh/patterns.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# 常用模式
|
||||
|
||||
## 骨架项目
|
||||
|
||||
实现新功能时:
|
||||
1. 搜索久经考验的骨架项目
|
||||
2. 使用并行代理评估选项:
|
||||
- 安全性评估
|
||||
- 可扩展性分析
|
||||
- 相关性评分
|
||||
- 实现规划
|
||||
3. 克隆最佳匹配作为基础
|
||||
4. 在经验证的结构内迭代
|
||||
|
||||
## 设计模式
|
||||
|
||||
### 仓储模式
|
||||
|
||||
将数据访问封装在一致的接口后面:
|
||||
- 定义标准操作:findAll、findById、create、update、delete
|
||||
- 具体实现处理存储细节(数据库、API、文件等)
|
||||
- 业务逻辑依赖抽象接口,而非存储机制
|
||||
- 便于轻松切换数据源,并简化使用模拟的测试
|
||||
|
||||
### API 响应格式
|
||||
|
||||
对所有 API 响应使用一致的信封:
|
||||
- 包含成功/状态指示器
|
||||
- 包含数据负载(错误时可为空)
|
||||
- 包含错误消息字段(成功时可为空)
|
||||
- 包含分页响应的元数据(total、page、limit)
|
||||
55
rules/zh/performance.md
Normal file
55
rules/zh/performance.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# 性能优化
|
||||
|
||||
## 模型选择策略
|
||||
|
||||
**Haiku 4.5**(Sonnet 90% 的能力,3 倍成本节省):
|
||||
- 频繁调用的轻量级代理
|
||||
- 结对编程和代码生成
|
||||
- 多代理系统中的工作者代理
|
||||
|
||||
**Sonnet 4.6**(最佳编码模型):
|
||||
- 主要开发工作
|
||||
- 编排多代理工作流
|
||||
- 复杂编码任务
|
||||
|
||||
**Opus 4.5**(最深度推理):
|
||||
- 复杂架构决策
|
||||
- 最大推理需求
|
||||
- 研究和分析任务
|
||||
|
||||
## 上下文窗口管理
|
||||
|
||||
避免在上下文窗口的最后 20% 进行以下操作:
|
||||
- 大规模重构
|
||||
- 跨多个文件的功能实现
|
||||
- 调试复杂交互
|
||||
|
||||
上下文敏感度较低的任务:
|
||||
- 单文件编辑
|
||||
- 独立工具创建
|
||||
- 文档更新
|
||||
- 简单 bug 修复
|
||||
|
||||
## 扩展思考 + 规划模式
|
||||
|
||||
扩展思考默认启用,为内部推理保留最多 31,999 个 token。
|
||||
|
||||
通过以下方式控制扩展思考:
|
||||
- **切换**:Option+T(macOS)/ Alt+T(Windows/Linux)
|
||||
- **配置**:在 `~/.claude/settings.json` 中设置 `alwaysThinkingEnabled`
|
||||
- **预算上限**:`export MAX_THINKING_TOKENS=10000`
|
||||
- **详细模式**:Ctrl+O 查看思考输出
|
||||
|
||||
对于需要深度推理的复杂任务:
|
||||
1. 确保扩展思考已启用(默认开启)
|
||||
2. 启用**规划模式**进行结构化方法
|
||||
3. 使用多轮审查进行彻底分析
|
||||
4. 使用分角色子代理获得多样化视角
|
||||
|
||||
## 构建排查
|
||||
|
||||
如果构建失败:
|
||||
1. 使用 **build-error-resolver** 代理
|
||||
2. 分析错误消息
|
||||
3. 增量修复
|
||||
4. 每次修复后验证
|
||||
29
rules/zh/security.md
Normal file
29
rules/zh/security.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# 安全指南
|
||||
|
||||
## 强制安全检查
|
||||
|
||||
在任何提交之前:
|
||||
- [ ] 无硬编码密钥(API 密钥、密码、令牌)
|
||||
- [ ] 所有用户输入已验证
|
||||
- [ ] SQL 注入防护(参数化查询)
|
||||
- [ ] XSS 防护(净化 HTML)
|
||||
- [ ] CSRF 保护已启用
|
||||
- [ ] 认证/授权已验证
|
||||
- [ ] 所有端点启用速率限制
|
||||
- [ ] 错误消息不泄露敏感数据
|
||||
|
||||
## 密钥管理
|
||||
|
||||
- 永远不要在源代码中硬编码密钥
|
||||
- 始终使用环境变量或密钥管理器
|
||||
- 启动时验证所需的密钥是否存在
|
||||
- 轮换任何可能已暴露的密钥
|
||||
|
||||
## 安全响应协议
|
||||
|
||||
如果发现安全问题:
|
||||
1. 立即停止
|
||||
2. 使用 **security-reviewer** 代理
|
||||
3. 在继续之前修复关键问题
|
||||
4. 轮换任何已暴露的密钥
|
||||
5. 审查整个代码库中的类似问题
|
||||
29
rules/zh/testing.md
Normal file
29
rules/zh/testing.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# 测试要求
|
||||
|
||||
## 最低测试覆盖率:80%
|
||||
|
||||
测试类型(全部必需):
|
||||
1. **单元测试** - 单个函数、工具、组件
|
||||
2. **集成测试** - API 端点、数据库操作
|
||||
3. **E2E 测试** - 关键用户流程(框架根据语言选择)
|
||||
|
||||
## 测试驱动开发
|
||||
|
||||
强制工作流:
|
||||
1. 先写测试(RED)
|
||||
2. 运行测试 - 应该失败
|
||||
3. 编写最小实现(GREEN)
|
||||
4. 运行测试 - 应该通过
|
||||
5. 重构(IMPROVE)
|
||||
6. 验证覆盖率(80%+)
|
||||
|
||||
## 测试失败排查
|
||||
|
||||
1. 使用 **tdd-guide** 代理
|
||||
2. 检查测试隔离
|
||||
3. 验证模拟是否正确
|
||||
4. 修复实现,而非测试(除非测试有误)
|
||||
|
||||
## 代理支持
|
||||
|
||||
- **tdd-guide** - 主动用于新功能,强制先写测试
|
||||
@@ -11,7 +11,7 @@ CODEX_HOME="${CODEX_HOME:-$HOME/.codex}"
|
||||
CONFIG_FILE="$CODEX_HOME/config.toml"
|
||||
AGENTS_FILE="$CODEX_HOME/AGENTS.md"
|
||||
PROMPTS_DIR="$CODEX_HOME/prompts"
|
||||
SKILLS_DIR="$CODEX_HOME/skills"
|
||||
SKILLS_DIR="${AGENTS_HOME:-$HOME/.agents}/skills"
|
||||
HOOKS_DIR_EXPECT="${ECC_GLOBAL_HOOKS_DIR:-$CODEX_HOME/git-hooks}"
|
||||
|
||||
failures=0
|
||||
@@ -89,7 +89,13 @@ fi
|
||||
if [[ -f "$CONFIG_FILE" ]]; then
|
||||
check_config_pattern '^multi_agent\s*=\s*true' "multi_agent is enabled"
|
||||
check_config_absent '^\s*collab\s*=' "deprecated collab flag is absent"
|
||||
check_config_pattern '^persistent_instructions\s*=' "persistent_instructions is configured"
|
||||
# persistent_instructions is recommended but optional; warn instead of fail
|
||||
# so users who rely on AGENTS.md alone are not blocked (#967).
|
||||
if rg -n '^[[:space:]]*persistent_instructions\s*=' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
ok "persistent_instructions is configured"
|
||||
else
|
||||
warn "persistent_instructions is not set (recommended but optional)"
|
||||
fi
|
||||
check_config_pattern '^\[profiles\.strict\]' "profiles.strict exists"
|
||||
check_config_pattern '^\[profiles\.yolo\]' "profiles.yolo exists"
|
||||
|
||||
@@ -97,7 +103,7 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
'mcp_servers.github' \
|
||||
'mcp_servers.memory' \
|
||||
'mcp_servers.sequential-thinking' \
|
||||
'mcp_servers.context7-mcp'
|
||||
'mcp_servers.context7'
|
||||
do
|
||||
if rg -n "^\[$section\]" "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
ok "MCP section [$section] exists"
|
||||
@@ -106,10 +112,25 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
fi
|
||||
done
|
||||
|
||||
has_context7_legacy=0
|
||||
has_context7_current=0
|
||||
|
||||
if rg -n '^\[mcp_servers\.context7\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
warn "Duplicate [mcp_servers.context7] exists (context7-mcp is preferred)"
|
||||
has_context7_legacy=1
|
||||
fi
|
||||
|
||||
if rg -n '^\[mcp_servers\.context7-mcp\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
has_context7_current=1
|
||||
fi
|
||||
|
||||
if [[ "$has_context7_legacy" -eq 1 || "$has_context7_current" -eq 1 ]]; then
|
||||
ok "MCP section [mcp_servers.context7] or [mcp_servers.context7-mcp] exists"
|
||||
else
|
||||
ok "No duplicate [mcp_servers.context7] section"
|
||||
fail "MCP section [mcp_servers.context7] or [mcp_servers.context7-mcp] missing"
|
||||
fi
|
||||
|
||||
if [[ "$has_context7_legacy" -eq 1 && "$has_context7_current" -eq 1 ]]; then
|
||||
warn "Both [mcp_servers.context7] and [mcp_servers.context7-mcp] exist; prefer one name"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -144,12 +165,12 @@ if [[ -d "$SKILLS_DIR" ]]; then
|
||||
done
|
||||
|
||||
if [[ "$missing_skills" -eq 0 ]]; then
|
||||
ok "All 16 ECC Codex skills are present"
|
||||
ok "All 16 ECC skills are present in $SKILLS_DIR"
|
||||
else
|
||||
fail "$missing_skills required skills are missing"
|
||||
warn "$missing_skills ECC skills missing from $SKILLS_DIR (install via ECC installer or npx skills)"
|
||||
fi
|
||||
else
|
||||
fail "Skills directory missing ($SKILLS_DIR)"
|
||||
warn "Skills directory missing ($SKILLS_DIR) — install via ECC installer or npx skills"
|
||||
fi
|
||||
|
||||
if [[ -f "$PROMPTS_DIR/ecc-prompts-manifest.txt" ]]; then
|
||||
|
||||
@@ -83,20 +83,23 @@ function dlxServer(name, pkg, extraFields, extraToml) {
|
||||
}
|
||||
|
||||
/** Each entry: key = section name under mcp_servers, value = { toml, fields } */
|
||||
const DEFAULT_MCP_STARTUP_TIMEOUT_SEC = 30;
|
||||
const DEFAULT_MCP_STARTUP_TIMEOUT_TOML = `startup_timeout_sec = ${DEFAULT_MCP_STARTUP_TIMEOUT_SEC}`;
|
||||
|
||||
const ECC_SERVERS = {
|
||||
supabase: dlxServer('supabase', '@supabase/mcp-server-supabase@latest', { startup_timeout_sec: 20.0, tool_timeout_sec: 120.0 }, 'startup_timeout_sec = 20.0\ntool_timeout_sec = 120.0'),
|
||||
playwright: dlxServer('playwright', '@playwright/mcp@latest'),
|
||||
'context7-mcp': dlxServer('context7-mcp', '@upstash/context7-mcp'),
|
||||
playwright: dlxServer('playwright', '@playwright/mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
context7: dlxServer('context7', '@upstash/context7-mcp@latest', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
exa: {
|
||||
fields: { url: 'https://mcp.exa.ai/mcp' },
|
||||
toml: `[mcp_servers.exa]\nurl = "https://mcp.exa.ai/mcp"`
|
||||
},
|
||||
github: {
|
||||
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP] },
|
||||
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]`
|
||||
fields: { command: 'bash', args: ['-lc', GH_BOOTSTRAP], startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC },
|
||||
toml: `[mcp_servers.github]\ncommand = "bash"\nargs = ["-lc", ${JSON.stringify(GH_BOOTSTRAP)}]\n${DEFAULT_MCP_STARTUP_TIMEOUT_TOML}`
|
||||
},
|
||||
memory: dlxServer('memory', '@modelcontextprotocol/server-memory'),
|
||||
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking')
|
||||
memory: dlxServer('memory', '@modelcontextprotocol/server-memory', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML),
|
||||
'sequential-thinking': dlxServer('sequential-thinking', '@modelcontextprotocol/server-sequential-thinking', { startup_timeout_sec: DEFAULT_MCP_STARTUP_TIMEOUT_SEC }, DEFAULT_MCP_STARTUP_TIMEOUT_TOML)
|
||||
};
|
||||
|
||||
// Append --features arg for supabase after dlxServer builds the base
|
||||
@@ -104,9 +107,9 @@ ECC_SERVERS.supabase.fields.args.push('--features=account,docs,database,debuggin
|
||||
ECC_SERVERS.supabase.toml = ECC_SERVERS.supabase.toml.replace(/^(args = \[.*)\]$/m, '$1, "--features=account,docs,database,debugging,development,functions,storage,branching"]');
|
||||
|
||||
// Legacy section names that should be treated as an existing ECC server.
|
||||
// e.g. old configs shipped [mcp_servers.context7] instead of [mcp_servers.context7-mcp].
|
||||
// e.g. older configs shipped [mcp_servers.context7-mcp] instead of [mcp_servers.context7].
|
||||
const LEGACY_ALIASES = {
|
||||
'context7-mcp': ['context7']
|
||||
context7: ['context7-mcp']
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -254,6 +257,10 @@ function main() {
|
||||
if (resolvedLabel !== name) {
|
||||
raw = removeServerFromText(raw, name, existing);
|
||||
}
|
||||
if (legacyName && hasCanonical) {
|
||||
toRemoveLog.push(`mcp_servers.${legacyName}`);
|
||||
raw = removeServerFromText(raw, legacyName, existing);
|
||||
}
|
||||
toAppend.push(spec.toml);
|
||||
} else {
|
||||
// Add-only mode: skip, but warn about drift
|
||||
|
||||
0
scripts/ecc.js
Normal file → Executable file
0
scripts/ecc.js
Normal file → Executable file
@@ -1,7 +1,13 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Doc file warning hook (PreToolUse - Write)
|
||||
* Warns about non-standard documentation files.
|
||||
*
|
||||
* Uses a denylist approach: only warn on known ad-hoc documentation
|
||||
* filenames (NOTES, TODO, SCRATCH, etc.) outside structured directories.
|
||||
* This avoids false positives for legitimate markdown-heavy workflows
|
||||
* (specs, ADRs, command definitions, skill files, etc.).
|
||||
*
|
||||
* Policy ported from the intent of PR #962 into the current hook architecture.
|
||||
* Exit code 0 always (warns only, never blocks).
|
||||
*/
|
||||
|
||||
@@ -12,31 +18,59 @@ const path = require('path');
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let data = '';
|
||||
|
||||
function isAllowedDocPath(filePath) {
|
||||
// Known ad-hoc filenames that indicate impulse/scratch files (case-sensitive, uppercase only)
|
||||
const ADHOC_FILENAMES = /^(NOTES|TODO|SCRATCH|TEMP|DRAFT|BRAINSTORM|SPIKE|DEBUG|WIP)\.(md|txt)$/;
|
||||
|
||||
// Structured directories where even ad-hoc names are intentional
|
||||
const STRUCTURED_DIRS = /(^|\/)(docs|\.claude|\.github|commands|skills|benchmarks|templates|\.history|memory)\//;
|
||||
|
||||
function isSuspiciousDocPath(filePath) {
|
||||
const normalized = filePath.replace(/\\/g, '/');
|
||||
const basename = path.basename(filePath);
|
||||
const basename = path.basename(normalized);
|
||||
|
||||
if (!/\.(md|txt)$/i.test(filePath)) return true;
|
||||
// Only inspect .md and .txt files (case-sensitive, consistent with ADHOC_FILENAMES)
|
||||
if (!/\.(md|txt)$/.test(basename)) return false;
|
||||
|
||||
if (/^(README|CLAUDE|AGENTS|CONTRIBUTING|CHANGELOG|LICENSE|SKILL|MEMORY|WORKLOG)\.md$/i.test(basename)) {
|
||||
return true;
|
||||
}
|
||||
// Only flag known ad-hoc filenames
|
||||
if (!ADHOC_FILENAMES.test(basename)) return false;
|
||||
|
||||
if (/\.claude\/(commands|plans|projects)\//.test(normalized)) {
|
||||
return true;
|
||||
}
|
||||
// Allow ad-hoc names inside structured directories (intentional usage)
|
||||
if (STRUCTURED_DIRS.test(normalized)) return false;
|
||||
|
||||
if (/(^|\/)(docs|skills|\.history|memory)\//.test(normalized)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (/\.plan\.md$/i.test(basename)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Exportable run() for in-process execution via run-with-flags.js.
|
||||
* Avoids the ~50-100ms spawnSync overhead when available.
|
||||
*/
|
||||
function run(inputOrRaw, _options = {}) {
|
||||
let input;
|
||||
try {
|
||||
input = typeof inputOrRaw === 'string'
|
||||
? (inputOrRaw.trim() ? JSON.parse(inputOrRaw) : {})
|
||||
: (inputOrRaw || {});
|
||||
} catch {
|
||||
return { exitCode: 0 };
|
||||
}
|
||||
const filePath = String(input?.tool_input?.file_path || '');
|
||||
|
||||
if (filePath && isSuspiciousDocPath(filePath)) {
|
||||
return {
|
||||
exitCode: 0,
|
||||
stderr:
|
||||
'[Hook] WARNING: Ad-hoc documentation filename detected\n' +
|
||||
`[Hook] File: ${filePath}\n` +
|
||||
'[Hook] Consider using a structured path (e.g. docs/, .claude/, skills/, .github/, benchmarks/, templates/)',
|
||||
};
|
||||
}
|
||||
|
||||
return { exitCode: 0 };
|
||||
}
|
||||
|
||||
module.exports = { run };
|
||||
|
||||
// Stdin fallback for spawnSync execution
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', c => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
@@ -46,17 +80,10 @@ process.stdin.on('data', c => {
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
try {
|
||||
const input = JSON.parse(data);
|
||||
const filePath = String(input.tool_input?.file_path || '');
|
||||
const result = run(data);
|
||||
|
||||
if (filePath && !isAllowedDocPath(filePath)) {
|
||||
console.error('[Hook] WARNING: Non-standard documentation file detected');
|
||||
console.error(`[Hook] File: ${filePath}`);
|
||||
console.error('[Hook] Consider consolidating into README.md or docs/ directory');
|
||||
}
|
||||
} catch {
|
||||
// ignore parse errors
|
||||
if (result.stderr) {
|
||||
process.stderr.write(result.stderr + '\n');
|
||||
}
|
||||
|
||||
process.stdout.write(data);
|
||||
|
||||
405
scripts/hooks/pre-bash-commit-quality.js
Normal file
405
scripts/hooks/pre-bash-commit-quality.js
Normal file
@@ -0,0 +1,405 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* PreToolUse Hook: Pre-commit Quality Check
|
||||
*
|
||||
* Runs quality checks before git commit commands:
|
||||
* - Detects staged files
|
||||
* - Runs linter on staged files (if available)
|
||||
* - Checks for common issues (console.log, TODO, etc.)
|
||||
* - Validates commit message format (if provided)
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Exit codes:
|
||||
* 0 - Success (allow commit)
|
||||
* 2 - Block commit (quality issues found)
|
||||
*/
|
||||
|
||||
const { spawnSync } = require('child_process');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024; // 1MB limit
|
||||
|
||||
/**
|
||||
* Detect staged files for commit
|
||||
* @returns {string[]} Array of staged file paths
|
||||
*/
|
||||
function getStagedFiles() {
|
||||
const result = spawnSync('git', ['diff', '--cached', '--name-only', '--diff-filter=ACMR'], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
if (result.status !== 0) {
|
||||
return [];
|
||||
}
|
||||
return result.stdout.trim().split('\n').filter(f => f.length > 0);
|
||||
}
|
||||
|
||||
function getStagedFileContent(filePath) {
|
||||
const result = spawnSync('git', ['show', `:${filePath}`], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
if (result.status !== 0) {
|
||||
return null;
|
||||
}
|
||||
return result.stdout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a file should be quality-checked
|
||||
* @param {string} filePath
|
||||
* @returns {boolean}
|
||||
*/
|
||||
function shouldCheckFile(filePath) {
|
||||
const checkableExtensions = ['.js', '.jsx', '.ts', '.tsx', '.py', '.go', '.rs'];
|
||||
return checkableExtensions.some(ext => filePath.endsWith(ext));
|
||||
}
|
||||
|
||||
/**
|
||||
* Find issues in file content
|
||||
* @param {string} filePath
|
||||
* @returns {object[]} Array of issues found
|
||||
*/
|
||||
function findFileIssues(filePath) {
|
||||
const issues = [];
|
||||
|
||||
try {
|
||||
const content = getStagedFileContent(filePath);
|
||||
if (content == null) {
|
||||
return issues;
|
||||
}
|
||||
const lines = content.split('\n');
|
||||
|
||||
lines.forEach((line, index) => {
|
||||
const lineNum = index + 1;
|
||||
|
||||
// Check for console.log
|
||||
if (line.includes('console.log') && !line.trim().startsWith('//') && !line.trim().startsWith('*')) {
|
||||
issues.push({
|
||||
type: 'console.log',
|
||||
message: `console.log found at line ${lineNum}`,
|
||||
line: lineNum,
|
||||
severity: 'warning'
|
||||
});
|
||||
}
|
||||
|
||||
// Check for debugger statements
|
||||
if (/\bdebugger\b/.test(line) && !line.trim().startsWith('//')) {
|
||||
issues.push({
|
||||
type: 'debugger',
|
||||
message: `debugger statement at line ${lineNum}`,
|
||||
line: lineNum,
|
||||
severity: 'error'
|
||||
});
|
||||
}
|
||||
|
||||
// Check for TODO/FIXME without issue reference
|
||||
const todoMatch = line.match(/\/\/\s*(TODO|FIXME):?\s*(.+)/);
|
||||
if (todoMatch && !todoMatch[2].match(/#\d+|issue/i)) {
|
||||
issues.push({
|
||||
type: 'todo',
|
||||
message: `TODO/FIXME without issue reference at line ${lineNum}: "${todoMatch[2].trim()}"`,
|
||||
line: lineNum,
|
||||
severity: 'info'
|
||||
});
|
||||
}
|
||||
|
||||
// Check for hardcoded secrets (basic patterns)
|
||||
const secretPatterns = [
|
||||
{ pattern: /sk-[a-zA-Z0-9]{20,}/, name: 'OpenAI API key' },
|
||||
{ pattern: /ghp_[a-zA-Z0-9]{36}/, name: 'GitHub PAT' },
|
||||
{ pattern: /AKIA[A-Z0-9]{16}/, name: 'AWS Access Key' },
|
||||
{ pattern: /api[_-]?key\s*[=:]\s*['"][^'"]+['"]/i, name: 'API key' }
|
||||
];
|
||||
|
||||
for (const { pattern, name } of secretPatterns) {
|
||||
if (pattern.test(line)) {
|
||||
issues.push({
|
||||
type: 'secret',
|
||||
message: `Potential ${name} exposed at line ${lineNum}`,
|
||||
line: lineNum,
|
||||
severity: 'error'
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch {
|
||||
// File not readable, skip
|
||||
}
|
||||
|
||||
return issues;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate commit message format
|
||||
* @param {string} command
|
||||
* @returns {object|null} Validation result or null if no message to validate
|
||||
*/
|
||||
function validateCommitMessage(command) {
|
||||
// Extract commit message from command
|
||||
const messageMatch = command.match(/(?:-m|--message)[=\s]+["']?([^"']+)["']?/);
|
||||
if (!messageMatch) return null;
|
||||
|
||||
const message = messageMatch[1];
|
||||
const issues = [];
|
||||
|
||||
// Check conventional commit format
|
||||
const conventionalCommit = /^(feat|fix|docs|style|refactor|test|chore|build|ci|perf|revert)(\(.+\))?:\s*.+/;
|
||||
if (!conventionalCommit.test(message)) {
|
||||
issues.push({
|
||||
type: 'format',
|
||||
message: 'Commit message does not follow conventional commit format',
|
||||
suggestion: 'Use format: type(scope): description (e.g., "feat(auth): add login flow")'
|
||||
});
|
||||
}
|
||||
|
||||
// Check message length
|
||||
if (message.length > 72) {
|
||||
issues.push({
|
||||
type: 'length',
|
||||
message: `Commit message too long (${message.length} chars, max 72)`,
|
||||
suggestion: 'Keep the first line under 72 characters'
|
||||
});
|
||||
}
|
||||
|
||||
// Check for lowercase first letter (conventional)
|
||||
if (conventionalCommit.test(message)) {
|
||||
const afterColon = message.split(':')[1];
|
||||
if (afterColon && /^[A-Z]/.test(afterColon.trim())) {
|
||||
issues.push({
|
||||
type: 'capitalization',
|
||||
message: 'Subject should start with lowercase after type',
|
||||
suggestion: 'Use lowercase for the first letter of the subject'
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Check for trailing period
|
||||
if (message.endsWith('.')) {
|
||||
issues.push({
|
||||
type: 'punctuation',
|
||||
message: 'Commit message should not end with a period',
|
||||
suggestion: 'Remove the trailing period'
|
||||
});
|
||||
}
|
||||
|
||||
return { message, issues };
|
||||
}
|
||||
|
||||
/**
|
||||
* Run linter on staged files
|
||||
* @param {string[]} files
|
||||
* @returns {object} Lint results
|
||||
*/
|
||||
function runLinter(files) {
|
||||
const jsFiles = files.filter(f => /\.(js|jsx|ts|tsx)$/.test(f));
|
||||
const pyFiles = files.filter(f => f.endsWith('.py'));
|
||||
const goFiles = files.filter(f => f.endsWith('.go'));
|
||||
|
||||
const results = {
|
||||
eslint: null,
|
||||
pylint: null,
|
||||
golint: null
|
||||
};
|
||||
|
||||
// Run ESLint if available
|
||||
if (jsFiles.length > 0) {
|
||||
const eslintBin = process.platform === 'win32' ? 'eslint.cmd' : 'eslint';
|
||||
const eslintPath = path.join(process.cwd(), 'node_modules', '.bin', eslintBin);
|
||||
if (fs.existsSync(eslintPath)) {
|
||||
const result = spawnSync(eslintPath, ['--format', 'compact', ...jsFiles], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
});
|
||||
results.eslint = {
|
||||
success: result.status === 0,
|
||||
output: result.stdout || result.stderr
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Run Pylint if available
|
||||
if (pyFiles.length > 0) {
|
||||
try {
|
||||
const result = spawnSync('pylint', ['--output-format=text', ...pyFiles], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
});
|
||||
if (result.error && result.error.code === 'ENOENT') {
|
||||
results.pylint = null;
|
||||
} else {
|
||||
results.pylint = {
|
||||
success: result.status === 0,
|
||||
output: result.stdout || result.stderr
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
// Pylint not available
|
||||
}
|
||||
}
|
||||
|
||||
// Run golint if available
|
||||
if (goFiles.length > 0) {
|
||||
try {
|
||||
const result = spawnSync('golint', goFiles, {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
});
|
||||
if (result.error && result.error.code === 'ENOENT') {
|
||||
results.golint = null;
|
||||
} else {
|
||||
results.golint = {
|
||||
success: !result.stdout || result.stdout.trim() === '',
|
||||
output: result.stdout
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
// golint not available
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* Core logic — exported for direct invocation
|
||||
* @param {string} rawInput - Raw JSON string from stdin
|
||||
* @returns {{output:string, exitCode:number}} Pass-through output and exit code
|
||||
*/
|
||||
function evaluate(rawInput) {
|
||||
try {
|
||||
const input = JSON.parse(rawInput);
|
||||
const command = input.tool_input?.command || '';
|
||||
|
||||
// Only run for git commit commands
|
||||
if (!command.includes('git commit')) {
|
||||
return { output: rawInput, exitCode: 0 };
|
||||
}
|
||||
|
||||
// Check if this is an amend (skip checks for amends to avoid blocking)
|
||||
if (command.includes('--amend')) {
|
||||
return { output: rawInput, exitCode: 0 };
|
||||
}
|
||||
|
||||
// Get staged files
|
||||
const stagedFiles = getStagedFiles();
|
||||
|
||||
if (stagedFiles.length === 0) {
|
||||
console.error('[Hook] No staged files found. Use "git add" to stage files first.');
|
||||
return { output: rawInput, exitCode: 0 };
|
||||
}
|
||||
|
||||
console.error(`[Hook] Checking ${stagedFiles.length} staged file(s)...`);
|
||||
|
||||
// Check each staged file
|
||||
const filesToCheck = stagedFiles.filter(shouldCheckFile);
|
||||
let totalIssues = 0;
|
||||
let errorCount = 0;
|
||||
let warningCount = 0;
|
||||
let infoCount = 0;
|
||||
|
||||
for (const file of filesToCheck) {
|
||||
const fileIssues = findFileIssues(file);
|
||||
if (fileIssues.length > 0) {
|
||||
console.error(`\n📁 ${file}`);
|
||||
for (const issue of fileIssues) {
|
||||
const icon = issue.severity === 'error' ? '❌' : issue.severity === 'warning' ? '⚠️' : 'ℹ️';
|
||||
console.error(` ${icon} Line ${issue.line}: ${issue.message}`);
|
||||
totalIssues++;
|
||||
if (issue.severity === 'error') errorCount++;
|
||||
if (issue.severity === 'warning') warningCount++;
|
||||
if (issue.severity === 'info') infoCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Validate commit message if provided
|
||||
const messageValidation = validateCommitMessage(command);
|
||||
if (messageValidation && messageValidation.issues.length > 0) {
|
||||
console.error('\n📝 Commit Message Issues:');
|
||||
for (const issue of messageValidation.issues) {
|
||||
console.error(` ⚠️ ${issue.message}`);
|
||||
if (issue.suggestion) {
|
||||
console.error(` 💡 ${issue.suggestion}`);
|
||||
}
|
||||
totalIssues++;
|
||||
warningCount++;
|
||||
}
|
||||
}
|
||||
|
||||
// Run linter
|
||||
const lintResults = runLinter(filesToCheck);
|
||||
|
||||
if (lintResults.eslint && !lintResults.eslint.success) {
|
||||
console.error('\n🔍 ESLint Issues:');
|
||||
console.error(lintResults.eslint.output);
|
||||
totalIssues++;
|
||||
errorCount++;
|
||||
}
|
||||
|
||||
if (lintResults.pylint && !lintResults.pylint.success) {
|
||||
console.error('\n🔍 Pylint Issues:');
|
||||
console.error(lintResults.pylint.output);
|
||||
totalIssues++;
|
||||
errorCount++;
|
||||
}
|
||||
|
||||
if (lintResults.golint && !lintResults.golint.success) {
|
||||
console.error('\n🔍 golint Issues:');
|
||||
console.error(lintResults.golint.output);
|
||||
totalIssues++;
|
||||
errorCount++;
|
||||
}
|
||||
|
||||
// Summary
|
||||
if (totalIssues > 0) {
|
||||
console.error(`\n📊 Summary: ${totalIssues} issue(s) found (${errorCount} error(s), ${warningCount} warning(s), ${infoCount} info)`);
|
||||
|
||||
if (errorCount > 0) {
|
||||
console.error('\n[Hook] ❌ Commit blocked due to critical issues. Fix them before committing.');
|
||||
return { output: rawInput, exitCode: 2 };
|
||||
} else {
|
||||
console.error('\n[Hook] ⚠️ Warnings found. Consider fixing them, but commit is allowed.');
|
||||
console.error('[Hook] To bypass these checks, use: git commit --no-verify');
|
||||
}
|
||||
} else {
|
||||
console.error('\n[Hook] ✅ All checks passed!');
|
||||
}
|
||||
|
||||
} catch (error) {
|
||||
console.error(`[Hook] Error: ${error.message}`);
|
||||
// Non-blocking on error
|
||||
}
|
||||
|
||||
return { output: rawInput, exitCode: 0 };
|
||||
}
|
||||
|
||||
function run(rawInput) {
|
||||
return evaluate(rawInput).output;
|
||||
}
|
||||
|
||||
// ── stdin entry point ────────────────────────────────────────────
|
||||
if (require.main === module) {
|
||||
let data = '';
|
||||
process.stdin.setEncoding('utf8');
|
||||
|
||||
process.stdin.on('data', chunk => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - data.length;
|
||||
data += chunk.substring(0, remaining);
|
||||
}
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
const result = evaluate(data);
|
||||
process.stdout.write(result.output);
|
||||
process.exit(result.exitCode);
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = { run, evaluate };
|
||||
0
scripts/install-apply.js
Normal file → Executable file
0
scripts/install-apply.js
Normal file → Executable file
@@ -1,15 +1,109 @@
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const { writeInstallState } = require('../install-state');
|
||||
|
||||
function readJsonObject(filePath, label) {
|
||||
let parsed;
|
||||
try {
|
||||
parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to parse ${label} at ${filePath}: ${error.message}`);
|
||||
}
|
||||
|
||||
if (!parsed || typeof parsed !== 'object' || Array.isArray(parsed)) {
|
||||
throw new Error(`Invalid ${label} at ${filePath}: expected a JSON object`);
|
||||
}
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function mergeHookEntries(existingEntries, incomingEntries) {
|
||||
const mergedEntries = [];
|
||||
const seenEntries = new Set();
|
||||
|
||||
for (const entry of [...existingEntries, ...incomingEntries]) {
|
||||
const entryKey = JSON.stringify(entry);
|
||||
if (seenEntries.has(entryKey)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
seenEntries.add(entryKey);
|
||||
mergedEntries.push(entry);
|
||||
}
|
||||
|
||||
return mergedEntries;
|
||||
}
|
||||
|
||||
function findHooksSourcePath(plan, hooksDestinationPath) {
|
||||
const operation = plan.operations.find(item => item.destinationPath === hooksDestinationPath);
|
||||
return operation ? operation.sourcePath : null;
|
||||
}
|
||||
|
||||
function buildMergedSettings(plan) {
|
||||
if (!plan.adapter || plan.adapter.target !== 'claude') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hooksDestinationPath = path.join(plan.targetRoot, 'hooks', 'hooks.json');
|
||||
const hooksSourcePath = findHooksSourcePath(plan, hooksDestinationPath) || hooksDestinationPath;
|
||||
if (!fs.existsSync(hooksSourcePath)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hooksConfig = readJsonObject(hooksSourcePath, 'hooks config');
|
||||
const incomingHooks = hooksConfig.hooks;
|
||||
if (!incomingHooks || typeof incomingHooks !== 'object' || Array.isArray(incomingHooks)) {
|
||||
throw new Error(`Invalid hooks config at ${hooksSourcePath}: expected "hooks" to be a JSON object`);
|
||||
}
|
||||
|
||||
const settingsPath = path.join(plan.targetRoot, 'settings.json');
|
||||
let settings = {};
|
||||
if (fs.existsSync(settingsPath)) {
|
||||
settings = readJsonObject(settingsPath, 'existing settings');
|
||||
}
|
||||
|
||||
const existingHooks = settings.hooks && typeof settings.hooks === 'object' && !Array.isArray(settings.hooks)
|
||||
? settings.hooks
|
||||
: {};
|
||||
const mergedHooks = { ...existingHooks };
|
||||
|
||||
for (const [eventName, incomingEntries] of Object.entries(incomingHooks)) {
|
||||
const currentEntries = Array.isArray(existingHooks[eventName]) ? existingHooks[eventName] : [];
|
||||
const nextEntries = Array.isArray(incomingEntries) ? incomingEntries : [];
|
||||
mergedHooks[eventName] = mergeHookEntries(currentEntries, nextEntries);
|
||||
}
|
||||
|
||||
const mergedSettings = {
|
||||
...settings,
|
||||
hooks: mergedHooks,
|
||||
};
|
||||
|
||||
return {
|
||||
settingsPath,
|
||||
mergedSettings,
|
||||
};
|
||||
}
|
||||
|
||||
function applyInstallPlan(plan) {
|
||||
const mergedSettingsPlan = buildMergedSettings(plan);
|
||||
|
||||
for (const operation of plan.operations) {
|
||||
fs.mkdirSync(require('path').dirname(operation.destinationPath), { recursive: true });
|
||||
fs.mkdirSync(path.dirname(operation.destinationPath), { recursive: true });
|
||||
fs.copyFileSync(operation.sourcePath, operation.destinationPath);
|
||||
}
|
||||
|
||||
if (mergedSettingsPlan) {
|
||||
fs.mkdirSync(path.dirname(mergedSettingsPlan.settingsPath), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
mergedSettingsPlan.settingsPath,
|
||||
JSON.stringify(mergedSettingsPlan.mergedSettings, null, 2) + '\n',
|
||||
'utf8'
|
||||
);
|
||||
}
|
||||
|
||||
writeInstallState(plan.installStatePath, plan.statePreview);
|
||||
|
||||
return {
|
||||
|
||||
@@ -4,7 +4,6 @@ set -euo pipefail
|
||||
# Sync Everything Claude Code (ECC) assets into a local Codex CLI setup.
|
||||
# - Backs up ~/.codex config and AGENTS.md
|
||||
# - Merges ECC AGENTS.md into existing AGENTS.md (marker-based, preserves user content)
|
||||
# - Syncs Codex-ready skills from .agents/skills
|
||||
# - Generates prompt files from commands/*.md
|
||||
# - Generates Codex QA wrappers and optional language rule-pack prompts
|
||||
# - Installs global git safety hooks (pre-commit and pre-push)
|
||||
@@ -28,8 +27,6 @@ CONFIG_FILE="$CODEX_HOME/config.toml"
|
||||
AGENTS_FILE="$CODEX_HOME/AGENTS.md"
|
||||
AGENTS_ROOT_SRC="$REPO_ROOT/AGENTS.md"
|
||||
AGENTS_CODEX_SUPP_SRC="$REPO_ROOT/.codex/AGENTS.md"
|
||||
SKILLS_SRC="$REPO_ROOT/.agents/skills"
|
||||
SKILLS_DEST="$CODEX_HOME/skills"
|
||||
PROMPTS_SRC="$REPO_ROOT/commands"
|
||||
PROMPTS_DEST="$CODEX_HOME/prompts"
|
||||
HOOKS_INSTALLER="$REPO_ROOT/scripts/codex/install-global-git-hooks.sh"
|
||||
@@ -133,7 +130,6 @@ MCP_MERGE_SCRIPT="$REPO_ROOT/scripts/codex/merge-mcp-config.js"
|
||||
|
||||
require_path "$REPO_ROOT/AGENTS.md" "ECC AGENTS.md"
|
||||
require_path "$AGENTS_CODEX_SUPP_SRC" "ECC Codex AGENTS supplement"
|
||||
require_path "$SKILLS_SRC" "ECC skills directory"
|
||||
require_path "$PROMPTS_SRC" "ECC commands directory"
|
||||
require_path "$HOOKS_INSTALLER" "ECC global git hooks installer"
|
||||
require_path "$SANITY_CHECKER" "ECC global sanity checker"
|
||||
@@ -235,17 +231,9 @@ else
|
||||
fi
|
||||
fi
|
||||
|
||||
log "Syncing ECC Codex skills"
|
||||
run_or_echo mkdir -p "$SKILLS_DEST"
|
||||
skills_count=0
|
||||
for skill_dir in "$SKILLS_SRC"/*; do
|
||||
[[ -d "$skill_dir" ]] || continue
|
||||
skill_name="$(basename "$skill_dir")"
|
||||
dest="$SKILLS_DEST/$skill_name"
|
||||
run_or_echo rm -rf "$dest"
|
||||
run_or_echo cp -R "$skill_dir" "$dest"
|
||||
skills_count=$((skills_count + 1))
|
||||
done
|
||||
# Skills are NOT synced here — Codex CLI reads directly from
|
||||
# ~/.agents/skills/ (installed by ECC installer / npx skills).
|
||||
# Copying into ~/.codex/skills/ was unnecessary.
|
||||
|
||||
log "Generating prompt files from ECC commands"
|
||||
run_or_echo mkdir -p "$PROMPTS_DEST"
|
||||
@@ -486,7 +474,6 @@ fi
|
||||
|
||||
log "Sync complete"
|
||||
log "Backup saved at: $BACKUP_DIR"
|
||||
log "Skills synced: $skills_count"
|
||||
log "Prompts generated: $((prompt_count + extension_count)) (commands: $prompt_count, extensions: $extension_count)"
|
||||
|
||||
if [[ "$MODE" == "apply" ]]; then
|
||||
|
||||
178
skills/agent-payment-x402/SKILL.md
Normal file
178
skills/agent-payment-x402/SKILL.md
Normal file
@@ -0,0 +1,178 @@
|
||||
---
|
||||
name: agent-payment-x402
|
||||
description: Add x402 payment execution to AI agents — per-task budgets, spending controls, and non-custodial wallets via MCP tools. Use when agents need to pay for APIs, services, or other agents.
|
||||
origin: community
|
||||
---
|
||||
|
||||
# Agent Payment Execution (x402)
|
||||
|
||||
Enable AI agents to make autonomous payments with built-in spending controls. Uses the x402 HTTP payment protocol and MCP tools so agents can pay for external services, APIs, or other agents without custodial risk.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use when: your agent needs to pay for an API call, purchase a service, settle with another agent, enforce per-task spending limits, or manage a non-custodial wallet. Pairs naturally with cost-aware-llm-pipeline and security-review skills.
|
||||
|
||||
## How It Works
|
||||
|
||||
### x402 Protocol
|
||||
x402 extends HTTP 402 (Payment Required) into a machine-negotiable flow. When a server returns `402`, the agent's payment tool automatically negotiates price, checks budget, signs a transaction, and retries — no human in the loop.
|
||||
|
||||
### Spending Controls
|
||||
Every payment tool call enforces a `SpendingPolicy`:
|
||||
- **Per-task budget** — max spend for a single agent action
|
||||
- **Per-session budget** — cumulative limit across an entire session
|
||||
- **Allowlisted recipients** — restrict which addresses/services the agent can pay
|
||||
- **Rate limits** — max transactions per minute/hour
|
||||
|
||||
### Non-Custodial Wallets
|
||||
Agents hold their own keys via ERC-4337 smart accounts. The orchestrator sets policy before delegation; the agent can only spend within bounds. No pooled funds, no custodial risk.
|
||||
|
||||
## MCP Integration
|
||||
|
||||
The payment layer exposes standard MCP tools that slot into any Claude Code or agent harness setup.
|
||||
|
||||
> **Security note**: Always pin the package version. This tool manages private keys — unpinned `npx` installs introduce supply-chain risk.
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"agentpay": {
|
||||
"command": "npx",
|
||||
"args": ["agentwallet-sdk@6.0.0"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Available Tools (agent-callable)
|
||||
|
||||
| Tool | Purpose |
|
||||
|------|---------|
|
||||
| `get_balance` | Check agent wallet balance |
|
||||
| `send_payment` | Send payment to address or ENS |
|
||||
| `check_spending` | Query remaining budget |
|
||||
| `list_transactions` | Audit trail of all payments |
|
||||
|
||||
> **Note**: Spending policy is set by the **orchestrator** before delegating to the agent — not by the agent itself. This prevents agents from escalating their own spending limits. Configure policy via `set_policy` in your orchestration layer or pre-task hook, never as an agent-callable tool.
|
||||
|
||||
## Examples
|
||||
|
||||
### Budget enforcement in an MCP client
|
||||
|
||||
When building an orchestrator that calls the agentpay MCP server, enforce budgets before dispatching paid tool calls.
|
||||
|
||||
> **Prerequisites**: Install the package before adding the MCP config — `npx` without `-y` will prompt for confirmation in non-interactive environments, causing the server to hang: `npm install -g agentwallet-sdk@6.0.0`
|
||||
|
||||
```typescript
|
||||
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
||||
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
||||
|
||||
async function main() {
|
||||
// 1. Validate credentials before constructing the transport.
|
||||
// A missing key must fail immediately — never let the subprocess start without auth.
|
||||
const walletKey = process.env.WALLET_PRIVATE_KEY;
|
||||
if (!walletKey) {
|
||||
throw new Error("WALLET_PRIVATE_KEY is not set — refusing to start payment server");
|
||||
}
|
||||
|
||||
// Connect to the agentpay MCP server via stdio transport.
|
||||
// Whitelist only the env vars the server needs — never forward all of process.env
|
||||
// to a third-party subprocess that manages private keys.
|
||||
const transport = new StdioClientTransport({
|
||||
command: "npx",
|
||||
args: ["agentwallet-sdk@6.0.0"],
|
||||
env: {
|
||||
PATH: process.env.PATH ?? "",
|
||||
NODE_ENV: process.env.NODE_ENV ?? "production",
|
||||
WALLET_PRIVATE_KEY: walletKey,
|
||||
},
|
||||
});
|
||||
const agentpay = new Client({ name: "orchestrator", version: "1.0.0" });
|
||||
await agentpay.connect(transport);
|
||||
|
||||
// 2. Set spending policy before delegating to the agent.
|
||||
// Always verify success — a silent failure means no controls are active.
|
||||
const policyResult = await agentpay.callTool({
|
||||
name: "set_policy",
|
||||
arguments: {
|
||||
per_task_budget: 0.50,
|
||||
per_session_budget: 5.00,
|
||||
allowlisted_recipients: ["api.example.com"],
|
||||
},
|
||||
});
|
||||
if (policyResult.isError) {
|
||||
throw new Error(
|
||||
`Failed to set spending policy — do not delegate: ${JSON.stringify(policyResult.content)}`
|
||||
);
|
||||
}
|
||||
|
||||
// 3. Use preToolCheck before any paid action
|
||||
await preToolCheck(agentpay, 0.01);
|
||||
}
|
||||
|
||||
// Pre-tool hook: fail-closed budget enforcement with four distinct error paths.
|
||||
async function preToolCheck(agentpay: Client, apiCost: number): Promise<void> {
|
||||
// Path 1: Reject invalid input (NaN/Infinity bypass the < comparison)
|
||||
if (!Number.isFinite(apiCost) || apiCost < 0) {
|
||||
throw new Error(`Invalid apiCost: ${apiCost} — action blocked`);
|
||||
}
|
||||
|
||||
// Path 2: Transport/connectivity failure
|
||||
let result;
|
||||
try {
|
||||
result = await agentpay.callTool({ name: "check_spending" });
|
||||
} catch (err) {
|
||||
throw new Error(`Payment service unreachable — action blocked: ${err}`);
|
||||
}
|
||||
|
||||
// Path 3: Tool returned an error (e.g., auth failure, wallet not initialised)
|
||||
if (result.isError) {
|
||||
throw new Error(
|
||||
`check_spending failed — action blocked: ${JSON.stringify(result.content)}`
|
||||
);
|
||||
}
|
||||
|
||||
// Path 4: Parse and validate the response shape
|
||||
let remaining: number;
|
||||
try {
|
||||
const parsed = JSON.parse(
|
||||
(result.content as Array<{ text: string }>)[0].text
|
||||
);
|
||||
if (!Number.isFinite(parsed?.remaining)) {
|
||||
throw new TypeError("missing or non-finite 'remaining' field");
|
||||
}
|
||||
remaining = parsed.remaining;
|
||||
} catch (err) {
|
||||
throw new Error(
|
||||
`check_spending returned unexpected format — action blocked: ${err}`
|
||||
);
|
||||
}
|
||||
|
||||
// Path 5: Budget exceeded
|
||||
if (remaining < apiCost) {
|
||||
throw new Error(
|
||||
`Budget exceeded: need $${apiCost} but only $${remaining} remaining`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err);
|
||||
process.exitCode = 1;
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Set budgets before delegation**: When spawning sub-agents, attach a SpendingPolicy via your orchestration layer. Never give an agent unlimited spend.
|
||||
- **Pin your dependencies**: Always specify an exact version in your MCP config (e.g., `agentwallet-sdk@6.0.0`). Verify package integrity before deploying to production.
|
||||
- **Audit trails**: Use `list_transactions` in post-task hooks to log what was spent and why.
|
||||
- **Fail closed**: If the payment tool is unreachable, block the paid action — don't fall back to unmetered access.
|
||||
- **Pair with security-review**: Payment tools are high-privilege. Apply the same scrutiny as shell access.
|
||||
- **Test with testnets first**: Use Base Sepolia for development; switch to Base mainnet for production.
|
||||
|
||||
## Production Reference
|
||||
|
||||
- **npm**: [`agentwallet-sdk`](https://www.npmjs.com/package/agentwallet-sdk)
|
||||
- **Merged into NVIDIA NeMo Agent Toolkit**: [PR #17](https://github.com/NVIDIA/NeMo-Agent-Toolkit-Examples/pull/17) — x402 payment tool for NVIDIA's agent examples
|
||||
- **Protocol spec**: [x402.org](https://x402.org)
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: benchmark
|
||||
description: Use this skill to measure performance baselines, detect regressions before/after PRs, and compare stack alternatives.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Benchmark — Performance Baseline & Regression Detection
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: browser-qa
|
||||
description: Use this skill to automate visual testing and UI interaction verification using browser automation after deploying features.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Browser QA — Automated Visual Testing & Interaction
|
||||
|
||||
## When to Use
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: canary-watch
|
||||
description: Use this skill to monitor a deployed URL for regressions after deploys, merges, or dependency upgrades.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Canary Watch — Post-Deploy Monitoring
|
||||
|
||||
## When to Use
|
||||
|
||||
147
skills/ck/SKILL.md
Normal file
147
skills/ck/SKILL.md
Normal file
@@ -0,0 +1,147 @@
|
||||
---
|
||||
name: ck
|
||||
description: Persistent per-project memory for Claude Code. Auto-loads project context on session start, tracks sessions with git activity, and writes to native memory. Commands run deterministic Node.js scripts — behavior is consistent across model versions.
|
||||
origin: community
|
||||
version: 2.0.0
|
||||
author: sreedhargs89
|
||||
repo: https://github.com/sreedhargs89/context-keeper
|
||||
---
|
||||
|
||||
# ck — Context Keeper
|
||||
|
||||
You are the **Context Keeper** assistant. When the user invokes any `/ck:*` command,
|
||||
run the corresponding Node.js script and present its stdout to the user verbatim.
|
||||
Scripts live at: `~/.claude/skills/ck/commands/` (expand `~` with `$HOME`).
|
||||
|
||||
---
|
||||
|
||||
## Data Layout
|
||||
|
||||
```
|
||||
~/.claude/ck/
|
||||
├── projects.json ← path → {name, contextDir, lastUpdated}
|
||||
└── contexts/<name>/
|
||||
├── context.json ← SOURCE OF TRUTH (structured JSON, v2)
|
||||
└── CONTEXT.md ← generated view — do not hand-edit
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Commands
|
||||
|
||||
### `/ck:init` — Register a Project
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/init.mjs"
|
||||
```
|
||||
The script outputs JSON with auto-detected info. Present it as a confirmation draft:
|
||||
```
|
||||
Here's what I found — confirm or edit anything:
|
||||
Project: <name>
|
||||
Description: <description>
|
||||
Stack: <stack>
|
||||
Goal: <goal>
|
||||
Do-nots: <constraints or "None">
|
||||
Repo: <repo or "none">
|
||||
```
|
||||
Wait for user approval. Apply any edits. Then pipe confirmed JSON to save.mjs --init:
|
||||
```bash
|
||||
echo '<confirmed-json>' | node "$HOME/.claude/skills/ck/commands/save.mjs" --init
|
||||
```
|
||||
Confirmed JSON schema: `{"name":"...","path":"...","description":"...","stack":["..."],"goal":"...","constraints":["..."],"repo":"..." }`
|
||||
|
||||
---
|
||||
|
||||
### `/ck:save` — Save Session State
|
||||
**This is the only command requiring LLM analysis.** Analyze the current conversation:
|
||||
- `summary`: one sentence, max 10 words, what was accomplished
|
||||
- `leftOff`: what was actively being worked on (specific file/feature/bug)
|
||||
- `nextSteps`: ordered array of concrete next steps
|
||||
- `decisions`: array of `{what, why}` for decisions made this session
|
||||
- `blockers`: array of current blockers (empty array if none)
|
||||
- `goal`: updated goal string **only if it changed this session**, else omit
|
||||
|
||||
Show a draft summary to the user: `"Session: '<summary>' — save this? (yes / edit)"`
|
||||
Wait for confirmation. Then pipe to save.mjs:
|
||||
```bash
|
||||
echo '<json>' | node "$HOME/.claude/skills/ck/commands/save.mjs"
|
||||
```
|
||||
JSON schema (exact): `{"summary":"...","leftOff":"...","nextSteps":["..."],"decisions":[{"what":"...","why":"..."}],"blockers":["..."]}`
|
||||
Display the script's stdout confirmation verbatim.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:resume [name|number]` — Full Briefing
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/resume.mjs" [arg]
|
||||
```
|
||||
Display output verbatim. Then ask: "Continue from here? Or has anything changed?"
|
||||
If user reports changes → run `/ck:save` immediately.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:info [name|number]` — Quick Snapshot
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/info.mjs" [arg]
|
||||
```
|
||||
Display output verbatim. No follow-up question.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:list` — Portfolio View
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/list.mjs"
|
||||
```
|
||||
Display output verbatim. If user replies with a number or name → run `/ck:resume`.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:forget [name|number]` — Remove a Project
|
||||
First resolve the project name (run `/ck:list` if needed).
|
||||
Ask: `"This will permanently delete context for '<name>'. Are you sure? (yes/no)"`
|
||||
If yes:
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/forget.mjs" [name]
|
||||
```
|
||||
Display confirmation verbatim.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:migrate` — Convert v1 Data to v2
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/migrate.mjs"
|
||||
```
|
||||
For a dry run first:
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/migrate.mjs" --dry-run
|
||||
```
|
||||
Display output verbatim. Migrates all v1 CONTEXT.md + meta.json files to v2 context.json.
|
||||
Originals are backed up as `meta.json.v1-backup` — nothing is deleted.
|
||||
|
||||
---
|
||||
|
||||
## SessionStart Hook
|
||||
|
||||
The hook at `~/.claude/skills/ck/hooks/session-start.mjs` must be registered in
|
||||
`~/.claude/settings.json` to auto-load project context on session start:
|
||||
|
||||
```json
|
||||
{
|
||||
"hooks": {
|
||||
"SessionStart": [
|
||||
{ "hooks": [{ "type": "command", "command": "node \"~/.claude/skills/ck/hooks/session-start.mjs\"" }] }
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The hook injects ~100 tokens per session (compact 5-line summary). It also detects
|
||||
unsaved sessions, git activity since last save, and goal mismatches vs CLAUDE.md.
|
||||
|
||||
---
|
||||
|
||||
## Rules
|
||||
- Always expand `~` as `$HOME` in Bash calls.
|
||||
- Commands are case-insensitive: `/CK:SAVE`, `/ck:save`, `/Ck:Save` all work.
|
||||
- If a script exits with code 1, display its stdout as an error message.
|
||||
- Never edit `context.json` or `CONTEXT.md` directly — always use the scripts.
|
||||
- If `projects.json` is malformed, tell the user and offer to reset it to `{}`.
|
||||
44
skills/ck/commands/forget.mjs
Normal file
44
skills/ck/commands/forget.mjs
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* forget.mjs — remove a project's context and registry entry
|
||||
*
|
||||
* Usage: node forget.mjs [name|number]
|
||||
* stdout: confirmation or error
|
||||
* exit 0: success exit 1: not found
|
||||
*
|
||||
* Note: SKILL.md instructs Claude to ask "Are you sure?" before calling this script.
|
||||
* This script is the "do it" step — no confirmation prompt here.
|
||||
*/
|
||||
|
||||
import { rmSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import { resolveContext, readProjects, writeProjects, CONTEXTS_DIR } from './shared.mjs';
|
||||
|
||||
const arg = process.argv[2];
|
||||
const cwd = process.env.PWD || process.cwd();
|
||||
|
||||
const resolved = resolveContext(arg, cwd);
|
||||
if (!resolved) {
|
||||
const hint = arg ? `No project matching "${arg}".` : 'This directory is not registered.';
|
||||
console.log(`${hint}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const { name, contextDir, projectPath } = resolved;
|
||||
|
||||
// Remove context directory
|
||||
const contextDirPath = resolve(CONTEXTS_DIR, contextDir);
|
||||
try {
|
||||
rmSync(contextDirPath, { recursive: true, force: true });
|
||||
} catch (e) {
|
||||
console.log(`ck: could not remove context directory — ${e.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Remove from projects.json
|
||||
const projects = readProjects();
|
||||
delete projects[projectPath];
|
||||
writeProjects(projects);
|
||||
|
||||
console.log(`✓ Context for '${name}' removed.`);
|
||||
24
skills/ck/commands/info.mjs
Normal file
24
skills/ck/commands/info.mjs
Normal file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* info.mjs — quick read-only context snapshot
|
||||
*
|
||||
* Usage: node info.mjs [name|number]
|
||||
* stdout: compact info block
|
||||
* exit 0: success exit 1: not found
|
||||
*/
|
||||
|
||||
import { resolveContext, renderInfoBlock } from './shared.mjs';
|
||||
|
||||
const arg = process.argv[2];
|
||||
const cwd = process.env.PWD || process.cwd();
|
||||
|
||||
const resolved = resolveContext(arg, cwd);
|
||||
if (!resolved) {
|
||||
const hint = arg ? `No project matching "${arg}".` : 'This directory is not registered.';
|
||||
console.log(`${hint} Run /ck:init to register it.`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log('');
|
||||
console.log(renderInfoBlock(resolved.context));
|
||||
143
skills/ck/commands/init.mjs
Normal file
143
skills/ck/commands/init.mjs
Normal file
@@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* init.mjs — auto-detect project info and output JSON for Claude to confirm
|
||||
*
|
||||
* Usage: node init.mjs
|
||||
* stdout: JSON with auto-detected project info
|
||||
* exit 0: success exit 1: error
|
||||
*/
|
||||
|
||||
import { readFileSync, existsSync } from 'fs';
|
||||
import { resolve, basename } from 'path';
|
||||
import { readProjects } from './shared.mjs';
|
||||
|
||||
const cwd = process.env.PWD || process.cwd();
|
||||
const projects = readProjects();
|
||||
|
||||
const output = {
|
||||
path: cwd,
|
||||
name: null,
|
||||
description: null,
|
||||
stack: [],
|
||||
goal: null,
|
||||
constraints: [],
|
||||
repo: null,
|
||||
alreadyRegistered: !!projects[cwd],
|
||||
};
|
||||
|
||||
function readFile(filename) {
|
||||
const p = resolve(cwd, filename);
|
||||
if (!existsSync(p)) return null;
|
||||
try { return readFileSync(p, 'utf8'); } catch { return null; }
|
||||
}
|
||||
|
||||
function extractSection(md, heading) {
|
||||
const re = new RegExp(`## ${heading}\\n([\\s\\S]*?)(?=\\n## |$)`);
|
||||
const m = md.match(re);
|
||||
return m ? m[1].trim() : null;
|
||||
}
|
||||
|
||||
// ── package.json ──────────────────────────────────────────────────────────────
|
||||
const pkg = readFile('package.json');
|
||||
if (pkg) {
|
||||
try {
|
||||
const parsed = JSON.parse(pkg);
|
||||
if (parsed.name && !output.name) output.name = parsed.name;
|
||||
if (parsed.description && !output.description) output.description = parsed.description;
|
||||
|
||||
// Detect stack from dependencies
|
||||
const deps = Object.keys({ ...(parsed.dependencies || {}), ...(parsed.devDependencies || {}) });
|
||||
const stackMap = {
|
||||
next: 'Next.js', react: 'React', vue: 'Vue', svelte: 'Svelte', astro: 'Astro',
|
||||
express: 'Express', fastify: 'Fastify', hono: 'Hono', nestjs: 'NestJS',
|
||||
typescript: 'TypeScript', prisma: 'Prisma', drizzle: 'Drizzle',
|
||||
'@neondatabase/serverless': 'Neon', '@upstash/redis': 'Upstash Redis',
|
||||
'@clerk/nextjs': 'Clerk', stripe: 'Stripe', tailwindcss: 'Tailwind CSS',
|
||||
};
|
||||
for (const [dep, label] of Object.entries(stackMap)) {
|
||||
if (deps.includes(dep) && !output.stack.includes(label)) {
|
||||
output.stack.push(label);
|
||||
}
|
||||
}
|
||||
if (deps.includes('typescript') || existsSync(resolve(cwd, 'tsconfig.json'))) {
|
||||
if (!output.stack.includes('TypeScript')) output.stack.push('TypeScript');
|
||||
}
|
||||
} catch { /* malformed package.json */ }
|
||||
}
|
||||
|
||||
// ── go.mod ────────────────────────────────────────────────────────────────────
|
||||
const goMod = readFile('go.mod');
|
||||
if (goMod) {
|
||||
if (!output.stack.includes('Go')) output.stack.push('Go');
|
||||
const modName = goMod.match(/^module\s+(\S+)/m)?.[1];
|
||||
if (modName && !output.name) output.name = modName.split('/').pop();
|
||||
}
|
||||
|
||||
// ── Cargo.toml ────────────────────────────────────────────────────────────────
|
||||
const cargo = readFile('Cargo.toml');
|
||||
if (cargo) {
|
||||
if (!output.stack.includes('Rust')) output.stack.push('Rust');
|
||||
const crateName = cargo.match(/^name\s*=\s*"(.+?)"/m)?.[1];
|
||||
if (crateName && !output.name) output.name = crateName;
|
||||
}
|
||||
|
||||
// ── pyproject.toml ────────────────────────────────────────────────────────────
|
||||
const pyproject = readFile('pyproject.toml');
|
||||
if (pyproject) {
|
||||
if (!output.stack.includes('Python')) output.stack.push('Python');
|
||||
const pyName = pyproject.match(/^name\s*=\s*"(.+?)"/m)?.[1];
|
||||
if (pyName && !output.name) output.name = pyName;
|
||||
}
|
||||
|
||||
// ── .git/config (repo URL) ────────────────────────────────────────────────────
|
||||
const gitConfig = readFile('.git/config');
|
||||
if (gitConfig) {
|
||||
const repoMatch = gitConfig.match(/url\s*=\s*(.+)/);
|
||||
if (repoMatch) output.repo = repoMatch[1].trim();
|
||||
}
|
||||
|
||||
// ── CLAUDE.md ─────────────────────────────────────────────────────────────────
|
||||
const claudeMd = readFile('CLAUDE.md');
|
||||
if (claudeMd) {
|
||||
const goal = extractSection(claudeMd, 'Current Goal');
|
||||
if (goal && !output.goal) output.goal = goal.split('\n')[0].trim();
|
||||
|
||||
const doNot = extractSection(claudeMd, 'Do Not Do');
|
||||
if (doNot) {
|
||||
const bullets = doNot.split('\n')
|
||||
.filter(l => /^[-*]\s+/.test(l))
|
||||
.map(l => l.replace(/^[-*]\s+/, '').trim());
|
||||
output.constraints = bullets;
|
||||
}
|
||||
|
||||
const stack = extractSection(claudeMd, 'Tech Stack');
|
||||
if (stack && output.stack.length === 0) {
|
||||
output.stack = stack.split(/[,\n]/).map(s => s.replace(/^[-*]\s+/, '').trim()).filter(Boolean);
|
||||
}
|
||||
|
||||
// Description from first section or "What This Is"
|
||||
const whatItIs = extractSection(claudeMd, 'What This Is') || extractSection(claudeMd, 'About');
|
||||
if (whatItIs && !output.description) output.description = whatItIs.split('\n')[0].trim();
|
||||
}
|
||||
|
||||
// ── README.md (description fallback) ─────────────────────────────────────────
|
||||
const readme = readFile('README.md');
|
||||
if (readme && !output.description) {
|
||||
// First non-header, non-badge, non-empty paragraph
|
||||
const lines = readme.split('\n');
|
||||
for (const line of lines) {
|
||||
const trimmed = line.trim();
|
||||
if (trimmed && !trimmed.startsWith('#') && !trimmed.startsWith('!') && !trimmed.startsWith('>') && !trimmed.startsWith('[') && trimmed !== '---' && trimmed !== '___') {
|
||||
output.description = trimmed.slice(0, 120);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ── Name fallback: directory name ─────────────────────────────────────────────
|
||||
if (!output.name) {
|
||||
output.name = basename(cwd).toLowerCase().replace(/\s+/g, '-');
|
||||
}
|
||||
|
||||
console.log(JSON.stringify(output, null, 2));
|
||||
41
skills/ck/commands/list.mjs
Normal file
41
skills/ck/commands/list.mjs
Normal file
@@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* list.mjs — portfolio view of all registered projects
|
||||
*
|
||||
* Usage: node list.mjs
|
||||
* stdout: ASCII table of all projects + prompt to resume
|
||||
* exit 0: success exit 1: no projects
|
||||
*/
|
||||
|
||||
import { readProjects, loadContext, today, CONTEXTS_DIR } from './shared.mjs';
|
||||
import { renderListTable } from './shared.mjs';
|
||||
|
||||
const cwd = process.env.PWD || process.cwd();
|
||||
const projects = readProjects();
|
||||
const entries = Object.entries(projects);
|
||||
|
||||
if (entries.length === 0) {
|
||||
console.log('No projects registered. Run /ck:init to get started.');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Build enriched list sorted alphabetically by contextDir
|
||||
const enriched = entries
|
||||
.map(([path, info]) => {
|
||||
const context = loadContext(info.contextDir);
|
||||
return {
|
||||
name: info.name,
|
||||
contextDir: info.contextDir,
|
||||
path,
|
||||
context,
|
||||
lastUpdated: info.lastUpdated,
|
||||
};
|
||||
})
|
||||
.sort((a, b) => a.contextDir.localeCompare(b.contextDir));
|
||||
|
||||
const table = renderListTable(enriched, cwd, today());
|
||||
console.log('');
|
||||
console.log(table);
|
||||
console.log('');
|
||||
console.log('Resume which? (number or name)');
|
||||
198
skills/ck/commands/migrate.mjs
Normal file
198
skills/ck/commands/migrate.mjs
Normal file
@@ -0,0 +1,198 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* migrate.mjs — convert v1 (CONTEXT.md + meta.json) to v2 (context.json)
|
||||
*
|
||||
* Usage:
|
||||
* node migrate.mjs — migrate all v1 projects
|
||||
* node migrate.mjs --dry-run — preview without writing
|
||||
*
|
||||
* Safe: backs up meta.json to meta.json.v1-backup, never deletes data.
|
||||
* exit 0: success exit 1: error
|
||||
*/
|
||||
|
||||
import { readFileSync, writeFileSync, existsSync, renameSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import { readProjects, writeProjects, saveContext, today, shortId, CONTEXTS_DIR } from './shared.mjs';
|
||||
|
||||
const isDryRun = process.argv.includes('--dry-run');
|
||||
|
||||
if (isDryRun) {
|
||||
console.log('ck migrate — DRY RUN (no files will be written)\n');
|
||||
}
|
||||
|
||||
// ── v1 markdown parsers ───────────────────────────────────────────────────────
|
||||
|
||||
function extractSection(md, heading) {
|
||||
const re = new RegExp(`## ${heading}\\n([\\s\\S]*?)(?=\\n## |$)`);
|
||||
const m = md.match(re);
|
||||
return m ? m[1].trim() : null;
|
||||
}
|
||||
|
||||
function parseBullets(text) {
|
||||
if (!text) return [];
|
||||
return text.split('\n')
|
||||
.filter(l => /^[-*\d]\s/.test(l.trim()))
|
||||
.map(l => l.replace(/^[-*\d]+\.?\s+/, '').trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
function parseDecisionsTable(text) {
|
||||
if (!text) return [];
|
||||
const rows = [];
|
||||
for (const line of text.split('\n')) {
|
||||
if (!line.startsWith('|') || line.match(/^[|\s-]+$/)) continue;
|
||||
const cols = line.split('|').map(c => c.trim()).filter((c, i) => i > 0 && i < 4);
|
||||
if (cols.length >= 1 && !cols[0].startsWith('Decision') && !cols[0].startsWith('_')) {
|
||||
rows.push({ what: cols[0] || '', why: cols[1] || '', date: cols[2] || '' });
|
||||
}
|
||||
}
|
||||
return rows;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse "Where I Left Off" which in v1 can be:
|
||||
* - Simple bullet list
|
||||
* - Multi-session blocks: "Session N (date):\n- bullet\n"
|
||||
* Returns array of session-like objects {date?, leftOff}
|
||||
*/
|
||||
function parseLeftOff(text) {
|
||||
if (!text) return [{ leftOff: null }];
|
||||
|
||||
// Detect multi-session format: "Session N ..."
|
||||
const sessionBlocks = text.split(/(?=Session \d+)/);
|
||||
if (sessionBlocks.length > 1) {
|
||||
return sessionBlocks
|
||||
.filter(b => b.trim())
|
||||
.map(block => {
|
||||
const dateMatch = block.match(/\((\d{4}-\d{2}-\d{2})\)/);
|
||||
const bullets = parseBullets(block);
|
||||
return {
|
||||
date: dateMatch?.[1] || null,
|
||||
leftOff: bullets.length ? bullets.join('\n') : block.replace(/^Session \d+.*\n/, '').trim(),
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
// Simple format
|
||||
const bullets = parseBullets(text);
|
||||
return [{ leftOff: bullets.length ? bullets.join('\n') : text.trim() }];
|
||||
}
|
||||
|
||||
// ── Main migration ─────────────────────────────────────────────────────────────
|
||||
|
||||
const projects = readProjects();
|
||||
let migrated = 0;
|
||||
let skipped = 0;
|
||||
let errors = 0;
|
||||
|
||||
for (const [projectPath, info] of Object.entries(projects)) {
|
||||
const contextDir = info.contextDir;
|
||||
const contextDirPath = resolve(CONTEXTS_DIR, contextDir);
|
||||
const contextJsonPath = resolve(contextDirPath, 'context.json');
|
||||
const contextMdPath = resolve(contextDirPath, 'CONTEXT.md');
|
||||
const metaPath = resolve(contextDirPath, 'meta.json');
|
||||
|
||||
// Already v2
|
||||
if (existsSync(contextJsonPath)) {
|
||||
try {
|
||||
const existing = JSON.parse(readFileSync(contextJsonPath, 'utf8'));
|
||||
if (existing.version === 2) {
|
||||
console.log(` ✓ ${contextDir} — already v2, skipping`);
|
||||
skipped++;
|
||||
continue;
|
||||
}
|
||||
} catch { /* fall through to migrate */ }
|
||||
}
|
||||
|
||||
console.log(`\n → Migrating: ${contextDir}`);
|
||||
|
||||
try {
|
||||
// Read v1 files
|
||||
const contextMd = existsSync(contextMdPath) ? readFileSync(contextMdPath, 'utf8') : '';
|
||||
let meta = {};
|
||||
if (existsSync(metaPath)) {
|
||||
try { meta = JSON.parse(readFileSync(metaPath, 'utf8')); } catch {}
|
||||
}
|
||||
|
||||
// Extract fields from CONTEXT.md
|
||||
const description = extractSection(contextMd, 'What This Is') || extractSection(contextMd, 'About') || null;
|
||||
const stackRaw = extractSection(contextMd, 'Tech Stack') || '';
|
||||
const stack = stackRaw.split(/[,\n]/).map(s => s.replace(/^[-*]\s+/, '').trim()).filter(Boolean);
|
||||
const goal = (extractSection(contextMd, 'Current Goal') || '').split('\n')[0].trim() || null;
|
||||
const constraintRaw = extractSection(contextMd, 'Do Not Do') || '';
|
||||
const constraints = parseBullets(constraintRaw);
|
||||
const decisionsRaw = extractSection(contextMd, 'Decisions Made') || '';
|
||||
const decisions = parseDecisionsTable(decisionsRaw);
|
||||
const nextStepsRaw = extractSection(contextMd, 'Next Steps') || '';
|
||||
const nextSteps = parseBullets(nextStepsRaw);
|
||||
const blockersRaw = extractSection(contextMd, 'Blockers') || '';
|
||||
const blockers = parseBullets(blockersRaw).filter(b => b.toLowerCase() !== 'none');
|
||||
const leftOffRaw = extractSection(contextMd, 'Where I Left Off') || '';
|
||||
const leftOffParsed = parseLeftOff(leftOffRaw);
|
||||
|
||||
// Build sessions from parsed left-off blocks (may be multiple)
|
||||
const sessions = leftOffParsed.map((lo, idx) => ({
|
||||
id: idx === leftOffParsed.length - 1 && meta.lastSessionId
|
||||
? meta.lastSessionId.slice(0, 8)
|
||||
: shortId(),
|
||||
date: lo.date || meta.lastUpdated || today(),
|
||||
summary: idx === leftOffParsed.length - 1
|
||||
? (meta.lastSessionSummary || 'Migrated from v1')
|
||||
: `Session ${idx + 1} (migrated)`,
|
||||
leftOff: lo.leftOff,
|
||||
nextSteps: idx === leftOffParsed.length - 1 ? nextSteps : [],
|
||||
decisions: idx === leftOffParsed.length - 1 ? decisions : [],
|
||||
blockers: idx === leftOffParsed.length - 1 ? blockers : [],
|
||||
}));
|
||||
|
||||
const context = {
|
||||
version: 2,
|
||||
name: contextDir,
|
||||
path: meta.path || projectPath,
|
||||
description,
|
||||
stack,
|
||||
goal,
|
||||
constraints,
|
||||
repo: meta.repo || null,
|
||||
createdAt: meta.lastUpdated || today(),
|
||||
sessions,
|
||||
};
|
||||
|
||||
if (isDryRun) {
|
||||
console.log(` description: ${description?.slice(0, 60) || '(none)'}`);
|
||||
console.log(` stack: ${stack.join(', ') || '(none)'}`);
|
||||
console.log(` goal: ${goal?.slice(0, 60) || '(none)'}`);
|
||||
console.log(` sessions: ${sessions.length}`);
|
||||
console.log(` decisions: ${decisions.length}`);
|
||||
console.log(` nextSteps: ${nextSteps.length}`);
|
||||
migrated++;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Backup meta.json
|
||||
if (existsSync(metaPath)) {
|
||||
renameSync(metaPath, resolve(contextDirPath, 'meta.json.v1-backup'));
|
||||
}
|
||||
|
||||
// Write context.json + regenerated CONTEXT.md
|
||||
saveContext(contextDir, context);
|
||||
|
||||
// Update projects.json entry
|
||||
projects[projectPath].lastUpdated = today();
|
||||
|
||||
console.log(` ✓ Migrated — ${sessions.length} session(s), ${decisions.length} decision(s)`);
|
||||
migrated++;
|
||||
} catch (e) {
|
||||
console.log(` ✗ Error: ${e.message}`);
|
||||
errors++;
|
||||
}
|
||||
}
|
||||
|
||||
if (!isDryRun && migrated > 0) {
|
||||
writeProjects(projects);
|
||||
}
|
||||
|
||||
console.log(`\nck migrate: ${migrated} migrated, ${skipped} already v2, ${errors} errors`);
|
||||
if (isDryRun) console.log('Run without --dry-run to apply.');
|
||||
if (errors > 0) process.exit(1);
|
||||
36
skills/ck/commands/resume.mjs
Normal file
36
skills/ck/commands/resume.mjs
Normal file
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* resume.mjs — full project briefing
|
||||
*
|
||||
* Usage: node resume.mjs [name|number]
|
||||
* stdout: bordered briefing box
|
||||
* exit 0: success exit 1: not found
|
||||
*/
|
||||
|
||||
import { existsSync } from 'fs';
|
||||
import { resolveContext, renderBriefingBox } from './shared.mjs';
|
||||
|
||||
const arg = process.argv[2];
|
||||
const cwd = process.env.PWD || process.cwd();
|
||||
|
||||
const resolved = resolveContext(arg, cwd);
|
||||
if (!resolved) {
|
||||
const hint = arg ? `No project matching "${arg}".` : 'This directory is not registered.';
|
||||
console.log(`${hint} Run /ck:init to register it.`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const { context, projectPath } = resolved;
|
||||
|
||||
// Attempt to cd to the project path
|
||||
if (projectPath && projectPath !== cwd) {
|
||||
if (existsSync(projectPath)) {
|
||||
console.log(`→ cd ${projectPath}`);
|
||||
} else {
|
||||
console.log(`⚠ Path not found: ${projectPath}`);
|
||||
}
|
||||
}
|
||||
|
||||
console.log('');
|
||||
console.log(renderBriefingBox(context));
|
||||
210
skills/ck/commands/save.mjs
Normal file
210
skills/ck/commands/save.mjs
Normal file
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* save.mjs — write session data to context.json, regenerate CONTEXT.md,
|
||||
* and write a native memory entry.
|
||||
*
|
||||
* Usage (regular save):
|
||||
* echo '<json>' | node save.mjs
|
||||
* JSON schema: { summary, leftOff, nextSteps[], decisions[{what,why}], blockers[], goal? }
|
||||
*
|
||||
* Usage (init — first registration):
|
||||
* echo '<json>' | node save.mjs --init
|
||||
* JSON schema: { name, path, description, stack[], goal, constraints[], repo? }
|
||||
*
|
||||
* stdout: confirmation message
|
||||
* exit 0: success exit 1: error
|
||||
*/
|
||||
|
||||
import { readFileSync, mkdirSync, writeFileSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import {
|
||||
readProjects, writeProjects, loadContext, saveContext,
|
||||
today, shortId, gitSummary, nativeMemoryDir, encodeProjectPath,
|
||||
CONTEXTS_DIR, CURRENT_SESSION,
|
||||
} from './shared.mjs';
|
||||
|
||||
// Mode flag: --init registers a brand-new project; otherwise record a session.
const isInit = process.argv.includes('--init');
// Prefer $PWD so symlinked working directories keep their logical path.
const cwd = process.env.PWD || process.cwd();

// ── Read JSON from stdin ──────────────────────────────────────────────────────
let input;
try {
  const raw = readFileSync(0, 'utf8').trim();
  if (raw === '') throw new Error('empty stdin');
  input = JSON.parse(raw);
} catch (e) {
  // Bad payload: explain both accepted schemas, then bail.
  console.error(`ck save: invalid JSON on stdin — ${e.message}`);
  console.log('Expected schema (save): {"summary":"...","leftOff":"...","nextSteps":["..."],"decisions":[{"what":"...","why":"..."}],"blockers":["..."]}');
  console.log('Expected schema (--init): {"name":"...","path":"...","description":"...","stack":["..."],"goal":"...","constraints":["..."]}');
  process.exit(1);
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
// INIT MODE: first-time project registration
// ─────────────────────────────────────────────────────────────────────────────
if (isInit) {
  const { name, path: projectPath, description, stack, goal, constraints, repo } = input;

  // Name and path are mandatory; everything else can be filled in later.
  if (!name || !projectPath) {
    console.log('ck init: name and path are required.');
    process.exit(1);
  }

  const projects = readProjects();

  // Derive the context directory: lowercase, spaces → dashes, strip anything
  // outside [a-z0-9-], then suffix -2, -3, … on collision with another
  // project's directory. Re-registering the same path keeps its existing dir.
  const slug = name.toLowerCase().replace(/\s+/g, '-').replace(/[^a-z0-9-]/g, '');
  let contextDir = slug;
  let suffix = 2;
  const existingDirs = Object.values(projects).map(p => p.contextDir);
  while (existingDirs.includes(contextDir) && projects[projectPath]?.contextDir !== contextDir) {
    contextDir = `${contextDir.replace(/-\d+$/, '')}-${suffix++}`;
  }

  // Fresh v2 context record; scalar `stack` is wrapped, scalar `constraints`
  // is dropped (arrays only) — matching what /ck:save expects to read back.
  const context = {
    version: 2,
    name: contextDir,
    displayName: name,
    path: projectPath,
    description: description || null,
    stack: Array.isArray(stack) ? stack : (stack ? [stack] : []),
    goal: goal || null,
    constraints: Array.isArray(constraints) ? constraints : [],
    repo: repo || null,
    createdAt: today(),
    sessions: [],
  };

  saveContext(contextDir, context);

  // Register the path → context mapping in projects.json.
  projects[projectPath] = {
    name,
    contextDir,
    lastUpdated: today(),
  };
  writeProjects(projects);

  console.log(`✓ Project '${name}' registered.`);
  console.log(` Use /ck:save to save session state and /ck:resume to reload it next time.`);
  process.exit(0);
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
// SAVE MODE: record a session
// ─────────────────────────────────────────────────────────────────────────────
const projects = readProjects();
const projectEntry = projects[cwd];

// A session can only be saved for a directory that went through /ck:init.
if (!projectEntry) {
  console.log("This project isn't registered yet. Run /ck:init first.");
  process.exit(1);
}

const { contextDir } = projectEntry;
let context = loadContext(contextDir);

// projects.json points at a context dir whose context.json is missing/corrupt.
if (!context) {
  console.log(`ck: context.json not found for '${contextDir}'. The install may be corrupted.`);
  process.exit(1);
}

// Get session ID from current-session.json (written by the session-start hook).
// Fall back to a fresh random id when the file is missing or unreadable.
let sessionId;
try {
  const sess = JSON.parse(readFileSync(CURRENT_SESSION, 'utf8'));
  sessionId = sess.sessionId || shortId();
} catch {
  sessionId = shortId();
}

// Check for duplicate (re-save of same session)
const existingIdx = context.sessions.findIndex(s => s.id === sessionId);

const { summary, leftOff, nextSteps, decisions, blockers, goal } = input;

// Capture git activity since the last session
const lastSessionDate = context.sessions?.[context.sessions.length - 1]?.date;
const gitActivity = gitSummary(cwd, lastSessionDate);

// Normalize free-form stdin input into the stored session record:
// scalars are wrapped/dropped so downstream renderers always see arrays.
const session = {
  id: sessionId,
  date: today(),
  summary: summary || 'Session saved',
  leftOff: leftOff || null,
  nextSteps: Array.isArray(nextSteps) ? nextSteps : (nextSteps ? [nextSteps] : []),
  decisions: Array.isArray(decisions) ? decisions : [],
  blockers: Array.isArray(blockers) ? blockers.filter(Boolean) : [],
  ...(gitActivity ? { gitActivity } : {}),
};

if (existingIdx >= 0) {
  // Update existing session (re-save)
  context.sessions[existingIdx] = session;
} else {
  context.sessions.push(session);
}

// Update goal if provided
if (goal && goal !== context.goal) {
  context.goal = goal;
}

// Save context.json + regenerate CONTEXT.md
saveContext(contextDir, context);

// Update projects.json timestamp
projects[cwd].lastUpdated = today();
writeProjects(projects);

// ── Write to native memory ────────────────────────────────────────────────────
// Mirrors the session as a markdown note under ~/.claude/projects/<encoded>/memory.
try {
  const memDir = nativeMemoryDir(cwd);
  mkdirSync(memDir, { recursive: true });

  const memFile = resolve(memDir, `ck_${today()}_${sessionId.slice(0, 8)}.md`);
  // Pre-render the list sections so empty arrays get a readable placeholder.
  const decisionsBlock = session.decisions.length
    ? session.decisions.map(d => `- **${d.what}**: ${d.why || ''}`).join('\n')
    : '- None this session';
  const nextBlock = session.nextSteps.length
    ? session.nextSteps.map((s, i) => `${i + 1}. ${s}`).join('\n')
    : '- None recorded';
  const blockersBlock = session.blockers.length
    ? session.blockers.map(b => `- ${b}`).join('\n')
    : '- None';

  // Frontmatter + body of the memory note.
  const memContent = [
    `---`,
    `name: Session ${today()} — ${session.summary}`,
    `description: Key decisions and outcomes from ck session ${sessionId.slice(0, 8)}`,
    `type: project`,
    `source: ck`,
    `sessionId: ${sessionId}`,
    `---`,
    ``,
    `# Session: ${session.summary}`,
    ``,
    `## Decisions`,
    decisionsBlock,
    ``,
    `## Left Off`,
    session.leftOff || '—',
    ``,
    `## Next Steps`,
    nextBlock,
    ``,
    `## Blockers`,
    blockersBlock,
    ``,
    ...(gitActivity ? [`## Git Activity`, gitActivity, ``] : []),
  ].join('\n');

  writeFileSync(memFile, memContent, 'utf8');
} catch (e) {
  // Non-fatal — native memory write failure should not block the save
  process.stderr.write(`ck: warning — could not write native memory entry: ${e.message}\n`);
}

console.log(`✓ Saved. Session: ${sessionId.slice(0, 8)}`);
if (gitActivity) console.log(` Git: ${gitActivity}`);
console.log(` See you next time.`);
|
||||
387
skills/ck/commands/shared.mjs
Normal file
387
skills/ck/commands/shared.mjs
Normal file
@@ -0,0 +1,387 @@
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* shared.mjs — common utilities for all command scripts
|
||||
*
|
||||
* No external dependencies. Node.js stdlib only.
|
||||
*/
|
||||
|
||||
import { readFileSync, writeFileSync, existsSync, mkdirSync, readdirSync } from 'fs';
|
||||
import { resolve, basename } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import { spawnSync } from 'child_process';
|
||||
import { randomBytes } from 'crypto';
|
||||
|
||||
// ─── Paths ────────────────────────────────────────────────────────────────────
|
||||
|
||||
export const CK_HOME = resolve(homedir(), '.claude', 'ck');
|
||||
export const CONTEXTS_DIR = resolve(CK_HOME, 'contexts');
|
||||
export const PROJECTS_FILE = resolve(CK_HOME, 'projects.json');
|
||||
export const CURRENT_SESSION = resolve(CK_HOME, 'current-session.json');
|
||||
export const SKILL_FILE = resolve(homedir(), '.claude', 'skills', 'ck', 'SKILL.md');
|
||||
|
||||
// ─── JSON I/O ─────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Read and parse a JSON file.
 * Returns the parsed value, or null when the file is missing or unparseable.
 */
export function readJson(filePath) {
  try {
    return existsSync(filePath) ? JSON.parse(readFileSync(filePath, 'utf8')) : null;
  } catch {
    return null;
  }
}

/**
 * Serialize `data` as pretty-printed JSON (2-space indent, trailing newline)
 * and write it to `filePath`, creating parent directories as needed.
 */
export function writeJson(filePath, data) {
  const parentDir = resolve(filePath, '..');
  mkdirSync(parentDir, { recursive: true });
  const serialized = `${JSON.stringify(data, null, 2)}\n`;
  writeFileSync(filePath, serialized, 'utf8');
}

/** Load the path → project registry, or an empty registry when absent. */
export function readProjects() {
  const loaded = readJson(PROJECTS_FILE);
  return loaded || {};
}

/** Persist the path → project registry. */
export function writeProjects(projects) {
  writeJson(PROJECTS_FILE, projects);
}
|
||||
|
||||
// ─── Context I/O ──────────────────────────────────────────────────────────────
|
||||
|
||||
/** Absolute path of a context's machine-readable state file. */
export function contextPath(contextDir) {
  return resolve(CONTEXTS_DIR, contextDir, 'context.json');
}

/** Absolute path of a context's generated human-readable summary. */
export function contextMdPath(contextDir) {
  return resolve(CONTEXTS_DIR, contextDir, 'CONTEXT.md');
}

/** Load a context's state object, or null when absent/corrupt. */
export function loadContext(contextDir) {
  return readJson(contextPath(contextDir));
}

/**
 * Persist a context: write context.json, then regenerate CONTEXT.md from it
 * so the two files never drift apart.
 */
export function saveContext(contextDir, data) {
  const targetDir = resolve(CONTEXTS_DIR, contextDir);
  mkdirSync(targetDir, { recursive: true });
  writeJson(contextPath(contextDir), data);
  const rendered = renderContextMd(data);
  writeFileSync(contextMdPath(contextDir), rendered, 'utf8');
}
|
||||
|
||||
/**
 * Resolve which registered project an invocation refers to.
 *
 * @param {string|undefined} arg — undefined: match the cwd; a numeric string:
 *   1-based index into the alphabetical (by contextDir) project list; anything
 *   else: case-insensitive name search (exact, then prefix, then substring).
 * @param {string} cwd — absolute path of the current working directory
 * @returns {{ name, contextDir, projectPath, context } | null} null when no
 *   project matches or its context.json cannot be loaded.
 */
export function resolveContext(arg, cwd) {
  const projects = readProjects();

  // Load the candidate's context and package the result (null if unreadable).
  const finish = (name, contextDir, projectPath) => {
    const context = loadContext(contextDir);
    return context ? { name, contextDir, projectPath, context } : null;
  };

  if (!arg) {
    // No argument: the project registered for the current directory.
    const entry = projects[cwd];
    return entry ? finish(entry.name, entry.contextDir, cwd) : null;
  }

  // All registrations, alphabetical by contextDir (stable 1-based numbering).
  const sorted = Object.entries(projects)
    .map(([path, info]) => ({ path, ...info }))
    .sort((a, b) => a.contextDir.localeCompare(b.contextDir));

  const asNumber = parseInt(arg, 10);
  if (!isNaN(asNumber) && String(asNumber) === arg) {
    // Purely numeric argument: 1-indexed lookup into the sorted list.
    const item = sorted[asNumber - 1];
    return item ? finish(item.name, item.contextDir, item.path) : null;
  }

  // Name search: exact beats prefix beats substring (all case-insensitive).
  const needle = arg.toLowerCase();
  const match =
    sorted.find(e => e.name.toLowerCase() === needle) ||
    sorted.find(e => e.name.toLowerCase().startsWith(needle)) ||
    sorted.find(e => e.name.toLowerCase().includes(needle));

  return match ? finish(match.name, match.contextDir, match.path) : null;
}
|
||||
|
||||
// ─── Date helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
/** Today's date as an ISO `YYYY-MM-DD` string (UTC). */
export function today() {
  return new Date().toISOString().slice(0, 10);
}

/** Human label for how long ago a date was: 'Today', '1 day ago', 'N days ago'. */
export function daysAgoLabel(dateStr) {
  if (!dateStr) return 'unknown';
  const elapsedDays = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  switch (elapsedDays) {
    case 0: return 'Today';
    case 1: return '1 day ago';
    default: return `${elapsedDays} days ago`;
  }
}

/** Freshness marker: ● under a day old, ◐ up to five days, ○ stale or unknown. */
export function stalenessIcon(dateStr) {
  if (!dateStr) return '○';
  const elapsedDays = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  if (elapsedDays < 1) return '●';
  return elapsedDays <= 5 ? '◐' : '○';
}
|
||||
|
||||
// ─── ID generation ────────────────────────────────────────────────────────────
|
||||
|
||||
/** Eight-character hex session id derived from 4 random bytes. */
export function shortId() {
  const idBytes = randomBytes(4);
  return idBytes.toString('hex');
}
|
||||
|
||||
// ─── Git helpers ──────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Run a git subcommand in `cwd`. Returns trimmed stdout on success, or null
 * on non-zero exit, timeout (3s), or spawn failure (e.g. git not installed).
 */
function runGit(args, cwd) {
  try {
    const result = spawnSync('git', ['-C', cwd, ...args], {
      timeout: 3000,
      stdio: 'pipe',
      encoding: 'utf8',
    });
    if (result.status !== 0) return null;
    return result.stdout.trim();
  } catch {
    return null;
  }
}

/** One-line-per-commit log since `sinceDate` (YYYY-MM-DD), or null. */
export function gitLogSince(projectPath, sinceDate) {
  if (!sinceDate) return null;
  return runGit(['log', '--oneline', `--since=${sinceDate}`], projectPath);
}

/**
 * Human summary of git activity since `sinceDate`,
 * e.g. "3 commits, 5 files changed" — or null when there is no activity,
 * no date, or git is unavailable.
 */
export function gitSummary(projectPath, sinceDate) {
  const log = gitLogSince(projectPath, sinceDate);
  if (!log) return null;
  const commits = log.split('\n').filter(Boolean).length;
  if (commits === 0) return null;

  const commitsLabel = `${commits} commit${commits !== 1 ? 's' : ''}`;

  // Count files changed by diffing against the commit just before the window.
  // The previous implementation spawned an extra `git rev-list --count` here,
  // but that counts the same commits-since-date we already have from the log.
  // Capped at 50 so a long-idle project doesn't produce a huge diff; if the
  // range reaches past the root commit, git fails and we fall back to commits only.
  const diff = runGit(['diff', '--shortstat', `HEAD~${Math.min(commits, 50)}..HEAD`], projectPath);

  if (diff) {
    const filesMatch = diff.match(/(\d+) file/);
    const files = filesMatch ? parseInt(filesMatch[1], 10) : '?';
    return `${commitsLabel}, ${files} file${files !== 1 ? 's' : ''} changed`;
  }
  return commitsLabel;
}
|
||||
|
||||
// ─── Native memory path encoding ──────────────────────────────────────────────
|
||||
|
||||
/**
 * Encode an absolute project path the way Claude's native project storage
 * does: every path separator becomes a dash.
 * "/Users/sree/dev/app" → "-Users-sree-dev-app"
 */
export function encodeProjectPath(absolutePath) {
  return absolutePath.split('/').join('-');
}

/** Directory where native memory entries for this project live. */
export function nativeMemoryDir(absolutePath) {
  const encoded = encodeProjectPath(absolutePath);
  return resolve(homedir(), '.claude', 'projects', encoded, 'memory');
}
|
||||
|
||||
// ─── Rendering ────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Render the human-readable CONTEXT.md document from a context.json object.
 * Pure function of `ctx`; the generated file is never edited by hand.
 */
export function renderContextMd(ctx) {
  const sessions = ctx.sessions || [];
  const sessionCount = sessions.length;
  const latest = sessions[sessionCount - 1] || null;
  const out = [];
  const emit = (...ls) => out.push(...ls);

  // Header: generator notice, project title, and key metadata lines.
  emit(
    `<!-- Generated by ck v2 — edit context.json instead -->`,
    `# Project: ${ctx.displayName ?? ctx.name}`,
    `> Path: ${ctx.path}`,
  );
  if (ctx.repo) emit(`> Repo: ${ctx.repo}`);
  emit(`> Last Session: ${latest?.date || 'never'} | Sessions: ${sessionCount}`);

  emit(``, `## What This Is`, ctx.description || '_Not set._');
  emit(``, `## Tech Stack`,
    Array.isArray(ctx.stack) ? ctx.stack.join(', ') : (ctx.stack || '_Not set._'));
  emit(``, `## Current Goal`, ctx.goal || '_Not set._');
  emit(``, `## Where I Left Off`,
    latest?.leftOff || '_Not yet recorded. Run /ck:save after your first session._');

  emit(``, `## Next Steps`);
  if (latest?.nextSteps?.length) {
    latest.nextSteps.forEach((s, i) => emit(`${i + 1}. ${s}`));
  } else {
    emit(`_Not yet recorded._`);
  }

  emit(``, `## Blockers`);
  if (latest?.blockers?.length) {
    latest.blockers.forEach(b => emit(`- ${b}`));
  } else {
    emit(`- None`);
  }

  emit(``, `## Do Not Do`);
  if (ctx.constraints?.length) {
    ctx.constraints.forEach(c => emit(`- ${c}`));
  } else {
    emit(`- None specified`);
  }
  emit(``);

  // Every decision from every session, flattened into one table.
  const allDecisions = sessions.flatMap(s =>
    (s.decisions || []).map(d => ({ ...d, date: s.date }))
  );
  emit(`## Decisions Made`, `| Decision | Why | Date |`, `|----------|-----|------|`);
  if (allDecisions.length) {
    allDecisions.forEach(d => emit(`| ${d.what} | ${d.why || ''} | ${d.date || ''} |`));
  } else {
    emit(`| _(none yet)_ | | |`);
  }
  emit(``);

  // Session history, newest first — only once there is more than one session.
  if (sessionCount > 1) {
    emit(`## Session History`);
    [...sessions].reverse().forEach(s => {
      emit(`### ${s.date} — ${s.summary || 'Session'}`);
      if (s.gitActivity) emit(`_${s.gitActivity}_`);
      if (s.leftOff) emit(`**Left off:** ${s.leftOff}`);
    });
    emit(``);
  }

  return out.join('\n');
}
|
||||
|
||||
/**
 * Render the bordered briefing box used by /ck:resume.
 * Pure function of `ctx`; `meta` is accepted but currently unused
 * (reserved for future callers — NOTE(review): confirm before removing).
 */
export function renderBriefingBox(ctx, meta = {}) {
  // Most recent session drives the "left off" / "next steps" panels.
  const latest = ctx.sessions?.[ctx.sessions.length - 1] || {};
  // Inner width of the box, in characters.
  const W = 57;
  // Right-pad to `w` chars; overlong values are truncated with an ellipsis.
  const pad = (str, w) => {
    const s = String(str || '');
    return s.length > w ? s.slice(0, w - 1) + '…' : s.padEnd(w);
  };
  // One "LABEL → value" body row; width math accounts for the fixed chrome.
  const row = (label, value) => `│ ${label} → ${pad(value, W - label.length - 7)}│`;

  const when = daysAgoLabel(ctx.sessions?.[ctx.sessions.length - 1]?.date);
  const sessions = ctx.sessions?.length || 0;
  const shortSessId = latest.id?.slice(0, 8) || null;

  // Top border plus the header rows.
  const lines = [
    `┌${'─'.repeat(W)}┐`,
    `│ RESUMING: ${pad(ctx.displayName ?? ctx.name, W - 12)}│`,
    `│ Last session: ${pad(`${when} | Sessions: ${sessions}`, W - 16)}│`,
  ];
  if (shortSessId) lines.push(`│ Session ID: ${pad(shortSessId, W - 14)}│`);
  lines.push(`├${'─'.repeat(W)}┤`);
  // Metadata rows; REPO only when set.
  lines.push(row('WHAT IT IS', ctx.description || '—'));
  lines.push(row('STACK ', Array.isArray(ctx.stack) ? ctx.stack.join(', ') : (ctx.stack || '—')));
  lines.push(row('PATH ', ctx.path));
  if (ctx.repo) lines.push(row('REPO ', ctx.repo));
  lines.push(row('GOAL ', ctx.goal || '—'));
  lines.push(`├${'─'.repeat(W)}┤`);
  // Multi-line "left off" notes become one bullet per non-empty line.
  lines.push(`│ WHERE I LEFT OFF${' '.repeat(W - 18)}│`);
  const leftOffLines = (latest.leftOff || '—').split('\n').filter(Boolean);
  leftOffLines.forEach(l => lines.push(`│ • ${pad(l, W - 7)}│`));
  lines.push(`├${'─'.repeat(W)}┤`);
  lines.push(`│ NEXT STEPS${' '.repeat(W - 12)}│`);
  const steps = latest.nextSteps || [];
  if (steps.length) {
    steps.forEach((s, i) => lines.push(`│ ${i + 1}. ${pad(s, W - 8)}│`));
  } else {
    lines.push(`│ —${' '.repeat(W - 5)}│`);
  }
  // Blockers collapse to a single comma-joined line; git row only when present.
  const blockers = latest.blockers?.length ? latest.blockers.join(', ') : 'None';
  lines.push(`│ BLOCKERS → ${pad(blockers, W - 13)}│`);
  if (latest.gitActivity) {
    lines.push(`│ GIT → ${pad(latest.gitActivity, W - 13)}│`);
  }
  lines.push(`└${'─'.repeat(W)}┘`);
  return lines.join('\n');
}
|
||||
|
||||
/**
 * Render the compact, borderless info block used by /ck:info.
 * Pure function of `ctx`.
 */
export function renderInfoBlock(ctx) {
  // Most recent session (empty object when none recorded yet).
  const latest = ctx.sessions?.[ctx.sessions.length - 1] || {};
  const sep = '─'.repeat(44);
  const lines = [
    `ck: ${ctx.displayName ?? ctx.name}`,
    sep,
  ];
  // Metadata: REPO and SESSION lines appear only when available.
  lines.push(`PATH ${ctx.path}`);
  if (ctx.repo) lines.push(`REPO ${ctx.repo}`);
  if (latest.id) lines.push(`SESSION ${latest.id.slice(0, 8)}`);
  lines.push(`GOAL ${ctx.goal || '—'}`);
  lines.push(sep);
  lines.push(`WHERE I LEFT OFF`);
  // Multi-line "left off" notes become one bullet per non-empty line.
  (latest.leftOff || '—').split('\n').filter(Boolean).forEach(l => lines.push(` • ${l}`));
  lines.push(`NEXT STEPS`);
  (latest.nextSteps || []).forEach((s, i) => lines.push(` ${i + 1}. ${s}`));
  if (!latest.nextSteps?.length) lines.push(` —`);
  lines.push(`BLOCKERS`);
  if (latest.blockers?.length) {
    latest.blockers.forEach(b => lines.push(` • ${b}`));
  } else {
    lines.push(` • None`);
  }
  return lines.join('\n');
}
|
||||
|
||||
/**
 * Render the ASCII project table used by /ck:list.
 * entries: [{name, contextDir, path, context, lastUpdated}], already sorted
 * alphabetically by contextDir by the caller. `cwd` marks the current project.
 */
export function renderListTable(entries, cwd, todayStr) {
  // One display row per project, with per-column truncation limits.
  const rows = entries.map((entry, idx) => {
    const latest = entry.context?.sessions?.[entry.context.sessions.length - 1] || {};
    const icon = stalenessIcon(latest.date);
    const statusLabel = icon === '●' ? '● Active' : icon === '◐' ? '◐ Warm' : '○ Stale';
    const marker = entry.path === cwd ? ' <-' : '';
    return {
      num: String(idx + 1),
      name: ((entry.context?.displayName ?? entry.name) + marker).slice(0, 18),
      status: statusLabel,
      when: daysAgoLabel(latest.date).slice(0, 10),
      sessId: latest.id ? latest.id.slice(0, 8) : '—',
      summary: (latest.summary || '—').slice(0, 34),
    };
  });

  // Column widths: at least the header width, grown to fit the data.
  const widthOf = (min, field) => Math.max(min, ...rows.map(r => r[field].length));
  const cols = {
    num: widthOf(1, 'num'),
    name: widthOf(7, 'name'),
    status: widthOf(6, 'status'),
    when: widthOf(9, 'when'),
    sessId: widthOf(7, 'sessId'),
    summary: widthOf(12, 'summary'),
  };

  const order = ['num', 'name', 'status', 'when', 'sessId', 'summary'];
  const headers = {
    num: '#', name: 'Project', status: 'Status',
    when: 'Last Seen', sessId: 'Session', summary: 'Last Summary',
  };
  const cell = (val, width) => ` ${val.padEnd(width)} `;
  const renderRow = vals => `|${order.map(k => cell(vals[k], cols[k])).join('|')}|`;
  const hr = `+${order.map(k => '-'.repeat(cols[k] + 2)).join('+')}+`;

  const headerRow = renderRow(headers);
  const dataRows = rows.map(renderRow);

  return [hr, headerRow, hr, ...dataRows, hr].join('\n');
}
|
||||
224
skills/ck/hooks/session-start.mjs
Normal file
224
skills/ck/hooks/session-start.mjs
Normal file
@@ -0,0 +1,224 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* session-start.mjs — inject compact project context on session start.
|
||||
*
|
||||
* Injects ~100 tokens (not ~2,500 like v1).
|
||||
* SKILL.md is injected separately (still small at ~50 lines).
|
||||
*
|
||||
* Features:
|
||||
* - Compact 5-line summary for registered projects
|
||||
* - Unsaved session detection → "Last session wasn't saved. Run /ck:save."
|
||||
* - Git activity since last session
|
||||
* - Goal mismatch detection vs CLAUDE.md
|
||||
* - Mini portfolio for unregistered directories
|
||||
*/
|
||||
|
||||
import { readFileSync, writeFileSync, existsSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import { spawnSync } from 'child_process';
|
||||
|
||||
const CK_HOME = resolve(homedir(), '.claude', 'ck');
|
||||
const PROJECTS_FILE = resolve(CK_HOME, 'projects.json');
|
||||
const CURRENT_SESSION = resolve(CK_HOME, 'current-session.json');
|
||||
const SKILL_FILE = resolve(homedir(), '.claude', 'skills', 'ck', 'SKILL.md');
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
// Parse a JSON file, returning null on any read or parse failure.
function readJson(p) {
  try {
    return JSON.parse(readFileSync(p, 'utf8'));
  } catch {
    return null;
  }
}
|
||||
|
||||
// Human elapsed-time label for a date string: 'today', '1 day ago', 'N days ago'.
function daysAgo(dateStr) {
  if (!dateStr) return 'unknown';
  const elapsed = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  if (elapsed === 0) return 'today';
  return elapsed === 1 ? '1 day ago' : `${elapsed} days ago`;
}
|
||||
|
||||
// Freshness marker: ● under a day old, ◐ up to five days, ○ stale or unknown.
function stalenessIcon(dateStr) {
  if (!dateStr) return '○';
  const elapsed = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  if (elapsed < 1) return '●';
  if (elapsed <= 5) return '◐';
  return '○';
}
|
||||
|
||||
// "N commits since last session" when the project is a git repo with activity
// after sinceDate; null otherwise (no date, no .git dir, git failure, or zero commits).
function gitLogSince(projectPath, sinceDate) {
  if (!sinceDate || !existsSync(resolve(projectPath, '.git'))) return null;
  try {
    const gitArgs = ['-C', projectPath, 'log', '--oneline', `--since=${sinceDate}`];
    const result = spawnSync('git', gitArgs, { timeout: 3000, stdio: 'pipe', encoding: 'utf8' });
    if (result.status !== 0) return null;
    const commits = result.stdout.trim().split('\n').filter(Boolean).length;
    if (commits === 0) return null;
    return `${commits} commit${commits !== 1 ? 's' : ''} since last session`;
  } catch {
    return null;
  }
}
|
||||
|
||||
// Pull the first line under "## Current Goal" out of the project's CLAUDE.md,
// or null when the file or section is absent/unreadable.
function extractClaudeMdGoal(projectPath) {
  const claudeMdPath = resolve(projectPath, 'CLAUDE.md');
  if (!existsSync(claudeMdPath)) return null;
  try {
    const md = readFileSync(claudeMdPath, 'utf8');
    const section = md.match(/## Current Goal\n([\s\S]*?)(?=\n## |$)/);
    if (!section) return null;
    return section[1].trim().split('\n')[0].trim();
  } catch {
    return null;
  }
}
|
||||
|
||||
// ─── Session ID from stdin ────────────────────────────────────────────────────
|
||||
|
||||
// Session id handed to the hook as JSON on stdin ({ session_id: ... }), or null
// when stdin is empty, unparseable, or lacks the field.
function readSessionId() {
  try {
    const payload = JSON.parse(readFileSync(0, 'utf8'));
    return payload.session_id || null;
  } catch {
    return null;
  }
}
|
||||
|
||||
// ─── Main ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Build the hook's `additionalContext` parts for session start.
 *
 * Always includes SKILL.md (when present). For a registered project with a
 * readable context, adds a compact summary plus a "display this verbatim"
 * briefing and returns. Otherwise, when any projects exist, adds a mini
 * portfolio of the 3 most recent ones. Also records the new session id to
 * current-session.json so a later /ck:save can pick it up.
 *
 * @returns {string[]} context fragments to be joined by the caller
 */
function main() {
  const cwd = process.env.PWD || process.cwd();
  const sessionId = readSessionId();

  // Load skill (always inject — now only ~50 lines)
  const skill = existsSync(SKILL_FILE) ? readFileSync(SKILL_FILE, 'utf8') : '';

  const projects = readJson(PROJECTS_FILE) || {};
  const entry = projects[cwd];

  // Read previous session BEFORE overwriting current-session.json
  const prevSession = readJson(CURRENT_SESSION);

  // Write current-session.json
  try {
    writeFileSync(CURRENT_SESSION, JSON.stringify({
      sessionId,
      projectPath: cwd,
      projectName: entry?.name || null,
      startedAt: new Date().toISOString(),
    }, null, 2), 'utf8');
  } catch { /* non-fatal */ }

  const parts = [];
  if (skill) parts.push(skill);

  // ── REGISTERED PROJECT ────────────────────────────────────────────────────
  if (entry?.contextDir) {
    const contextFile = resolve(CK_HOME, 'contexts', entry.contextDir, 'context.json');
    const context = readJson(contextFile);

    // If context.json is missing/corrupt we fall through to the portfolio below.
    if (context) {
      const latest = context.sessions?.[context.sessions.length - 1] || {};
      const sessionDate = latest.date || context.createdAt;
      const sessionCount = context.sessions?.length || 0;
      const displayName = context.displayName ?? context.name;

      // ── Compact summary block (~100 tokens) ──────────────────────────────
      const summaryLines = [
        `ck: ${displayName} | ${daysAgo(sessionDate)} | ${sessionCount} session${sessionCount !== 1 ? 's' : ''}`,
        `Goal: ${context.goal || '—'}`,
        latest.leftOff ? `Left off: ${latest.leftOff.split('\n')[0]}` : null,
        latest.nextSteps?.length ? `Next: ${latest.nextSteps.slice(0, 2).join(' · ')}` : null,
      ].filter(Boolean);

      // ── Unsaved session detection ─────────────────────────────────────────
      // prevSession is the session recorded at the *previous* start; if its id
      // never made it into context.sessions, that session was never /ck:save'd.
      if (prevSession?.sessionId && prevSession.sessionId !== sessionId) {
        // Check if previous session ID exists in sessions array
        const alreadySaved = context.sessions?.some(s => s.id === prevSession.sessionId);
        if (!alreadySaved) {
          summaryLines.push(`⚠ Last session wasn't saved — run /ck:save to capture it`);
        }
      }

      // ── Git activity ──────────────────────────────────────────────────────
      const gitLine = gitLogSince(cwd, sessionDate);
      if (gitLine) summaryLines.push(`Git: ${gitLine}`);

      // ── Goal mismatch detection ───────────────────────────────────────────
      // Compare ck's stored goal against the "## Current Goal" in CLAUDE.md.
      const claudeMdGoal = extractClaudeMdGoal(cwd);
      if (claudeMdGoal && context.goal &&
          claudeMdGoal.toLowerCase().trim() !== context.goal.toLowerCase().trim()) {
        summaryLines.push(`⚠ Goal mismatch — ck: "${context.goal.slice(0, 40)}" · CLAUDE.md: "${claudeMdGoal.slice(0, 40)}"`);
        summaryLines.push(` Run /ck:save with updated goal to sync`);
      }

      parts.push([
        `---`,
        `## ck: ${displayName}`,
        ``,
        summaryLines.join('\n'),
      ].join('\n'));

      // Instruct Claude to display compact briefing at session start
      parts.push([
        `---`,
        `## ck: SESSION START`,
        ``,
        `IMPORTANT: Display the following as your FIRST message, verbatim:`,
        ``,
        '```',
        summaryLines.join('\n'),
        '```',
        ``,
        `After the block, add one line: "Ready — what are we working on?"`,
        `If you see ⚠ warnings above, mention them briefly after the block.`,
      ].join('\n'));

      return parts;
    }
  }

  // ── NOT IN A REGISTERED PROJECT ────────────────────────────────────────────
  const entries = Object.entries(projects);
  if (entries.length === 0) return parts;

  // Load and sort by most recent
  const recent = entries
    .map(([path, info]) => {
      const ctx = readJson(resolve(CK_HOME, 'contexts', info.contextDir, 'context.json'));
      const latest = ctx?.sessions?.[ctx.sessions.length - 1] || {};
      return { name: info.name, path, lastDate: latest.date || '', summary: latest.summary || '—', ctx };
    })
    .sort((a, b) => (b.lastDate > a.lastDate ? 1 : -1))
    .slice(0, 3);

  // One fixed-width line per project for the mini portfolio table.
  const miniRows = recent.map(p => {
    const icon = stalenessIcon(p.lastDate);
    const when = daysAgo(p.lastDate);
    const name = p.name.padEnd(16).slice(0, 16);
    const whenStr = when.padEnd(12).slice(0, 12);
    const summary = p.summary.slice(0, 32);
    return ` ${name} ${icon} ${whenStr} ${summary}`;
  });

  const miniStatus = [
    `ck — recent projects:`,
    ` ${'PROJECT'.padEnd(16)} S ${'LAST SEEN'.padEnd(12)} LAST SESSION`,
    ` ${'─'.repeat(68)}`,
    ...miniRows,
    ``,
    `Run /ck:list · /ck:resume <name> · /ck:init to register this folder`,
  ].join('\n');

  parts.push([
    `---`,
    `## ck: SESSION START`,
    ``,
    `IMPORTANT: Display the following as your FIRST message, verbatim:`,
    ``,
    '```',
    miniStatus,
    '```',
  ].join('\n'));

  return parts;
}
|
||||
|
||||
// Emit the hook payload only when there is something to inject.
const parts = main();
if (parts.length > 0) {
  const payload = { additionalContext: parts.join('\n\n---\n\n') };
  console.log(JSON.stringify(payload));
}
|
||||
@@ -62,9 +62,16 @@ analyze_observations() {
|
||||
analysis_count=$(wc -l < "$analysis_file" 2>/dev/null || echo 0)
|
||||
echo "[$(date)] Using last $analysis_count of $obs_count observations for analysis" >> "$LOG_FILE"
|
||||
|
||||
# Use relative path from PROJECT_DIR for cross-platform compatibility (#842).
|
||||
# On Windows (Git Bash/MSYS2), absolute paths from mktemp may use MSYS-style
|
||||
# prefixes (e.g. /c/Users/...) that the Claude subprocess cannot resolve.
|
||||
analysis_relpath=".observer-tmp/$(basename "$analysis_file")"
|
||||
|
||||
prompt_file="$(mktemp "${observer_tmp_dir}/ecc-observer-prompt.XXXXXX")"
|
||||
cat > "$prompt_file" <<PROMPT
|
||||
Read ${analysis_file} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
IMPORTANT: You are running in non-interactive --print mode. You MUST use the Write tool directly to create files. Do NOT ask for permission, do NOT ask for confirmation, do NOT output summaries instead of writing. Just read, analyze, and write.
|
||||
|
||||
Read ${analysis_relpath} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
If you find 3+ occurrences of the same pattern, you MUST write an instinct file directly to ${INSTINCTS_DIR}/<id>.md using the Write tool.
|
||||
Do NOT ask for permission to write files, do NOT describe what you would write, and do NOT stop at analysis when a qualifying pattern exists.
|
||||
|
||||
@@ -117,11 +124,19 @@ PROMPT
|
||||
max_turns=10
|
||||
fi
|
||||
|
||||
# Prevent observe.sh from recording this automated Haiku session as observations
|
||||
# Ensure CWD is PROJECT_DIR so the relative analysis_relpath resolves correctly
|
||||
# on all platforms, not just when the observer happens to be launched from the project root.
|
||||
cd "$PROJECT_DIR" || { echo "[$(date)] Failed to cd to PROJECT_DIR ($PROJECT_DIR), skipping analysis" >> "$LOG_FILE"; rm -f "$prompt_file" "$analysis_file"; return; }
|
||||
|
||||
# Prevent observe.sh from recording this automated Haiku session as observations.
|
||||
# Pass prompt via -p flag instead of stdin redirect for Windows compatibility (#842).
|
||||
ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print \
|
||||
--allowedTools "Read,Write" \
|
||||
< "$prompt_file" >> "$LOG_FILE" 2>&1 &
|
||||
-p "$(cat "$prompt_file")" >> "$LOG_FILE" 2>&1 &
|
||||
claude_pid=$!
|
||||
# prompt_file content was already expanded by the shell; remove early to avoid
|
||||
# leaving stale temp files during the (potentially long) analysis window.
|
||||
rm -f "$prompt_file"
|
||||
|
||||
(
|
||||
sleep "$timeout_seconds"
|
||||
@@ -135,7 +150,7 @@ PROMPT
|
||||
wait "$claude_pid"
|
||||
exit_code=$?
|
||||
kill "$watchdog_pid" 2>/dev/null || true
|
||||
rm -f "$prompt_file" "$analysis_file"
|
||||
rm -f "$analysis_file"
|
||||
|
||||
if [ "$exit_code" -ne 0 ]; then
|
||||
echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE"
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
---
|
||||
name: design-system
|
||||
description: Use this skill to generate or audit design systems, check visual consistency, and review PRs that touch styling.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Design System — Generate & Audit Visual Systems
|
||||
|
||||
## When to Use
|
||||
|
||||
716
skills/git-workflow/SKILL.md
Normal file
716
skills/git-workflow/SKILL.md
Normal file
@@ -0,0 +1,716 @@
|
||||
---
|
||||
name: git-workflow
|
||||
description: Git workflow patterns including branching strategies, commit conventions, merge vs rebase, conflict resolution, and collaborative development best practices for teams of all sizes.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Git Workflow Patterns
|
||||
|
||||
Best practices for Git version control, branching strategies, and collaborative development.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Setting up Git workflow for a new project
|
||||
- Deciding on branching strategy (GitFlow, trunk-based, GitHub flow)
|
||||
- Writing commit messages and PR descriptions
|
||||
- Resolving merge conflicts
|
||||
- Managing releases and version tags
|
||||
- Onboarding new team members to Git practices
|
||||
|
||||
## Branching Strategies
|
||||
|
||||
### GitHub Flow (Simple, Recommended for Most)
|
||||
|
||||
Best for continuous deployment and small-to-medium teams.
|
||||
|
||||
```
|
||||
main (protected, always deployable)
|
||||
│
|
||||
├── feature/user-auth → PR → merge to main
|
||||
├── feature/payment-flow → PR → merge to main
|
||||
└── fix/login-bug → PR → merge to main
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- `main` is always deployable
|
||||
- Create feature branches from `main`
|
||||
- Open Pull Request when ready for review
|
||||
- After approval and CI passes, merge to `main`
|
||||
- Deploy immediately after merge
|
||||
|
||||
### Trunk-Based Development (High-Velocity Teams)
|
||||
|
||||
Best for teams with strong CI/CD and feature flags.
|
||||
|
||||
```
|
||||
main (trunk)
|
||||
│
|
||||
├── short-lived feature (1-2 days max)
|
||||
├── short-lived feature
|
||||
└── short-lived feature
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- Everyone commits to `main` or very short-lived branches
|
||||
- Feature flags hide incomplete work
|
||||
- CI must pass before merge
|
||||
- Deploy multiple times per day
|
||||
|
||||
### GitFlow (Complex, Release-Cycle Driven)
|
||||
|
||||
Best for scheduled releases and enterprise projects.
|
||||
|
||||
```
|
||||
main (production releases)
|
||||
│
|
||||
└── develop (integration branch)
|
||||
│
|
||||
├── feature/user-auth
|
||||
├── feature/payment
|
||||
│
|
||||
├── release/1.0.0 → merge to main and develop
|
||||
│
|
||||
└── hotfix/critical → merge to main and develop
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- `main` contains production-ready code only
|
||||
- `develop` is the integration branch
|
||||
- Feature branches from `develop`, merge back to `develop`
|
||||
- Release branches from `develop`, merge to `main` and `develop`
|
||||
- Hotfix branches from `main`, merge to both `main` and `develop`
|
||||
|
||||
### When to Use Which
|
||||
|
||||
| Strategy | Team Size | Release Cadence | Best For |
|
||||
|----------|-----------|-----------------|----------|
|
||||
| GitHub Flow | Any | Continuous | SaaS, web apps, startups |
|
||||
| Trunk-Based | 5+ experienced | Multiple/day | High-velocity teams, feature flags |
|
||||
| GitFlow | 10+ | Scheduled | Enterprise, regulated industries |
|
||||
|
||||
## Commit Messages
|
||||
|
||||
### Conventional Commits Format
|
||||
|
||||
```
|
||||
<type>(<scope>): <subject>
|
||||
|
||||
[optional body]
|
||||
|
||||
[optional footer(s)]
|
||||
```
|
||||
|
||||
### Types
|
||||
|
||||
| Type | Use For | Example |
|
||||
|------|---------|---------|
|
||||
| `feat` | New feature | `feat(auth): add OAuth2 login` |
|
||||
| `fix` | Bug fix | `fix(api): handle null response in user endpoint` |
|
||||
| `docs` | Documentation | `docs(readme): update installation instructions` |
|
||||
| `style` | Formatting, no code change | `style: fix indentation in login component` |
|
||||
| `refactor` | Code refactoring | `refactor(db): extract connection pool to module` |
|
||||
| `test` | Adding/updating tests | `test(auth): add unit tests for token validation` |
|
||||
| `chore` | Maintenance tasks | `chore(deps): update dependencies` |
|
||||
| `perf` | Performance improvement | `perf(query): add index to users table` |
|
||||
| `ci` | CI/CD changes | `ci: add PostgreSQL service to test workflow` |
|
||||
| `revert` | Revert previous commit | `revert: revert "feat(auth): add OAuth2 login"` |
|
||||
|
||||
### Good vs Bad Examples
|
||||
|
||||
```
|
||||
# BAD: Vague, no context
|
||||
git commit -m "fixed stuff"
|
||||
git commit -m "updates"
|
||||
git commit -m "WIP"
|
||||
|
||||
# GOOD: Clear, specific, explains why
|
||||
git commit -m "fix(api): retry requests on 503 Service Unavailable
|
||||
|
||||
The external API occasionally returns 503 errors during peak hours.
|
||||
Added exponential backoff retry logic with max 3 attempts.
|
||||
|
||||
Closes #123"
|
||||
```
|
||||
|
||||
### Commit Message Template
|
||||
|
||||
Create `.gitmessage` in repo root:
|
||||
|
||||
```
|
||||
# <type>(<scope>): <subject>
|
||||
#
|
||||
# Types: feat, fix, docs, style, refactor, test, chore, perf, ci, revert
|
||||
# Scope: api, ui, db, auth, etc.
|
||||
# Subject: imperative mood, no period, max 50 chars
|
||||
#
|
||||
# [optional body] - explain why, not what
|
||||
# [optional footer] - Breaking changes, closes #issue
|
||||
```
|
||||
|
||||
Enable with: `git config commit.template .gitmessage`
|
||||
|
||||
## Merge vs Rebase
|
||||
|
||||
### Merge (Preserves History)
|
||||
|
||||
```bash
|
||||
# Creates a merge commit
|
||||
git checkout main
|
||||
git merge feature/user-auth
|
||||
|
||||
# Result:
|
||||
# * merge commit
|
||||
# |\
|
||||
# | * feature commits
|
||||
# |/
|
||||
# * main commits
|
||||
```
|
||||
|
||||
**Use when:**
|
||||
- Merging feature branches into `main`
|
||||
- You want to preserve exact history
|
||||
- Multiple people worked on the branch
|
||||
- The branch has been pushed and others may have based work on it
|
||||
|
||||
### Rebase (Linear History)
|
||||
|
||||
```bash
|
||||
# Rewrites feature commits onto target branch
|
||||
git checkout feature/user-auth
|
||||
git rebase main
|
||||
|
||||
# Result:
|
||||
# * feature commits (rewritten)
|
||||
# * main commits
|
||||
```
|
||||
|
||||
**Use when:**
|
||||
- Updating your local feature branch with latest `main`
|
||||
- You want a linear, clean history
|
||||
- The branch is local-only (not pushed)
|
||||
- You're the only one working on the branch
|
||||
|
||||
### Rebase Workflow
|
||||
|
||||
```bash
|
||||
# Update feature branch with latest main (before PR)
|
||||
git checkout feature/user-auth
|
||||
git fetch origin
|
||||
git rebase origin/main
|
||||
|
||||
# Fix any conflicts
|
||||
# Tests should still pass
|
||||
|
||||
# Force push (only if you're the only contributor)
|
||||
git push --force-with-lease origin feature/user-auth
|
||||
```
|
||||
|
||||
### When NOT to Rebase
|
||||
|
||||
```
|
||||
# NEVER rebase branches that:
|
||||
- Have been pushed to a shared repository
|
||||
- Other people have based work on
|
||||
- Are protected branches (main, develop)
|
||||
- Are already merged
|
||||
|
||||
# Why: Rebase rewrites history, breaking others' work
|
||||
```
|
||||
|
||||
## Pull Request Workflow
|
||||
|
||||
### PR Title Format
|
||||
|
||||
```
|
||||
<type>(<scope>): <description>
|
||||
|
||||
Examples:
|
||||
feat(auth): add SSO support for enterprise users
|
||||
fix(api): resolve race condition in order processing
|
||||
docs(api): add OpenAPI specification for v2 endpoints
|
||||
```
|
||||
|
||||
### PR Description Template
|
||||
|
||||
```markdown
|
||||
## What
|
||||
|
||||
Brief description of what this PR does.
|
||||
|
||||
## Why
|
||||
|
||||
Explain the motivation and context.
|
||||
|
||||
## How
|
||||
|
||||
Key implementation details worth highlighting.
|
||||
|
||||
## Testing
|
||||
|
||||
- [ ] Unit tests added/updated
|
||||
- [ ] Integration tests added/updated
|
||||
- [ ] Manual testing performed
|
||||
|
||||
## Screenshots (if applicable)
|
||||
|
||||
Before/after screenshots for UI changes.
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] Code follows project style guidelines
|
||||
- [ ] Self-review completed
|
||||
- [ ] Comments added for complex logic
|
||||
- [ ] Documentation updated
|
||||
- [ ] No new warnings introduced
|
||||
- [ ] Tests pass locally
|
||||
- [ ] Related issues linked
|
||||
|
||||
Closes #123
|
||||
```
|
||||
|
||||
### Code Review Checklist
|
||||
|
||||
**For Reviewers:**
|
||||
|
||||
- [ ] Does the code solve the stated problem?
|
||||
- [ ] Are there any edge cases not handled?
|
||||
- [ ] Is the code readable and maintainable?
|
||||
- [ ] Are there sufficient tests?
|
||||
- [ ] Are there security concerns?
|
||||
- [ ] Is the commit history clean (squashed if needed)?
|
||||
|
||||
**For Authors:**
|
||||
|
||||
- [ ] Self-review completed before requesting review
|
||||
- [ ] CI passes (tests, lint, typecheck)
|
||||
- [ ] PR size is reasonable (<500 lines ideal)
|
||||
- [ ] Related to a single feature/fix
|
||||
- [ ] Description clearly explains the change
|
||||
|
||||
## Conflict Resolution
|
||||
|
||||
### Identify Conflicts
|
||||
|
||||
```bash
|
||||
# Check for conflicts before merge
|
||||
git checkout main
|
||||
git merge feature/user-auth --no-commit --no-ff
|
||||
|
||||
# If conflicts, Git will show:
|
||||
# CONFLICT (content): Merge conflict in src/auth/login.ts
|
||||
# Automatic merge failed; fix conflicts and then commit the result.
|
||||
```
|
||||
|
||||
### Resolve Conflicts
|
||||
|
||||
```bash
|
||||
# See conflicted files
|
||||
git status
|
||||
|
||||
# View conflict markers in file
|
||||
# <<<<<<< HEAD
|
||||
# content from main
|
||||
# =======
|
||||
# content from feature branch
|
||||
# >>>>>>> feature/user-auth
|
||||
|
||||
# Option 1: Manual resolution
|
||||
# Edit file, remove markers, keep correct content
|
||||
|
||||
# Option 2: Use merge tool
|
||||
git mergetool
|
||||
|
||||
# Option 3: Accept one side
|
||||
git checkout --ours src/auth/login.ts # Keep main version
|
||||
git checkout --theirs src/auth/login.ts # Keep feature version
|
||||
|
||||
# After resolving, stage and commit
|
||||
git add src/auth/login.ts
|
||||
git commit
|
||||
```
|
||||
|
||||
### Conflict Prevention Strategies
|
||||
|
||||
```bash
|
||||
# 1. Keep feature branches small and short-lived
|
||||
# 2. Rebase frequently onto main
|
||||
git checkout feature/user-auth
|
||||
git fetch origin
|
||||
git rebase origin/main
|
||||
|
||||
# 3. Communicate with team about touching shared files
|
||||
# 4. Use feature flags instead of long-lived branches
|
||||
# 5. Review and merge PRs promptly
|
||||
```
|
||||
|
||||
## Branch Management
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
```
|
||||
# Feature branches
|
||||
feature/user-authentication
|
||||
feature/JIRA-123-payment-integration
|
||||
|
||||
# Bug fixes
|
||||
fix/login-redirect-loop
|
||||
fix/456-null-pointer-exception
|
||||
|
||||
# Hotfixes (production issues)
|
||||
hotfix/critical-security-patch
|
||||
hotfix/database-connection-leak
|
||||
|
||||
# Releases
|
||||
release/1.2.0
|
||||
release/2024-01-hotfix
|
||||
|
||||
# Experiments/POCs
|
||||
experiment/new-caching-strategy
|
||||
poc/graphql-migration
|
||||
```
|
||||
|
||||
### Branch Cleanup
|
||||
|
||||
```bash
|
||||
# Delete local branches that are merged
|
||||
git branch --merged main | grep -v "^\*\|main" | xargs -n 1 git branch -d
|
||||
|
||||
# Delete remote-tracking references for deleted remote branches
|
||||
git fetch -p
|
||||
|
||||
# Delete local branch
|
||||
git branch -d feature/user-auth # Safe delete (only if merged)
|
||||
git branch -D feature/user-auth # Force delete
|
||||
|
||||
# Delete remote branch
|
||||
git push origin --delete feature/user-auth
|
||||
```
|
||||
|
||||
### Stash Workflow
|
||||
|
||||
```bash
|
||||
# Save work in progress
|
||||
git stash push -m "WIP: user authentication"
|
||||
|
||||
# List stashes
|
||||
git stash list
|
||||
|
||||
# Apply most recent stash
|
||||
git stash pop
|
||||
|
||||
# Apply specific stash
|
||||
git stash apply stash@{2}
|
||||
|
||||
# Drop stash
|
||||
git stash drop stash@{0}
|
||||
```
|
||||
|
||||
## Release Management
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
```
|
||||
MAJOR.MINOR.PATCH
|
||||
|
||||
MAJOR: Breaking changes
|
||||
MINOR: New features, backward compatible
|
||||
PATCH: Bug fixes, backward compatible
|
||||
|
||||
Examples:
|
||||
1.0.0 → 1.0.1 (patch: bug fix)
|
||||
1.0.1 → 1.1.0 (minor: new feature)
|
||||
1.1.0 → 2.0.0 (major: breaking change)
|
||||
```
|
||||
|
||||
### Creating Releases
|
||||
|
||||
```bash
|
||||
# Create annotated tag
|
||||
git tag -a v1.2.0 -m "Release v1.2.0
|
||||
|
||||
Features:
|
||||
- Add user authentication
|
||||
- Implement password reset
|
||||
|
||||
Fixes:
|
||||
- Resolve login redirect issue
|
||||
|
||||
Breaking Changes:
|
||||
- None"
|
||||
|
||||
# Push tag to remote
|
||||
git push origin v1.2.0
|
||||
|
||||
# List tags
|
||||
git tag -l
|
||||
|
||||
# Delete tag
|
||||
git tag -d v1.2.0
|
||||
git push origin --delete v1.2.0
|
||||
```
|
||||
|
||||
### Changelog Generation
|
||||
|
||||
```bash
|
||||
# Generate changelog from commits
|
||||
git log v1.1.0..v1.2.0 --oneline --no-merges
|
||||
|
||||
# Or use conventional-changelog
|
||||
npx conventional-changelog -i CHANGELOG.md -s
|
||||
```
|
||||
|
||||
## Git Configuration
|
||||
|
||||
### Essential Configs
|
||||
|
||||
```bash
|
||||
# User identity
|
||||
git config --global user.name "Your Name"
|
||||
git config --global user.email "your@email.com"
|
||||
|
||||
# Default branch name
|
||||
git config --global init.defaultBranch main
|
||||
|
||||
# Pull behavior (rebase instead of merge)
|
||||
git config --global pull.rebase true
|
||||
|
||||
# Push behavior (push current branch only)
|
||||
git config --global push.default current
|
||||
|
||||
# Auto-correct typos
|
||||
git config --global help.autocorrect 1
|
||||
|
||||
# Better diff algorithm
|
||||
git config --global diff.algorithm histogram
|
||||
|
||||
# Color output
|
||||
git config --global color.ui auto
|
||||
```
|
||||
|
||||
### Useful Aliases
|
||||
|
||||
```bash
|
||||
# Add to ~/.gitconfig
|
||||
[alias]
|
||||
co = checkout
|
||||
br = branch
|
||||
ci = commit
|
||||
st = status
|
||||
unstage = reset HEAD --
|
||||
last = log -1 HEAD
|
||||
visual = log --oneline --graph --all
|
||||
amend = commit --amend --no-edit
|
||||
wip = commit -m "WIP"
|
||||
undo = reset --soft HEAD~1
|
||||
contributors = shortlog -sn
|
||||
```
|
||||
|
||||
### Gitignore Patterns
|
||||
|
||||
```gitignore
|
||||
# Dependencies
|
||||
node_modules/
|
||||
vendor/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
*.o
|
||||
*.exe
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# Test coverage
|
||||
coverage/
|
||||
|
||||
# Cache
|
||||
.cache/
|
||||
*.tsbuildinfo
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Starting a New Feature
|
||||
|
||||
```bash
|
||||
# 1. Update main branch
|
||||
git checkout main
|
||||
git pull origin main
|
||||
|
||||
# 2. Create feature branch
|
||||
git checkout -b feature/user-auth
|
||||
|
||||
# 3. Make changes and commit
|
||||
git add .
|
||||
git commit -m "feat(auth): implement OAuth2 login"
|
||||
|
||||
# 4. Push to remote
|
||||
git push -u origin feature/user-auth
|
||||
|
||||
# 5. Create Pull Request on GitHub/GitLab
|
||||
```
|
||||
|
||||
### Updating a PR with New Changes
|
||||
|
||||
```bash
|
||||
# 1. Make additional changes
|
||||
git add .
|
||||
git commit -m "feat(auth): add error handling"
|
||||
|
||||
# 2. Push updates
|
||||
git push origin feature/user-auth
|
||||
```
|
||||
|
||||
### Syncing Fork with Upstream
|
||||
|
||||
```bash
|
||||
# 1. Add upstream remote (once)
|
||||
git remote add upstream https://github.com/original/repo.git
|
||||
|
||||
# 2. Fetch upstream
|
||||
git fetch upstream
|
||||
|
||||
# 3. Merge upstream/main into your main
|
||||
git checkout main
|
||||
git merge upstream/main
|
||||
|
||||
# 4. Push to your fork
|
||||
git push origin main
|
||||
```
|
||||
|
||||
### Undoing Mistakes
|
||||
|
||||
```bash
|
||||
# Undo last commit (keep changes)
|
||||
git reset --soft HEAD~1
|
||||
|
||||
# Undo last commit (discard changes)
|
||||
git reset --hard HEAD~1
|
||||
|
||||
# Undo last commit pushed to remote
|
||||
git revert HEAD
|
||||
git push origin main
|
||||
|
||||
# Undo specific file changes
|
||||
git checkout HEAD -- path/to/file
|
||||
|
||||
# Fix last commit message
|
||||
git commit --amend -m "New message"
|
||||
|
||||
# Add forgotten file to last commit
|
||||
git add forgotten-file
|
||||
git commit --amend --no-edit
|
||||
```
|
||||
|
||||
## Git Hooks
|
||||
|
||||
### Pre-Commit Hook
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-commit
|
||||
|
||||
# Run linting
|
||||
npm run lint || exit 1
|
||||
|
||||
# Run tests
|
||||
npm test || exit 1
|
||||
|
||||
# Check for secrets
|
||||
if git diff --cached | grep -E '(password|api_key|secret)'; then
|
||||
echo "Possible secret detected. Commit aborted."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
### Pre-Push Hook
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-push
|
||||
|
||||
# Run full test suite
|
||||
npm run test:all || exit 1
|
||||
|
||||
# Check for console.log statements
|
||||
if git diff origin/main | grep -E 'console\.log'; then
|
||||
echo "Remove console.log statements before pushing."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
```
|
||||
# BAD: Committing directly to main
|
||||
git checkout main
|
||||
git commit -m "fix bug"
|
||||
|
||||
# GOOD: Use feature branches and PRs
|
||||
|
||||
# BAD: Committing secrets
|
||||
git add .env # Contains API keys
|
||||
|
||||
# GOOD: Add to .gitignore, use environment variables
|
||||
|
||||
# BAD: Giant PRs (1000+ lines)
|
||||
# GOOD: Break into smaller, focused PRs
|
||||
|
||||
# BAD: "Update" commit messages
|
||||
git commit -m "update"
|
||||
git commit -m "fix"
|
||||
|
||||
# GOOD: Descriptive messages
|
||||
git commit -m "fix(auth): resolve redirect loop after login"
|
||||
|
||||
# BAD: Rewriting public history
|
||||
git push --force origin main
|
||||
|
||||
# GOOD: Use revert for public branches
|
||||
git revert HEAD
|
||||
|
||||
# BAD: Long-lived feature branches (weeks/months)
|
||||
# GOOD: Keep branches short (days), rebase frequently
|
||||
|
||||
# BAD: Committing generated files
|
||||
git add dist/
|
||||
git add node_modules/
|
||||
|
||||
# GOOD: Add to .gitignore
|
||||
```
|
||||
|
||||
## Quick Reference
|
||||
|
||||
| Task | Command |
|
||||
|------|---------|
|
||||
| Create branch | `git checkout -b feature/name` |
|
||||
| Switch branch | `git checkout branch-name` |
|
||||
| Delete branch | `git branch -d branch-name` |
|
||||
| Merge branch | `git merge branch-name` |
|
||||
| Rebase branch | `git rebase main` |
|
||||
| View history | `git log --oneline --graph` |
|
||||
| View changes | `git diff` |
|
||||
| Stage changes | `git add .` or `git add -p` |
|
||||
| Commit | `git commit -m "message"` |
|
||||
| Push | `git push origin branch-name` |
|
||||
| Pull | `git pull origin branch-name` |
|
||||
| Stash | `git stash push -m "message"` |
|
||||
| Undo last commit | `git reset --soft HEAD~1` |
|
||||
| Revert commit | `git revert HEAD` |
|
||||
245
skills/healthcare-cdss-patterns/SKILL.md
Normal file
245
skills/healthcare-cdss-patterns/SKILL.md
Normal file
@@ -0,0 +1,245 @@
|
||||
---
|
||||
name: healthcare-cdss-patterns
|
||||
description: Clinical Decision Support System (CDSS) development patterns. Drug interaction checking, dose validation, clinical scoring (NEWS2, qSOFA), alert severity classification, and integration into EMR workflows.
|
||||
origin: Health1 Super Speciality Hospitals — contributed by Dr. Keyur Patel
|
||||
version: "1.0.0"
|
||||
---
|
||||
|
||||
# Healthcare CDSS Development Patterns
|
||||
|
||||
Patterns for building Clinical Decision Support Systems that integrate into EMR workflows. CDSS modules are patient safety critical — zero tolerance for false negatives.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Implementing drug interaction checking
|
||||
- Building dose validation engines
|
||||
- Implementing clinical scoring systems (NEWS2, qSOFA, APACHE, GCS)
|
||||
- Designing alert systems for abnormal clinical values
|
||||
- Building medication order entry with safety checks
|
||||
- Integrating lab result interpretation with clinical context
|
||||
|
||||
## How It Works
|
||||
|
||||
The CDSS engine is a **pure function library with zero side effects**. Input clinical data, output alerts. This makes it fully testable.
|
||||
|
||||
Three primary modules:
|
||||
|
||||
1. **`checkInteractions(newDrug, currentMeds, allergies)`** — Checks a new drug against current medications and known allergies. Returns severity-sorted `InteractionAlert[]`. Uses `DrugInteractionPair` data model.
|
||||
2. **`validateDose(drug, dose, route, weight, age, renalFunction)`** — Validates a prescribed dose against weight-based, age-adjusted, and renal-adjusted rules. Returns `DoseValidationResult`.
|
||||
3. **`calculateNEWS2(vitals)`** — National Early Warning Score 2 from `NEWS2Input`. Returns `NEWS2Result` with total score, risk level, and escalation guidance.
|
||||
|
||||
```
|
||||
EMR UI
|
||||
↓ (user enters data)
|
||||
CDSS Engine (pure functions, no side effects)
|
||||
├── Drug Interaction Checker
|
||||
├── Dose Validator
|
||||
├── Clinical Scoring (NEWS2, qSOFA, etc.)
|
||||
└── Alert Classifier
|
||||
↓ (returns alerts)
|
||||
EMR UI (displays alerts inline, blocks if critical)
|
||||
```
|
||||
|
||||
### Drug Interaction Checking
|
||||
|
||||
```typescript
|
||||
interface DrugInteractionPair {
|
||||
drugA: string; // generic name
|
||||
drugB: string; // generic name
|
||||
severity: 'critical' | 'major' | 'minor';
|
||||
mechanism: string;
|
||||
clinicalEffect: string;
|
||||
recommendation: string;
|
||||
}
|
||||
|
||||
function checkInteractions(
|
||||
newDrug: string,
|
||||
currentMedications: string[],
|
||||
allergyList: string[]
|
||||
): InteractionAlert[] {
|
||||
if (!newDrug) return [];
|
||||
const alerts: InteractionAlert[] = [];
|
||||
for (const current of currentMedications) {
|
||||
const interaction = findInteraction(newDrug, current);
|
||||
if (interaction) {
|
||||
alerts.push({ severity: interaction.severity, pair: [newDrug, current],
|
||||
message: interaction.clinicalEffect, recommendation: interaction.recommendation });
|
||||
}
|
||||
}
|
||||
for (const allergy of allergyList) {
|
||||
if (isCrossReactive(newDrug, allergy)) {
|
||||
alerts.push({ severity: 'critical', pair: [newDrug, allergy],
|
||||
message: `Cross-reactivity with documented allergy: ${allergy}`,
|
||||
recommendation: 'Do not prescribe without allergy consultation' });
|
||||
}
|
||||
}
|
||||
return alerts.sort((a, b) => severityOrder(a.severity) - severityOrder(b.severity));
|
||||
}
|
||||
```
|
||||
|
||||
Interaction pairs must be **bidirectional**: if Drug A interacts with Drug B, then Drug B interacts with Drug A.
|
||||
|
||||
### Dose Validation
|
||||
|
||||
```typescript
|
||||
interface DoseValidationResult {
|
||||
valid: boolean;
|
||||
message: string;
|
||||
suggestedRange: { min: number; max: number; unit: string } | null;
|
||||
factors: string[];
|
||||
}
|
||||
|
||||
function validateDose(
|
||||
drug: string,
|
||||
dose: number,
|
||||
route: 'oral' | 'iv' | 'im' | 'sc' | 'topical',
|
||||
patientWeight?: number,
|
||||
patientAge?: number,
|
||||
renalFunction?: number
|
||||
): DoseValidationResult {
|
||||
const rules = getDoseRules(drug, route);
|
||||
if (!rules) return { valid: true, message: 'No validation rules available', suggestedRange: null, factors: [] };
|
||||
const factors: string[] = [];
|
||||
|
||||
// SAFETY: if rules require weight but weight missing, BLOCK (not pass)
|
||||
if (rules.weightBased) {
|
||||
if (!patientWeight || patientWeight <= 0) {
|
||||
return { valid: false, message: `Weight required for ${drug} (mg/kg drug)`,
|
||||
suggestedRange: null, factors: ['weight_missing'] };
|
||||
}
|
||||
factors.push('weight');
|
||||
const maxDose = rules.maxPerKg * patientWeight;
|
||||
if (dose > maxDose) {
|
||||
return { valid: false, message: `Dose exceeds max for ${patientWeight}kg`,
|
||||
suggestedRange: { min: rules.minPerKg * patientWeight, max: maxDose, unit: rules.unit }, factors };
|
||||
}
|
||||
}
|
||||
|
||||
// Age-based adjustment (when rules define age brackets and age is provided)
|
||||
if (rules.ageAdjusted && patientAge !== undefined) {
|
||||
factors.push('age');
|
||||
const ageMax = rules.getAgeAdjustedMax(patientAge);
|
||||
if (dose > ageMax) {
|
||||
return { valid: false, message: `Exceeds age-adjusted max for ${patientAge}yr`,
|
||||
suggestedRange: { min: rules.typicalMin, max: ageMax, unit: rules.unit }, factors };
|
||||
}
|
||||
}
|
||||
|
||||
// Renal adjustment (when rules define eGFR brackets and eGFR is provided)
|
||||
if (rules.renalAdjusted && renalFunction !== undefined) {
|
||||
factors.push('renal');
|
||||
const renalMax = rules.getRenalAdjustedMax(renalFunction);
|
||||
if (dose > renalMax) {
|
||||
return { valid: false, message: `Exceeds renal-adjusted max for eGFR ${renalFunction}`,
|
||||
suggestedRange: { min: rules.typicalMin, max: renalMax, unit: rules.unit }, factors };
|
||||
}
|
||||
}
|
||||
|
||||
// Absolute max
|
||||
if (dose > rules.absoluteMax) {
|
||||
return { valid: false, message: `Exceeds absolute max ${rules.absoluteMax}${rules.unit}`,
|
||||
suggestedRange: { min: rules.typicalMin, max: rules.absoluteMax, unit: rules.unit },
|
||||
factors: [...factors, 'absolute_max'] };
|
||||
}
|
||||
return { valid: true, message: 'Within range',
|
||||
suggestedRange: { min: rules.typicalMin, max: rules.typicalMax, unit: rules.unit }, factors };
|
||||
}
|
||||
```
|
||||
|
||||
### Clinical Scoring: NEWS2
|
||||
|
||||
```typescript
|
||||
interface NEWS2Input {
|
||||
respiratoryRate: number; oxygenSaturation: number; supplementalOxygen: boolean;
|
||||
temperature: number; systolicBP: number; heartRate: number;
|
||||
consciousness: 'alert' | 'voice' | 'pain' | 'unresponsive';
|
||||
}
|
||||
interface NEWS2Result {
|
||||
total: number; // 0-20
|
||||
risk: 'low' | 'low-medium' | 'medium' | 'high';
|
||||
components: Record<string, number>;
|
||||
escalation: string;
|
||||
}
|
||||
```
|
||||
|
||||
Scoring tables must match the Royal College of Physicians specification exactly.
|
||||
|
||||
### Alert Severity and UI Behavior
|
||||
|
||||
| Severity | UI Behavior | Clinician Action Required |
|
||||
|----------|-------------|--------------------------|
|
||||
| Critical | Block action. Non-dismissable modal. Red. | Must document override reason to proceed |
|
||||
| Major | Warning banner inline. Orange. | Must acknowledge before proceeding |
|
||||
| Minor | Info note inline. Yellow. | Awareness only, no action required |
|
||||
|
||||
Critical alerts must NEVER be auto-dismissed or implemented as toast notifications. Override reasons must be stored in the audit trail.
|
||||
|
||||
### Testing CDSS (Zero Tolerance for False Negatives)
|
||||
|
||||
```typescript
|
||||
describe('CDSS — Patient Safety', () => {
|
||||
INTERACTION_PAIRS.forEach(({ drugA, drugB, severity }) => {
|
||||
it(`detects ${drugA} + ${drugB} (${severity})`, () => {
|
||||
const alerts = checkInteractions(drugA, [drugB], []);
|
||||
expect(alerts.length).toBeGreaterThan(0);
|
||||
expect(alerts[0].severity).toBe(severity);
|
||||
});
|
||||
it(`detects ${drugB} + ${drugA} (reverse)`, () => {
|
||||
const alerts = checkInteractions(drugB, [drugA], []);
|
||||
expect(alerts.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
it('blocks mg/kg drug when weight is missing', () => {
|
||||
const result = validateDose('gentamicin', 300, 'iv');
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.factors).toContain('weight_missing');
|
||||
});
|
||||
it('handles malformed drug data gracefully', () => {
|
||||
expect(() => checkInteractions('', [], [])).not.toThrow();
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
Pass criteria: 100%. A single missed interaction is a patient safety event.
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Making CDSS checks optional or skippable without documented reason
|
||||
- Implementing interaction checks as toast notifications
|
||||
- Using `any` types for drug or clinical data
|
||||
- Hardcoding interaction pairs instead of using a maintainable data structure
|
||||
- Silently catching errors in CDSS engine (must surface failures loudly)
|
||||
- Skipping weight-based validation when weight is not available (must block, not pass)
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Drug Interaction Check
|
||||
|
||||
```typescript
|
||||
const alerts = checkInteractions('warfarin', ['aspirin', 'metformin'], ['penicillin']);
|
||||
// [{ severity: 'critical', pair: ['warfarin', 'aspirin'],
|
||||
// message: 'Increased bleeding risk', recommendation: 'Avoid combination' }]
|
||||
```
|
||||
|
||||
### Example 2: Dose Validation
|
||||
|
||||
```typescript
|
||||
const ok = validateDose('paracetamol', 1000, 'oral', 70, 45);
|
||||
// { valid: true, suggestedRange: { min: 500, max: 4000, unit: 'mg' } }
|
||||
|
||||
const bad = validateDose('paracetamol', 5000, 'oral', 70, 45);
|
||||
// { valid: false, message: 'Exceeds absolute max 4000mg' }
|
||||
|
||||
const noWeight = validateDose('gentamicin', 300, 'iv');
|
||||
// { valid: false, factors: ['weight_missing'] }
|
||||
```
|
||||
|
||||
### Example 3: NEWS2 Scoring
|
||||
|
||||
```typescript
|
||||
const result = calculateNEWS2({
|
||||
respiratoryRate: 24, oxygenSaturation: 93, supplementalOxygen: true,
|
||||
temperature: 38.5, systolicBP: 100, heartRate: 110, consciousness: 'voice'
|
||||
});
|
||||
// { total: 13, risk: 'high', escalation: 'Urgent clinical review. Consider ICU.' }
|
||||
```
|
||||
skills/healthcare-emr-patterns/SKILL.md — new file, 159 lines (@@ -0,0 +1,159 @@)
|
||||
---
|
||||
name: healthcare-emr-patterns
|
||||
description: EMR/EHR development patterns for healthcare applications. Clinical safety, encounter workflows, prescription generation, clinical decision support integration, and accessibility-first UI for medical data entry.
|
||||
origin: Health1 Super Speciality Hospitals — contributed by Dr. Keyur Patel
|
||||
version: "1.0.0"
|
||||
---
|
||||
|
||||
# Healthcare EMR Development Patterns
|
||||
|
||||
Patterns for building Electronic Medical Record (EMR) and Electronic Health Record (EHR) systems. Prioritizes patient safety, clinical accuracy, and practitioner efficiency.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Building patient encounter workflows (complaint, exam, diagnosis, prescription)
|
||||
- Implementing clinical note-taking (structured + free text + voice-to-text)
|
||||
- Designing prescription/medication modules with drug interaction checking
|
||||
- Integrating Clinical Decision Support Systems (CDSS)
|
||||
- Building lab result displays with reference range highlighting
|
||||
- Implementing audit trails for clinical data
|
||||
- Designing healthcare-accessible UIs for clinical data entry
|
||||
|
||||
## How It Works
|
||||
|
||||
### Patient Safety First
|
||||
|
||||
Every design decision must be evaluated against: "Could this harm a patient?"
|
||||
|
||||
- Drug interactions MUST alert, not silently pass
|
||||
- Abnormal lab values MUST be visually flagged
|
||||
- Critical vitals MUST trigger escalation workflows
|
||||
- No clinical data modification without audit trail
|
||||
|
||||
### Single-Page Encounter Flow
|
||||
|
||||
Clinical encounters should flow vertically on a single page — no tab switching:
|
||||
|
||||
```
|
||||
Patient Header (sticky — always visible)
|
||||
├── Demographics, allergies, active medications
|
||||
│
|
||||
Encounter Flow (vertical scroll)
|
||||
├── 1. Chief Complaint (structured templates + free text)
|
||||
├── 2. History of Present Illness
|
||||
├── 3. Physical Examination (system-wise)
|
||||
├── 4. Vitals (auto-trigger clinical scoring)
|
||||
├── 5. Diagnosis (ICD-10/SNOMED search)
|
||||
├── 6. Medications (drug DB + interaction check)
|
||||
├── 7. Investigations (lab/radiology orders)
|
||||
├── 8. Plan & Follow-up
|
||||
└── 9. Sign / Lock / Print
|
||||
```
|
||||
|
||||
### Smart Template System
|
||||
|
||||
```typescript
|
||||
interface ClinicalTemplate {
|
||||
id: string;
|
||||
name: string; // e.g., "Chest Pain"
|
||||
chips: string[]; // clickable symptom chips
|
||||
requiredFields: string[]; // mandatory data points
|
||||
redFlags: string[]; // triggers non-dismissable alert
|
||||
icdSuggestions: string[]; // pre-mapped diagnosis codes
|
||||
}
|
||||
```
|
||||
|
||||
Red flags in any template must trigger a visible, non-dismissable alert — NOT a toast notification.
|
||||
|
||||
### Medication Safety Pattern
|
||||
|
||||
```
|
||||
User selects drug
|
||||
→ Check current medications for interactions
|
||||
→ Check encounter medications for interactions
|
||||
→ Check patient allergies
|
||||
→ Validate dose against weight/age/renal function
|
||||
→ If CRITICAL interaction: BLOCK prescribing entirely
|
||||
→ Clinician must document override reason to proceed past a block
|
||||
→ If MAJOR interaction: display warning, require acknowledgment
|
||||
→ Log all alerts and override reasons in audit trail
|
||||
```
|
||||
|
||||
Critical interactions **block prescribing by default**. The clinician must explicitly override with a documented reason stored in the audit trail. The system never silently allows a critical interaction.
|
||||
|
||||
### Locked Encounter Pattern
|
||||
|
||||
Once a clinical encounter is signed:
|
||||
- No edits allowed — only an addendum (a separate linked record)
|
||||
- Both original and addendum appear in the patient timeline
|
||||
- Audit trail captures who signed, when, and any addendum records
|
||||
|
||||
### UI Patterns for Clinical Data
|
||||
|
||||
**Vitals Display:** Current values with normal range highlighting (green/yellow/red), trend arrows vs previous, clinical scoring auto-calculated (NEWS2, qSOFA), escalation guidance inline.
|
||||
|
||||
**Lab Results Display:** Normal range highlighting, previous value comparison, critical values with non-dismissable alert, collection/analysis timestamps, pending orders with expected turnaround.
|
||||
|
||||
**Prescription PDF:** One-click generation with patient demographics, allergies, diagnosis, drug details (generic + brand, dose, route, frequency, duration), clinician signature block.
|
||||
|
||||
### Accessibility for Healthcare
|
||||
|
||||
Healthcare UIs have stricter requirements than typical web apps:
|
||||
- 4.5:1 minimum contrast (WCAG AA) — clinicians work in varied lighting
|
||||
- Large touch targets (44x44px minimum) — for gloved/rushed interaction
|
||||
- Keyboard navigation — for power users entering data rapidly
|
||||
- No color-only indicators — always pair color with text/icon (colorblind clinicians)
|
||||
- Screen reader labels on all form fields
|
||||
- No auto-dismissing toasts for clinical alerts — clinician must actively acknowledge
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Storing clinical data in browser localStorage
|
||||
- Silent failures in drug interaction checking
|
||||
- Dismissable toasts for critical clinical alerts
|
||||
- Tab-based encounter UIs that fragment the clinical workflow
|
||||
- Allowing edits to signed/locked encounters
|
||||
- Displaying clinical data without audit trail
|
||||
- Using `any` type for clinical data structures
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Patient Encounter Flow
|
||||
|
||||
```
|
||||
Doctor opens encounter for Patient #4521
|
||||
→ Sticky header shows: "Rajesh M, 58M, Allergies: Penicillin, Active Meds: Metformin 500mg"
|
||||
→ Chief Complaint: selects "Chest Pain" template
|
||||
→ Clicks chips: "substernal", "radiating to left arm", "crushing"
|
||||
→ Red flag "crushing substernal chest pain" triggers non-dismissable alert
|
||||
→ Examination: CVS system — "S1 S2 normal, no murmur"
|
||||
→ Vitals: HR 110, BP 90/60, SpO2 94%
|
||||
→ NEWS2 auto-calculates: score 8, risk HIGH, escalation alert shown
|
||||
→ Diagnosis: searches "ACS" → selects ICD-10 I21.9
|
||||
→ Medications: selects Aspirin 300mg
|
||||
→ CDSS checks against Metformin: no interaction
|
||||
→ Signs encounter → locked, addendum-only from this point
|
||||
```
|
||||
|
||||
### Example 2: Medication Safety Workflow
|
||||
|
||||
```
|
||||
Doctor prescribes Warfarin for Patient #4521
|
||||
→ CDSS detects: Warfarin + Aspirin = CRITICAL interaction
|
||||
→ UI: red non-dismissable modal blocks prescribing
|
||||
→ Doctor clicks "Override with reason"
|
||||
→ Types: "Benefits outweigh risks — monitored INR protocol"
|
||||
→ Override reason + alert stored in audit trail
|
||||
→ Prescription proceeds with documented override
|
||||
```
|
||||
|
||||
### Example 3: Locked Encounter + Addendum
|
||||
|
||||
```
|
||||
Encounter #E-2024-0891 signed by Dr. Shah at 14:30
|
||||
→ All fields locked — no edit buttons visible
|
||||
→ "Add Addendum" button available
|
||||
→ Dr. Shah clicks addendum, adds: "Lab results received — Troponin elevated"
|
||||
→ New record E-2024-0891-A1 linked to original
|
||||
→ Timeline shows both: original encounter + addendum with timestamps
|
||||
```
|
||||
Some files were not shown because too many files have changed in this diff.