mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
Compare commits
67 Commits
affaan/lar
...
fix/stop-h
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
284d2995ba | ||
|
|
dd675d4258 | ||
|
|
db12d3d838 | ||
|
|
46f37ae4fb | ||
|
|
0c166e14da | ||
|
|
527c79350c | ||
|
|
0ebcfc368e | ||
|
|
bec1ebf76d | ||
|
|
be76918850 | ||
|
|
99a154a908 | ||
|
|
ebf0f135bb | ||
|
|
2d27da52e2 | ||
|
|
65c4a0f6ba | ||
|
|
ab49c9adf5 | ||
|
|
b7a82cf240 | ||
|
|
9a55fd069b | ||
|
|
d9e8305aa1 | ||
|
|
f2bf72c005 | ||
|
|
3ae0df781f | ||
|
|
a346a304b0 | ||
|
|
81acf0c928 | ||
|
|
06a77911e6 | ||
|
|
9406f35fab | ||
|
|
c5e3658ba6 | ||
|
|
eeeea506a6 | ||
|
|
fc1ea4fbea | ||
|
|
00787d68e4 | ||
|
|
1e3572becf | ||
|
|
7462168377 | ||
|
|
3c3781ca43 | ||
|
|
27d71c9548 | ||
|
|
6f16e75f9d | ||
|
|
0d30da1fc7 | ||
|
|
e686bcbc82 | ||
|
|
25c8a5de08 | ||
|
|
a8e088a54e | ||
|
|
eac0228f88 | ||
|
|
4fcaaf8a89 | ||
|
|
17f6f95090 | ||
|
|
1e226ba556 | ||
|
|
9b24bedf85 | ||
|
|
e3f2bda9fc | ||
|
|
63737544a1 | ||
|
|
b44ba7096f | ||
|
|
45baaa1ea5 | ||
|
|
4da1fb388c | ||
|
|
917c35bb6f | ||
|
|
ee3f348dcb | ||
|
|
e6eb99271f | ||
|
|
7cabf77142 | ||
|
|
9cfcfac665 | ||
|
|
0284f60871 | ||
|
|
7a17ec9b14 | ||
|
|
243fae8476 | ||
|
|
dc92b5c62b | ||
|
|
3fbfd7f7ff | ||
|
|
95a1435f61 | ||
|
|
e57ad5c33d | ||
|
|
f7d589ce21 | ||
|
|
6af7ca1afc | ||
|
|
d6061cf937 | ||
|
|
ec921e5202 | ||
|
|
d016e68cee | ||
|
|
aed18eb571 | ||
|
|
f3cf808814 | ||
|
|
e22cb57718 | ||
|
|
4811e8c73b |
@@ -86,7 +86,8 @@ startup_timeout_sec = 30
|
||||
# args = ["-y", "@cloudflare/mcp-server-cloudflare"]
|
||||
|
||||
[features]
|
||||
# Codex multi-agent support is experimental as of March 2026.
|
||||
# Codex multi-agent collaboration is stable and on by default in current builds.
|
||||
# Keep the explicit toggle here so the repo documents its expectation clearly.
|
||||
multi_agent = true
|
||||
|
||||
# Profiles — switch with `codex -p <name>`
|
||||
@@ -101,6 +102,9 @@ sandbox_mode = "workspace-write"
|
||||
web_search = "live"
|
||||
|
||||
[agents]
|
||||
[agents]
|
||||
# Multi-agent role limits and local role definitions.
|
||||
# These map to `.codex/agents/*.toml` and mirror the repo's explorer/reviewer/docs workflow.
|
||||
max_threads = 6
|
||||
max_depth = 1
|
||||
|
||||
|
||||
26
.github/workflows/ci.yml
vendored
26
.github/workflows/ci.yml
vendored
@@ -34,10 +34,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Setup Node.js ${{ matrix.node }}
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: ${{ matrix.node }}
|
||||
|
||||
@@ -68,7 +68,7 @@ jobs:
|
||||
|
||||
- name: Cache npm
|
||||
if: matrix.pm == 'npm'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ${{ steps.npm-cache-dir.outputs.dir }}
|
||||
key: ${{ runner.os }}-node-${{ matrix.node }}-npm-${{ hashFiles('**/package-lock.json') }}
|
||||
@@ -83,7 +83,7 @@ jobs:
|
||||
|
||||
- name: Cache pnpm
|
||||
if: matrix.pm == 'pnpm'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache-dir.outputs.dir }}
|
||||
key: ${{ runner.os }}-node-${{ matrix.node }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
@@ -104,7 +104,7 @@ jobs:
|
||||
|
||||
- name: Cache yarn
|
||||
if: matrix.pm == 'yarn'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ${{ steps.yarn-cache-dir.outputs.dir }}
|
||||
key: ${{ runner.os }}-node-${{ matrix.node }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -113,7 +113,7 @@ jobs:
|
||||
|
||||
- name: Cache bun
|
||||
if: matrix.pm == 'bun'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ~/.bun/install/cache
|
||||
key: ${{ runner.os }}-bun-${{ hashFiles('**/bun.lockb') }}
|
||||
@@ -146,7 +146,7 @@ jobs:
|
||||
# Upload test artifacts on failure
|
||||
- name: Upload test artifacts
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
|
||||
with:
|
||||
name: test-results-${{ matrix.os }}-node${{ matrix.node }}-${{ matrix.pm }}
|
||||
path: |
|
||||
@@ -160,10 +160,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20.x'
|
||||
|
||||
@@ -205,10 +205,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20.x'
|
||||
|
||||
@@ -223,10 +223,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20.x'
|
||||
|
||||
|
||||
10
.github/workflows/maintenance.yml
vendored
10
.github/workflows/maintenance.yml
vendored
@@ -15,8 +15,8 @@ jobs:
|
||||
name: Check Dependencies
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v4
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20.x'
|
||||
- name: Check for outdated packages
|
||||
@@ -26,8 +26,8 @@ jobs:
|
||||
name: Security Audit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v4
|
||||
- uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
- uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20.x'
|
||||
- name: Run security audit
|
||||
@@ -43,7 +43,7 @@ jobs:
|
||||
name: Stale Issues/PRs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v9
|
||||
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
|
||||
with:
|
||||
stale-issue-message: 'This issue is stale due to inactivity.'
|
||||
stale-pr-message: 'This PR is stale due to inactivity.'
|
||||
|
||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
2
.github/workflows/reusable-release.yml
vendored
2
.github/workflows/reusable-release.yml
vendored
@@ -23,7 +23,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
14
.github/workflows/reusable-test.yml
vendored
14
.github/workflows/reusable-test.yml
vendored
@@ -27,10 +27,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: ${{ inputs.node-version }}
|
||||
|
||||
@@ -59,7 +59,7 @@ jobs:
|
||||
|
||||
- name: Cache npm
|
||||
if: inputs.package-manager == 'npm'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ${{ steps.npm-cache-dir.outputs.dir }}
|
||||
key: ${{ runner.os }}-node-${{ inputs.node-version }}-npm-${{ hashFiles('**/package-lock.json') }}
|
||||
@@ -74,7 +74,7 @@ jobs:
|
||||
|
||||
- name: Cache pnpm
|
||||
if: inputs.package-manager == 'pnpm'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ${{ steps.pnpm-cache-dir.outputs.dir }}
|
||||
key: ${{ runner.os }}-node-${{ inputs.node-version }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
|
||||
@@ -95,7 +95,7 @@ jobs:
|
||||
|
||||
- name: Cache yarn
|
||||
if: inputs.package-manager == 'yarn'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ${{ steps.yarn-cache-dir.outputs.dir }}
|
||||
key: ${{ runner.os }}-node-${{ inputs.node-version }}-yarn-${{ hashFiles('**/yarn.lock') }}
|
||||
@@ -104,7 +104,7 @@ jobs:
|
||||
|
||||
- name: Cache bun
|
||||
if: inputs.package-manager == 'bun'
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4
|
||||
with:
|
||||
path: ~/.bun/install/cache
|
||||
key: ${{ runner.os }}-bun-${{ hashFiles('**/bun.lockb') }}
|
||||
@@ -134,7 +134,7 @@ jobs:
|
||||
|
||||
- name: Upload test artifacts
|
||||
if: failure()
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
|
||||
with:
|
||||
name: test-results-${{ inputs.os }}-node${{ inputs.node-version }}-${{ inputs.package-manager }}
|
||||
path: |
|
||||
|
||||
4
.github/workflows/reusable-validate.yml
vendored
4
.github/workflows/reusable-validate.yml
vendored
@@ -17,10 +17,10 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: ${{ inputs.node-version }}
|
||||
|
||||
|
||||
184
.trae/README.md
Normal file
184
.trae/README.md
Normal file
@@ -0,0 +1,184 @@
|
||||
# Everything Claude Code for Trae
|
||||
|
||||
Bring Everything Claude Code (ECC) workflows to Trae IDE. This repository provides custom commands, agents, skills, and rules that can be installed into any Trae project with a single command.
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Option 1: Local Installation (Current Project Only)
|
||||
|
||||
```bash
|
||||
# Install to current project
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
This creates `.trae-cn/` in your project directory.
|
||||
|
||||
### Option 2: Global Installation (All Projects)
|
||||
|
||||
```bash
|
||||
# Install globally to ~/.trae-cn/
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
|
||||
# Or from the .trae folder directly
|
||||
cd /path/to/your/project/.trae
|
||||
TRAE_ENV=cn ./install.sh ~
|
||||
```
|
||||
|
||||
This creates `~/.trae-cn/` which applies to all Trae projects.
|
||||
|
||||
### Option 3: Quick Install to Current Directory
|
||||
|
||||
```bash
|
||||
# If already in project directory with .trae folder
|
||||
cd .trae
|
||||
./install.sh
|
||||
```
|
||||
|
||||
The installer uses non-destructive copy - it will not overwrite your existing files.
|
||||
|
||||
## Installation Modes
|
||||
|
||||
### Local Installation
|
||||
|
||||
Install to the current project's `.trae-cn` directory:
|
||||
|
||||
```bash
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
This creates `/path/to/your/project/.trae-cn/` with all ECC components.
|
||||
|
||||
### Global Installation
|
||||
|
||||
Install to your home directory's `.trae-cn` directory (applies to all Trae projects):
|
||||
|
||||
```bash
|
||||
# From project directory
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
|
||||
# Or directly from .trae folder
|
||||
cd .trae
|
||||
TRAE_ENV=cn ./install.sh ~
|
||||
```
|
||||
|
||||
This creates `~/.trae-cn/` with all ECC components. All Trae projects will use these global installations.
|
||||
|
||||
**Note**: Global installation is useful when you want to maintain a single copy of ECC across all your projects.
|
||||
|
||||
## Environment Support
|
||||
|
||||
- **Default**: Uses `.trae` directory
|
||||
- **CN Environment**: Uses `.trae-cn` directory (set via `TRAE_ENV=cn`)
|
||||
|
||||
### Force Environment
|
||||
|
||||
```bash
|
||||
# From project root, force the CN environment
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
|
||||
# From inside the .trae folder
|
||||
cd .trae
|
||||
TRAE_ENV=cn ./install.sh
|
||||
```
|
||||
|
||||
**Note**: `TRAE_ENV` is a global environment variable that applies to the entire installation session.
|
||||
|
||||
## Uninstall
|
||||
|
||||
The uninstaller uses a manifest file (`.ecc-manifest`) to track installed files, ensuring safe removal:
|
||||
|
||||
```bash
|
||||
# Uninstall from current directory (if already inside .trae or .trae-cn)
|
||||
cd .trae-cn
|
||||
./uninstall.sh
|
||||
|
||||
# Or uninstall from project root
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/uninstall.sh
|
||||
|
||||
# Uninstall globally from home directory
|
||||
TRAE_ENV=cn .trae/uninstall.sh ~
|
||||
|
||||
# Will ask for confirmation before uninstalling
|
||||
```
|
||||
|
||||
### Uninstall Behavior
|
||||
|
||||
- **Safe removal**: Only removes files tracked in the manifest (installed by ECC)
|
||||
- **User files preserved**: Any files you added manually are kept
|
||||
- **Non-empty directories**: Directories containing user-added files are skipped
|
||||
- **Manifest-based**: Requires `.ecc-manifest` file (created during install)
|
||||
|
||||
### Environment Support
|
||||
|
||||
Uninstall respects the same `TRAE_ENV` environment variable as install:
|
||||
|
||||
```bash
|
||||
# Uninstall from .trae-cn (CN environment)
|
||||
TRAE_ENV=cn ./uninstall.sh
|
||||
|
||||
# Uninstall from .trae (default environment)
|
||||
./uninstall.sh
|
||||
```
|
||||
|
||||
**Note**: If no manifest file is found (old installation), the uninstaller will ask whether to remove the entire directory.
|
||||
|
||||
## What's Included
|
||||
|
||||
### Commands
|
||||
|
||||
Commands are on-demand workflows invocable via the `/` menu in Trae chat. All commands are reused directly from the project root's `commands/` folder.
|
||||
|
||||
### Agents
|
||||
|
||||
Agents are specialized AI assistants with specific tool configurations. All agents are reused directly from the project root's `agents/` folder.
|
||||
|
||||
### Skills
|
||||
|
||||
Skills are on-demand workflows invocable via the `/` menu in chat. All skills are reused directly from the project's `skills/` folder.
|
||||
|
||||
### Rules
|
||||
|
||||
Rules provide always-on rules and context that shape how the agent works with your code. All rules are reused directly from the project root's `rules/` folder.
|
||||
|
||||
## Usage
|
||||
|
||||
1. Type `/` in chat to open the commands menu
|
||||
2. Select a command or skill
|
||||
3. The agent will guide you through the workflow with specific instructions and checklists
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
.trae/ (or .trae-cn/)
|
||||
├── commands/ # Command files (reused from project root)
|
||||
├── agents/ # Agent files (reused from project root)
|
||||
├── skills/ # Skill files (reused from skills/)
|
||||
├── rules/ # Rule files (reused from project root)
|
||||
├── install.sh # Install script
|
||||
├── uninstall.sh # Uninstall script
|
||||
└── README.md # This file
|
||||
```
|
||||
|
||||
## Customization
|
||||
|
||||
All files are yours to modify after installation. The installer never overwrites existing files, so your customizations are safe across re-installs.
|
||||
|
||||
**Note**: The `install.sh` and `uninstall.sh` scripts are automatically copied to the target directory during installation, so you can run these commands directly from your project.
|
||||
|
||||
## Recommended Workflow
|
||||
|
||||
1. **Start with planning**: Use `/plan` command to break down complex features
|
||||
2. **Write tests first**: Invoke `/tdd` command before implementing
|
||||
3. **Review your code**: Use `/code-review` after writing code
|
||||
4. **Check security**: Use `/code-review` again for auth, API endpoints, or sensitive data handling
|
||||
5. **Fix build errors**: Use `/build-fix` if there are build errors
|
||||
|
||||
## Next Steps
|
||||
|
||||
- Open your project in Trae
|
||||
- Type `/` to see available commands
|
||||
- Enjoy the ECC workflows!
|
||||
192
.trae/README.zh-CN.md
Normal file
192
.trae/README.zh-CN.md
Normal file
@@ -0,0 +1,192 @@
|
||||
# Everything Claude Code for Trae
|
||||
|
||||
为 Trae IDE 带来 Everything Claude Code (ECC) 工作流。此仓库提供自定义命令、智能体、技能和规则,可以通过单个命令安装到任何 Trae 项目中。
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 方式一:本地安装到 `.trae` 目录(默认环境)
|
||||
|
||||
```bash
|
||||
# 安装到当前项目的 .trae 目录
|
||||
cd /path/to/your/project
|
||||
.trae/install.sh
|
||||
```
|
||||
|
||||
这将在您的项目目录中创建 `.trae/`。
|
||||
|
||||
### 方式二:本地安装到 `.trae-cn` 目录(CN 环境)
|
||||
|
||||
```bash
|
||||
# 安装到当前项目的 .trae-cn 目录
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
这将在您的项目目录中创建 `.trae-cn/`。
|
||||
|
||||
### 方式三:全局安装到 `~/.trae` 目录(默认环境)
|
||||
|
||||
```bash
|
||||
# 全局安装到 ~/.trae/
|
||||
cd /path/to/your/project
|
||||
.trae/install.sh ~
|
||||
```
|
||||
|
||||
这将创建 `~/.trae/`,适用于所有 Trae 项目。
|
||||
|
||||
### 方式四:全局安装到 `~/.trae-cn` 目录(CN 环境)
|
||||
|
||||
```bash
|
||||
# 全局安装到 ~/.trae-cn/
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
```
|
||||
|
||||
这将创建 `~/.trae-cn/`,适用于所有 Trae 项目。
|
||||
|
||||
安装程序使用非破坏性复制 - 它不会覆盖您现有的文件。
|
||||
|
||||
## 安装模式
|
||||
|
||||
### 本地安装
|
||||
|
||||
安装到当前项目的 `.trae` 或 `.trae-cn` 目录:
|
||||
|
||||
```bash
|
||||
# 安装到当前项目的 .trae 目录(默认)
|
||||
cd /path/to/your/project
|
||||
.trae/install.sh
|
||||
|
||||
# 安装到当前项目的 .trae-cn 目录(CN 环境)
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
```
|
||||
|
||||
### 全局安装
|
||||
|
||||
安装到您主目录的 `.trae` 或 `.trae-cn` 目录(适用于所有 Trae 项目):
|
||||
|
||||
```bash
|
||||
# 全局安装到 ~/.trae/(默认)
|
||||
.trae/install.sh ~
|
||||
|
||||
# 全局安装到 ~/.trae-cn/(CN 环境)
|
||||
TRAE_ENV=cn .trae/install.sh ~
|
||||
```
|
||||
|
||||
**注意**:全局安装适用于希望在所有项目之间维护单个 ECC 副本的场景。
|
||||
|
||||
## 环境支持
|
||||
|
||||
- **默认**:使用 `.trae` 目录
|
||||
- **CN 环境**:使用 `.trae-cn` 目录(通过 `TRAE_ENV=cn` 设置)
|
||||
|
||||
### 强制指定环境
|
||||
|
||||
```bash
|
||||
# 从项目根目录强制使用 CN 环境
|
||||
TRAE_ENV=cn .trae/install.sh
|
||||
|
||||
# 进入 .trae 目录后使用默认环境
|
||||
cd .trae
|
||||
./install.sh
|
||||
```
|
||||
|
||||
**注意**:`TRAE_ENV` 是一个全局环境变量,适用于整个安装会话。
|
||||
|
||||
## 卸载
|
||||
|
||||
卸载程序使用清单文件(`.ecc-manifest`)跟踪已安装的文件,确保安全删除:
|
||||
|
||||
```bash
|
||||
# 从当前目录卸载(如果已经在 .trae 或 .trae-cn 目录中)
|
||||
cd .trae-cn
|
||||
./uninstall.sh
|
||||
|
||||
# 或者从项目根目录卸载
|
||||
cd /path/to/your/project
|
||||
TRAE_ENV=cn .trae/uninstall.sh
|
||||
|
||||
# 从主目录全局卸载
|
||||
TRAE_ENV=cn .trae/uninstall.sh ~
|
||||
|
||||
# 卸载前会询问确认
|
||||
```
|
||||
|
||||
### 卸载行为
|
||||
|
||||
- **安全删除**:仅删除清单中跟踪的文件(由 ECC 安装的文件)
|
||||
- **保留用户文件**:您手动添加的任何文件都会被保留
|
||||
- **非空目录**:包含用户添加文件的目录会被跳过
|
||||
- **基于清单**:需要 `.ecc-manifest` 文件(在安装时创建)
|
||||
|
||||
### 环境支持
|
||||
|
||||
卸载程序遵循与安装程序相同的 `TRAE_ENV` 环境变量:
|
||||
|
||||
```bash
|
||||
# 从 .trae-cn 卸载(CN 环境)
|
||||
TRAE_ENV=cn ./uninstall.sh
|
||||
|
||||
# 从 .trae 卸载(默认环境)
|
||||
./uninstall.sh
|
||||
```
|
||||
|
||||
**注意**:如果找不到清单文件(旧版本安装),卸载程序将询问是否删除整个目录。
|
||||
|
||||
## 包含的内容
|
||||
|
||||
### 命令
|
||||
|
||||
命令是通过 Trae 聊天中的 `/` 菜单调用的按需工作流。所有命令都直接复用自项目根目录的 `commands/` 文件夹。
|
||||
|
||||
### 智能体
|
||||
|
||||
智能体是具有特定工具配置的专门 AI 助手。所有智能体都直接复用自项目根目录的 `agents/` 文件夹。
|
||||
|
||||
### 技能
|
||||
|
||||
技能是通过聊天中的 `/` 菜单调用的按需工作流。所有技能都直接复用自项目的 `skills/` 文件夹。
|
||||
|
||||
### 规则
|
||||
|
||||
规则提供始终适用的规则和上下文,塑造智能体处理代码的方式。所有规则都直接复用自项目根目录的 `rules/` 文件夹。
|
||||
|
||||
## 使用方法
|
||||
|
||||
1. 在聊天中输入 `/` 以打开命令菜单
|
||||
2. 选择一个命令或技能
|
||||
3. 智能体将通过具体说明和检查清单指导您完成工作流
|
||||
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
.trae/ (或 .trae-cn/)
|
||||
├── commands/ # 命令文件(复用自项目根目录)
|
||||
├── agents/ # 智能体文件(复用自项目根目录)
|
||||
├── skills/ # 技能文件(复用自 skills/)
|
||||
├── rules/ # 规则文件(复用自项目根目录)
|
||||
├── install.sh # 安装脚本
|
||||
├── uninstall.sh # 卸载脚本
|
||||
└── README.md # 此文件
|
||||
```
|
||||
|
||||
## 自定义
|
||||
|
||||
安装后,所有文件都归您修改。安装程序永远不会覆盖现有文件,因此您的自定义在重新安装时是安全的。
|
||||
|
||||
**注意**:安装时会自动将 `install.sh` 和 `uninstall.sh` 脚本复制到目标目录,这样您可以在项目本地直接运行这些命令。
|
||||
|
||||
## 推荐的工作流
|
||||
|
||||
1. **从计划开始**:使用 `/plan` 命令分解复杂功能
|
||||
2. **先写测试**:在实现之前调用 `/tdd` 命令
|
||||
3. **审查您的代码**:编写代码后使用 `/code-review`
|
||||
4. **检查安全性**:对于身份验证、API 端点或敏感数据处理,再次使用 `/code-review`
|
||||
5. **修复构建错误**:如果有构建错误,使用 `/build-fix`
|
||||
|
||||
## 下一步
|
||||
|
||||
- 在 Trae 中打开您的项目
|
||||
- 输入 `/` 以查看可用命令
|
||||
- 享受 ECC 工作流!
|
||||
221
.trae/install.sh
Executable file
221
.trae/install.sh
Executable file
@@ -0,0 +1,221 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# ECC Trae Installer
|
||||
# Installs Everything Claude Code workflows into a Trae project.
|
||||
#
|
||||
# Usage:
|
||||
# ./install.sh # Install to current directory
|
||||
# ./install.sh ~ # Install globally to ~/.trae/ or ~/.trae-cn/
|
||||
#
|
||||
# Environment:
|
||||
# TRAE_ENV=cn # Force use .trae-cn directory
|
||||
#
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# When globs match nothing, expand to empty list instead of the literal pattern
|
||||
shopt -s nullglob
|
||||
|
||||
# Resolve the directory where this script lives (the repo root)
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
REPO_ROOT="$(dirname "$SCRIPT_DIR")"
|
||||
|
||||
# Get the trae directory name (.trae or .trae-cn)
|
||||
get_trae_dir() {
|
||||
if [ "${TRAE_ENV:-}" = "cn" ]; then
|
||||
echo ".trae-cn"
|
||||
else
|
||||
echo ".trae"
|
||||
fi
|
||||
}
|
||||
|
||||
ensure_manifest_entry() {
|
||||
local manifest="$1"
|
||||
local entry="$2"
|
||||
|
||||
touch "$manifest"
|
||||
if ! grep -Fqx "$entry" "$manifest"; then
|
||||
echo "$entry" >> "$manifest"
|
||||
fi
|
||||
}
|
||||
|
||||
# Install function
|
||||
do_install() {
|
||||
local target_dir="$PWD"
|
||||
local trae_dir="$(get_trae_dir)"
|
||||
|
||||
# Check if ~ was specified (or expanded to $HOME)
|
||||
if [ "$#" -ge 1 ]; then
|
||||
if [ "$1" = "~" ] || [ "$1" = "$HOME" ]; then
|
||||
target_dir="$HOME"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if we're already inside a .trae or .trae-cn directory
|
||||
local current_dir_name="$(basename "$target_dir")"
|
||||
local trae_full_path
|
||||
|
||||
if [ "$current_dir_name" = ".trae" ] || [ "$current_dir_name" = ".trae-cn" ]; then
|
||||
# Already inside the trae directory, use it directly
|
||||
trae_full_path="$target_dir"
|
||||
else
|
||||
# Normal case: append trae_dir to target_dir
|
||||
trae_full_path="$target_dir/$trae_dir"
|
||||
fi
|
||||
|
||||
echo "ECC Trae Installer"
|
||||
echo "=================="
|
||||
echo ""
|
||||
echo "Source: $REPO_ROOT"
|
||||
echo "Target: $trae_full_path/"
|
||||
echo ""
|
||||
|
||||
# Subdirectories to create
|
||||
SUBDIRS="commands agents skills rules"
|
||||
|
||||
# Create all required trae subdirectories
|
||||
for dir in $SUBDIRS; do
|
||||
mkdir -p "$trae_full_path/$dir"
|
||||
done
|
||||
|
||||
# Manifest file to track installed files
|
||||
MANIFEST="$trae_full_path/.ecc-manifest"
|
||||
touch "$MANIFEST"
|
||||
|
||||
# Counters for summary
|
||||
commands=0
|
||||
agents=0
|
||||
skills=0
|
||||
rules=0
|
||||
other=0
|
||||
|
||||
# Copy commands from repo root
|
||||
if [ -d "$REPO_ROOT/commands" ]; then
|
||||
for f in "$REPO_ROOT/commands"/*.md; do
|
||||
[ -f "$f" ] || continue
|
||||
local_name=$(basename "$f")
|
||||
target_path="$trae_full_path/commands/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$f" "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "commands/$local_name"
|
||||
commands=$((commands + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "commands/$local_name"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Copy agents from repo root
|
||||
if [ -d "$REPO_ROOT/agents" ]; then
|
||||
for f in "$REPO_ROOT/agents"/*.md; do
|
||||
[ -f "$f" ] || continue
|
||||
local_name=$(basename "$f")
|
||||
target_path="$trae_full_path/agents/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$f" "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "agents/$local_name"
|
||||
agents=$((agents + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "agents/$local_name"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Copy skills from repo root (if available)
|
||||
if [ -d "$REPO_ROOT/skills" ]; then
|
||||
for d in "$REPO_ROOT/skills"/*/; do
|
||||
[ -d "$d" ] || continue
|
||||
skill_name="$(basename "$d")"
|
||||
target_skill_dir="$trae_full_path/skills/$skill_name"
|
||||
skill_copied=0
|
||||
|
||||
while IFS= read -r source_file; do
|
||||
relative_path="${source_file#$d}"
|
||||
target_path="$target_skill_dir/$relative_path"
|
||||
|
||||
mkdir -p "$(dirname "$target_path")"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$source_file" "$target_path"
|
||||
skill_copied=1
|
||||
fi
|
||||
ensure_manifest_entry "$MANIFEST" "skills/$skill_name/$relative_path"
|
||||
done < <(find "$d" -type f | sort)
|
||||
|
||||
if [ "$skill_copied" -eq 1 ]; then
|
||||
skills=$((skills + 1))
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Copy rules from repo root
|
||||
if [ -d "$REPO_ROOT/rules" ]; then
|
||||
while IFS= read -r rule_file; do
|
||||
relative_path="${rule_file#$REPO_ROOT/rules/}"
|
||||
target_path="$trae_full_path/rules/$relative_path"
|
||||
|
||||
mkdir -p "$(dirname "$target_path")"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$rule_file" "$target_path"
|
||||
rules=$((rules + 1))
|
||||
fi
|
||||
ensure_manifest_entry "$MANIFEST" "rules/$relative_path"
|
||||
done < <(find "$REPO_ROOT/rules" -type f | sort)
|
||||
fi
|
||||
|
||||
# Copy README files from this directory
|
||||
for readme_file in "$SCRIPT_DIR/README.md" "$SCRIPT_DIR/README.zh-CN.md"; do
|
||||
if [ -f "$readme_file" ]; then
|
||||
local_name=$(basename "$readme_file")
|
||||
target_path="$trae_full_path/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$readme_file" "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
other=$((other + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# Copy install and uninstall scripts
|
||||
for script_file in "$SCRIPT_DIR/install.sh" "$SCRIPT_DIR/uninstall.sh"; do
|
||||
if [ -f "$script_file" ]; then
|
||||
local_name=$(basename "$script_file")
|
||||
target_path="$trae_full_path/$local_name"
|
||||
if [ ! -f "$target_path" ]; then
|
||||
cp "$script_file" "$target_path"
|
||||
chmod +x "$target_path"
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
other=$((other + 1))
|
||||
else
|
||||
ensure_manifest_entry "$MANIFEST" "$local_name"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
# Add manifest file itself to manifest
|
||||
ensure_manifest_entry "$MANIFEST" ".ecc-manifest"
|
||||
|
||||
# Installation summary
|
||||
echo "Installation complete!"
|
||||
echo ""
|
||||
echo "Components installed:"
|
||||
echo " Commands: $commands"
|
||||
echo " Agents: $agents"
|
||||
echo " Skills: $skills"
|
||||
echo " Rules: $rules"
|
||||
echo ""
|
||||
echo "Directory: $(basename "$trae_full_path")"
|
||||
echo ""
|
||||
echo "Next steps:"
|
||||
echo " 1. Open your project in Trae"
|
||||
echo " 2. Type / to see available commands"
|
||||
echo " 3. Enjoy the ECC workflows!"
|
||||
echo ""
|
||||
echo "To uninstall later:"
|
||||
echo " cd $trae_full_path"
|
||||
echo " ./uninstall.sh"
|
||||
}
|
||||
|
||||
# Main logic
|
||||
do_install "$@"
|
||||
194
.trae/uninstall.sh
Executable file
194
.trae/uninstall.sh
Executable file
@@ -0,0 +1,194 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# ECC Trae Uninstaller
|
||||
# Uninstalls Everything Claude Code workflows from a Trae project.
|
||||
#
|
||||
# Usage:
|
||||
# ./uninstall.sh # Uninstall from current directory
|
||||
# ./uninstall.sh ~ # Uninstall globally from ~/.trae/
|
||||
#
|
||||
# Environment:
|
||||
# TRAE_ENV=cn # Force use .trae-cn directory
|
||||
#
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Resolve the directory where this script lives
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
# Get the trae directory name (.trae or .trae-cn)
|
||||
get_trae_dir() {
|
||||
# Check environment variable first
|
||||
if [ "${TRAE_ENV:-}" = "cn" ]; then
|
||||
echo ".trae-cn"
|
||||
else
|
||||
echo ".trae"
|
||||
fi
|
||||
}
|
||||
|
||||
resolve_path() {
|
||||
python3 -c 'import os, sys; print(os.path.realpath(sys.argv[1]))' "$1"
|
||||
}
|
||||
|
||||
is_valid_manifest_entry() {
|
||||
local file_path="$1"
|
||||
|
||||
case "$file_path" in
|
||||
""|/*|~*|*/../*|../*|*/..|..)
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Main uninstall function
|
||||
do_uninstall() {
|
||||
local target_dir="$PWD"
|
||||
local trae_dir="$(get_trae_dir)"
|
||||
|
||||
# Check if ~ was specified (or expanded to $HOME)
|
||||
if [ "$#" -ge 1 ]; then
|
||||
if [ "$1" = "~" ] || [ "$1" = "$HOME" ]; then
|
||||
target_dir="$HOME"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if we're already inside a .trae or .trae-cn directory
|
||||
local current_dir_name="$(basename "$target_dir")"
|
||||
local trae_full_path
|
||||
|
||||
if [ "$current_dir_name" = ".trae" ] || [ "$current_dir_name" = ".trae-cn" ]; then
|
||||
# Already inside the trae directory, use it directly
|
||||
trae_full_path="$target_dir"
|
||||
else
|
||||
# Normal case: append trae_dir to target_dir
|
||||
trae_full_path="$target_dir/$trae_dir"
|
||||
fi
|
||||
|
||||
echo "ECC Trae Uninstaller"
|
||||
echo "===================="
|
||||
echo ""
|
||||
echo "Target: $trae_full_path/"
|
||||
echo ""
|
||||
|
||||
if [ ! -d "$trae_full_path" ]; then
|
||||
echo "Error: $trae_dir directory not found at $target_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
trae_root_resolved="$(resolve_path "$trae_full_path")"
|
||||
|
||||
# Manifest file path
|
||||
MANIFEST="$trae_full_path/.ecc-manifest"
|
||||
|
||||
if [ ! -f "$MANIFEST" ]; then
|
||||
echo "Warning: No manifest file found (.ecc-manifest)"
|
||||
echo ""
|
||||
echo "This could mean:"
|
||||
echo " 1. ECC was installed with an older version without manifest support"
|
||||
echo " 2. The manifest file was manually deleted"
|
||||
echo ""
|
||||
read -p "Do you want to remove the entire $trae_dir directory? (y/N) " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Uninstall cancelled."
|
||||
exit 0
|
||||
fi
|
||||
rm -rf "$trae_full_path"
|
||||
echo "Uninstall complete!"
|
||||
echo ""
|
||||
echo "Removed: $trae_full_path/"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Found manifest file - will only remove files installed by ECC"
|
||||
echo ""
|
||||
read -p "Are you sure you want to uninstall ECC from $trae_dir? (y/N) " -n 1 -r
|
||||
echo
|
||||
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
|
||||
echo "Uninstall cancelled."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Counters
|
||||
removed=0
|
||||
skipped=0
|
||||
|
||||
# Read manifest and remove files
|
||||
while IFS= read -r file_path; do
|
||||
[ -z "$file_path" ] && continue
|
||||
|
||||
if ! is_valid_manifest_entry "$file_path"; then
|
||||
echo "Skipped: $file_path (invalid manifest entry)"
|
||||
skipped=$((skipped + 1))
|
||||
continue
|
||||
fi
|
||||
|
||||
full_path="$trae_full_path/$file_path"
|
||||
resolved_full="$(resolve_path "$full_path")"
|
||||
|
||||
case "$resolved_full" in
|
||||
"$trae_root_resolved"|"$trae_root_resolved"/*)
|
||||
;;
|
||||
*)
|
||||
echo "Skipped: $file_path (invalid manifest entry)"
|
||||
skipped=$((skipped + 1))
|
||||
continue
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -f "$resolved_full" ]; then
|
||||
rm -f "$resolved_full"
|
||||
echo "Removed: $file_path"
|
||||
removed=$((removed + 1))
|
||||
elif [ -d "$resolved_full" ]; then
|
||||
# Only remove directory if it's empty
|
||||
if [ -z "$(ls -A "$resolved_full" 2>/dev/null)" ]; then
|
||||
rmdir "$resolved_full" 2>/dev/null || true
|
||||
if [ ! -d "$resolved_full" ]; then
|
||||
echo "Removed: $file_path/"
|
||||
removed=$((removed + 1))
|
||||
fi
|
||||
else
|
||||
echo "Skipped: $file_path/ (not empty - contains user files)"
|
||||
skipped=$((skipped + 1))
|
||||
fi
|
||||
else
|
||||
skipped=$((skipped + 1))
|
||||
fi
|
||||
done < "$MANIFEST"
|
||||
|
||||
while IFS= read -r empty_dir; do
|
||||
[ "$empty_dir" = "$trae_full_path" ] && continue
|
||||
relative_dir="${empty_dir#$trae_full_path/}"
|
||||
rmdir "$empty_dir" 2>/dev/null || true
|
||||
if [ ! -d "$empty_dir" ]; then
|
||||
echo "Removed: $relative_dir/"
|
||||
removed=$((removed + 1))
|
||||
fi
|
||||
done < <(find "$trae_full_path" -depth -type d -empty 2>/dev/null | sort -r)
|
||||
|
||||
# Try to remove the main trae directory if it's empty
|
||||
if [ -d "$trae_full_path" ] && [ -z "$(ls -A "$trae_full_path" 2>/dev/null)" ]; then
|
||||
rmdir "$trae_full_path" 2>/dev/null || true
|
||||
if [ ! -d "$trae_full_path" ]; then
|
||||
echo "Removed: $trae_dir/"
|
||||
removed=$((removed + 1))
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Uninstall complete!"
|
||||
echo ""
|
||||
echo "Summary:"
|
||||
echo " Removed: $removed items"
|
||||
echo " Skipped: $skipped items (not found or user-modified)"
|
||||
echo ""
|
||||
if [ -d "$trae_full_path" ]; then
|
||||
echo "Note: $trae_dir directory still exists (contains user-added files)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Execute uninstall
|
||||
do_uninstall "$@"
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — Agent Instructions
|
||||
|
||||
This is a **production-ready AI coding plugin** providing 28 specialized agents, 126 skills, 60 commands, and automated hook workflows for software development.
|
||||
This is a **production-ready AI coding plugin** providing 30 specialized agents, 135 skills, 60 commands, and automated hook workflows for software development.
|
||||
|
||||
**Version:** 1.9.0
|
||||
|
||||
@@ -141,8 +141,8 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
agents/ — 28 specialized subagents
|
||||
skills/ — 126 workflow skills and domain knowledge
|
||||
agents/ — 30 specialized subagents
|
||||
skills/ — 135 workflow skills and domain knowledge
|
||||
commands/ — 60 slash commands
|
||||
hooks/ — Trigger-based automations
|
||||
rules/ — Always-follow guidelines (common + per-language)
|
||||
|
||||
11
CLAUDE.md
11
CLAUDE.md
@@ -59,3 +59,14 @@ Follow the formats in CONTRIBUTING.md:
|
||||
- Hooks: JSON with matcher and hooks array
|
||||
|
||||
File naming: lowercase with hyphens (e.g., `python-reviewer.md`, `tdd-workflow.md`)
|
||||
|
||||
## Skills
|
||||
|
||||
Use the following skills when working on related files:
|
||||
|
||||
| File(s) | Skill |
|
||||
|---------|-------|
|
||||
| `README.md` | `/readme` |
|
||||
| `.github/workflows/*.yml` | `/ci-workflow` |
|
||||
|
||||
When spawning subagents, always pass conventions from the respective skill into the agent's prompt.
|
||||
|
||||
12
README.md
12
README.md
@@ -1,6 +1,4 @@
|
||||
**Language:** English | [Português (Brasil)](docs/pt-BR/README.md) | [简体中文](README.zh-CN.md) | [繁體中文](docs/zh-TW/README.md) | [日本語](docs/ja-JP/README.md) | [한국어](docs/ko-KR/README.md)
|
||||
[Türkçe](docs/tr/README.md)
|
||||
|
||||
**Language:** English | [Português (Brasil)](docs/pt-BR/README.md) | [简体中文](README.zh-CN.md) | [繁體中文](docs/zh-TW/README.md) | [日本語](docs/ja-JP/README.md) | [한국어](docs/ko-KR/README.md) | [Türkçe](docs/tr/README.md)
|
||||
|
||||
# Everything Claude Code
|
||||
|
||||
@@ -222,7 +220,7 @@ For manual install instructions see the README in the `rules/` folder. When copy
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **That's it!** You now have access to 28 agents, 126 skills, and 60 commands.
|
||||
✨ **That's it!** You now have access to 30 agents, 135 skills, and 60 commands.
|
||||
|
||||
### Multi-model commands require additional setup
|
||||
|
||||
@@ -297,7 +295,7 @@ everything-claude-code/
|
||||
| |-- plugin.json # Plugin metadata and component paths
|
||||
| |-- marketplace.json # Marketplace catalog for /plugin marketplace add
|
||||
|
|
||||
|-- agents/ # 28 specialized subagents for delegation
|
||||
|-- agents/ # 30 specialized subagents for delegation
|
||||
| |-- planner.md # Feature implementation planning
|
||||
| |-- architect.md # System design decisions
|
||||
| |-- tdd-guide.md # Test-driven development
|
||||
@@ -1111,9 +1109,9 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
|
||||
| Feature | Claude Code | OpenCode | Status |
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | ✅ 28 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Agents | ✅ 30 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Commands | ✅ 60 commands | ✅ 31 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 126 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Skills | ✅ 135 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** |
|
||||
| Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** |
|
||||
| MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** |
|
||||
|
||||
83
agents/healthcare-reviewer.md
Normal file
83
agents/healthcare-reviewer.md
Normal file
@@ -0,0 +1,83 @@
|
||||
---
|
||||
name: healthcare-reviewer
|
||||
description: Reviews healthcare application code for clinical safety, CDSS accuracy, PHI compliance, and medical data integrity. Specialized for EMR/EHR, clinical decision support, and health information systems.
|
||||
tools: ["Read", "Grep", "Glob"]
|
||||
model: opus
|
||||
---
|
||||
|
||||
# Healthcare Reviewer — Clinical Safety & PHI Compliance
|
||||
|
||||
You are a clinical informatics reviewer for healthcare software. Patient safety is your top priority. You review code for clinical accuracy, data protection, and regulatory compliance.
|
||||
|
||||
## Your Responsibilities
|
||||
|
||||
1. **CDSS accuracy** — Verify drug interaction logic, dose validation rules, and clinical scoring implementations match published medical standards
|
||||
2. **PHI/PII protection** — Scan for patient data exposure in logs, errors, responses, URLs, and client storage
|
||||
3. **Clinical data integrity** — Ensure audit trails, locked records, and cascade protection
|
||||
4. **Medical data correctness** — Verify ICD-10/SNOMED mappings, lab reference ranges, and drug database entries
|
||||
5. **Integration compliance** — Validate HL7/FHIR message handling and error recovery
|
||||
|
||||
## Critical Checks
|
||||
|
||||
### CDSS Engine
|
||||
|
||||
- [ ] All drug interaction pairs produce correct alerts (both directions)
|
||||
- [ ] Dose validation rules fire on out-of-range values
|
||||
- [ ] Clinical scoring matches published specification (NEWS2 = Royal College of Physicians, qSOFA = Sepsis-3)
|
||||
- [ ] No false negatives (missed interaction = patient safety event)
|
||||
- [ ] Malformed inputs produce errors, NOT silent passes
|
||||
|
||||
### PHI Protection
|
||||
|
||||
- [ ] No patient data in `console.log`, `console.error`, or error messages
|
||||
- [ ] No PHI in URL parameters or query strings
|
||||
- [ ] No PHI in browser localStorage/sessionStorage
|
||||
- [ ] No `service_role` key in client-side code
|
||||
- [ ] RLS enabled on all tables with patient data
|
||||
- [ ] Cross-facility data isolation verified
|
||||
|
||||
### Clinical Workflow
|
||||
|
||||
- [ ] Encounter lock prevents edits (addendum only)
|
||||
- [ ] Audit trail entry on every create/read/update/delete of clinical data
|
||||
- [ ] Critical alerts are non-dismissable (not toast notifications)
|
||||
- [ ] Override reasons logged when clinician proceeds past critical alert
|
||||
- [ ] Red flag symptoms trigger visible alerts
|
||||
|
||||
### Data Integrity
|
||||
|
||||
- [ ] No CASCADE DELETE on patient records
|
||||
- [ ] Concurrent edit detection (optimistic locking or conflict resolution)
|
||||
- [ ] No orphaned records across clinical tables
|
||||
- [ ] Timestamps use consistent timezone
|
||||
|
||||
## Output Format
|
||||
|
||||
```
|
||||
## Healthcare Review: [module/feature]
|
||||
|
||||
### Patient Safety Impact: [CRITICAL / HIGH / MEDIUM / LOW / NONE]
|
||||
|
||||
### Clinical Accuracy
|
||||
- CDSS: [checks passed/failed]
|
||||
- Drug DB: [verified/issues]
|
||||
- Scoring: [matches spec/deviates]
|
||||
|
||||
### PHI Compliance
|
||||
- Exposure vectors checked: [list]
|
||||
- Issues found: [list or none]
|
||||
|
||||
### Issues
|
||||
1. [PATIENT SAFETY / CLINICAL / PHI / TECHNICAL] Description
|
||||
- Impact: [potential harm or exposure]
|
||||
- Fix: [required change]
|
||||
|
||||
### Verdict: [SAFE TO DEPLOY / NEEDS FIXES / BLOCK — PATIENT SAFETY RISK]
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- When in doubt about clinical accuracy, flag as NEEDS REVIEW — never approve uncertain clinical logic
|
||||
- A single missed drug interaction is worse than a hundred false alarms
|
||||
- PHI exposure is always CRITICAL severity, regardless of how small the leak
|
||||
- Never approve code that silently catches CDSS errors
|
||||
446
agents/performance-optimizer.md
Normal file
446
agents/performance-optimizer.md
Normal file
@@ -0,0 +1,446 @@
|
||||
---
|
||||
name: performance-optimizer
|
||||
description: Performance analysis and optimization specialist. Use PROACTIVELY for identifying bottlenecks, optimizing slow code, reducing bundle sizes, and improving runtime performance. Profiling, memory leaks, render optimization, and algorithmic improvements.
|
||||
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
|
||||
model: sonnet
|
||||
---
|
||||
|
||||
# Performance Optimizer
|
||||
|
||||
You are an expert performance specialist focused on identifying bottlenecks and optimizing application speed, memory usage, and efficiency. Your mission is to make code faster, lighter, and more responsive.
|
||||
|
||||
## Core Responsibilities
|
||||
|
||||
1. **Performance Profiling** — Identify slow code paths, memory leaks, and bottlenecks
|
||||
2. **Bundle Optimization** — Reduce JavaScript bundle sizes, lazy loading, code splitting
|
||||
3. **Runtime Optimization** — Improve algorithmic efficiency, reduce unnecessary computations
|
||||
4. **React/Rendering Optimization** — Prevent unnecessary re-renders, optimize component trees
|
||||
5. **Database & Network** — Optimize queries, reduce API calls, implement caching
|
||||
6. **Memory Management** — Detect leaks, optimize memory usage, cleanup resources
|
||||
|
||||
## Analysis Commands
|
||||
|
||||
```bash
|
||||
# Bundle analysis
|
||||
npx bundle-analyzer
|
||||
npx source-map-explorer build/static/js/*.js
|
||||
|
||||
# Lighthouse performance audit
|
||||
npx lighthouse https://your-app.com --view
|
||||
|
||||
# Node.js profiling
|
||||
node --prof your-app.js
|
||||
node --prof-process isolate-*.log
|
||||
|
||||
# Memory analysis
|
||||
node --inspect your-app.js # Then use Chrome DevTools
|
||||
|
||||
# React profiling (in browser)
|
||||
# React DevTools > Profiler tab
|
||||
|
||||
# Network analysis
|
||||
npx webpack-bundle-analyzer
|
||||
```
|
||||
|
||||
## Performance Review Workflow
|
||||
|
||||
### 1. Identify Performance Issues
|
||||
|
||||
**Critical Performance Indicators:**
|
||||
|
||||
| Metric | Target | Action if Exceeded |
|
||||
|--------|--------|-------------------|
|
||||
| First Contentful Paint | < 1.8s | Optimize critical path, inline critical CSS |
|
||||
| Largest Contentful Paint | < 2.5s | Lazy load images, optimize server response |
|
||||
| Time to Interactive | < 3.8s | Code splitting, reduce JavaScript |
|
||||
| Cumulative Layout Shift | < 0.1 | Reserve space for images, avoid layout thrashing |
|
||||
| Total Blocking Time | < 200ms | Break up long tasks, use web workers |
|
||||
| Bundle Size (gzipped) | < 200KB | Tree shaking, lazy loading, code splitting |
|
||||
|
||||
### 2. Algorithmic Analysis
|
||||
|
||||
Check for inefficient algorithms:
|
||||
|
||||
| Pattern | Complexity | Better Alternative |
|
||||
|---------|------------|-------------------|
|
||||
| Nested loops on same data | O(n²) | Use Map/Set for O(1) lookups |
|
||||
| Repeated array searches | O(n) per search | Convert to Map for O(1) |
|
||||
| Sorting inside loop | O(n² log n) | Sort once outside loop |
|
||||
| String concatenation in loop | O(n²) | Use array.join() |
|
||||
| Deep cloning large objects | O(n) each time | Use shallow copy or immer |
|
||||
| Recursion without memoization | O(2^n) | Add memoization |
|
||||
|
||||
```typescript
|
||||
// BAD: O(n²) - searching array in loop
|
||||
for (const user of users) {
|
||||
const posts = allPosts.filter(p => p.userId === user.id); // O(n) per user
|
||||
}
|
||||
|
||||
// GOOD: O(n) - group once with Map
|
||||
const postsByUser = new Map<number, Post[]>();
|
||||
for (const post of allPosts) {
|
||||
const userPosts = postsByUser.get(post.userId) || [];
|
||||
userPosts.push(post);
|
||||
postsByUser.set(post.userId, userPosts);
|
||||
}
|
||||
// Now O(1) lookup per user
|
||||
```
|
||||
|
||||
### 3. React Performance Optimization
|
||||
|
||||
**Common React Anti-patterns:**
|
||||
|
||||
```tsx
|
||||
// BAD: Inline function creation in render
|
||||
<Button onClick={() => handleClick(id)}>Submit</Button>
|
||||
|
||||
// GOOD: Stable callback with useCallback
|
||||
const handleButtonClick = useCallback(() => handleClick(id), [handleClick, id]);
|
||||
<Button onClick={handleButtonClick}>Submit</Button>
|
||||
|
||||
// BAD: Object creation in render
|
||||
<Child style={{ color: 'red' }} />
|
||||
|
||||
// GOOD: Stable object reference
|
||||
const style = useMemo(() => ({ color: 'red' }), []);
|
||||
<Child style={style} />
|
||||
|
||||
// BAD: Expensive computation on every render
|
||||
const sortedItems = items.sort((a, b) => a.name.localeCompare(b.name));
|
||||
|
||||
// GOOD: Memoize expensive computations
|
||||
const sortedItems = useMemo(
|
||||
() => [...items].sort((a, b) => a.name.localeCompare(b.name)),
|
||||
[items]
|
||||
);
|
||||
|
||||
// BAD: List without keys or with index
|
||||
{items.map((item, index) => <Item key={index} />)}
|
||||
|
||||
// GOOD: Stable unique keys
|
||||
{items.map(item => <Item key={item.id} item={item} />)}
|
||||
```
|
||||
|
||||
**React Performance Checklist:**
|
||||
|
||||
- [ ] `useMemo` for expensive computations
|
||||
- [ ] `useCallback` for functions passed to children
|
||||
- [ ] `React.memo` for frequently re-rendered components
|
||||
- [ ] Proper dependency arrays in hooks
|
||||
- [ ] Virtualization for long lists (react-window, react-virtualized)
|
||||
- [ ] Lazy loading for heavy components (`React.lazy`)
|
||||
- [ ] Code splitting at route level
|
||||
|
||||
### 4. Bundle Size Optimization
|
||||
|
||||
**Bundle Analysis Checklist:**
|
||||
|
||||
```bash
|
||||
# Analyze bundle composition
|
||||
npx webpack-bundle-analyzer build/static/js/*.js
|
||||
|
||||
# Check for duplicate dependencies
|
||||
npx duplicate-package-checker-analyzer
|
||||
|
||||
# Find largest files
|
||||
du -sh node_modules/* | sort -hr | head -20
|
||||
```
|
||||
|
||||
**Optimization Strategies:**
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Large vendor bundle | Tree shaking, smaller alternatives |
|
||||
| Duplicate code | Extract to shared module |
|
||||
| Unused exports | Remove dead code with knip |
|
||||
| Moment.js | Use date-fns or dayjs (smaller) |
|
||||
| Lodash | Use lodash-es or native methods |
|
||||
| Large icons library | Import only needed icons |
|
||||
|
||||
```javascript
|
||||
// BAD: Import entire library
|
||||
import _ from 'lodash';
|
||||
import moment from 'moment';
|
||||
|
||||
// GOOD: Import only what you need
|
||||
import debounce from 'lodash/debounce';
|
||||
import { format, addDays } from 'date-fns';
|
||||
|
||||
// Or use lodash-es with tree shaking
|
||||
import { debounce, throttle } from 'lodash-es';
|
||||
```
|
||||
|
||||
### 5. Database & Query Optimization
|
||||
|
||||
**Query Optimization Patterns:**
|
||||
|
||||
```sql
|
||||
-- BAD: Select all columns
|
||||
SELECT * FROM users WHERE active = true;
|
||||
|
||||
-- GOOD: Select only needed columns
|
||||
SELECT id, name, email FROM users WHERE active = true;
|
||||
|
||||
-- BAD: N+1 queries (in application loop)
|
||||
-- 1 query for users, then N queries for each user's orders
|
||||
|
||||
-- GOOD: Single query with JOIN or batch fetch
|
||||
SELECT u.*, o.id as order_id, o.total
|
||||
FROM users u
|
||||
LEFT JOIN orders o ON u.id = o.user_id
|
||||
WHERE u.active = true;
|
||||
|
||||
-- Add index for frequently queried columns
|
||||
CREATE INDEX idx_users_active ON users(active);
|
||||
CREATE INDEX idx_orders_user_id ON orders(user_id);
|
||||
```
|
||||
|
||||
**Database Performance Checklist:**
|
||||
|
||||
- [ ] Indexes on frequently queried columns
|
||||
- [ ] Composite indexes for multi-column queries
|
||||
- [ ] Avoid SELECT * in production code
|
||||
- [ ] Use connection pooling
|
||||
- [ ] Implement query result caching
|
||||
- [ ] Use pagination for large result sets
|
||||
- [ ] Monitor slow query logs
|
||||
|
||||
### 6. Network & API Optimization
|
||||
|
||||
**Network Optimization Strategies:**
|
||||
|
||||
```typescript
|
||||
// BAD: Multiple sequential requests
|
||||
const user = await fetchUser(id);
|
||||
const posts = await fetchPosts(user.id);
|
||||
const comments = await fetchComments(posts[0].id);
|
||||
|
||||
// GOOD: Parallel requests when independent
|
||||
const [user, posts] = await Promise.all([
|
||||
fetchUser(id),
|
||||
fetchPosts(id)
|
||||
]);
|
||||
|
||||
// GOOD: Batch requests when possible
|
||||
const results = await batchFetch(['user1', 'user2', 'user3']);
|
||||
|
||||
// Implement request caching
|
||||
const fetchWithCache = async (url: string, ttl = 300000) => {
|
||||
const cached = cache.get(url);
|
||||
if (cached) return cached;
|
||||
|
||||
const data = await fetch(url).then(r => r.json());
|
||||
cache.set(url, data, ttl);
|
||||
return data;
|
||||
};
|
||||
|
||||
// Debounce rapid API calls
|
||||
const debouncedSearch = debounce(async (query: string) => {
|
||||
const results = await searchAPI(query);
|
||||
setResults(results);
|
||||
}, 300);
|
||||
```
|
||||
|
||||
**Network Optimization Checklist:**
|
||||
|
||||
- [ ] Parallel independent requests with `Promise.all`
|
||||
- [ ] Implement request caching
|
||||
- [ ] Debounce rapid-fire requests
|
||||
- [ ] Use streaming for large responses
|
||||
- [ ] Implement pagination for large datasets
|
||||
- [ ] Use GraphQL or API batching to reduce requests
|
||||
- [ ] Enable compression (gzip/brotli) on server
|
||||
|
||||
### 7. Memory Leak Detection
|
||||
|
||||
**Common Memory Leak Patterns:**
|
||||
|
||||
```typescript
|
||||
// BAD: Event listener without cleanup
|
||||
useEffect(() => {
|
||||
window.addEventListener('resize', handleResize);
|
||||
// Missing cleanup!
|
||||
}, []);
|
||||
|
||||
// GOOD: Clean up event listeners
|
||||
useEffect(() => {
|
||||
window.addEventListener('resize', handleResize);
|
||||
return () => window.removeEventListener('resize', handleResize);
|
||||
}, []);
|
||||
|
||||
// BAD: Timer without cleanup
|
||||
useEffect(() => {
|
||||
setInterval(() => pollData(), 1000);
|
||||
// Missing cleanup!
|
||||
}, []);
|
||||
|
||||
// GOOD: Clean up timers
|
||||
useEffect(() => {
|
||||
const interval = setInterval(() => pollData(), 1000);
|
||||
return () => clearInterval(interval);
|
||||
}, []);
|
||||
|
||||
// BAD: Holding references in closures
|
||||
const Component = () => {
|
||||
const largeData = useLargeData();
|
||||
useEffect(() => {
|
||||
eventEmitter.on('update', () => {
|
||||
console.log(largeData); // Closure keeps reference
|
||||
});
|
||||
}, [largeData]);
|
||||
};
|
||||
|
||||
// GOOD: Use refs or proper dependencies
|
||||
const largeDataRef = useRef(largeData);
|
||||
useEffect(() => {
|
||||
largeDataRef.current = largeData;
|
||||
}, [largeData]);
|
||||
|
||||
useEffect(() => {
|
||||
const handleUpdate = () => {
|
||||
console.log(largeDataRef.current);
|
||||
};
|
||||
eventEmitter.on('update', handleUpdate);
|
||||
return () => eventEmitter.off('update', handleUpdate);
|
||||
}, []);
|
||||
```
|
||||
|
||||
**Memory Leak Detection:**
|
||||
|
||||
```bash
|
||||
# Chrome DevTools Memory tab:
|
||||
# 1. Take heap snapshot
|
||||
# 2. Perform action
|
||||
# 3. Take another snapshot
|
||||
# 4. Compare to find objects that shouldn't exist
|
||||
# 5. Look for detached DOM nodes, event listeners, closures
|
||||
|
||||
# Node.js memory debugging
|
||||
node --inspect app.js
|
||||
# Open chrome://inspect
|
||||
# Take heap snapshots and compare
|
||||
```
|
||||
|
||||
## Performance Testing
|
||||
|
||||
### Lighthouse Audits
|
||||
|
||||
```bash
|
||||
# Run full lighthouse audit
|
||||
npx lighthouse https://your-app.com --view --preset=desktop
|
||||
|
||||
# CI mode for automated checks
|
||||
npx lighthouse https://your-app.com --output=json --output-path=./lighthouse.json
|
||||
|
||||
# Check specific metrics
|
||||
npx lighthouse https://your-app.com --only-categories=performance
|
||||
```
|
||||
|
||||
### Performance Budgets
|
||||
|
||||
```json
|
||||
// package.json
|
||||
{
|
||||
"bundlesize": [
|
||||
{
|
||||
"path": "./build/static/js/*.js",
|
||||
"maxSize": "200 kB"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Web Vitals Monitoring
|
||||
|
||||
```typescript
|
||||
// Track Core Web Vitals
|
||||
import { getCLS, getFID, getLCP, getFCP, getTTFB } from 'web-vitals';
|
||||
|
||||
getCLS(console.log); // Cumulative Layout Shift
|
||||
getFID(console.log); // First Input Delay
|
||||
getLCP(console.log); // Largest Contentful Paint
|
||||
getFCP(console.log); // First Contentful Paint
|
||||
getTTFB(console.log); // Time to First Byte
|
||||
```
|
||||
|
||||
## Performance Report Template
|
||||
|
||||
````markdown
|
||||
# Performance Audit Report
|
||||
|
||||
## Executive Summary
|
||||
- **Overall Score**: X/100
|
||||
- **Critical Issues**: X
|
||||
- **Recommendations**: X
|
||||
|
||||
## Bundle Analysis
|
||||
| Metric | Current | Target | Status |
|
||||
|--------|---------|--------|--------|
|
||||
| Total Size (gzip) | XXX KB | < 200 KB | ⚠️ |
|
||||
| Main Bundle | XXX KB | < 100 KB | ✅ |
|
||||
| Vendor Bundle | XXX KB | < 150 KB | ⚠️ |
|
||||
|
||||
## Web Vitals
|
||||
| Metric | Current | Target | Status |
|
||||
|--------|---------|--------|--------|
|
||||
| LCP | X.Xs | < 2.5s | ✅ |
|
||||
| FID | XXms | < 100ms | ✅ |
|
||||
| CLS | X.XX | < 0.1 | ⚠️ |
|
||||
|
||||
## Critical Issues
|
||||
|
||||
### 1. [Issue Title]
|
||||
**File**: path/to/file.ts:42
|
||||
**Impact**: High - Causes XXXms delay
|
||||
**Fix**: [Description of fix]
|
||||
|
||||
```typescript
|
||||
// Before (slow)
|
||||
const slowCode = ...;
|
||||
|
||||
// After (optimized)
|
||||
const fastCode = ...;
|
||||
```
|
||||
|
||||
### 2. [Issue Title]
|
||||
...
|
||||
|
||||
## Recommendations
|
||||
1. [Priority recommendation]
|
||||
2. [Priority recommendation]
|
||||
3. [Priority recommendation]
|
||||
|
||||
## Estimated Impact
|
||||
- Bundle size reduction: XX KB (XX%)
|
||||
- LCP improvement: XXms
|
||||
- Time to Interactive improvement: XXms
|
||||
````
|
||||
|
||||
## When to Run
|
||||
|
||||
**ALWAYS:** Before major releases, after adding new features, when users report slowness, during performance regression testing.
|
||||
|
||||
**IMMEDIATELY:** Lighthouse score drops, bundle size increases >10%, memory usage grows, slow page loads.
|
||||
|
||||
## Red Flags - Act Immediately
|
||||
|
||||
| Issue | Action |
|
||||
|-------|--------|
|
||||
| Bundle > 500KB gzip | Code split, lazy load, tree shake |
|
||||
| LCP > 4s | Optimize critical path, preload resources |
|
||||
| Memory usage growing | Check for leaks, review useEffect cleanup |
|
||||
| CPU spikes | Profile with Chrome DevTools |
|
||||
| Database query > 1s | Add index, optimize query, cache results |
|
||||
|
||||
## Success Metrics
|
||||
|
||||
- Lighthouse performance score > 90
|
||||
- All Core Web Vitals in "good" range
|
||||
- Bundle size under budget
|
||||
- No memory leaks detected
|
||||
- Test suite still passing
|
||||
- No performance regressions
|
||||
|
||||
---
|
||||
|
||||
**Remember**: Performance is a feature. Users notice speed. Every 100ms of improvement matters. Optimize for the 90th percentile, not the average.
|
||||
31
docs/zh-CN/commands/prune.md
Normal file
31
docs/zh-CN/commands/prune.md
Normal file
@@ -0,0 +1,31 @@
|
||||
---
|
||||
name: prune
|
||||
description: 删除超过 30 天且从未被提升的待处理本能
|
||||
command: true
|
||||
---
|
||||
|
||||
# 清理待处理本能
|
||||
|
||||
删除那些由系统自动生成、但从未经过审查或提升的过期待处理本能。
|
||||
|
||||
## 实现
|
||||
|
||||
使用插件根目录路径运行本能 CLI:
|
||||
|
||||
```bash
|
||||
python3 "${CLAUDE_PLUGIN_ROOT}/skills/continuous-learning-v2/scripts/instinct-cli.py" prune
|
||||
```
|
||||
|
||||
或者如果 `CLAUDE_PLUGIN_ROOT` 未设置(手动安装):
|
||||
|
||||
```bash
|
||||
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py prune
|
||||
```
|
||||
|
||||
## 用法
|
||||
|
||||
```
|
||||
/prune # 删除超过 30 天的本能
|
||||
/prune --max-age 60 # 自定义年龄阈值(天)
|
||||
/prune --dry-run # 仅预览,不实际删除
|
||||
```
|
||||
@@ -23,6 +23,7 @@ User request → Claude picks a tool → PreToolUse hook runs → Tool executes
|
||||
| **Dev server blocker** | `Bash` | Blocks `npm run dev` etc. outside tmux — ensures log access | 2 (blocks) |
|
||||
| **Tmux reminder** | `Bash` | Suggests tmux for long-running commands (npm test, cargo build, docker) | 0 (warns) |
|
||||
| **Git push reminder** | `Bash` | Reminds to review changes before `git push` | 0 (warns) |
|
||||
| **Pre-commit quality check** | `Bash` | Runs quality checks before `git commit`: lints staged files, validates commit message format when provided via `-m/--message`, detects console.log/debugger/secrets | 2 (blocks critical) / 0 (warns) |
|
||||
| **Doc file warning** | `Write` | Warns about non-standard `.md`/`.txt` files (allows README, CLAUDE, CONTRIBUTING, CHANGELOG, LICENSE, SKILL, docs/, skills/); cross-platform path handling | 0 (warns) |
|
||||
| **Strategic compact** | `Edit\|Write` | Suggests manual `/compact` at logical intervals (every ~50 tool calls) | 0 (warns) |
|
||||
| **InsAIts security monitor (opt-in)** | `Bash\|Write\|Edit\|MultiEdit` | Optional security scan for high-signal tool inputs. Disabled unless `ECC_ENABLE_INSAITS=1`. Blocks on critical findings, warns on non-critical, and writes audit log to `.insaits_audit_session.jsonl`. Requires `pip install insa-its`. [Details](../scripts/hooks/insaits-security-monitor.py) | 2 (blocks critical) / 0 (warns) |
|
||||
|
||||
@@ -42,6 +42,16 @@
|
||||
],
|
||||
"description": "Reminder before git push to review changes"
|
||||
},
|
||||
{
|
||||
"matcher": "Bash",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:bash:commit-quality\" \"scripts/hooks/pre-bash-commit-quality.js\" \"strict\""
|
||||
}
|
||||
],
|
||||
"description": "Pre-commit quality check: lint staged files, validate commit message format, detect console.log/debugger/secrets before committing"
|
||||
},
|
||||
{
|
||||
"matcher": "Write",
|
||||
"hooks": [
|
||||
@@ -249,7 +259,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:check-console-log\" \"scripts/hooks/check-console-log.js\" \"standard,strict\""
|
||||
"command": "node -e \"const fs=require('fs');const p=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const root=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})();const script=p.join(root,'scripts','hooks','run-with-flags.js');if(!fs.existsSync(script)){process.stderr.write('[Stop] WARNING: could not resolve ECC plugin root; skipping hook'+String.fromCharCode(10));process.stdout.write(raw);process.exit(0);}const result=spawnSync(process.execPath,[script,'stop:check-console-log','scripts/hooks/check-console-log.js','standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});if(typeof result.stdout==='string'&&result.stdout)process.stdout.write(result.stdout);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[Stop] ERROR: inline hook resolver failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);\""
|
||||
}
|
||||
],
|
||||
"description": "Check for console.log in modified files after each response"
|
||||
@@ -259,7 +269,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:session-end\" \"scripts/hooks/session-end.js\" \"minimal,standard,strict\"",
|
||||
"command": "node -e \"const fs=require('fs');const p=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const root=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})();const script=p.join(root,'scripts','hooks','run-with-flags.js');if(!fs.existsSync(script)){process.stderr.write('[Stop] WARNING: could not resolve ECC plugin root; skipping hook'+String.fromCharCode(10));process.stdout.write(raw);process.exit(0);}const result=spawnSync(process.execPath,[script,'stop:session-end','scripts/hooks/session-end.js','minimal,standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});if(typeof result.stdout==='string'&&result.stdout)process.stdout.write(result.stdout);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[Stop] ERROR: inline hook resolver failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -271,7 +281,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:evaluate-session\" \"scripts/hooks/evaluate-session.js\" \"minimal,standard,strict\"",
|
||||
"command": "node -e \"const fs=require('fs');const p=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const root=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})();const script=p.join(root,'scripts','hooks','run-with-flags.js');if(!fs.existsSync(script)){process.stderr.write('[Stop] WARNING: could not resolve ECC plugin root; skipping hook'+String.fromCharCode(10));process.stdout.write(raw);process.exit(0);}const result=spawnSync(process.execPath,[script,'stop:evaluate-session','scripts/hooks/evaluate-session.js','minimal,standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});if(typeof result.stdout==='string'&&result.stdout)process.stdout.write(result.stdout);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[Stop] ERROR: inline hook resolver failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -283,7 +293,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:cost-tracker\" \"scripts/hooks/cost-tracker.js\" \"minimal,standard,strict\"",
|
||||
"command": "node -e \"const fs=require('fs');const p=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const root=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})();const script=p.join(root,'scripts','hooks','run-with-flags.js');if(!fs.existsSync(script)){process.stderr.write('[Stop] WARNING: could not resolve ECC plugin root; skipping hook'+String.fromCharCode(10));process.stdout.write(raw);process.exit(0);}const result=spawnSync(process.execPath,[script,'stop:cost-tracker','scripts/hooks/cost-tracker.js','minimal,standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});if(typeof result.stdout==='string'&&result.stdout)process.stdout.write(result.stdout);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[Stop] ERROR: inline hook resolver failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -295,7 +305,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:desktop-notify\" \"scripts/hooks/desktop-notify.js\" \"standard,strict\"",
|
||||
"command": "node -e \"const fs=require('fs');const p=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const root=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})();const script=p.join(root,'scripts','hooks','run-with-flags.js');if(!fs.existsSync(script)){process.stderr.write('[Stop] WARNING: could not resolve ECC plugin root; skipping hook'+String.fromCharCode(10));process.stdout.write(raw);process.exit(0);}const result=spawnSync(process.execPath,[script,'stop:desktop-notify','scripts/hooks/desktop-notify.js','standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});if(typeof result.stdout==='string'&&result.stdout)process.stdout.write(result.stdout);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[Stop] ERROR: inline hook resolver failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -309,7 +319,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"session:end:marker\" \"scripts/hooks/session-end-marker.js\" \"minimal,standard,strict\"",
|
||||
"command": "node -e \"const fs=require('fs');const p=require('path');const {spawnSync}=require('child_process');const raw=fs.readFileSync(0,'utf8');const root=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var l of [p.join(d,'plugins','everything-claude-code'),p.join(d,'plugins','everything-claude-code@everything-claude-code'),p.join(d,'plugins','marketplace','everything-claude-code')])if(f.existsSync(p.join(l,q)))return l;try{var b=p.join(d,'plugins','cache','everything-claude-code');for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}catch(x){}return d})();const script=p.join(root,'scripts','hooks','run-with-flags.js');if(!fs.existsSync(script)){process.stderr.write('[SessionEnd] WARNING: could not resolve ECC plugin root; skipping hook'+String.fromCharCode(10));process.stdout.write(raw);process.exit(0);}const result=spawnSync(process.execPath,[script,'session:end:marker','scripts/hooks/session-end-marker.js','minimal,standard,strict'],{input:raw,encoding:'utf8',env:process.env,cwd:process.cwd(),timeout:30000});if(typeof result.stdout==='string'&&result.stdout)process.stdout.write(result.stdout);if(result.stderr)process.stderr.write(result.stderr);if(result.error||result.status===null||result.signal){const reason=result.error?result.error.message:(result.signal?'signal '+result.signal:'missing exit status');process.stderr.write('[SessionEnd] ERROR: inline hook resolver failed: '+reason+String.fromCharCode(10));process.exit(1);}process.exit(Number.isInteger(result.status)?result.status:0);\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
|
||||
@@ -408,6 +408,7 @@
|
||||
"skills/ralphinho-rfc-pipeline",
|
||||
"skills/regex-vs-llm-structured-text",
|
||||
"skills/search-first",
|
||||
"skills/token-budget-advisor",
|
||||
"skills/team-builder"
|
||||
],
|
||||
"targets": [
|
||||
|
||||
@@ -16,6 +16,20 @@ if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Skip checks for branch deletion pushes (e.g., git push origin --delete <branch>).
|
||||
# The pre-push hook receives lines on stdin: <local ref> <local sha> <remote ref> <remote sha>.
|
||||
# For deletions, the local sha is the zero OID.
|
||||
is_delete_only=true
|
||||
while read -r _local_ref local_sha _remote_ref _remote_sha; do
|
||||
if [[ "$local_sha" != "0000000000000000000000000000000000000000" ]]; then
|
||||
is_delete_only=false
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [[ "$is_delete_only" == "true" ]]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
ran_any_check=0
|
||||
|
||||
log() {
|
||||
|
||||
@@ -112,10 +112,25 @@ if [[ -f "$CONFIG_FILE" ]]; then
|
||||
fi
|
||||
done
|
||||
|
||||
has_context7_legacy=0
|
||||
has_context7_current=0
|
||||
|
||||
if rg -n '^\[mcp_servers\.context7\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
has_context7_legacy=1
|
||||
fi
|
||||
|
||||
if rg -n '^\[mcp_servers\.context7-mcp\]' "$CONFIG_FILE" >/dev/null 2>&1; then
|
||||
warn "Legacy [mcp_servers.context7-mcp] exists (context7 is preferred)"
|
||||
has_context7_current=1
|
||||
fi
|
||||
|
||||
if [[ "$has_context7_legacy" -eq 1 || "$has_context7_current" -eq 1 ]]; then
|
||||
ok "MCP section [mcp_servers.context7] or [mcp_servers.context7-mcp] exists"
|
||||
else
|
||||
ok "No legacy [mcp_servers.context7-mcp] section"
|
||||
fail "MCP section [mcp_servers.context7] or [mcp_servers.context7-mcp] missing"
|
||||
fi
|
||||
|
||||
if [[ "$has_context7_legacy" -eq 1 && "$has_context7_current" -eq 1 ]]; then
|
||||
warn "Both [mcp_servers.context7] and [mcp_servers.context7-mcp] exist; prefer one name"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Doc file warning hook (PreToolUse - Write)
|
||||
* Warns about non-standard documentation files.
|
||||
*
|
||||
* Uses a denylist approach: only warn on known ad-hoc documentation
|
||||
* filenames (NOTES, TODO, SCRATCH, etc.) outside structured directories.
|
||||
* This avoids false positives for legitimate markdown-heavy workflows
|
||||
* (specs, ADRs, command definitions, skill files, etc.).
|
||||
*
|
||||
* Policy ported from the intent of PR #962 into the current hook architecture.
|
||||
* Exit code 0 always (warns only, never blocks).
|
||||
*/
|
||||
|
||||
@@ -12,31 +18,59 @@ const path = require('path');
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let data = '';
|
||||
|
||||
function isAllowedDocPath(filePath) {
|
||||
// Known ad-hoc filenames that indicate impulse/scratch files (case-sensitive, uppercase only)
|
||||
const ADHOC_FILENAMES = /^(NOTES|TODO|SCRATCH|TEMP|DRAFT|BRAINSTORM|SPIKE|DEBUG|WIP)\.(md|txt)$/;
|
||||
|
||||
// Structured directories where even ad-hoc names are intentional
|
||||
const STRUCTURED_DIRS = /(^|\/)(docs|\.claude|\.github|commands|skills|benchmarks|templates|\.history|memory)\//;
|
||||
|
||||
function isSuspiciousDocPath(filePath) {
|
||||
const normalized = filePath.replace(/\\/g, '/');
|
||||
const basename = path.basename(filePath);
|
||||
const basename = path.basename(normalized);
|
||||
|
||||
if (!/\.(md|txt)$/i.test(filePath)) return true;
|
||||
// Only inspect .md and .txt files (case-sensitive, consistent with ADHOC_FILENAMES)
|
||||
if (!/\.(md|txt)$/.test(basename)) return false;
|
||||
|
||||
if (/^(README|CLAUDE|AGENTS|CONTRIBUTING|CHANGELOG|LICENSE|SKILL|MEMORY|WORKLOG)\.md$/i.test(basename)) {
|
||||
return true;
|
||||
}
|
||||
// Only flag known ad-hoc filenames
|
||||
if (!ADHOC_FILENAMES.test(basename)) return false;
|
||||
|
||||
if (/\.claude\/(commands|plans|projects)\//.test(normalized)) {
|
||||
return true;
|
||||
}
|
||||
// Allow ad-hoc names inside structured directories (intentional usage)
|
||||
if (STRUCTURED_DIRS.test(normalized)) return false;
|
||||
|
||||
if (/(^|\/)(docs|skills|\.history|memory)\//.test(normalized)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (/\.plan\.md$/i.test(basename)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Exportable run() for in-process execution via run-with-flags.js.
|
||||
* Avoids the ~50-100ms spawnSync overhead when available.
|
||||
*/
|
||||
function run(inputOrRaw, _options = {}) {
|
||||
let input;
|
||||
try {
|
||||
input = typeof inputOrRaw === 'string'
|
||||
? (inputOrRaw.trim() ? JSON.parse(inputOrRaw) : {})
|
||||
: (inputOrRaw || {});
|
||||
} catch {
|
||||
return { exitCode: 0 };
|
||||
}
|
||||
const filePath = String(input?.tool_input?.file_path || '');
|
||||
|
||||
if (filePath && isSuspiciousDocPath(filePath)) {
|
||||
return {
|
||||
exitCode: 0,
|
||||
stderr:
|
||||
'[Hook] WARNING: Ad-hoc documentation filename detected\n' +
|
||||
`[Hook] File: ${filePath}\n` +
|
||||
'[Hook] Consider using a structured path (e.g. docs/, .claude/, skills/, .github/, benchmarks/, templates/)',
|
||||
};
|
||||
}
|
||||
|
||||
return { exitCode: 0 };
|
||||
}
|
||||
|
||||
module.exports = { run };
|
||||
|
||||
// Stdin fallback for spawnSync execution
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', c => {
|
||||
if (data.length < MAX_STDIN) {
|
||||
@@ -46,17 +80,10 @@ process.stdin.on('data', c => {
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
try {
|
||||
const input = JSON.parse(data);
|
||||
const filePath = String(input.tool_input?.file_path || '');
|
||||
const result = run(data);
|
||||
|
||||
if (filePath && !isAllowedDocPath(filePath)) {
|
||||
console.error('[Hook] WARNING: Non-standard documentation file detected');
|
||||
console.error(`[Hook] File: ${filePath}`);
|
||||
console.error('[Hook] Consider consolidating into README.md or docs/ directory');
|
||||
}
|
||||
} catch {
|
||||
// ignore parse errors
|
||||
if (result.stderr) {
|
||||
process.stderr.write(result.stderr + '\n');
|
||||
}
|
||||
|
||||
process.stdout.write(data);
|
||||
|
||||
405
scripts/hooks/pre-bash-commit-quality.js
Normal file
405
scripts/hooks/pre-bash-commit-quality.js
Normal file
@@ -0,0 +1,405 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* PreToolUse Hook: Pre-commit Quality Check
|
||||
*
|
||||
* Runs quality checks before git commit commands:
|
||||
* - Detects staged files
|
||||
* - Runs linter on staged files (if available)
|
||||
* - Checks for common issues (console.log, TODO, etc.)
|
||||
* - Validates commit message format (if provided)
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Exit codes:
|
||||
* 0 - Success (allow commit)
|
||||
* 2 - Block commit (quality issues found)
|
||||
*/
|
||||
|
||||
const { spawnSync } = require('child_process');
|
||||
const path = require('path');
|
||||
const fs = require('fs');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024; // 1MB cap on buffered stdin; input beyond this is truncated, not an error
|
||||
|
||||
/**
 * List files currently staged for commit (added/copied/modified/renamed only).
 * @returns {string[]} Staged file paths; empty array when git fails or nothing is staged.
 */
function getStagedFiles() {
  const gitArgs = ['diff', '--cached', '--name-only', '--diff-filter=ACMR'];
  const proc = spawnSync('git', gitArgs, {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe']
  });
  if (proc.status !== 0) {
    return [];
  }
  const names = proc.stdout.trim().split('\n');
  return names.filter(name => name.length > 0);
}
|
||||
|
||||
/**
 * Read the staged (index) version of a file via `git show :<path>`.
 * @param {string} filePath - Repo-relative path of a staged file.
 * @returns {string|null} Staged content, or null when git cannot produce it.
 */
function getStagedFileContent(filePath) {
  const proc = spawnSync('git', ['show', `:${filePath}`], {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe']
  });
  return proc.status === 0 ? proc.stdout : null;
}
|
||||
|
||||
/**
 * Check if a file should be quality-checked.
 * Only source files in supported languages are scanned.
 * @param {string} filePath
 * @returns {boolean} True when the file has a checkable extension.
 */
function shouldCheckFile(filePath) {
  for (const ext of ['.js', '.jsx', '.ts', '.tsx', '.py', '.go', '.rs']) {
    if (filePath.endsWith(ext)) {
      return true;
    }
  }
  return false;
}
|
||||
|
||||
/**
 * Scan the staged content of one file for common pre-commit issues:
 * leftover console.log calls, debugger statements, TODO/FIXME markers
 * without an issue reference, and likely hardcoded secrets.
 * @param {string} filePath - Repo-relative path of a staged file.
 * @returns {object[]} Issues found ({type, message, line, severity}); empty on any read error.
 */
function findFileIssues(filePath) {
  const issues = [];

  // Hoisted out of the per-line loop: these patterns are constant, and the
  // original re-allocated the whole array (and its regex objects) on every line.
  const secretPatterns = [
    { pattern: /sk-[a-zA-Z0-9]{20,}/, name: 'OpenAI API key' },
    { pattern: /ghp_[a-zA-Z0-9]{36}/, name: 'GitHub PAT' },
    { pattern: /AKIA[A-Z0-9]{16}/, name: 'AWS Access Key' },
    { pattern: /api[_-]?key\s*[=:]\s*['"][^'"]+['"]/i, name: 'API key' }
  ];

  try {
    const content = getStagedFileContent(filePath);
    if (content == null) {
      return issues;
    }
    const lines = content.split('\n');

    lines.forEach((line, index) => {
      const lineNum = index + 1;

      // console.log outside line comments / block-comment continuation lines
      if (line.includes('console.log') && !line.trim().startsWith('//') && !line.trim().startsWith('*')) {
        issues.push({
          type: 'console.log',
          message: `console.log found at line ${lineNum}`,
          line: lineNum,
          severity: 'warning'
        });
      }

      // debugger statements are never commit-worthy — severity 'error' blocks the commit
      if (/\bdebugger\b/.test(line) && !line.trim().startsWith('//')) {
        issues.push({
          type: 'debugger',
          message: `debugger statement at line ${lineNum}`,
          line: lineNum,
          severity: 'error'
        });
      }

      // TODO/FIXME comments should reference a tracked issue (e.g. "#123" or "issue")
      const todoMatch = line.match(/\/\/\s*(TODO|FIXME):?\s*(.+)/);
      if (todoMatch && !todoMatch[2].match(/#\d+|issue/i)) {
        issues.push({
          type: 'todo',
          message: `TODO/FIXME without issue reference at line ${lineNum}: "${todoMatch[2].trim()}"`,
          line: lineNum,
          severity: 'info'
        });
      }

      // Basic hardcoded-secret detection; severity 'error' blocks the commit
      for (const { pattern, name } of secretPatterns) {
        if (pattern.test(line)) {
          issues.push({
            type: 'secret',
            message: `Potential ${name} exposed at line ${lineNum}`,
            line: lineNum,
            severity: 'error'
          });
        }
      }
    });
  } catch {
    // File not readable (or git unavailable): report no issues rather than blocking
  }

  return issues;
}
|
||||
|
||||
/**
 * Validate an inline commit message (-m/--message) against conventional-commit rules.
 * @param {string} command - The full git commit command line.
 * @returns {{message: string, issues: object[]}|null} Validation result, or null when
 *   the command carries no inline message to validate.
 */
function validateCommitMessage(command) {
  // Pull the quoted (or bare) message out of the command line
  const extracted = command.match(/(?:-m|--message)[=\s]+["']?([^"']+)["']?/);
  if (!extracted) return null;

  const message = extracted[1];
  const issues = [];

  const conventionalPattern = /^(feat|fix|docs|style|refactor|test|chore|build|ci|perf|revert)(\(.+\))?:\s*.+/;
  const isConventional = conventionalPattern.test(message);

  if (!isConventional) {
    issues.push({
      type: 'format',
      message: 'Commit message does not follow conventional commit format',
      suggestion: 'Use format: type(scope): description (e.g., "feat(auth): add login flow")'
    });
  }

  if (message.length > 72) {
    issues.push({
      type: 'length',
      message: `Commit message too long (${message.length} chars, max 72)`,
      suggestion: 'Keep the first line under 72 characters'
    });
  }

  // Conventional style wants a lowercase subject after "type:"
  if (isConventional) {
    const subject = message.split(':')[1];
    if (subject && /^[A-Z]/.test(subject.trim())) {
      issues.push({
        type: 'capitalization',
        message: 'Subject should start with lowercase after type',
        suggestion: 'Use lowercase for the first letter of the subject'
      });
    }
  }

  if (message.endsWith('.')) {
    issues.push({
      type: 'punctuation',
      message: 'Commit message should not end with a period',
      suggestion: 'Remove the trailing period'
    });
  }

  return { message, issues };
}
|
||||
|
||||
/**
 * Run whichever linters are available (ESLint, Pylint, golint) over the staged files.
 * A missing linter leaves its result slot at null rather than failing.
 * @param {string[]} files - Staged file paths to lint.
 * @returns {{eslint: object|null, pylint: object|null, golint: object|null}} Per-linter
 *   results with {success, output}, or null when the linter did not run.
 */
function runLinter(files) {
  const results = { eslint: null, pylint: null, golint: null };

  const spawnOpts = {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 30000
  };

  const jsFiles = files.filter(f => /\.(js|jsx|ts|tsx)$/.test(f));
  const pyFiles = files.filter(f => f.endsWith('.py'));
  const goFiles = files.filter(f => f.endsWith('.go'));

  // ESLint: only via the project-local binary under node_modules/.bin
  if (jsFiles.length > 0) {
    const binName = process.platform === 'win32' ? 'eslint.cmd' : 'eslint';
    const localEslint = path.join(process.cwd(), 'node_modules', '.bin', binName);
    if (fs.existsSync(localEslint)) {
      const proc = spawnSync(localEslint, ['--format', 'compact', ...jsFiles], spawnOpts);
      results.eslint = {
        success: proc.status === 0,
        output: proc.stdout || proc.stderr
      };
    }
  }

  // Pylint: resolved from PATH; ENOENT means "not installed", leave the slot null
  if (pyFiles.length > 0) {
    try {
      const proc = spawnSync('pylint', ['--output-format=text', ...pyFiles], spawnOpts);
      if (!(proc.error && proc.error.code === 'ENOENT')) {
        results.pylint = {
          success: proc.status === 0,
          output: proc.stdout || proc.stderr
        };
      }
    } catch {
      // Pylint not available
    }
  }

  // golint: success means it printed nothing (findings go to stdout)
  if (goFiles.length > 0) {
    try {
      const proc = spawnSync('golint', goFiles, spawnOpts);
      if (!(proc.error && proc.error.code === 'ENOENT')) {
        results.golint = {
          success: !proc.stdout || proc.stdout.trim() === '',
          output: proc.stdout
        };
      }
    } catch {
      // golint not available
    }
  }

  return results;
}
|
||||
|
||||
/**
 * Core logic — exported for direct invocation.
 *
 * Orchestrates the pre-commit quality gate: for `git commit` commands it
 * inspects staged files, validates the inline commit message, runs available
 * linters, and decides whether to block. Always passes the raw hook payload
 * through unchanged on stdout; the verdict is carried by the exit code.
 *
 * @param {string} rawInput - Raw JSON string from stdin (PreToolUse payload).
 * @returns {{output:string, exitCode:number}} Pass-through output and exit code
 *   (0 = allow commit, 2 = block due to error-severity findings).
 */
function evaluate(rawInput) {
  try {
    const input = JSON.parse(rawInput);
    const command = input.tool_input?.command || '';

    // Only run for git commit commands; everything else passes straight through
    if (!command.includes('git commit')) {
      return { output: rawInput, exitCode: 0 };
    }

    // Check if this is an amend (skip checks for amends to avoid blocking)
    if (command.includes('--amend')) {
      return { output: rawInput, exitCode: 0 };
    }

    // Get staged files; nothing staged means nothing to gate on
    const stagedFiles = getStagedFiles();

    if (stagedFiles.length === 0) {
      console.error('[Hook] No staged files found. Use "git add" to stage files first.');
      return { output: rawInput, exitCode: 0 };
    }

    console.error(`[Hook] Checking ${stagedFiles.length} staged file(s)...`);

    // Check each staged file, tallying issues by severity for the summary
    const filesToCheck = stagedFiles.filter(shouldCheckFile);
    let totalIssues = 0;
    let errorCount = 0;
    let warningCount = 0;
    let infoCount = 0;

    for (const file of filesToCheck) {
      const fileIssues = findFileIssues(file);
      if (fileIssues.length > 0) {
        console.error(`\n📁 ${file}`);
        for (const issue of fileIssues) {
          const icon = issue.severity === 'error' ? '❌' : issue.severity === 'warning' ? '⚠️' : 'ℹ️';
          console.error(`  ${icon} Line ${issue.line}: ${issue.message}`);
          totalIssues++;
          if (issue.severity === 'error') errorCount++;
          if (issue.severity === 'warning') warningCount++;
          if (issue.severity === 'info') infoCount++;
        }
      }
    }

    // Validate commit message if provided; message issues count as warnings only
    const messageValidation = validateCommitMessage(command);
    if (messageValidation && messageValidation.issues.length > 0) {
      console.error('\n📝 Commit Message Issues:');
      for (const issue of messageValidation.issues) {
        console.error(`  ⚠️ ${issue.message}`);
        if (issue.suggestion) {
          console.error(`     💡 ${issue.suggestion}`);
        }
        totalIssues++;
        warningCount++;
      }
    }

    // Run linters; each failing linter counts as a single error in the tally
    const lintResults = runLinter(filesToCheck);

    if (lintResults.eslint && !lintResults.eslint.success) {
      console.error('\n🔍 ESLint Issues:');
      console.error(lintResults.eslint.output);
      totalIssues++;
      errorCount++;
    }

    if (lintResults.pylint && !lintResults.pylint.success) {
      console.error('\n🔍 Pylint Issues:');
      console.error(lintResults.pylint.output);
      totalIssues++;
      errorCount++;
    }

    if (lintResults.golint && !lintResults.golint.success) {
      console.error('\n🔍 golint Issues:');
      console.error(lintResults.golint.output);
      totalIssues++;
      errorCount++;
    }

    // Summary: only error-severity findings block the commit (exit code 2);
    // warnings and info are reported but allowed through
    if (totalIssues > 0) {
      console.error(`\n📊 Summary: ${totalIssues} issue(s) found (${errorCount} error(s), ${warningCount} warning(s), ${infoCount} info)`);

      if (errorCount > 0) {
        console.error('\n[Hook] ❌ Commit blocked due to critical issues. Fix them before committing.');
        return { output: rawInput, exitCode: 2 };
      } else {
        console.error('\n[Hook] ⚠️ Warnings found. Consider fixing them, but commit is allowed.');
        console.error('[Hook] To bypass these checks, use: git commit --no-verify');
      }
    } else {
      console.error('\n[Hook] ✅ All checks passed!');
    }

  } catch (error) {
    console.error(`[Hook] Error: ${error.message}`);
    // Non-blocking on error: a broken hook must never block a commit
  }

  return { output: rawInput, exitCode: 0 };
}
|
||||
|
||||
/**
 * Compatibility wrapper for in-process execution via run-with-flags.js:
 * returns only the pass-through output, discarding the exit code.
 * @param {string} rawInput - Raw JSON hook payload.
 * @returns {string} The unmodified input payload.
 */
function run(rawInput) {
  const { output } = evaluate(rawInput);
  return output;
}
|
||||
|
||||
// ── stdin entry point ────────────────────────────────────────────
// Only wired up when executed directly (not when required as a module).
if (require.main === module) {
  let buffered = '';
  process.stdin.setEncoding('utf8');

  // Accumulate stdin, silently truncating anything past MAX_STDIN characters
  process.stdin.on('data', piece => {
    const room = MAX_STDIN - buffered.length;
    if (room > 0) {
      buffered += piece.substring(0, room);
    }
  });

  // Once the payload is complete: evaluate, pass it through, and exit with the verdict
  process.stdin.on('end', () => {
    const { output, exitCode } = evaluate(buffered);
    process.stdout.write(output);
    process.exit(exitCode);
  });
}
|
||||
|
||||
module.exports = { run, evaluate };
|
||||
178
skills/agent-payment-x402/SKILL.md
Normal file
178
skills/agent-payment-x402/SKILL.md
Normal file
@@ -0,0 +1,178 @@
|
||||
---
|
||||
name: agent-payment-x402
|
||||
description: Add x402 payment execution to AI agents — per-task budgets, spending controls, and non-custodial wallets via MCP tools. Use when agents need to pay for APIs, services, or other agents.
|
||||
origin: community
|
||||
---
|
||||
|
||||
# Agent Payment Execution (x402)
|
||||
|
||||
Enable AI agents to make autonomous payments with built-in spending controls. Uses the x402 HTTP payment protocol and MCP tools so agents can pay for external services, APIs, or other agents without custodial risk.
|
||||
|
||||
## When to Use
|
||||
|
||||
Use when: your agent needs to pay for an API call, purchase a service, settle with another agent, enforce per-task spending limits, or manage a non-custodial wallet. Pairs naturally with cost-aware-llm-pipeline and security-review skills.
|
||||
|
||||
## How It Works
|
||||
|
||||
### x402 Protocol
|
||||
x402 extends HTTP 402 (Payment Required) into a machine-negotiable flow. When a server returns `402`, the agent's payment tool automatically negotiates price, checks budget, signs a transaction, and retries — no human in the loop.
|
||||
|
||||
### Spending Controls
|
||||
Every payment tool call enforces a `SpendingPolicy`:
|
||||
- **Per-task budget** — max spend for a single agent action
|
||||
- **Per-session budget** — cumulative limit across an entire session
|
||||
- **Allowlisted recipients** — restrict which addresses/services the agent can pay
|
||||
- **Rate limits** — max transactions per minute/hour
|
||||
|
||||
### Non-Custodial Wallets
|
||||
Agents hold their own keys via ERC-4337 smart accounts. The orchestrator sets policy before delegation; the agent can only spend within bounds. No pooled funds, no custodial risk.
|
||||
|
||||
## MCP Integration
|
||||
|
||||
The payment layer exposes standard MCP tools that slot into any Claude Code or agent harness setup.
|
||||
|
||||
> **Security note**: Always pin the package version. This tool manages private keys — unpinned `npx` installs introduce supply-chain risk.
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"agentpay": {
|
||||
"command": "npx",
|
||||
"args": ["agentwallet-sdk@6.0.0"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Available Tools (agent-callable)
|
||||
|
||||
| Tool | Purpose |
|
||||
|------|---------|
|
||||
| `get_balance` | Check agent wallet balance |
|
||||
| `send_payment` | Send payment to address or ENS |
|
||||
| `check_spending` | Query remaining budget |
|
||||
| `list_transactions` | Audit trail of all payments |
|
||||
|
||||
> **Note**: Spending policy is set by the **orchestrator** before delegating to the agent — not by the agent itself. This prevents agents from escalating their own spending limits. Configure policy via `set_policy` in your orchestration layer or pre-task hook, never as an agent-callable tool.
|
||||
|
||||
## Examples
|
||||
|
||||
### Budget enforcement in an MCP client
|
||||
|
||||
When building an orchestrator that calls the agentpay MCP server, enforce budgets before dispatching paid tool calls.
|
||||
|
||||
> **Prerequisites**: Install the package before adding the MCP config — `npx` without `-y` will prompt for confirmation in non-interactive environments, causing the server to hang: `npm install -g agentwallet-sdk@6.0.0`
|
||||
|
||||
```typescript
|
||||
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
||||
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
||||
|
||||
async function main() {
|
||||
// 1. Validate credentials before constructing the transport.
|
||||
// A missing key must fail immediately — never let the subprocess start without auth.
|
||||
const walletKey = process.env.WALLET_PRIVATE_KEY;
|
||||
if (!walletKey) {
|
||||
throw new Error("WALLET_PRIVATE_KEY is not set — refusing to start payment server");
|
||||
}
|
||||
|
||||
// Connect to the agentpay MCP server via stdio transport.
|
||||
// Whitelist only the env vars the server needs — never forward all of process.env
|
||||
// to a third-party subprocess that manages private keys.
|
||||
const transport = new StdioClientTransport({
|
||||
command: "npx",
|
||||
args: ["agentwallet-sdk@6.0.0"],
|
||||
env: {
|
||||
PATH: process.env.PATH ?? "",
|
||||
NODE_ENV: process.env.NODE_ENV ?? "production",
|
||||
WALLET_PRIVATE_KEY: walletKey,
|
||||
},
|
||||
});
|
||||
const agentpay = new Client({ name: "orchestrator", version: "1.0.0" });
|
||||
await agentpay.connect(transport);
|
||||
|
||||
// 2. Set spending policy before delegating to the agent.
|
||||
// Always verify success — a silent failure means no controls are active.
|
||||
const policyResult = await agentpay.callTool({
|
||||
name: "set_policy",
|
||||
arguments: {
|
||||
per_task_budget: 0.50,
|
||||
per_session_budget: 5.00,
|
||||
allowlisted_recipients: ["api.example.com"],
|
||||
},
|
||||
});
|
||||
if (policyResult.isError) {
|
||||
throw new Error(
|
||||
`Failed to set spending policy — do not delegate: ${JSON.stringify(policyResult.content)}`
|
||||
);
|
||||
}
|
||||
|
||||
// 3. Use preToolCheck before any paid action
|
||||
await preToolCheck(agentpay, 0.01);
|
||||
}
|
||||
|
||||
// Pre-tool hook: fail-closed budget enforcement with five distinct error paths.
|
||||
async function preToolCheck(agentpay: Client, apiCost: number): Promise<void> {
|
||||
// Path 1: Reject invalid input (NaN/Infinity bypass the < comparison)
|
||||
if (!Number.isFinite(apiCost) || apiCost < 0) {
|
||||
throw new Error(`Invalid apiCost: ${apiCost} — action blocked`);
|
||||
}
|
||||
|
||||
// Path 2: Transport/connectivity failure
|
||||
let result;
|
||||
try {
|
||||
result = await agentpay.callTool({ name: "check_spending" });
|
||||
} catch (err) {
|
||||
throw new Error(`Payment service unreachable — action blocked: ${err}`);
|
||||
}
|
||||
|
||||
// Path 3: Tool returned an error (e.g., auth failure, wallet not initialised)
|
||||
if (result.isError) {
|
||||
throw new Error(
|
||||
`check_spending failed — action blocked: ${JSON.stringify(result.content)}`
|
||||
);
|
||||
}
|
||||
|
||||
// Path 4: Parse and validate the response shape
|
||||
let remaining: number;
|
||||
try {
|
||||
const parsed = JSON.parse(
|
||||
(result.content as Array<{ text: string }>)[0].text
|
||||
);
|
||||
if (!Number.isFinite(parsed?.remaining)) {
|
||||
throw new TypeError("missing or non-finite 'remaining' field");
|
||||
}
|
||||
remaining = parsed.remaining;
|
||||
} catch (err) {
|
||||
throw new Error(
|
||||
`check_spending returned unexpected format — action blocked: ${err}`
|
||||
);
|
||||
}
|
||||
|
||||
// Path 5: Budget exceeded
|
||||
if (remaining < apiCost) {
|
||||
throw new Error(
|
||||
`Budget exceeded: need $${apiCost} but only $${remaining} remaining`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
main().catch((err) => {
|
||||
console.error(err);
|
||||
process.exitCode = 1;
|
||||
});
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
- **Set budgets before delegation**: When spawning sub-agents, attach a SpendingPolicy via your orchestration layer. Never give an agent unlimited spend.
|
||||
- **Pin your dependencies**: Always specify an exact version in your MCP config (e.g., `agentwallet-sdk@6.0.0`). Verify package integrity before deploying to production.
|
||||
- **Audit trails**: Use `list_transactions` in post-task hooks to log what was spent and why.
|
||||
- **Fail closed**: If the payment tool is unreachable, block the paid action — don't fall back to unmetered access.
|
||||
- **Pair with security-review**: Payment tools are high-privilege. Apply the same scrutiny as shell access.
|
||||
- **Test with testnets first**: Use Base Sepolia for development; switch to Base mainnet for production.
|
||||
|
||||
## Production Reference
|
||||
|
||||
- **npm**: [`agentwallet-sdk`](https://www.npmjs.com/package/agentwallet-sdk)
|
||||
- **Merged into NVIDIA NeMo Agent Toolkit**: [PR #17](https://github.com/NVIDIA/NeMo-Agent-Toolkit-Examples/pull/17) — x402 payment tool for NVIDIA's agent examples
|
||||
- **Protocol spec**: [x402.org](https://x402.org)
|
||||
147
skills/ck/SKILL.md
Normal file
147
skills/ck/SKILL.md
Normal file
@@ -0,0 +1,147 @@
|
||||
---
|
||||
name: ck
|
||||
description: Persistent per-project memory for Claude Code. Auto-loads project context on session start, tracks sessions with git activity, and writes to native memory. Commands run deterministic Node.js scripts — behavior is consistent across model versions.
|
||||
origin: community
|
||||
version: 2.0.0
|
||||
author: sreedhargs89
|
||||
repo: https://github.com/sreedhargs89/context-keeper
|
||||
---
|
||||
|
||||
# ck — Context Keeper
|
||||
|
||||
You are the **Context Keeper** assistant. When the user invokes any `/ck:*` command,
|
||||
run the corresponding Node.js script and present its stdout to the user verbatim.
|
||||
Scripts live at: `~/.claude/skills/ck/commands/` (expand `~` with `$HOME`).
|
||||
|
||||
---
|
||||
|
||||
## Data Layout
|
||||
|
||||
```
|
||||
~/.claude/ck/
|
||||
├── projects.json ← path → {name, contextDir, lastUpdated}
|
||||
└── contexts/<name>/
|
||||
├── context.json ← SOURCE OF TRUTH (structured JSON, v2)
|
||||
└── CONTEXT.md ← generated view — do not hand-edit
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Commands
|
||||
|
||||
### `/ck:init` — Register a Project
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/init.mjs"
|
||||
```
|
||||
The script outputs JSON with auto-detected info. Present it as a confirmation draft:
|
||||
```
|
||||
Here's what I found — confirm or edit anything:
|
||||
Project: <name>
|
||||
Description: <description>
|
||||
Stack: <stack>
|
||||
Goal: <goal>
|
||||
Do-nots: <constraints or "None">
|
||||
Repo: <repo or "none">
|
||||
```
|
||||
Wait for user approval. Apply any edits. Then pipe confirmed JSON to save.mjs --init:
|
||||
```bash
|
||||
echo '<confirmed-json>' | node "$HOME/.claude/skills/ck/commands/save.mjs" --init
|
||||
```
|
||||
Confirmed JSON schema: `{"name":"...","path":"...","description":"...","stack":["..."],"goal":"...","constraints":["..."],"repo":"..." }`
|
||||
|
||||
---
|
||||
|
||||
### `/ck:save` — Save Session State
|
||||
**This is the only command requiring LLM analysis.** Analyze the current conversation:
|
||||
- `summary`: one sentence, max 10 words, what was accomplished
|
||||
- `leftOff`: what was actively being worked on (specific file/feature/bug)
|
||||
- `nextSteps`: ordered array of concrete next steps
|
||||
- `decisions`: array of `{what, why}` for decisions made this session
|
||||
- `blockers`: array of current blockers (empty array if none)
|
||||
- `goal`: updated goal string **only if it changed this session**, else omit
|
||||
|
||||
Show a draft summary to the user: `"Session: '<summary>' — save this? (yes / edit)"`
|
||||
Wait for confirmation. Then pipe to save.mjs:
|
||||
```bash
|
||||
echo '<json>' | node "$HOME/.claude/skills/ck/commands/save.mjs"
|
||||
```
|
||||
JSON schema (exact): `{"summary":"...","leftOff":"...","nextSteps":["..."],"decisions":[{"what":"...","why":"..."}],"blockers":["..."]}`
|
||||
Display the script's stdout confirmation verbatim.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:resume [name|number]` — Full Briefing
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/resume.mjs" [arg]
|
||||
```
|
||||
Display output verbatim. Then ask: "Continue from here? Or has anything changed?"
|
||||
If user reports changes → run `/ck:save` immediately.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:info [name|number]` — Quick Snapshot
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/info.mjs" [arg]
|
||||
```
|
||||
Display output verbatim. No follow-up question.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:list` — Portfolio View
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/list.mjs"
|
||||
```
|
||||
Display output verbatim. If user replies with a number or name → run `/ck:resume`.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:forget [name|number]` — Remove a Project
|
||||
First resolve the project name (run `/ck:list` if needed).
|
||||
Ask: `"This will permanently delete context for '<name>'. Are you sure? (yes/no)"`
|
||||
If yes:
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/forget.mjs" [name]
|
||||
```
|
||||
Display confirmation verbatim.
|
||||
|
||||
---
|
||||
|
||||
### `/ck:migrate` — Convert v1 Data to v2
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/migrate.mjs"
|
||||
```
|
||||
For a dry run first:
|
||||
```bash
|
||||
node "$HOME/.claude/skills/ck/commands/migrate.mjs" --dry-run
|
||||
```
|
||||
Display output verbatim. Migrates all v1 CONTEXT.md + meta.json files to v2 context.json.
|
||||
Originals are backed up as `meta.json.v1-backup` — nothing is deleted.
|
||||
|
||||
---
|
||||
|
||||
## SessionStart Hook
|
||||
|
||||
The hook at `~/.claude/skills/ck/hooks/session-start.mjs` must be registered in
|
||||
`~/.claude/settings.json` to auto-load project context on session start:
|
||||
|
||||
```json
|
||||
{
|
||||
"hooks": {
|
||||
"SessionStart": [
|
||||
{ "hooks": [{ "type": "command", "command": "node \"$HOME/.claude/skills/ck/hooks/session-start.mjs\"" }] }
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The hook injects ~100 tokens per session (compact 5-line summary). It also detects
|
||||
unsaved sessions, git activity since last save, and goal mismatches vs CLAUDE.md.
|
||||
|
||||
---
|
||||
|
||||
## Rules
|
||||
- Always expand `~` as `$HOME` in Bash calls.
|
||||
- Commands are case-insensitive: `/CK:SAVE`, `/ck:save`, `/Ck:Save` all work.
|
||||
- If a script exits with code 1, display its stdout as an error message.
|
||||
- Never edit `context.json` or `CONTEXT.md` directly — always use the scripts.
|
||||
- If `projects.json` is malformed, tell the user and offer to reset it to `{}`.
|
||||
44
skills/ck/commands/forget.mjs
Normal file
44
skills/ck/commands/forget.mjs
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/usr/bin/env node
/**
 * ck — Context Keeper v2
 * forget.mjs — remove a project's context and registry entry
 *
 * Usage: node forget.mjs [name|number]
 * stdout: confirmation or error
 * exit 0: success   exit 1: not found
 *
 * Note: SKILL.md instructs Claude to ask "Are you sure?" before calling this script.
 * This script is the "do it" step — no confirmation prompt here.
 */

import { rmSync } from 'fs';
import { resolve } from 'path';
import { resolveContext, readProjects, writeProjects, CONTEXTS_DIR } from './shared.mjs';

const selector = process.argv[2];
const workingDir = process.env.PWD || process.cwd();

const target = resolveContext(selector, workingDir);
if (!target) {
  const hint = selector ? `No project matching "${selector}".` : 'This directory is not registered.';
  console.log(hint);
  process.exit(1);
}

const { name, contextDir, projectPath } = target;

// Delete the per-project context directory first; bail out before touching the
// registry if that fails, so projects.json never points at a half-removed dir.
try {
  rmSync(resolve(CONTEXTS_DIR, contextDir), { recursive: true, force: true });
} catch (e) {
  console.log(`ck: could not remove context directory — ${e.message}`);
  process.exit(1);
}

// Drop the registry entry and persist.
const registry = readProjects();
delete registry[projectPath];
writeProjects(registry);

console.log(`✓ Context for '${name}' removed.`);
|
||||
24
skills/ck/commands/info.mjs
Normal file
24
skills/ck/commands/info.mjs
Normal file
@@ -0,0 +1,24 @@
|
||||
#!/usr/bin/env node
/**
 * ck — Context Keeper v2
 * info.mjs — quick read-only context snapshot
 *
 * Usage: node info.mjs [name|number]
 * stdout: compact info block
 * exit 0: success   exit 1: not found
 */

import { resolveContext, renderInfoBlock } from './shared.mjs';

const selector = process.argv[2];
const workingDir = process.env.PWD || process.cwd();

const match = resolveContext(selector, workingDir);
if (!match) {
  const reason = selector
    ? `No project matching "${selector}".`
    : 'This directory is not registered.';
  console.log(`${reason} Run /ck:init to register it.`);
  process.exit(1);
}

// Leading blank line, then the rendered snapshot.
console.log('');
console.log(renderInfoBlock(match.context));
|
||||
143
skills/ck/commands/init.mjs
Normal file
143
skills/ck/commands/init.mjs
Normal file
@@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* init.mjs — auto-detect project info and output JSON for Claude to confirm
|
||||
*
|
||||
* Usage: node init.mjs
|
||||
* stdout: JSON with auto-detected project info
|
||||
* exit 0: success exit 1: error
|
||||
*/
|
||||
|
||||
import { readFileSync, existsSync } from 'fs';
|
||||
import { resolve, basename } from 'path';
|
||||
import { readProjects } from './shared.mjs';
|
||||
|
||||
// Project root: prefer $PWD when set, fall back to process.cwd().
const cwd = process.env.PWD || process.cwd();
const projects = readProjects();

// Skeleton of the JSON printed for Claude/the user to confirm. Fields stay
// null/empty unless one of the detectors below fills them in.
const output = {
  path: cwd,
  name: null,
  description: null,
  stack: [],
  goal: null,
  constraints: [],
  repo: null,
  // True when this directory already has a projects.json entry.
  alreadyRegistered: !!projects[cwd],
};
|
||||
|
||||
// Read a file relative to the project root; returns null when the file is
// absent or unreadable (best-effort — detection must never crash init).
function readFile(filename) {
  const fullPath = resolve(cwd, filename);
  if (!existsSync(fullPath)) {
    return null;
  }
  try {
    return readFileSync(fullPath, 'utf8');
  } catch {
    return null;
  }
}
|
||||
|
||||
/**
 * Extract the body of a `## <heading>` markdown section.
 *
 * @param {string} md - Markdown document to search.
 * @param {string} heading - Section heading text (matched literally).
 * @returns {string|null} Trimmed section body, or null when absent.
 */
function extractSection(md, heading) {
  // Escape regex metacharacters so headings like "C++ Notes" or "(WIP)"
  // match literally instead of throwing or matching the wrong text.
  const safe = heading.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const re = new RegExp(`## ${safe}\\n([\\s\\S]*?)(?=\\n## |$)`);
  const m = md.match(re);
  return m ? m[1].trim() : null;
}
|
||||
|
||||
// ── package.json ──────────────────────────────────────────────────────────────
// Node projects: name, description, and a dependency-based stack guess.
const pkg = readFile('package.json');
if (pkg) {
  try {
    const parsed = JSON.parse(pkg);
    if (parsed.name && !output.name) output.name = parsed.name;
    if (parsed.description && !output.description) output.description = parsed.description;

    // Detect stack from dependencies (deps and devDeps merged).
    const deps = Object.keys({ ...(parsed.dependencies || {}), ...(parsed.devDependencies || {}) });
    // Known package → human-readable stack label.
    const stackMap = {
      next: 'Next.js', react: 'React', vue: 'Vue', svelte: 'Svelte', astro: 'Astro',
      express: 'Express', fastify: 'Fastify', hono: 'Hono', nestjs: 'NestJS',
      typescript: 'TypeScript', prisma: 'Prisma', drizzle: 'Drizzle',
      '@neondatabase/serverless': 'Neon', '@upstash/redis': 'Upstash Redis',
      '@clerk/nextjs': 'Clerk', stripe: 'Stripe', tailwindcss: 'Tailwind CSS',
    };
    for (const [dep, label] of Object.entries(stackMap)) {
      if (deps.includes(dep) && !output.stack.includes(label)) {
        output.stack.push(label);
      }
    }
    // TypeScript can also be signalled by tsconfig.json alone.
    if (deps.includes('typescript') || existsSync(resolve(cwd, 'tsconfig.json'))) {
      if (!output.stack.includes('TypeScript')) output.stack.push('TypeScript');
    }
  } catch { /* malformed package.json */ }
}

// ── go.mod ────────────────────────────────────────────────────────────────────
const goMod = readFile('go.mod');
if (goMod) {
  if (!output.stack.includes('Go')) output.stack.push('Go');
  // Last path segment of the module path doubles as a name guess.
  const modName = goMod.match(/^module\s+(\S+)/m)?.[1];
  if (modName && !output.name) output.name = modName.split('/').pop();
}

// ── Cargo.toml ────────────────────────────────────────────────────────────────
const cargo = readFile('Cargo.toml');
if (cargo) {
  if (!output.stack.includes('Rust')) output.stack.push('Rust');
  const crateName = cargo.match(/^name\s*=\s*"(.+?)"/m)?.[1];
  if (crateName && !output.name) output.name = crateName;
}

// ── pyproject.toml ────────────────────────────────────────────────────────────
const pyproject = readFile('pyproject.toml');
if (pyproject) {
  if (!output.stack.includes('Python')) output.stack.push('Python');
  const pyName = pyproject.match(/^name\s*=\s*"(.+?)"/m)?.[1];
  if (pyName && !output.name) output.name = pyName;
}

// ── .git/config (repo URL) ────────────────────────────────────────────────────
// NOTE(review): takes the first `url =` line in .git/config — whichever
// remote happens to appear first, not necessarily "origin".
const gitConfig = readFile('.git/config');
if (gitConfig) {
  const repoMatch = gitConfig.match(/url\s*=\s*(.+)/);
  if (repoMatch) output.repo = repoMatch[1].trim();
}

// ── CLAUDE.md ─────────────────────────────────────────────────────────────────
// Goal, constraints, stack, and description can all come from CLAUDE.md
// sections; earlier detectors win for name/description/stack.
const claudeMd = readFile('CLAUDE.md');
if (claudeMd) {
  const goal = extractSection(claudeMd, 'Current Goal');
  // Only the first line of the section is kept as the goal.
  if (goal && !output.goal) output.goal = goal.split('\n')[0].trim();

  const doNot = extractSection(claudeMd, 'Do Not Do');
  if (doNot) {
    // Bullet lines ("- x" / "* x") become individual constraints.
    const bullets = doNot.split('\n')
      .filter(l => /^[-*]\s+/.test(l))
      .map(l => l.replace(/^[-*]\s+/, '').trim());
    output.constraints = bullets;
  }

  const stack = extractSection(claudeMd, 'Tech Stack');
  // Only used when no manifest-based detection produced a stack.
  if (stack && output.stack.length === 0) {
    output.stack = stack.split(/[,\n]/).map(s => s.replace(/^[-*]\s+/, '').trim()).filter(Boolean);
  }

  // Description from first section or "What This Is"
  const whatItIs = extractSection(claudeMd, 'What This Is') || extractSection(claudeMd, 'About');
  if (whatItIs && !output.description) output.description = whatItIs.split('\n')[0].trim();
}

// ── README.md (description fallback) ─────────────────────────────────────────
const readme = readFile('README.md');
if (readme && !output.description) {
  // First non-header, non-badge, non-empty paragraph
  const lines = readme.split('\n');
  for (const line of lines) {
    const trimmed = line.trim();
    if (trimmed && !trimmed.startsWith('#') && !trimmed.startsWith('!') && !trimmed.startsWith('>') && !trimmed.startsWith('[') && trimmed !== '---' && trimmed !== '___') {
      // Cap the description at 120 characters.
      output.description = trimmed.slice(0, 120);
      break;
    }
  }
}

// ── Name fallback: directory name ─────────────────────────────────────────────
if (!output.name) {
  output.name = basename(cwd).toLowerCase().replace(/\s+/g, '-');
}

// Emit the detected info as pretty-printed JSON for Claude to confirm.
console.log(JSON.stringify(output, null, 2));
|
||||
41
skills/ck/commands/list.mjs
Normal file
41
skills/ck/commands/list.mjs
Normal file
@@ -0,0 +1,41 @@
|
||||
#!/usr/bin/env node
/**
 * ck — Context Keeper v2
 * list.mjs — portfolio view of all registered projects
 *
 * Usage: node list.mjs
 * stdout: ASCII table of all projects + prompt to resume
 * exit 0: success   exit 1: no projects
 */

import { readProjects, loadContext, today, CONTEXTS_DIR } from './shared.mjs';
import { renderListTable } from './shared.mjs';

const workingDir = process.env.PWD || process.cwd();
const registry = readProjects();
const registered = Object.entries(registry);

if (registered.length === 0) {
  console.log('No projects registered. Run /ck:init to get started.');
  process.exit(1);
}

// Enrich each registry entry with its loaded context, then sort by contextDir
// for a stable, alphabetical table order.
const rows = [];
for (const [path, info] of registered) {
  rows.push({
    name: info.name,
    contextDir: info.contextDir,
    path,
    context: loadContext(info.contextDir),
    lastUpdated: info.lastUpdated,
  });
}
rows.sort((a, b) => a.contextDir.localeCompare(b.contextDir));

const table = renderListTable(rows, workingDir, today());
console.log('');
console.log(table);
console.log('');
console.log('Resume which? (number or name)');
|
||||
198
skills/ck/commands/migrate.mjs
Normal file
198
skills/ck/commands/migrate.mjs
Normal file
@@ -0,0 +1,198 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* migrate.mjs — convert v1 (CONTEXT.md + meta.json) to v2 (context.json)
|
||||
*
|
||||
* Usage:
|
||||
* node migrate.mjs — migrate all v1 projects
|
||||
* node migrate.mjs --dry-run — preview without writing
|
||||
*
|
||||
* Safe: backs up meta.json to meta.json.v1-backup, never deletes data.
|
||||
* exit 0: success exit 1: error
|
||||
*/
|
||||
|
||||
import { readFileSync, writeFileSync, existsSync, renameSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import { readProjects, writeProjects, saveContext, today, shortId, CONTEXTS_DIR } from './shared.mjs';
|
||||
|
||||
// --dry-run: report what would change without writing or renaming any files.
const isDryRun = process.argv.includes('--dry-run');

if (isDryRun) {
  console.log('ck migrate — DRY RUN (no files will be written)\n');
}
|
||||
|
||||
// ── v1 markdown parsers ───────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Extract the body of a `## <heading>` markdown section from a v1 CONTEXT.md.
 *
 * @param {string} md - Markdown document to search.
 * @param {string} heading - Section heading text (matched literally).
 * @returns {string|null} Trimmed section body, or null when absent.
 */
function extractSection(md, heading) {
  // Escape regex metacharacters so headings containing ".", "+", "(" etc.
  // match literally instead of throwing or matching the wrong text.
  const safe = heading.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const re = new RegExp(`## ${safe}\\n([\\s\\S]*?)(?=\\n## |$)`);
  const m = md.match(re);
  return m ? m[1].trim() : null;
}
|
||||
|
||||
/**
 * Parse markdown bullet lines into an array of bare strings.
 * Accepts "-", "*", and ordered markers ("1. x", "2 x"), with or without
 * leading indentation.
 *
 * Fixes two filter/map mismatches in the original: the filter required a
 * single digit followed directly by whitespace, so numbered bullets like
 * "1. step" were dropped even though the map was written to strip "1."; and
 * the filter tested the trimmed line while the map ran on the raw line, so
 * indented bullets kept their "- " marker.
 *
 * @param {string|null|undefined} text - Section body to parse.
 * @returns {string[]} Bullet texts, empty entries removed.
 */
function parseBullets(text) {
  if (!text) return [];
  return text.split('\n')
    // Normalise indentation before matching the bullet marker.
    .map(l => l.trim())
    // "-", "*", or an ordered marker: digits optionally followed by ".".
    .filter(l => /^(?:[-*]|\d+\.?)\s/.test(l))
    .map(l => l.replace(/^(?:[-*]|\d+\.?)\s+/, '').trim())
    .filter(Boolean);
}
|
||||
|
||||
/**
 * Parse a v1 markdown decisions table into {what, why, date} rows.
 * Skips the header row ("Decision ..."), separator rows (|---|---|), and
 * placeholder rows whose first cell starts with "_".
 *
 * @param {string|null|undefined} text - Section body containing the table.
 * @returns {Array<{what: string, why: string, date: string}>}
 */
function parseDecisionsTable(text) {
  if (!text) return [];
  const rows = [];
  for (const line of text.split('\n')) {
    // Ignore anything that isn't a table row, and pure separator rows.
    if (!line.startsWith('|')) continue;
    if (/^[|\s-]+$/.test(line)) continue;
    // Cells 1..3 of the split are the What / Why / Date columns.
    const cells = line.split('|').slice(1, 4).map((cell) => cell.trim());
    if (cells.length === 0) continue;
    const what = cells[0];
    if (what.startsWith('Decision') || what.startsWith('_')) continue;
    rows.push({ what: what || '', why: cells[1] || '', date: cells[2] || '' });
  }
  return rows;
}
|
||||
|
||||
/**
 * Parse "Where I Left Off" which in v1 can be:
 *  - Simple bullet list
 *  - Multi-session blocks: "Session N (date):\n- bullet\n"
 * Returns array of session-like objects {date?, leftOff}
 */
function parseLeftOff(text) {
  if (!text) return [{ leftOff: null }];

  // Split ahead of every "Session N" marker; a single chunk means the
  // simple (non-session) format.
  const chunks = text.split(/(?=Session \d+)/);

  if (chunks.length <= 1) {
    // Simple format: prefer bullets, otherwise the raw trimmed text.
    const bullets = parseBullets(text);
    return [{ leftOff: bullets.length ? bullets.join('\n') : text.trim() }];
  }

  // Multi-session format: one entry per non-empty "Session N" block.
  const results = [];
  for (const chunk of chunks) {
    if (!chunk.trim()) continue;
    const dateMatch = chunk.match(/\((\d{4}-\d{2}-\d{2})\)/);
    const bullets = parseBullets(chunk);
    // Fallback when a block has no bullets: the block text minus its header.
    const fallback = chunk.replace(/^Session \d+.*\n/, '').trim();
    results.push({
      date: dateMatch?.[1] || null,
      leftOff: bullets.length ? bullets.join('\n') : fallback,
    });
  }
  return results;
}
|
||||
|
||||
// ── Main migration ─────────────────────────────────────────────────────────────
// Walk every registered project; convert v1 (CONTEXT.md + meta.json) layouts
// to a single v2 context.json. Per-project failures are counted, not fatal.

const projects = readProjects();
let migrated = 0;
let skipped = 0;
let errors = 0;

for (const [projectPath, info] of Object.entries(projects)) {
  const contextDir = info.contextDir;
  const contextDirPath = resolve(CONTEXTS_DIR, contextDir);
  const contextJsonPath = resolve(contextDirPath, 'context.json');
  const contextMdPath = resolve(contextDirPath, 'CONTEXT.md');
  const metaPath = resolve(contextDirPath, 'meta.json');

  // Already v2 — a malformed context.json falls through and gets re-migrated.
  if (existsSync(contextJsonPath)) {
    try {
      const existing = JSON.parse(readFileSync(contextJsonPath, 'utf8'));
      if (existing.version === 2) {
        console.log(` ✓ ${contextDir} — already v2, skipping`);
        skipped++;
        continue;
      }
    } catch { /* fall through to migrate */ }
  }

  console.log(`\n → Migrating: ${contextDir}`);

  try {
    // Read v1 files; both are optional (missing file → empty input).
    const contextMd = existsSync(contextMdPath) ? readFileSync(contextMdPath, 'utf8') : '';
    let meta = {};
    if (existsSync(metaPath)) {
      try { meta = JSON.parse(readFileSync(metaPath, 'utf8')); } catch {}
    }

    // Extract fields from CONTEXT.md
    const description = extractSection(contextMd, 'What This Is') || extractSection(contextMd, 'About') || null;
    const stackRaw = extractSection(contextMd, 'Tech Stack') || '';
    const stack = stackRaw.split(/[,\n]/).map(s => s.replace(/^[-*]\s+/, '').trim()).filter(Boolean);
    // Goal is only the first line of the "Current Goal" section.
    const goal = (extractSection(contextMd, 'Current Goal') || '').split('\n')[0].trim() || null;
    const constraintRaw = extractSection(contextMd, 'Do Not Do') || '';
    const constraints = parseBullets(constraintRaw);
    const decisionsRaw = extractSection(contextMd, 'Decisions Made') || '';
    const decisions = parseDecisionsTable(decisionsRaw);
    const nextStepsRaw = extractSection(contextMd, 'Next Steps') || '';
    const nextSteps = parseBullets(nextStepsRaw);
    const blockersRaw = extractSection(contextMd, 'Blockers') || '';
    // "None" placeholder bullets are not real blockers.
    const blockers = parseBullets(blockersRaw).filter(b => b.toLowerCase() !== 'none');
    const leftOffRaw = extractSection(contextMd, 'Where I Left Off') || '';
    const leftOffParsed = parseLeftOff(leftOffRaw);

    // Build sessions from parsed left-off blocks (may be multiple).
    // Only the most recent block inherits meta.json's session id/summary and
    // the next-steps/decisions/blockers lists; earlier blocks get synthetic
    // ids and empty lists.
    const sessions = leftOffParsed.map((lo, idx) => ({
      id: idx === leftOffParsed.length - 1 && meta.lastSessionId
        ? meta.lastSessionId.slice(0, 8)
        : shortId(),
      date: lo.date || meta.lastUpdated || today(),
      summary: idx === leftOffParsed.length - 1
        ? (meta.lastSessionSummary || 'Migrated from v1')
        : `Session ${idx + 1} (migrated)`,
      leftOff: lo.leftOff,
      nextSteps: idx === leftOffParsed.length - 1 ? nextSteps : [],
      decisions: idx === leftOffParsed.length - 1 ? decisions : [],
      blockers: idx === leftOffParsed.length - 1 ? blockers : [],
    }));

    // Assembled v2 document — the new single source of truth.
    const context = {
      version: 2,
      name: contextDir,
      path: meta.path || projectPath,
      description,
      stack,
      goal,
      constraints,
      repo: meta.repo || null,
      createdAt: meta.lastUpdated || today(),
      sessions,
    };

    if (isDryRun) {
      // Preview only — report what would be written, then move on.
      console.log(`    description: ${description?.slice(0, 60) || '(none)'}`);
      console.log(`    stack: ${stack.join(', ') || '(none)'}`);
      console.log(`    goal: ${goal?.slice(0, 60) || '(none)'}`);
      console.log(`    sessions: ${sessions.length}`);
      console.log(`    decisions: ${decisions.length}`);
      console.log(`    nextSteps: ${nextSteps.length}`);
      migrated++;
      continue;
    }

    // Backup meta.json — rename, never delete, so v1 data survives.
    if (existsSync(metaPath)) {
      renameSync(metaPath, resolve(contextDirPath, 'meta.json.v1-backup'));
    }

    // Write context.json + regenerated CONTEXT.md
    // NOTE(review): presumably saveContext (shared.mjs) writes both files —
    // confirm against shared.mjs.
    saveContext(contextDir, context);

    // Update projects.json entry (persisted once, after the loop).
    projects[projectPath].lastUpdated = today();

    console.log(` ✓ Migrated — ${sessions.length} session(s), ${decisions.length} decision(s)`);
    migrated++;
  } catch (e) {
    console.log(` ✗ Error: ${e.message}`);
    errors++;
  }
}

// Persist registry timestamps only when something was actually written.
if (!isDryRun && migrated > 0) {
  writeProjects(projects);
}

console.log(`\nck migrate: ${migrated} migrated, ${skipped} already v2, ${errors} errors`);
if (isDryRun) console.log('Run without --dry-run to apply.');
if (errors > 0) process.exit(1);
||||
36
skills/ck/commands/resume.mjs
Normal file
36
skills/ck/commands/resume.mjs
Normal file
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env node
/**
 * ck — Context Keeper v2
 * resume.mjs — full project briefing
 *
 * Usage: node resume.mjs [name|number]
 * stdout: bordered briefing box
 * exit 0: success   exit 1: not found
 */

import { existsSync } from 'fs';
import { resolveContext, renderBriefingBox } from './shared.mjs';

const selector = process.argv[2];
const workingDir = process.env.PWD || process.cwd();

const match = resolveContext(selector, workingDir);
if (!match) {
  const reason = selector
    ? `No project matching "${selector}".`
    : 'This directory is not registered.';
  console.log(`${reason} Run /ck:init to register it.`);
  process.exit(1);
}

const { context, projectPath } = match;

// Print a cd hint when the project lives elsewhere; this script cannot change
// the caller's working directory itself.
if (projectPath && projectPath !== workingDir) {
  if (existsSync(projectPath)) {
    console.log(`→ cd ${projectPath}`);
  } else {
    console.log(`⚠ Path not found: ${projectPath}`);
  }
}

console.log('');
console.log(renderBriefingBox(context));
|
||||
210
skills/ck/commands/save.mjs
Normal file
210
skills/ck/commands/save.mjs
Normal file
@@ -0,0 +1,210 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* save.mjs — write session data to context.json, regenerate CONTEXT.md,
|
||||
* and write a native memory entry.
|
||||
*
|
||||
* Usage (regular save):
|
||||
* echo '<json>' | node save.mjs
|
||||
* JSON schema: { summary, leftOff, nextSteps[], decisions[{what,why}], blockers[], goal? }
|
||||
*
|
||||
* Usage (init — first registration):
|
||||
* echo '<json>' | node save.mjs --init
|
||||
* JSON schema: { name, path, description, stack[], goal, constraints[], repo? }
|
||||
*
|
||||
* stdout: confirmation message
|
||||
* exit 0: success exit 1: error
|
||||
*/
|
||||
|
||||
import { readFileSync, mkdirSync, writeFileSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import {
|
||||
readProjects, writeProjects, loadContext, saveContext,
|
||||
today, shortId, gitSummary, nativeMemoryDir, encodeProjectPath,
|
||||
CONTEXTS_DIR, CURRENT_SESSION,
|
||||
} from './shared.mjs';
|
||||
|
||||
const isInit = process.argv.includes('--init');
|
||||
const cwd = process.env.PWD || process.cwd();
|
||||
|
||||
// ── Read JSON from stdin ──────────────────────────────────────────────────────
|
||||
let input;
|
||||
try {
|
||||
const raw = readFileSync(0, 'utf8').trim();
|
||||
if (!raw) throw new Error('empty stdin');
|
||||
input = JSON.parse(raw);
|
||||
} catch (e) {
|
||||
console.error(`ck save: invalid JSON on stdin — ${e.message}`);
|
||||
console.log('Expected schema (save): {"summary":"...","leftOff":"...","nextSteps":["..."],"decisions":[{"what":"...","why":"..."}],"blockers":["..."]}');
|
||||
console.log('Expected schema (--init): {"name":"...","path":"...","description":"...","stack":["..."],"goal":"...","constraints":["..."]}');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
// INIT MODE: first-time project registration
|
||||
// ─────────────────────────────────────────────────────────────────────────────
|
||||
if (isInit) {
|
||||
const { name, path: projectPath, description, stack, goal, constraints, repo } = input;
|
||||
|
||||
if (!name || !projectPath) {
|
||||
console.log('ck init: name and path are required.');
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const projects = readProjects();
|
||||
|
||||
// Derive contextDir (lowercase, spaces→dashes, deduplicate)
|
||||
let contextDir = name.toLowerCase().replace(/\s+/g, '-').replace(/[^a-z0-9-]/g, '');
|
||||
let suffix = 2;
|
||||
const existingDirs = Object.values(projects).map(p => p.contextDir);
|
||||
while (existingDirs.includes(contextDir) && projects[projectPath]?.contextDir !== contextDir) {
|
||||
contextDir = `${contextDir.replace(/-\d+$/, '')}-${suffix++}`;
|
||||
}
|
||||
|
||||
const context = {
|
||||
version: 2,
|
||||
name: contextDir,
|
||||
displayName: name,
|
||||
path: projectPath,
|
||||
description: description || null,
|
||||
stack: Array.isArray(stack) ? stack : (stack ? [stack] : []),
|
||||
goal: goal || null,
|
||||
constraints: Array.isArray(constraints) ? constraints : [],
|
||||
repo: repo || null,
|
||||
createdAt: today(),
|
||||
sessions: [],
|
||||
};
|
||||
|
||||
saveContext(contextDir, context);
|
||||
|
||||
// Update projects.json
|
||||
projects[projectPath] = {
|
||||
name,
|
||||
contextDir,
|
||||
lastUpdated: today(),
|
||||
};
|
||||
writeProjects(projects);
|
||||
|
||||
console.log(`✓ Project '${name}' registered.`);
|
||||
console.log(` Use /ck:save to save session state and /ck:resume to reload it next time.`);
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// ─────────────────────────────────────────────────────────────────────────────
// SAVE MODE: record a session
// `input` (parsed command payload) and `cwd` are established earlier in this
// script, above this section.
// ─────────────────────────────────────────────────────────────────────────────
const projects = readProjects();
const projectEntry = projects[cwd];

// Saving only makes sense for a directory that /ck:init has registered.
if (!projectEntry) {
  console.log("This project isn't registered yet. Run /ck:init first.");
  process.exit(1);
}

const { contextDir } = projectEntry;
let context = loadContext(contextDir);

// projects.json points at a context dir whose context.json is gone/unreadable.
if (!context) {
  console.log(`ck: context.json not found for '${contextDir}'. The install may be corrupted.`);
  process.exit(1);
}

// Get session ID from current-session.json (written by the session-start
// hook); fall back to a fresh random ID when it is missing or malformed.
let sessionId;
try {
  const sess = JSON.parse(readFileSync(CURRENT_SESSION, 'utf8'));
  sessionId = sess.sessionId || shortId();
} catch {
  sessionId = shortId();
}

// Check for duplicate (re-save of same session)
const existingIdx = context.sessions.findIndex(s => s.id === sessionId);

const { summary, leftOff, nextSteps, decisions, blockers, goal } = input;

// Capture git activity since the last session
const lastSessionDate = context.sessions?.[context.sessions.length - 1]?.date;
const gitActivity = gitSummary(cwd, lastSessionDate);

// Normalize loosely-typed input fields: scalars become one-element arrays,
// missing values become null/[].
const session = {
  id: sessionId,
  date: today(),
  summary: summary || 'Session saved',
  leftOff: leftOff || null,
  nextSteps: Array.isArray(nextSteps) ? nextSteps : (nextSteps ? [nextSteps] : []),
  decisions: Array.isArray(decisions) ? decisions : [],
  blockers: Array.isArray(blockers) ? blockers.filter(Boolean) : [],
  ...(gitActivity ? { gitActivity } : {}),
};

if (existingIdx >= 0) {
  // Update existing session (re-save)
  context.sessions[existingIdx] = session;
} else {
  context.sessions.push(session);
}

// Update goal if provided
if (goal && goal !== context.goal) {
  context.goal = goal;
}

// Save context.json + regenerate CONTEXT.md
saveContext(contextDir, context);

// Update projects.json timestamp
projects[cwd].lastUpdated = today();
writeProjects(projects);

// ── Write to native memory ────────────────────────────────────────────────────
// Mirrors the session into Claude Code's per-project memory folder so it is
// picked up outside of ck as well.
try {
  const memDir = nativeMemoryDir(cwd);
  mkdirSync(memDir, { recursive: true });

  const memFile = resolve(memDir, `ck_${today()}_${sessionId.slice(0, 8)}.md`);
  const decisionsBlock = session.decisions.length
    ? session.decisions.map(d => `- **${d.what}**: ${d.why || ''}`).join('\n')
    : '- None this session';
  const nextBlock = session.nextSteps.length
    ? session.nextSteps.map((s, i) => `${i + 1}. ${s}`).join('\n')
    : '- None recorded';
  const blockersBlock = session.blockers.length
    ? session.blockers.map(b => `- ${b}`).join('\n')
    : '- None';

  // Markdown with YAML front matter, matching the native memory file format.
  const memContent = [
    `---`,
    `name: Session ${today()} — ${session.summary}`,
    `description: Key decisions and outcomes from ck session ${sessionId.slice(0, 8)}`,
    `type: project`,
    `source: ck`,
    `sessionId: ${sessionId}`,
    `---`,
    ``,
    `# Session: ${session.summary}`,
    ``,
    `## Decisions`,
    decisionsBlock,
    ``,
    `## Left Off`,
    session.leftOff || '—',
    ``,
    `## Next Steps`,
    nextBlock,
    ``,
    `## Blockers`,
    blockersBlock,
    ``,
    ...(gitActivity ? [`## Git Activity`, gitActivity, ``] : []),
  ].join('\n');

  writeFileSync(memFile, memContent, 'utf8');
} catch (e) {
  // Non-fatal — native memory write failure should not block the save
  process.stderr.write(`ck: warning — could not write native memory entry: ${e.message}\n`);
}

console.log(`✓ Saved. Session: ${sessionId.slice(0, 8)}`);
if (gitActivity) console.log(` Git: ${gitActivity}`);
console.log(` See you next time.`);
|
||||
387
skills/ck/commands/shared.mjs
Normal file
387
skills/ck/commands/shared.mjs
Normal file
@@ -0,0 +1,387 @@
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* shared.mjs — common utilities for all command scripts
|
||||
*
|
||||
* No external dependencies. Node.js stdlib only.
|
||||
*/
|
||||
|
||||
import { readFileSync, writeFileSync, existsSync, mkdirSync, readdirSync } from 'fs';
|
||||
import { resolve, basename } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import { spawnSync } from 'child_process';
|
||||
import { randomBytes } from 'crypto';
|
||||
|
||||
// ─── Paths ────────────────────────────────────────────────────────────────────
|
||||
|
||||
// All ck state lives under ~/.claude/ck.
export const CK_HOME = resolve(homedir(), '.claude', 'ck');
// One sub-directory per registered project, each holding context.json + CONTEXT.md.
export const CONTEXTS_DIR = resolve(CK_HOME, 'contexts');
// Registry mapping absolute project path -> { name, contextDir, lastUpdated }.
export const PROJECTS_FILE = resolve(CK_HOME, 'projects.json');
// Metadata for the session in progress (written by the session-start hook).
export const CURRENT_SESSION = resolve(CK_HOME, 'current-session.json');
// The ck skill definition injected at session start.
export const SKILL_FILE = resolve(homedir(), '.claude', 'skills', 'ck', 'SKILL.md');
|
||||
|
||||
// ─── JSON I/O ─────────────────────────────────────────────────────────────────
|
||||
|
||||
export function readJson(filePath) {
  // A missing file and malformed JSON are both reported as "no data" (null).
  if (!existsSync(filePath)) return null;
  try {
    const raw = readFileSync(filePath, 'utf8');
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
|
||||
|
||||
export function writeJson(filePath, data) {
  // Create the parent directory on demand, then write pretty-printed JSON
  // terminated by a newline.
  mkdirSync(resolve(filePath, '..'), { recursive: true });
  const serialized = `${JSON.stringify(data, null, 2)}\n`;
  writeFileSync(filePath, serialized, 'utf8');
}
|
||||
|
||||
export function readProjects() {
  // A missing or unreadable registry behaves as an empty one.
  const registry = readJson(PROJECTS_FILE);
  return registry || {};
}
|
||||
|
||||
export function writeProjects(projects) {
  // Registry writes go through writeJson so the ck directory always exists.
  writeJson(PROJECTS_FILE, projects);
}
|
||||
|
||||
// ─── Context I/O ──────────────────────────────────────────────────────────────
|
||||
|
||||
export function contextPath(contextDir) {
  // Machine-readable context file for one project.
  const dir = resolve(CONTEXTS_DIR, contextDir);
  return resolve(dir, 'context.json');
}
|
||||
|
||||
export function contextMdPath(contextDir) {
  // Human-readable companion of context.json.
  const dir = resolve(CONTEXTS_DIR, contextDir);
  return resolve(dir, 'CONTEXT.md');
}
|
||||
|
||||
export function loadContext(contextDir) {
  // Null when the context file is missing or corrupt (readJson semantics).
  const file = contextPath(contextDir);
  return readJson(file);
}
|
||||
|
||||
export function saveContext(contextDir, data) {
  // Persist context.json and regenerate the human-readable CONTEXT.md next to it.
  const targetDir = resolve(CONTEXTS_DIR, contextDir);
  mkdirSync(targetDir, { recursive: true });
  writeJson(contextPath(contextDir), data);
  const markdown = renderContextMd(data);
  writeFileSync(contextMdPath(contextDir), markdown, 'utf8');
}
|
||||
|
||||
/**
 * Resolve which project to operate on.
 * @param {string|undefined} arg — undefined = cwd match, number string = alphabetical index, else name search
 * @param {string} cwd
 * @returns {{ name, contextDir, projectPath, context } | null}
 */
export function resolveContext(arg, cwd) {
  const projects = readProjects();

  // Attach the loaded context; null when context.json is missing/corrupt.
  const hydrate = (name, contextDir, projectPath) => {
    const context = loadContext(contextDir);
    return context ? { name, contextDir, projectPath, context } : null;
  };

  if (!arg) {
    // No argument: use the project registered for the current directory.
    const entry = projects[cwd];
    return entry ? hydrate(entry.name, entry.contextDir, cwd) : null;
  }

  // All registered projects, alphabetical by context directory name.
  const sorted = Object.entries(projects)
    .map(([path, info]) => ({ path, ...info }))
    .sort((a, b) => a.contextDir.localeCompare(b.contextDir));

  // A purely numeric argument is a 1-based index into the sorted list.
  const asNumber = parseInt(arg, 10);
  if (!Number.isNaN(asNumber) && String(asNumber) === arg) {
    const item = sorted[asNumber - 1];
    return item ? hydrate(item.name, item.contextDir, item.path) : null;
  }

  // Otherwise search by name: exact, then prefix, then substring (case-insensitive).
  const needle = arg.toLowerCase();
  const match =
    sorted.find(e => e.name.toLowerCase() === needle) ||
    sorted.find(e => e.name.toLowerCase().startsWith(needle)) ||
    sorted.find(e => e.name.toLowerCase().includes(needle));

  return match ? hydrate(match.name, match.contextDir, match.path) : null;
}
|
||||
|
||||
// ─── Date helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
export function today() {
  // Current UTC calendar date, e.g. "2026-01-31".
  const [isoDate] = new Date().toISOString().split('T');
  return isoDate;
}
|
||||
|
||||
export function daysAgoLabel(dateStr) {
  // Human label for how long ago `dateStr` was; 'unknown' when absent.
  if (!dateStr) return 'unknown';
  const elapsedDays = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  switch (elapsedDays) {
    case 0:
      return 'Today';
    case 1:
      return '1 day ago';
    default:
      return `${elapsedDays} days ago`;
  }
}
|
||||
|
||||
export function stalenessIcon(dateStr) {
  // ● seen today · ◐ within 5 days · ○ stale or unknown.
  if (!dateStr) return '○';
  const ageDays = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  if (ageDays < 1) return '●';
  return ageDays <= 5 ? '◐' : '○';
}
|
||||
|
||||
// ─── ID generation ────────────────────────────────────────────────────────────
|
||||
|
||||
export function shortId() {
  // 4 random bytes → 8 lowercase hex characters.
  const bytes = randomBytes(4);
  return bytes.toString('hex');
}
|
||||
|
||||
// ─── Git helpers ──────────────────────────────────────────────────────────────
|
||||
|
||||
function runGit(args, cwd) {
  // Run `git -C <cwd> <args>`; null on any failure (missing binary,
  // non-zero exit status, or the 3s timeout).
  let result;
  try {
    result = spawnSync('git', ['-C', cwd, ...args], {
      timeout: 3000,
      stdio: 'pipe',
      encoding: 'utf8',
    });
  } catch {
    return null;
  }
  return result.status === 0 ? result.stdout.trim() : null;
}
|
||||
|
||||
export function gitLogSince(projectPath, sinceDate) {
  // Without a reference date there is nothing to log against.
  if (!sinceDate) return null;
  const logArgs = ['log', '--oneline', `--since=${sinceDate}`];
  return runGit(logArgs, projectPath);
}
|
||||
|
||||
/**
 * Summarize git activity in `projectPath` since `sinceDate` as a short
 * human string, e.g. "3 commits, 5 files changed".
 * Returns null when there is no activity or git info is unavailable.
 */
export function gitSummary(projectPath, sinceDate) {
  const log = gitLogSince(projectPath, sinceDate);
  if (!log) return null;
  const commits = log.split('\n').filter(Boolean).length;
  if (commits === 0) return null;

  // Count unique files changed: use a separate runGit call to avoid nested shell substitution.
  // Fall back to the log-line count when rev-list fails or yields non-numeric
  // output (previously a NaN here produced an invalid `HEAD~NaN..HEAD` range).
  const countStr = runGit(['rev-list', '--count', 'HEAD', `--since=${sinceDate}`], projectPath);
  const parsedCount = countStr ? Number.parseInt(countStr, 10) : NaN;
  const revCount = Number.isNaN(parsedCount) ? commits : parsedCount;

  // Cap the diff range at 50 commits; if HEAD~N does not exist (short or
  // shallow history) runGit returns null and we fall back to commits alone.
  const diff = runGit(['diff', '--shortstat', `HEAD~${Math.min(revCount, 50)}..HEAD`], projectPath);

  const commitPart = `${commits} commit${commits !== 1 ? 's' : ''}`;
  if (diff) {
    const filesMatch = diff.match(/(\d+) file/);
    // Radix 10 added — the original parseInt call had no radix.
    const files = filesMatch ? Number.parseInt(filesMatch[1], 10) : '?';
    return `${commitPart}, ${files} file${files !== 1 ? 's' : ''} changed`;
  }
  return commitPart;
}
|
||||
|
||||
// ─── Native memory path encoding ──────────────────────────────────────────────
|
||||
|
||||
export function encodeProjectPath(absolutePath) {
  // "/Users/sree/dev/app" -> "-Users-sree-dev-app"
  return absolutePath.split('/').join('-');
}
|
||||
|
||||
export function nativeMemoryDir(absolutePath) {
  // Claude Code's per-project memory folder: ~/.claude/projects/<encoded>/memory
  const slug = encodeProjectPath(absolutePath);
  return resolve(homedir(), '.claude', 'projects', slug, 'memory');
}
|
||||
|
||||
// ─── Rendering ────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Render the human-readable CONTEXT.md from context.json.
 * Pure string builder: takes the parsed context object, returns markdown.
 * Every section is always present; empty data renders a placeholder line.
 */
export function renderContextMd(ctx) {
  // Most recent session drives "Where I Left Off" / "Next Steps" / "Blockers".
  const latest = ctx.sessions?.[ctx.sessions.length - 1] || null;
  const lines = [
    `<!-- Generated by ck v2 — edit context.json instead -->`,
    `# Project: ${ctx.displayName ?? ctx.name}`,
    `> Path: ${ctx.path}`,
  ];
  if (ctx.repo) lines.push(`> Repo: ${ctx.repo}`);
  const sessionCount = ctx.sessions?.length || 0;
  lines.push(`> Last Session: ${ctx.sessions?.[sessionCount - 1]?.date || 'never'} | Sessions: ${sessionCount}`);
  lines.push(``);
  lines.push(`## What This Is`);
  lines.push(ctx.description || '_Not set._');
  lines.push(``);
  lines.push(`## Tech Stack`);
  // Stack may be stored as an array or a plain string.
  lines.push(Array.isArray(ctx.stack) ? ctx.stack.join(', ') : (ctx.stack || '_Not set._'));
  lines.push(``);
  lines.push(`## Current Goal`);
  lines.push(ctx.goal || '_Not set._');
  lines.push(``);
  lines.push(`## Where I Left Off`);
  lines.push(latest?.leftOff || '_Not yet recorded. Run /ck:save after your first session._');
  lines.push(``);
  lines.push(`## Next Steps`);
  if (latest?.nextSteps?.length) {
    latest.nextSteps.forEach((s, i) => lines.push(`${i + 1}. ${s}`));
  } else {
    lines.push(`_Not yet recorded._`);
  }
  lines.push(``);
  lines.push(`## Blockers`);
  if (latest?.blockers?.length) {
    latest.blockers.forEach(b => lines.push(`- ${b}`));
  } else {
    lines.push(`- None`);
  }
  lines.push(``);
  lines.push(`## Do Not Do`);
  if (ctx.constraints?.length) {
    ctx.constraints.forEach(c => lines.push(`- ${c}`));
  } else {
    lines.push(`- None specified`);
  }
  lines.push(``);

  // All decisions across sessions, flattened with each session's date attached.
  const allDecisions = (ctx.sessions || []).flatMap(s =>
    (s.decisions || []).map(d => ({ ...d, date: s.date }))
  );
  lines.push(`## Decisions Made`);
  lines.push(`| Decision | Why | Date |`);
  lines.push(`|----------|-----|------|`);
  if (allDecisions.length) {
    allDecisions.forEach(d => lines.push(`| ${d.what} | ${d.why || ''} | ${d.date || ''} |`));
  } else {
    lines.push(`| _(none yet)_ | | |`);
  }
  lines.push(``);

  // Session history (most recent first) — only once there is history beyond
  // the latest session already summarized above.
  if (ctx.sessions?.length > 1) {
    lines.push(`## Session History`);
    const reversed = [...ctx.sessions].reverse();
    reversed.forEach(s => {
      lines.push(`### ${s.date} — ${s.summary || 'Session'}`);
      if (s.gitActivity) lines.push(`_${s.gitActivity}_`);
      if (s.leftOff) lines.push(`**Left off:** ${s.leftOff}`);
    });
    lines.push(``);
  }

  return lines.join('\n');
}
|
||||
|
||||
/**
 * Render the bordered briefing box used by /ck:resume.
 * Pure string builder over the context object.
 * NOTE(review): `meta` is accepted but never read — confirm whether callers
 * still pass it before removing.
 */
export function renderBriefingBox(ctx, meta = {}) {
  const latest = ctx.sessions?.[ctx.sessions.length - 1] || {};
  // Inner content width of the box, in characters.
  const W = 57;
  // Truncate-with-ellipsis or right-pad to exactly `w` characters.
  const pad = (str, w) => {
    const s = String(str || '');
    return s.length > w ? s.slice(0, w - 1) + '…' : s.padEnd(w);
  };
  // One "LABEL → value" body row; the pad width accounts for the fixed glyphs.
  const row = (label, value) => `│ ${label} → ${pad(value, W - label.length - 7)}│`;

  const when = daysAgoLabel(ctx.sessions?.[ctx.sessions.length - 1]?.date);
  const sessions = ctx.sessions?.length || 0;
  const shortSessId = latest.id?.slice(0, 8) || null;

  const lines = [
    `┌${'─'.repeat(W)}┐`,
    `│ RESUMING: ${pad(ctx.displayName ?? ctx.name, W - 12)}│`,
    `│ Last session: ${pad(`${when} | Sessions: ${sessions}`, W - 16)}│`,
  ];
  if (shortSessId) lines.push(`│ Session ID: ${pad(shortSessId, W - 14)}│`);
  lines.push(`├${'─'.repeat(W)}┤`);
  lines.push(row('WHAT IT IS', ctx.description || '—'));
  lines.push(row('STACK ', Array.isArray(ctx.stack) ? ctx.stack.join(', ') : (ctx.stack || '—')));
  lines.push(row('PATH ', ctx.path));
  if (ctx.repo) lines.push(row('REPO ', ctx.repo));
  lines.push(row('GOAL ', ctx.goal || '—'));
  lines.push(`├${'─'.repeat(W)}┤`);
  lines.push(`│ WHERE I LEFT OFF${' '.repeat(W - 18)}│`);
  // Multi-line leftOff text becomes one bullet per non-empty line.
  const leftOffLines = (latest.leftOff || '—').split('\n').filter(Boolean);
  leftOffLines.forEach(l => lines.push(`│ • ${pad(l, W - 7)}│`));
  lines.push(`├${'─'.repeat(W)}┤`);
  lines.push(`│ NEXT STEPS${' '.repeat(W - 12)}│`);
  const steps = latest.nextSteps || [];
  if (steps.length) {
    steps.forEach((s, i) => lines.push(`│ ${i + 1}. ${pad(s, W - 8)}│`));
  } else {
    lines.push(`│ —${' '.repeat(W - 5)}│`);
  }
  const blockers = latest.blockers?.length ? latest.blockers.join(', ') : 'None';
  lines.push(`│ BLOCKERS → ${pad(blockers, W - 13)}│`);
  if (latest.gitActivity) {
    lines.push(`│ GIT → ${pad(latest.gitActivity, W - 13)}│`);
  }
  lines.push(`└${'─'.repeat(W)}┘`);
  return lines.join('\n');
}
|
||||
|
||||
/**
 * Render compact info block used by /ck:info.
 * Pure string builder over the context object; plain text, no borders.
 */
export function renderInfoBlock(ctx) {
  const latest = ctx.sessions?.[ctx.sessions.length - 1] || {};
  const sep = '─'.repeat(44);
  const lines = [
    `ck: ${ctx.displayName ?? ctx.name}`,
    sep,
  ];
  lines.push(`PATH ${ctx.path}`);
  if (ctx.repo) lines.push(`REPO ${ctx.repo}`);
  if (latest.id) lines.push(`SESSION ${latest.id.slice(0, 8)}`);
  lines.push(`GOAL ${ctx.goal || '—'}`);
  lines.push(sep);
  lines.push(`WHERE I LEFT OFF`);
  // One bullet per non-empty line of the saved leftOff text.
  (latest.leftOff || '—').split('\n').filter(Boolean).forEach(l => lines.push(` • ${l}`));
  lines.push(`NEXT STEPS`);
  (latest.nextSteps || []).forEach((s, i) => lines.push(` ${i + 1}. ${s}`));
  if (!latest.nextSteps?.length) lines.push(` —`);
  lines.push(`BLOCKERS`);
  if (latest.blockers?.length) {
    latest.blockers.forEach(b => lines.push(` • ${b}`));
  } else {
    lines.push(` • None`);
  }
  return lines.join('\n');
}
|
||||
|
||||
/**
 * Render ASCII list table used by /ck:list.
 * Column widths are computed from the data so the table stays aligned.
 * NOTE(review): `todayStr` is currently unused — confirm whether callers
 * still pass it before removing.
 */
export function renderListTable(entries, cwd, todayStr) {
  // entries: [{name, contextDir, path, context, lastUpdated}]
  // Sorted alphabetically by contextDir before calling
  const rows = entries.map((e, i) => {
    // Mark the row for the directory the user is currently in.
    const isHere = e.path === cwd;
    const latest = e.context?.sessions?.[e.context.sessions.length - 1] || {};
    const when = daysAgoLabel(latest.date);
    const icon = stalenessIcon(latest.date);
    const statusLabel = icon === '●' ? '● Active' : icon === '◐' ? '◐ Warm' : '○ Stale';
    const sessId = latest.id ? latest.id.slice(0, 8) : '—';
    const summary = (latest.summary || '—').slice(0, 34);
    const displayName = ((e.context?.displayName ?? e.name) + (isHere ? ' <-' : '')).slice(0, 18);
    return {
      num: String(i + 1),
      name: displayName,
      status: statusLabel,
      when: when.slice(0, 10),
      sessId,
      summary,
    };
  });

  // Each column is at least as wide as its header, and grows to fit the data.
  const cols = {
    num: Math.max(1, ...rows.map(r => r.num.length)),
    name: Math.max(7, ...rows.map(r => r.name.length)),
    status: Math.max(6, ...rows.map(r => r.status.length)),
    when: Math.max(9, ...rows.map(r => r.when.length)),
    sessId: Math.max(7, ...rows.map(r => r.sessId.length)),
    summary: Math.max(12, ...rows.map(r => r.summary.length)),
  };

  // +2 per column for the single space of padding on each side of a cell.
  const hr = `+${'-'.repeat(cols.num + 2)}+${'-'.repeat(cols.name + 2)}+${'-'.repeat(cols.status + 2)}+${'-'.repeat(cols.when + 2)}+${'-'.repeat(cols.sessId + 2)}+${'-'.repeat(cols.summary + 2)}+`;
  const cell = (val, width) => ` ${val.padEnd(width)} `;
  const headerRow = `|${cell('#', cols.num)}|${cell('Project', cols.name)}|${cell('Status', cols.status)}|${cell('Last Seen', cols.when)}|${cell('Session', cols.sessId)}|${cell('Last Summary', cols.summary)}|`;

  const dataRows = rows.map(r =>
    `|${cell(r.num, cols.num)}|${cell(r.name, cols.name)}|${cell(r.status, cols.status)}|${cell(r.when, cols.when)}|${cell(r.sessId, cols.sessId)}|${cell(r.summary, cols.summary)}|`
  );

  return [hr, headerRow, hr, ...dataRows, hr].join('\n');
}
|
||||
224
skills/ck/hooks/session-start.mjs
Normal file
224
skills/ck/hooks/session-start.mjs
Normal file
@@ -0,0 +1,224 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* ck — Context Keeper v2
|
||||
* session-start.mjs — inject compact project context on session start.
|
||||
*
|
||||
* Injects ~100 tokens (not ~2,500 like v1).
|
||||
* SKILL.md is injected separately (still small at ~50 lines).
|
||||
*
|
||||
* Features:
|
||||
* - Compact 5-line summary for registered projects
|
||||
* - Unsaved session detection → "Last session wasn't saved. Run /ck:save."
|
||||
* - Git activity since last session
|
||||
* - Goal mismatch detection vs CLAUDE.md
|
||||
* - Mini portfolio for unregistered directories
|
||||
*/
|
||||
|
||||
import { readFileSync, writeFileSync, existsSync } from 'fs';
|
||||
import { resolve } from 'path';
|
||||
import { homedir } from 'os';
|
||||
import { spawnSync } from 'child_process';
|
||||
|
||||
// Shared ck state directory (same layout as skills/ck/commands/shared.mjs).
const CK_HOME = resolve(homedir(), '.claude', 'ck');
// Registry mapping absolute project path -> { name, contextDir, lastUpdated }.
const PROJECTS_FILE = resolve(CK_HOME, 'projects.json');
// Written below on every session start; read back on the next start to
// detect sessions that were never saved.
const CURRENT_SESSION = resolve(CK_HOME, 'current-session.json');
// SKILL.md body injected verbatim into the session context.
const SKILL_FILE = resolve(homedir(), '.claude', 'skills', 'ck', 'SKILL.md');
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
function readJson(p) {
  // Null for anything unreadable or unparsable.
  try {
    const raw = readFileSync(p, 'utf8');
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
|
||||
|
||||
function daysAgo(dateStr) {
  // Human label for elapsed time; lowercase "today" (unlike shared.mjs).
  if (!dateStr) return 'unknown';
  const elapsed = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  switch (elapsed) {
    case 0:
      return 'today';
    case 1:
      return '1 day ago';
    default:
      return `${elapsed} days ago`;
  }
}
|
||||
|
||||
function stalenessIcon(dateStr) {
  // ● seen today · ◐ within 5 days · ○ stale or unknown.
  if (!dateStr) return '○';
  const age = Math.floor((Date.now() - new Date(dateStr)) / 86_400_000);
  if (age < 1) return '●';
  if (age <= 5) return '◐';
  return '○';
}
|
||||
|
||||
function gitLogSince(projectPath, sinceDate) {
  // Only meaningful with a reference date and an actual git checkout.
  if (!sinceDate) return null;
  if (!existsSync(resolve(projectPath, '.git'))) return null;
  try {
    const args = ['-C', projectPath, 'log', '--oneline', `--since=${sinceDate}`];
    const result = spawnSync('git', args, { timeout: 3000, stdio: 'pipe', encoding: 'utf8' });
    if (result.status !== 0) return null;
    const commitCount = result.stdout.trim().split('\n').filter(Boolean).length;
    if (commitCount === 0) return null;
    return `${commitCount} commit${commitCount !== 1 ? 's' : ''} since last session`;
  } catch {
    return null;
  }
}
|
||||
|
||||
/**
 * Pull the first line of the "## Current Goal" section from the project's
 * CLAUDE.md, or null when the file or section is absent.
 * Now tolerates CRLF line endings and trailing whitespace after the heading
 * (the previous regex required a bare LF immediately after the heading).
 */
function extractClaudeMdGoal(projectPath) {
  const p = resolve(projectPath, 'CLAUDE.md');
  if (!existsSync(p)) return null;
  try {
    const md = readFileSync(p, 'utf8');
    const m = md.match(/## Current Goal[ \t]*\r?\n([\s\S]*?)(?=\r?\n## |$)/);
    if (!m) return null;
    // First line of the section body, with any stray \r stripped.
    const firstLine = m[1].trim().split(/\r?\n/)[0];
    return firstLine.trim();
  } catch { return null; }
}
|
||||
|
||||
// ─── Session ID from stdin ────────────────────────────────────────────────────
|
||||
|
||||
function readSessionId() {
  // The hook payload arrives on stdin as JSON; null when absent or malformed.
  try {
    const payload = JSON.parse(readFileSync(0, 'utf8'));
    return payload.session_id || null;
  } catch {
    return null;
  }
}
|
||||
|
||||
// ─── Main ─────────────────────────────────────────────────────────────────────
|
||||
|
||||
/**
 * Build the list of context strings to inject at session start.
 * Returns an array of markdown "parts"; the caller joins and emits them as
 * the hook's `additionalContext`. Side effect: rewrites current-session.json.
 */
function main() {
  // Prefer $PWD (the shell's logical path, when set) over process.cwd().
  const cwd = process.env.PWD || process.cwd();
  const sessionId = readSessionId();

  // Load skill (always inject — now only ~50 lines)
  const skill = existsSync(SKILL_FILE) ? readFileSync(SKILL_FILE, 'utf8') : '';

  const projects = readJson(PROJECTS_FILE) || {};
  const entry = projects[cwd];

  // Read previous session BEFORE overwriting current-session.json
  const prevSession = readJson(CURRENT_SESSION);

  // Write current-session.json
  try {
    writeFileSync(CURRENT_SESSION, JSON.stringify({
      sessionId,
      projectPath: cwd,
      projectName: entry?.name || null,
      startedAt: new Date().toISOString(),
    }, null, 2), 'utf8');
  } catch { /* non-fatal */ }

  const parts = [];
  if (skill) parts.push(skill);

  // ── REGISTERED PROJECT ────────────────────────────────────────────────────
  if (entry?.contextDir) {
    const contextFile = resolve(CK_HOME, 'contexts', entry.contextDir, 'context.json');
    const context = readJson(contextFile);

    if (context) {
      const latest = context.sessions?.[context.sessions.length - 1] || {};
      const sessionDate = latest.date || context.createdAt;
      const sessionCount = context.sessions?.length || 0;
      const displayName = context.displayName ?? context.name;

      // ── Compact summary block (~100 tokens) ──────────────────────────────
      // Nulls are filtered out so optional lines simply disappear.
      const summaryLines = [
        `ck: ${displayName} | ${daysAgo(sessionDate)} | ${sessionCount} session${sessionCount !== 1 ? 's' : ''}`,
        `Goal: ${context.goal || '—'}`,
        latest.leftOff ? `Left off: ${latest.leftOff.split('\n')[0]}` : null,
        latest.nextSteps?.length ? `Next: ${latest.nextSteps.slice(0, 2).join(' · ')}` : null,
      ].filter(Boolean);

      // ── Unsaved session detection ─────────────────────────────────────────
      if (prevSession?.sessionId && prevSession.sessionId !== sessionId) {
        // Check if previous session ID exists in sessions array
        const alreadySaved = context.sessions?.some(s => s.id === prevSession.sessionId);
        if (!alreadySaved) {
          summaryLines.push(`⚠ Last session wasn't saved — run /ck:save to capture it`);
        }
      }

      // ── Git activity ──────────────────────────────────────────────────────
      const gitLine = gitLogSince(cwd, sessionDate);
      if (gitLine) summaryLines.push(`Git: ${gitLine}`);

      // ── Goal mismatch detection ───────────────────────────────────────────
      // Warn when CLAUDE.md's "## Current Goal" diverges from ck's goal.
      const claudeMdGoal = extractClaudeMdGoal(cwd);
      if (claudeMdGoal && context.goal &&
          claudeMdGoal.toLowerCase().trim() !== context.goal.toLowerCase().trim()) {
        summaryLines.push(`⚠ Goal mismatch — ck: "${context.goal.slice(0, 40)}" · CLAUDE.md: "${claudeMdGoal.slice(0, 40)}"`);
        summaryLines.push(` Run /ck:save with updated goal to sync`);
      }

      parts.push([
        `---`,
        `## ck: ${displayName}`,
        ``,
        summaryLines.join('\n'),
      ].join('\n'));

      // Instruct Claude to display compact briefing at session start
      parts.push([
        `---`,
        `## ck: SESSION START`,
        ``,
        `IMPORTANT: Display the following as your FIRST message, verbatim:`,
        ``,
        '```',
        summaryLines.join('\n'),
        '```',
        ``,
        `After the block, add one line: "Ready — what are we working on?"`,
        `If you see ⚠ warnings above, mention them briefly after the block.`,
      ].join('\n'));

      return parts;
    }
  }

  // ── NOT IN A REGISTERED PROJECT ────────────────────────────────────────────
  const entries = Object.entries(projects);
  if (entries.length === 0) return parts;

  // Load and sort by most recent
  // NOTE(review): the comparator never returns 0, so equal dates keep an
  // arbitrary relative order; newest-first otherwise.
  const recent = entries
    .map(([path, info]) => {
      const ctx = readJson(resolve(CK_HOME, 'contexts', info.contextDir, 'context.json'));
      const latest = ctx?.sessions?.[ctx.sessions.length - 1] || {};
      return { name: info.name, path, lastDate: latest.date || '', summary: latest.summary || '—', ctx };
    })
    .sort((a, b) => (b.lastDate > a.lastDate ? 1 : -1))
    .slice(0, 3);

  // Fixed-width mini table rows for up to three recent projects.
  const miniRows = recent.map(p => {
    const icon = stalenessIcon(p.lastDate);
    const when = daysAgo(p.lastDate);
    const name = p.name.padEnd(16).slice(0, 16);
    const whenStr = when.padEnd(12).slice(0, 12);
    const summary = p.summary.slice(0, 32);
    return ` ${name} ${icon} ${whenStr} ${summary}`;
  });

  const miniStatus = [
    `ck — recent projects:`,
    ` ${'PROJECT'.padEnd(16)} S ${'LAST SEEN'.padEnd(12)} LAST SESSION`,
    ` ${'─'.repeat(68)}`,
    ...miniRows,
    ``,
    `Run /ck:list · /ck:resume <name> · /ck:init to register this folder`,
  ].join('\n');

  parts.push([
    `---`,
    `## ck: SESSION START`,
    ``,
    `IMPORTANT: Display the following as your FIRST message, verbatim:`,
    ``,
    '```',
    miniStatus,
    '```',
  ].join('\n'));

  return parts;
}
|
||||
|
||||
// Emit hook output only when there is something to inject; an empty stdout
// leaves the session untouched.
const sections = main();
if (sections.length !== 0) {
  const payload = { additionalContext: sections.join('\n\n---\n\n') };
  console.log(JSON.stringify(payload));
}
|
||||
@@ -36,7 +36,11 @@ PYTHON_CMD="${CLV2_PYTHON_CMD:-}"
|
||||
# ─────────────────────────────────────────────
|
||||
|
||||
CONFIG_DIR="${HOME}/.claude/homunculus"
|
||||
CONFIG_FILE="${SKILL_ROOT}/config.json"
|
||||
if [ -n "${CLV2_CONFIG:-}" ]; then
|
||||
CONFIG_FILE="$CLV2_CONFIG"
|
||||
else
|
||||
CONFIG_FILE="${SKILL_ROOT}/config.json"
|
||||
fi
|
||||
# PID file is project-scoped so each project can have its own observer
|
||||
PID_FILE="${PROJECT_DIR}/.observer.pid"
|
||||
LOG_FILE="${PROJECT_DIR}/observer.log"
|
||||
|
||||
716
skills/git-workflow/SKILL.md
Normal file
716
skills/git-workflow/SKILL.md
Normal file
@@ -0,0 +1,716 @@
|
||||
---
|
||||
name: git-workflow
|
||||
description: Git workflow patterns including branching strategies, commit conventions, merge vs rebase, conflict resolution, and collaborative development best practices for teams of all sizes.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Git Workflow Patterns
|
||||
|
||||
Best practices for Git version control, branching strategies, and collaborative development.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Setting up Git workflow for a new project
|
||||
- Deciding on branching strategy (GitFlow, trunk-based, GitHub flow)
|
||||
- Writing commit messages and PR descriptions
|
||||
- Resolving merge conflicts
|
||||
- Managing releases and version tags
|
||||
- Onboarding new team members to Git practices
|
||||
|
||||
## Branching Strategies
|
||||
|
||||
### GitHub Flow (Simple, Recommended for Most)
|
||||
|
||||
Best for continuous deployment and small-to-medium teams.
|
||||
|
||||
```
|
||||
main (protected, always deployable)
|
||||
│
|
||||
├── feature/user-auth → PR → merge to main
|
||||
├── feature/payment-flow → PR → merge to main
|
||||
└── fix/login-bug → PR → merge to main
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- `main` is always deployable
|
||||
- Create feature branches from `main`
|
||||
- Open Pull Request when ready for review
|
||||
- After approval and CI passes, merge to `main`
|
||||
- Deploy immediately after merge
|
||||
|
||||
### Trunk-Based Development (High-Velocity Teams)
|
||||
|
||||
Best for teams with strong CI/CD and feature flags.
|
||||
|
||||
```
|
||||
main (trunk)
|
||||
│
|
||||
├── short-lived feature (1-2 days max)
|
||||
├── short-lived feature
|
||||
└── short-lived feature
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- Everyone commits to `main` or very short-lived branches
|
||||
- Feature flags hide incomplete work
|
||||
- CI must pass before merge
|
||||
- Deploy multiple times per day
|
||||
|
||||
### GitFlow (Complex, Release-Cycle Driven)
|
||||
|
||||
Best for scheduled releases and enterprise projects.
|
||||
|
||||
```
|
||||
main (production releases)
|
||||
│
|
||||
└── develop (integration branch)
|
||||
│
|
||||
├── feature/user-auth
|
||||
├── feature/payment
|
||||
│
|
||||
├── release/1.0.0 → merge to main and develop
|
||||
│
|
||||
└── hotfix/critical → merge to main and develop
|
||||
```
|
||||
|
||||
**Rules:**
|
||||
- `main` contains production-ready code only
|
||||
- `develop` is the integration branch
|
||||
- Feature branches from `develop`, merge back to `develop`
|
||||
- Release branches from `develop`, merge to `main` and `develop`
|
||||
- Hotfix branches from `main`, merge to both `main` and `develop`
|
||||
|
||||
### When to Use Which
|
||||
|
||||
| Strategy | Team Size | Release Cadence | Best For |
|
||||
|----------|-----------|-----------------|----------|
|
||||
| GitHub Flow | Any | Continuous | SaaS, web apps, startups |
|
||||
| Trunk-Based | 5+ experienced | Multiple/day | High-velocity teams, feature flags |
|
||||
| GitFlow | 10+ | Scheduled | Enterprise, regulated industries |
|
||||
|
||||
## Commit Messages
|
||||
|
||||
### Conventional Commits Format
|
||||
|
||||
```
|
||||
<type>(<scope>): <subject>
|
||||
|
||||
[optional body]
|
||||
|
||||
[optional footer(s)]
|
||||
```
|
||||
|
||||
### Types
|
||||
|
||||
| Type | Use For | Example |
|
||||
|------|---------|---------|
|
||||
| `feat` | New feature | `feat(auth): add OAuth2 login` |
|
||||
| `fix` | Bug fix | `fix(api): handle null response in user endpoint` |
|
||||
| `docs` | Documentation | `docs(readme): update installation instructions` |
|
||||
| `style` | Formatting, no code change | `style: fix indentation in login component` |
|
||||
| `refactor` | Code refactoring | `refactor(db): extract connection pool to module` |
|
||||
| `test` | Adding/updating tests | `test(auth): add unit tests for token validation` |
|
||||
| `chore` | Maintenance tasks | `chore(deps): update dependencies` |
|
||||
| `perf` | Performance improvement | `perf(query): add index to users table` |
|
||||
| `ci` | CI/CD changes | `ci: add PostgreSQL service to test workflow` |
|
||||
| `revert` | Revert previous commit | `revert: revert "feat(auth): add OAuth2 login"` |
|
||||
|
||||
### Good vs Bad Examples
|
||||
|
||||
```
|
||||
# BAD: Vague, no context
|
||||
git commit -m "fixed stuff"
|
||||
git commit -m "updates"
|
||||
git commit -m "WIP"
|
||||
|
||||
# GOOD: Clear, specific, explains why
|
||||
git commit -m "fix(api): retry requests on 503 Service Unavailable
|
||||
|
||||
The external API occasionally returns 503 errors during peak hours.
|
||||
Added exponential backoff retry logic with max 3 attempts.
|
||||
|
||||
Closes #123"
|
||||
```
|
||||
|
||||
### Commit Message Template
|
||||
|
||||
Create `.gitmessage` in repo root:
|
||||
|
||||
```
|
||||
# <type>(<scope>): <subject>
|
||||
#
|
||||
# Types: feat, fix, docs, style, refactor, test, chore, perf, ci, revert
|
||||
# Scope: api, ui, db, auth, etc.
|
||||
# Subject: imperative mood, no period, max 50 chars
|
||||
#
|
||||
# [optional body] - explain why, not what
|
||||
# [optional footer] - Breaking changes, closes #issue
|
||||
```
|
||||
|
||||
Enable with: `git config commit.template .gitmessage`
|
||||
|
||||
## Merge vs Rebase
|
||||
|
||||
### Merge (Preserves History)
|
||||
|
||||
```bash
|
||||
# Creates a merge commit
|
||||
git checkout main
|
||||
git merge feature/user-auth
|
||||
|
||||
# Result:
|
||||
# * merge commit
|
||||
# |\
|
||||
# | * feature commits
|
||||
# |/
|
||||
# * main commits
|
||||
```
|
||||
|
||||
**Use when:**
|
||||
- Merging feature branches into `main`
|
||||
- You want to preserve exact history
|
||||
- Multiple people worked on the branch
|
||||
- The branch has been pushed and others may have based work on it
|
||||
|
||||
### Rebase (Linear History)
|
||||
|
||||
```bash
|
||||
# Rewrites feature commits onto target branch
|
||||
git checkout feature/user-auth
|
||||
git rebase main
|
||||
|
||||
# Result:
|
||||
# * feature commits (rewritten)
|
||||
# * main commits
|
||||
```
|
||||
|
||||
**Use when:**
|
||||
- Updating your local feature branch with latest `main`
|
||||
- You want a linear, clean history
|
||||
- The branch is local-only (not pushed)
|
||||
- You're the only one working on the branch
|
||||
|
||||
### Rebase Workflow
|
||||
|
||||
```bash
|
||||
# Update feature branch with latest main (before PR)
|
||||
git checkout feature/user-auth
|
||||
git fetch origin
|
||||
git rebase origin/main
|
||||
|
||||
# Fix any conflicts
|
||||
# Tests should still pass
|
||||
|
||||
# Force push (only if you're the only contributor)
|
||||
git push --force-with-lease origin feature/user-auth
|
||||
```
|
||||
|
||||
### When NOT to Rebase
|
||||
|
||||
```
|
||||
# NEVER rebase branches that:
|
||||
- Have been pushed to a shared repository
|
||||
- Other people have based work on
|
||||
- Are protected branches (main, develop)
|
||||
- Are already merged
|
||||
|
||||
# Why: Rebase rewrites history, breaking others' work
|
||||
```
|
||||
|
||||
## Pull Request Workflow
|
||||
|
||||
### PR Title Format
|
||||
|
||||
```
|
||||
<type>(<scope>): <description>
|
||||
|
||||
Examples:
|
||||
feat(auth): add SSO support for enterprise users
|
||||
fix(api): resolve race condition in order processing
|
||||
docs(api): add OpenAPI specification for v2 endpoints
|
||||
```
|
||||
|
||||
### PR Description Template
|
||||
|
||||
```markdown
|
||||
## What
|
||||
|
||||
Brief description of what this PR does.
|
||||
|
||||
## Why
|
||||
|
||||
Explain the motivation and context.
|
||||
|
||||
## How
|
||||
|
||||
Key implementation details worth highlighting.
|
||||
|
||||
## Testing
|
||||
|
||||
- [ ] Unit tests added/updated
|
||||
- [ ] Integration tests added/updated
|
||||
- [ ] Manual testing performed
|
||||
|
||||
## Screenshots (if applicable)
|
||||
|
||||
Before/after screenshots for UI changes.
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] Code follows project style guidelines
|
||||
- [ ] Self-review completed
|
||||
- [ ] Comments added for complex logic
|
||||
- [ ] Documentation updated
|
||||
- [ ] No new warnings introduced
|
||||
- [ ] Tests pass locally
|
||||
- [ ] Related issues linked
|
||||
|
||||
Closes #123
|
||||
```
|
||||
|
||||
### Code Review Checklist
|
||||
|
||||
**For Reviewers:**
|
||||
|
||||
- [ ] Does the code solve the stated problem?
|
||||
- [ ] Are there any edge cases not handled?
|
||||
- [ ] Is the code readable and maintainable?
|
||||
- [ ] Are there sufficient tests?
|
||||
- [ ] Are there security concerns?
|
||||
- [ ] Is the commit history clean (squashed if needed)?
|
||||
|
||||
**For Authors:**
|
||||
|
||||
- [ ] Self-review completed before requesting review
|
||||
- [ ] CI passes (tests, lint, typecheck)
|
||||
- [ ] PR size is reasonable (<500 lines ideal)
|
||||
- [ ] Related to a single feature/fix
|
||||
- [ ] Description clearly explains the change
|
||||
|
||||
## Conflict Resolution
|
||||
|
||||
### Identify Conflicts
|
||||
|
||||
```bash
|
||||
# Check for conflicts before merge
|
||||
git checkout main
|
||||
git merge feature/user-auth --no-commit --no-ff
|
||||
|
||||
# If conflicts, Git will show:
|
||||
# CONFLICT (content): Merge conflict in src/auth/login.ts
|
||||
# Automatic merge failed; fix conflicts and then commit the result.
|
||||
```
|
||||
|
||||
### Resolve Conflicts
|
||||
|
||||
```bash
|
||||
# See conflicted files
|
||||
git status
|
||||
|
||||
# View conflict markers in file
|
||||
# <<<<<<< HEAD
|
||||
# content from main
|
||||
# =======
|
||||
# content from feature branch
|
||||
# >>>>>>> feature/user-auth
|
||||
|
||||
# Option 1: Manual resolution
|
||||
# Edit file, remove markers, keep correct content
|
||||
|
||||
# Option 2: Use merge tool
|
||||
git mergetool
|
||||
|
||||
# Option 3: Accept one side
|
||||
git checkout --ours src/auth/login.ts # Keep main version
|
||||
git checkout --theirs src/auth/login.ts # Keep feature version
|
||||
|
||||
# After resolving, stage and commit
|
||||
git add src/auth/login.ts
|
||||
git commit
|
||||
```
|
||||
|
||||
### Conflict Prevention Strategies
|
||||
|
||||
```bash
|
||||
# 1. Keep feature branches small and short-lived
|
||||
# 2. Rebase frequently onto main
|
||||
git checkout feature/user-auth
|
||||
git fetch origin
|
||||
git rebase origin/main
|
||||
|
||||
# 3. Communicate with team about touching shared files
|
||||
# 4. Use feature flags instead of long-lived branches
|
||||
# 5. Review and merge PRs promptly
|
||||
```
|
||||
|
||||
## Branch Management
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
```
|
||||
# Feature branches
|
||||
feature/user-authentication
|
||||
feature/JIRA-123-payment-integration
|
||||
|
||||
# Bug fixes
|
||||
fix/login-redirect-loop
|
||||
fix/456-null-pointer-exception
|
||||
|
||||
# Hotfixes (production issues)
|
||||
hotfix/critical-security-patch
|
||||
hotfix/database-connection-leak
|
||||
|
||||
# Releases
|
||||
release/1.2.0
|
||||
release/2024-01-hotfix
|
||||
|
||||
# Experiments/POCs
|
||||
experiment/new-caching-strategy
|
||||
poc/graphql-migration
|
||||
```
|
||||
|
||||
### Branch Cleanup
|
||||
|
||||
```bash
|
||||
# Delete local branches that are merged
|
||||
git branch --merged main | grep -v "^\*\|main" | xargs -n 1 git branch -d
|
||||
|
||||
# Delete remote-tracking references for deleted remote branches
|
||||
git fetch -p
|
||||
|
||||
# Delete local branch
|
||||
git branch -d feature/user-auth # Safe delete (only if merged)
|
||||
git branch -D feature/user-auth # Force delete
|
||||
|
||||
# Delete remote branch
|
||||
git push origin --delete feature/user-auth
|
||||
```
|
||||
|
||||
### Stash Workflow
|
||||
|
||||
```bash
|
||||
# Save work in progress
|
||||
git stash push -m "WIP: user authentication"
|
||||
|
||||
# List stashes
|
||||
git stash list
|
||||
|
||||
# Apply most recent stash
|
||||
git stash pop
|
||||
|
||||
# Apply specific stash
|
||||
git stash apply stash@{2}
|
||||
|
||||
# Drop stash
|
||||
git stash drop stash@{0}
|
||||
```
|
||||
|
||||
## Release Management
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
```
|
||||
MAJOR.MINOR.PATCH
|
||||
|
||||
MAJOR: Breaking changes
|
||||
MINOR: New features, backward compatible
|
||||
PATCH: Bug fixes, backward compatible
|
||||
|
||||
Examples:
|
||||
1.0.0 → 1.0.1 (patch: bug fix)
|
||||
1.0.1 → 1.1.0 (minor: new feature)
|
||||
1.1.0 → 2.0.0 (major: breaking change)
|
||||
```
|
||||
|
||||
### Creating Releases
|
||||
|
||||
```bash
|
||||
# Create annotated tag
|
||||
git tag -a v1.2.0 -m "Release v1.2.0
|
||||
|
||||
Features:
|
||||
- Add user authentication
|
||||
- Implement password reset
|
||||
|
||||
Fixes:
|
||||
- Resolve login redirect issue
|
||||
|
||||
Breaking Changes:
|
||||
- None"
|
||||
|
||||
# Push tag to remote
|
||||
git push origin v1.2.0
|
||||
|
||||
# List tags
|
||||
git tag -l
|
||||
|
||||
# Delete tag
|
||||
git tag -d v1.2.0
|
||||
git push origin --delete v1.2.0
|
||||
```
|
||||
|
||||
### Changelog Generation
|
||||
|
||||
```bash
|
||||
# Generate changelog from commits
|
||||
git log v1.1.0..v1.2.0 --oneline --no-merges
|
||||
|
||||
# Or use conventional-changelog
|
||||
npx conventional-changelog -i CHANGELOG.md -s
|
||||
```
|
||||
|
||||
## Git Configuration
|
||||
|
||||
### Essential Configs
|
||||
|
||||
```bash
|
||||
# User identity
|
||||
git config --global user.name "Your Name"
|
||||
git config --global user.email "your@email.com"
|
||||
|
||||
# Default branch name
|
||||
git config --global init.defaultBranch main
|
||||
|
||||
# Pull behavior (rebase instead of merge)
|
||||
git config --global pull.rebase true
|
||||
|
||||
# Push behavior (push current branch only)
|
||||
git config --global push.default current
|
||||
|
||||
# Auto-correct typos
|
||||
git config --global help.autocorrect 1
|
||||
|
||||
# Better diff algorithm
|
||||
git config --global diff.algorithm histogram
|
||||
|
||||
# Color output
|
||||
git config --global color.ui auto
|
||||
```
|
||||
|
||||
### Useful Aliases
|
||||
|
||||
```bash
|
||||
# Add to ~/.gitconfig
|
||||
[alias]
|
||||
co = checkout
|
||||
br = branch
|
||||
ci = commit
|
||||
st = status
|
||||
unstage = reset HEAD --
|
||||
last = log -1 HEAD
|
||||
visual = log --oneline --graph --all
|
||||
amend = commit --amend --no-edit
|
||||
wip = commit -m "WIP"
|
||||
undo = reset --soft HEAD~1
|
||||
contributors = shortlog -sn
|
||||
```
|
||||
|
||||
### Gitignore Patterns
|
||||
|
||||
```gitignore
|
||||
# Dependencies
|
||||
node_modules/
|
||||
vendor/
|
||||
|
||||
# Build outputs
|
||||
dist/
|
||||
build/
|
||||
*.o
|
||||
*.exe
|
||||
|
||||
# Environment files
|
||||
.env
|
||||
.env.local
|
||||
.env.*.local
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS files
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
logs/
|
||||
|
||||
# Test coverage
|
||||
coverage/
|
||||
|
||||
# Cache
|
||||
.cache/
|
||||
*.tsbuildinfo
|
||||
```
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Starting a New Feature
|
||||
|
||||
```bash
|
||||
# 1. Update main branch
|
||||
git checkout main
|
||||
git pull origin main
|
||||
|
||||
# 2. Create feature branch
|
||||
git checkout -b feature/user-auth
|
||||
|
||||
# 3. Make changes and commit
|
||||
git add .
|
||||
git commit -m "feat(auth): implement OAuth2 login"
|
||||
|
||||
# 4. Push to remote
|
||||
git push -u origin feature/user-auth
|
||||
|
||||
# 5. Create Pull Request on GitHub/GitLab
|
||||
```
|
||||
|
||||
### Updating a PR with New Changes
|
||||
|
||||
```bash
|
||||
# 1. Make additional changes
|
||||
git add .
|
||||
git commit -m "feat(auth): add error handling"
|
||||
|
||||
# 2. Push updates
|
||||
git push origin feature/user-auth
|
||||
```
|
||||
|
||||
### Syncing Fork with Upstream
|
||||
|
||||
```bash
|
||||
# 1. Add upstream remote (once)
|
||||
git remote add upstream https://github.com/original/repo.git
|
||||
|
||||
# 2. Fetch upstream
|
||||
git fetch upstream
|
||||
|
||||
# 3. Merge upstream/main into your main
|
||||
git checkout main
|
||||
git merge upstream/main
|
||||
|
||||
# 4. Push to your fork
|
||||
git push origin main
|
||||
```
|
||||
|
||||
### Undoing Mistakes
|
||||
|
||||
```bash
|
||||
# Undo last commit (keep changes)
|
||||
git reset --soft HEAD~1
|
||||
|
||||
# Undo last commit (discard changes)
|
||||
git reset --hard HEAD~1
|
||||
|
||||
# Undo last commit pushed to remote
|
||||
git revert HEAD
|
||||
git push origin main
|
||||
|
||||
# Undo specific file changes
|
||||
git checkout HEAD -- path/to/file
|
||||
|
||||
# Fix last commit message
|
||||
git commit --amend -m "New message"
|
||||
|
||||
# Add forgotten file to last commit
|
||||
git add forgotten-file
|
||||
git commit --amend --no-edit
|
||||
```
|
||||
|
||||
## Git Hooks
|
||||
|
||||
### Pre-Commit Hook
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-commit
|
||||
|
||||
# Run linting
|
||||
npm run lint || exit 1
|
||||
|
||||
# Run tests
|
||||
npm test || exit 1
|
||||
|
||||
# Check for secrets
|
||||
if git diff --cached | grep -E '(password|api_key|secret)'; then
|
||||
echo "Possible secret detected. Commit aborted."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
### Pre-Push Hook
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-push
|
||||
|
||||
# Run full test suite
|
||||
npm run test:all || exit 1
|
||||
|
||||
# Check for console.log statements
|
||||
if git diff origin/main | grep -E 'console\.log'; then
|
||||
echo "Remove console.log statements before pushing."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
```
|
||||
# BAD: Committing directly to main
|
||||
git checkout main
|
||||
git commit -m "fix bug"
|
||||
|
||||
# GOOD: Use feature branches and PRs
|
||||
|
||||
# BAD: Committing secrets
|
||||
git add .env # Contains API keys
|
||||
|
||||
# GOOD: Add to .gitignore, use environment variables
|
||||
|
||||
# BAD: Giant PRs (1000+ lines)
|
||||
# GOOD: Break into smaller, focused PRs
|
||||
|
||||
# BAD: "Update" commit messages
|
||||
git commit -m "update"
|
||||
git commit -m "fix"
|
||||
|
||||
# GOOD: Descriptive messages
|
||||
git commit -m "fix(auth): resolve redirect loop after login"
|
||||
|
||||
# BAD: Rewriting public history
|
||||
git push --force origin main
|
||||
|
||||
# GOOD: Use revert for public branches
|
||||
git revert HEAD
|
||||
|
||||
# BAD: Long-lived feature branches (weeks/months)
|
||||
# GOOD: Keep branches short (days), rebase frequently
|
||||
|
||||
# BAD: Committing generated files
|
||||
git add dist/
|
||||
git add node_modules/
|
||||
|
||||
# GOOD: Add to .gitignore
|
||||
```
|
||||
|
||||
## Quick Reference
|
||||
|
||||
| Task | Command |
|
||||
|------|---------|
|
||||
| Create branch | `git checkout -b feature/name` |
|
||||
| Switch branch | `git checkout branch-name` |
|
||||
| Delete branch | `git branch -d branch-name` |
|
||||
| Merge branch | `git merge branch-name` |
|
||||
| Rebase branch | `git rebase main` |
|
||||
| View history | `git log --oneline --graph` |
|
||||
| View changes | `git diff` |
|
||||
| Stage changes | `git add .` or `git add -p` |
|
||||
| Commit | `git commit -m "message"` |
|
||||
| Push | `git push origin branch-name` |
|
||||
| Pull | `git pull origin branch-name` |
|
||||
| Stash | `git stash push -m "message"` |
|
||||
| Undo last commit | `git reset --soft HEAD~1` |
|
||||
| Revert commit | `git revert HEAD` |
|
||||
skills/healthcare-cdss-patterns/SKILL.md (new file, 245 lines)
@@ -0,0 +1,245 @@
|
||||
---
|
||||
name: healthcare-cdss-patterns
|
||||
description: Clinical Decision Support System (CDSS) development patterns. Drug interaction checking, dose validation, clinical scoring (NEWS2, qSOFA), alert severity classification, and integration into EMR workflows.
|
||||
origin: Health1 Super Speciality Hospitals — contributed by Dr. Keyur Patel
|
||||
version: "1.0.0"
|
||||
---
|
||||
|
||||
# Healthcare CDSS Development Patterns
|
||||
|
||||
Patterns for building Clinical Decision Support Systems that integrate into EMR workflows. CDSS modules are patient safety critical — zero tolerance for false negatives.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Implementing drug interaction checking
|
||||
- Building dose validation engines
|
||||
- Implementing clinical scoring systems (NEWS2, qSOFA, APACHE, GCS)
|
||||
- Designing alert systems for abnormal clinical values
|
||||
- Building medication order entry with safety checks
|
||||
- Integrating lab result interpretation with clinical context
|
||||
|
||||
## How It Works
|
||||
|
||||
The CDSS engine is a **pure function library with zero side effects**. Input clinical data, output alerts. This makes it fully testable.
|
||||
|
||||
Three primary modules:
|
||||
|
||||
1. **`checkInteractions(newDrug, currentMeds, allergies)`** — Checks a new drug against current medications and known allergies. Returns severity-sorted `InteractionAlert[]`. Uses `DrugInteractionPair` data model.
|
||||
2. **`validateDose(drug, dose, route, weight, age, renalFunction)`** — Validates a prescribed dose against weight-based, age-adjusted, and renal-adjusted rules. Returns `DoseValidationResult`.
|
||||
3. **`calculateNEWS2(vitals)`** — National Early Warning Score 2 from `NEWS2Input`. Returns `NEWS2Result` with total score, risk level, and escalation guidance.
|
||||
|
||||
```
|
||||
EMR UI
|
||||
↓ (user enters data)
|
||||
CDSS Engine (pure functions, no side effects)
|
||||
├── Drug Interaction Checker
|
||||
├── Dose Validator
|
||||
├── Clinical Scoring (NEWS2, qSOFA, etc.)
|
||||
└── Alert Classifier
|
||||
↓ (returns alerts)
|
||||
EMR UI (displays alerts inline, blocks if critical)
|
||||
```
|
||||
|
||||
### Drug Interaction Checking
|
||||
|
||||
```typescript
|
||||
interface DrugInteractionPair {
|
||||
drugA: string; // generic name
|
||||
drugB: string; // generic name
|
||||
severity: 'critical' | 'major' | 'minor';
|
||||
mechanism: string;
|
||||
clinicalEffect: string;
|
||||
recommendation: string;
|
||||
}
|
||||
|
||||
function checkInteractions(
|
||||
newDrug: string,
|
||||
currentMedications: string[],
|
||||
allergyList: string[]
|
||||
): InteractionAlert[] {
|
||||
if (!newDrug) return [];
|
||||
const alerts: InteractionAlert[] = [];
|
||||
for (const current of currentMedications) {
|
||||
const interaction = findInteraction(newDrug, current);
|
||||
if (interaction) {
|
||||
alerts.push({ severity: interaction.severity, pair: [newDrug, current],
|
||||
message: interaction.clinicalEffect, recommendation: interaction.recommendation });
|
||||
}
|
||||
}
|
||||
for (const allergy of allergyList) {
|
||||
if (isCrossReactive(newDrug, allergy)) {
|
||||
alerts.push({ severity: 'critical', pair: [newDrug, allergy],
|
||||
message: `Cross-reactivity with documented allergy: ${allergy}`,
|
||||
recommendation: 'Do not prescribe without allergy consultation' });
|
||||
}
|
||||
}
|
||||
return alerts.sort((a, b) => severityOrder(a.severity) - severityOrder(b.severity));
|
||||
}
|
||||
```
|
||||
|
||||
Interaction pairs must be **bidirectional**: if Drug A interacts with Drug B, then Drug B interacts with Drug A.
|
||||
|
||||
### Dose Validation
|
||||
|
||||
```typescript
|
||||
interface DoseValidationResult {
|
||||
valid: boolean;
|
||||
message: string;
|
||||
suggestedRange: { min: number; max: number; unit: string } | null;
|
||||
factors: string[];
|
||||
}
|
||||
|
||||
function validateDose(
|
||||
drug: string,
|
||||
dose: number,
|
||||
route: 'oral' | 'iv' | 'im' | 'sc' | 'topical',
|
||||
patientWeight?: number,
|
||||
patientAge?: number,
|
||||
renalFunction?: number
|
||||
): DoseValidationResult {
|
||||
const rules = getDoseRules(drug, route);
|
||||
if (!rules) return { valid: true, message: 'No validation rules available', suggestedRange: null, factors: [] };
|
||||
const factors: string[] = [];
|
||||
|
||||
// SAFETY: if rules require weight but weight missing, BLOCK (not pass)
|
||||
if (rules.weightBased) {
|
||||
if (!patientWeight || patientWeight <= 0) {
|
||||
return { valid: false, message: `Weight required for ${drug} (mg/kg drug)`,
|
||||
suggestedRange: null, factors: ['weight_missing'] };
|
||||
}
|
||||
factors.push('weight');
|
||||
const maxDose = rules.maxPerKg * patientWeight;
|
||||
if (dose > maxDose) {
|
||||
return { valid: false, message: `Dose exceeds max for ${patientWeight}kg`,
|
||||
suggestedRange: { min: rules.minPerKg * patientWeight, max: maxDose, unit: rules.unit }, factors };
|
||||
}
|
||||
}
|
||||
|
||||
// Age-based adjustment (when rules define age brackets and age is provided)
|
||||
if (rules.ageAdjusted && patientAge !== undefined) {
|
||||
factors.push('age');
|
||||
const ageMax = rules.getAgeAdjustedMax(patientAge);
|
||||
if (dose > ageMax) {
|
||||
return { valid: false, message: `Exceeds age-adjusted max for ${patientAge}yr`,
|
||||
suggestedRange: { min: rules.typicalMin, max: ageMax, unit: rules.unit }, factors };
|
||||
}
|
||||
}
|
||||
|
||||
// Renal adjustment (when rules define eGFR brackets and eGFR is provided)
|
||||
if (rules.renalAdjusted && renalFunction !== undefined) {
|
||||
factors.push('renal');
|
||||
const renalMax = rules.getRenalAdjustedMax(renalFunction);
|
||||
if (dose > renalMax) {
|
||||
return { valid: false, message: `Exceeds renal-adjusted max for eGFR ${renalFunction}`,
|
||||
suggestedRange: { min: rules.typicalMin, max: renalMax, unit: rules.unit }, factors };
|
||||
}
|
||||
}
|
||||
|
||||
// Absolute max
|
||||
if (dose > rules.absoluteMax) {
|
||||
return { valid: false, message: `Exceeds absolute max ${rules.absoluteMax}${rules.unit}`,
|
||||
suggestedRange: { min: rules.typicalMin, max: rules.absoluteMax, unit: rules.unit },
|
||||
factors: [...factors, 'absolute_max'] };
|
||||
}
|
||||
return { valid: true, message: 'Within range',
|
||||
suggestedRange: { min: rules.typicalMin, max: rules.typicalMax, unit: rules.unit }, factors };
|
||||
}
|
||||
```
|
||||
|
||||
### Clinical Scoring: NEWS2
|
||||
|
||||
```typescript
|
||||
interface NEWS2Input {
|
||||
respiratoryRate: number; oxygenSaturation: number; supplementalOxygen: boolean;
|
||||
temperature: number; systolicBP: number; heartRate: number;
|
||||
consciousness: 'alert' | 'voice' | 'pain' | 'unresponsive';
|
||||
}
|
||||
interface NEWS2Result {
|
||||
total: number; // 0-20
|
||||
risk: 'low' | 'low-medium' | 'medium' | 'high';
|
||||
components: Record<string, number>;
|
||||
escalation: string;
|
||||
}
|
||||
```
|
||||
|
||||
Scoring tables must match the Royal College of Physicians specification exactly.
|
||||
|
||||
### Alert Severity and UI Behavior
|
||||
|
||||
| Severity | UI Behavior | Clinician Action Required |
|
||||
|----------|-------------|--------------------------|
|
||||
| Critical | Block action. Non-dismissable modal. Red. | Must document override reason to proceed |
|
||||
| Major | Warning banner inline. Orange. | Must acknowledge before proceeding |
|
||||
| Minor | Info note inline. Yellow. | Awareness only, no action required |
|
||||
|
||||
Critical alerts must NEVER be auto-dismissed or implemented as toast notifications. Override reasons must be stored in the audit trail.
|
||||
|
||||
### Testing CDSS (Zero Tolerance for False Negatives)
|
||||
|
||||
```typescript
|
||||
describe('CDSS — Patient Safety', () => {
|
||||
INTERACTION_PAIRS.forEach(({ drugA, drugB, severity }) => {
|
||||
it(`detects ${drugA} + ${drugB} (${severity})`, () => {
|
||||
const alerts = checkInteractions(drugA, [drugB], []);
|
||||
expect(alerts.length).toBeGreaterThan(0);
|
||||
expect(alerts[0].severity).toBe(severity);
|
||||
});
|
||||
it(`detects ${drugB} + ${drugA} (reverse)`, () => {
|
||||
const alerts = checkInteractions(drugB, [drugA], []);
|
||||
expect(alerts.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
it('blocks mg/kg drug when weight is missing', () => {
|
||||
const result = validateDose('gentamicin', 300, 'iv');
|
||||
expect(result.valid).toBe(false);
|
||||
expect(result.factors).toContain('weight_missing');
|
||||
});
|
||||
it('handles malformed drug data gracefully', () => {
|
||||
expect(() => checkInteractions('', [], [])).not.toThrow();
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
Pass criteria: 100%. A single missed interaction is a patient safety event.
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Making CDSS checks optional or skippable without documented reason
|
||||
- Implementing interaction checks as toast notifications
|
||||
- Using `any` types for drug or clinical data
|
||||
- Hardcoding interaction pairs instead of using a maintainable data structure
|
||||
- Silently catching errors in CDSS engine (must surface failures loudly)
|
||||
- Skipping weight-based validation when weight is not available (must block, not pass)
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Drug Interaction Check
|
||||
|
||||
```typescript
|
||||
const alerts = checkInteractions('warfarin', ['aspirin', 'metformin'], ['penicillin']);
|
||||
// [{ severity: 'critical', pair: ['warfarin', 'aspirin'],
|
||||
// message: 'Increased bleeding risk', recommendation: 'Avoid combination' }]
|
||||
```
|
||||
|
||||
### Example 2: Dose Validation
|
||||
|
||||
```typescript
|
||||
const ok = validateDose('paracetamol', 1000, 'oral', 70, 45);
|
||||
// { valid: true, suggestedRange: { min: 500, max: 4000, unit: 'mg' } }
|
||||
|
||||
const bad = validateDose('paracetamol', 5000, 'oral', 70, 45);
|
||||
// { valid: false, message: 'Exceeds absolute max 4000mg' }
|
||||
|
||||
const noWeight = validateDose('gentamicin', 300, 'iv');
|
||||
// { valid: false, factors: ['weight_missing'] }
|
||||
```
|
||||
|
||||
### Example 3: NEWS2 Scoring
|
||||
|
||||
```typescript
|
||||
const result = calculateNEWS2({
|
||||
respiratoryRate: 24, oxygenSaturation: 93, supplementalOxygen: true,
|
||||
temperature: 38.5, systolicBP: 100, heartRate: 110, consciousness: 'voice'
|
||||
});
|
||||
// { total: 13, risk: 'high', escalation: 'Urgent clinical review. Consider ICU.' }
|
||||
```
|
||||
skills/healthcare-emr-patterns/SKILL.md (new file, 159 lines)
@@ -0,0 +1,159 @@
|
||||
---
|
||||
name: healthcare-emr-patterns
|
||||
description: EMR/EHR development patterns for healthcare applications. Clinical safety, encounter workflows, prescription generation, clinical decision support integration, and accessibility-first UI for medical data entry.
|
||||
origin: Health1 Super Speciality Hospitals — contributed by Dr. Keyur Patel
|
||||
version: "1.0.0"
|
||||
---
|
||||
|
||||
# Healthcare EMR Development Patterns
|
||||
|
||||
Patterns for building Electronic Medical Record (EMR) and Electronic Health Record (EHR) systems. Prioritizes patient safety, clinical accuracy, and practitioner efficiency.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Building patient encounter workflows (complaint, exam, diagnosis, prescription)
|
||||
- Implementing clinical note-taking (structured + free text + voice-to-text)
|
||||
- Designing prescription/medication modules with drug interaction checking
|
||||
- Integrating Clinical Decision Support Systems (CDSS)
|
||||
- Building lab result displays with reference range highlighting
|
||||
- Implementing audit trails for clinical data
|
||||
- Designing healthcare-accessible UIs for clinical data entry
|
||||
|
||||
## How It Works
|
||||
|
||||
### Patient Safety First
|
||||
|
||||
Every design decision must be evaluated against: "Could this harm a patient?"
|
||||
|
||||
- Drug interactions MUST alert, not silently pass
|
||||
- Abnormal lab values MUST be visually flagged
|
||||
- Critical vitals MUST trigger escalation workflows
|
||||
- No clinical data modification without audit trail
|
||||
|
||||
### Single-Page Encounter Flow
|
||||
|
||||
Clinical encounters should flow vertically on a single page — no tab switching:
|
||||
|
||||
```
|
||||
Patient Header (sticky — always visible)
|
||||
├── Demographics, allergies, active medications
|
||||
│
|
||||
Encounter Flow (vertical scroll)
|
||||
├── 1. Chief Complaint (structured templates + free text)
|
||||
├── 2. History of Present Illness
|
||||
├── 3. Physical Examination (system-wise)
|
||||
├── 4. Vitals (auto-trigger clinical scoring)
|
||||
├── 5. Diagnosis (ICD-10/SNOMED search)
|
||||
├── 6. Medications (drug DB + interaction check)
|
||||
├── 7. Investigations (lab/radiology orders)
|
||||
├── 8. Plan & Follow-up
|
||||
└── 9. Sign / Lock / Print
|
||||
```
|
||||
|
||||
### Smart Template System
|
||||
|
||||
```typescript
|
||||
interface ClinicalTemplate {
|
||||
id: string;
|
||||
name: string; // e.g., "Chest Pain"
|
||||
chips: string[]; // clickable symptom chips
|
||||
requiredFields: string[]; // mandatory data points
|
||||
redFlags: string[]; // triggers non-dismissable alert
|
||||
icdSuggestions: string[]; // pre-mapped diagnosis codes
|
||||
}
|
||||
```
|
||||
|
||||
Red flags in any template must trigger a visible, non-dismissable alert — NOT a toast notification.
|
||||
|
||||
### Medication Safety Pattern
|
||||
|
||||
```
|
||||
User selects drug
|
||||
→ Check current medications for interactions
|
||||
→ Check encounter medications for interactions
|
||||
→ Check patient allergies
|
||||
→ Validate dose against weight/age/renal function
|
||||
→ If CRITICAL interaction: BLOCK prescribing entirely
|
||||
→ Clinician must document override reason to proceed past a block
|
||||
→ If MAJOR interaction: display warning, require acknowledgment
|
||||
→ Log all alerts and override reasons in audit trail
|
||||
```
|
||||
|
||||
Critical interactions **block prescribing by default**. The clinician must explicitly override with a documented reason stored in the audit trail. The system never silently allows a critical interaction.
|
||||
|
||||
### Locked Encounter Pattern
|
||||
|
||||
Once a clinical encounter is signed:
|
||||
- No edits allowed — only an addendum (a separate linked record)
|
||||
- Both original and addendum appear in the patient timeline
|
||||
- Audit trail captures who signed, when, and any addendum records
|
||||
|
||||
### UI Patterns for Clinical Data
|
||||
|
||||
**Vitals Display:** Current values with normal range highlighting (green/yellow/red), trend arrows vs previous, clinical scoring auto-calculated (NEWS2, qSOFA), escalation guidance inline.
|
||||
|
||||
**Lab Results Display:** Normal range highlighting, previous value comparison, critical values with non-dismissable alert, collection/analysis timestamps, pending orders with expected turnaround.
|
||||
|
||||
**Prescription PDF:** One-click generation with patient demographics, allergies, diagnosis, drug details (generic + brand, dose, route, frequency, duration), clinician signature block.
|
||||
|
||||
### Accessibility for Healthcare
|
||||
|
||||
Healthcare UIs have stricter requirements than typical web apps:
|
||||
- 4.5:1 minimum contrast (WCAG AA) — clinicians work in varied lighting
|
||||
- Large touch targets (44x44px minimum) — for gloved/rushed interaction
|
||||
- Keyboard navigation — for power users entering data rapidly
|
||||
- No color-only indicators — always pair color with text/icon (colorblind clinicians)
|
||||
- Screen reader labels on all form fields
|
||||
- No auto-dismissing toasts for clinical alerts — clinician must actively acknowledge
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Storing clinical data in browser localStorage
|
||||
- Silent failures in drug interaction checking
|
||||
- Dismissable toasts for critical clinical alerts
|
||||
- Tab-based encounter UIs that fragment the clinical workflow
|
||||
- Allowing edits to signed/locked encounters
|
||||
- Displaying clinical data without audit trail
|
||||
- Using `any` type for clinical data structures
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Patient Encounter Flow
|
||||
|
||||
```
|
||||
Doctor opens encounter for Patient #4521
|
||||
→ Sticky header shows: "Rajesh M, 58M, Allergies: Penicillin, Active Meds: Metformin 500mg"
|
||||
→ Chief Complaint: selects "Chest Pain" template
|
||||
→ Clicks chips: "substernal", "radiating to left arm", "crushing"
|
||||
→ Red flag "crushing substernal chest pain" triggers non-dismissable alert
|
||||
→ Examination: CVS system — "S1 S2 normal, no murmur"
|
||||
→ Vitals: HR 110, BP 90/60, SpO2 94%
|
||||
→ NEWS2 auto-calculates: score 8, risk HIGH, escalation alert shown
|
||||
→ Diagnosis: searches "ACS" → selects ICD-10 I21.9
|
||||
→ Medications: selects Aspirin 300mg
|
||||
→ CDSS checks against Metformin: no interaction
|
||||
→ Signs encounter → locked, addendum-only from this point
|
||||
```
|
||||
|
||||
### Example 2: Medication Safety Workflow
|
||||
|
||||
```
|
||||
Doctor prescribes Warfarin for Patient #4521
|
||||
→ CDSS detects: Warfarin + Aspirin = CRITICAL interaction
|
||||
→ UI: red non-dismissable modal blocks prescribing
|
||||
→ Doctor clicks "Override with reason"
|
||||
→ Types: "Benefits outweigh risks — monitored INR protocol"
|
||||
→ Override reason + alert stored in audit trail
|
||||
→ Prescription proceeds with documented override
|
||||
```
|
||||
|
||||
### Example 3: Locked Encounter + Addendum
|
||||
|
||||
```
|
||||
Encounter #E-2024-0891 signed by Dr. Shah at 14:30
|
||||
→ All fields locked — no edit buttons visible
|
||||
→ "Add Addendum" button available
|
||||
→ Dr. Shah clicks addendum, adds: "Lab results received — Troponin elevated"
|
||||
→ New record E-2024-0891-A1 linked to original
|
||||
→ Timeline shows both: original encounter + addendum with timestamps
|
||||
```
|
||||
207
skills/healthcare-eval-harness/SKILL.md
Normal file
207
skills/healthcare-eval-harness/SKILL.md
Normal file
@@ -0,0 +1,207 @@
|
||||
---
|
||||
name: healthcare-eval-harness
|
||||
description: Patient safety evaluation harness for healthcare application deployments. Automated test suites for CDSS accuracy, PHI exposure, clinical workflow integrity, and integration compliance. Blocks deployments on safety failures.
|
||||
origin: Health1 Super Speciality Hospitals — contributed by Dr. Keyur Patel
|
||||
version: "1.0.0"
|
||||
---
|
||||
|
||||
# Healthcare Eval Harness — Patient Safety Verification
|
||||
|
||||
Automated verification system for healthcare application deployments. A single CRITICAL failure blocks deployment. Patient safety is non-negotiable.
|
||||
|
||||
> **Note:** Examples use Jest as the reference test runner. Adapt commands for your framework (Vitest, pytest, PHPUnit, etc.) — the test categories and pass thresholds are framework-agnostic.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Before any deployment of EMR/EHR applications
|
||||
- After modifying CDSS logic (drug interactions, dose validation, scoring)
|
||||
- After changing database schemas that touch patient data
|
||||
- After modifying authentication or access control
|
||||
- During CI/CD pipeline configuration for healthcare apps
|
||||
- After resolving merge conflicts in clinical modules
|
||||
|
||||
## How It Works
|
||||
|
||||
The eval harness runs five test categories in order. The first three (CDSS Accuracy, PHI Exposure, Data Integrity) are CRITICAL gates requiring 100% pass rate — a single failure blocks deployment. The remaining two (Clinical Workflow, Integration) are HIGH gates requiring 95%+ pass rate.
|
||||
|
||||
Each category maps to a Jest test path pattern. The CI pipeline runs CRITICAL gates with `--bail` (stop on first failure) and enforces coverage thresholds with `--coverage --coverageThreshold`.
|
||||
|
||||
### Eval Categories
|
||||
|
||||
**1. CDSS Accuracy (CRITICAL — 100% required)**
|
||||
|
||||
Tests all clinical decision support logic: drug interaction pairs (both directions), dose validation rules, clinical scoring vs published specs, no false negatives, no silent failures.
|
||||
|
||||
```bash
|
||||
npx jest --testPathPattern='tests/cdss' --bail --ci --coverage
|
||||
```
|
||||
|
||||
**2. PHI Exposure (CRITICAL — 100% required)**
|
||||
|
||||
Tests for protected health information leaks: API error responses, console output, URL parameters, browser storage, cross-facility isolation, unauthenticated access, service role key absence.
|
||||
|
||||
```bash
|
||||
npx jest --testPathPattern='tests/security/phi' --bail --ci
|
||||
```
|
||||
|
||||
**3. Data Integrity (CRITICAL — 100% required)**
|
||||
|
||||
Tests clinical data safety: locked encounters, audit trail entries, cascade delete protection, concurrent edit handling, no orphaned records.
|
||||
|
||||
```bash
|
||||
npx jest --testPathPattern='tests/data-integrity' --bail --ci
|
||||
```
|
||||
|
||||
**4. Clinical Workflow (HIGH — 95%+ required)**
|
||||
|
||||
Tests end-to-end flows: encounter lifecycle, template rendering, medication sets, drug/diagnosis search, prescription PDF, red flag alerts.
|
||||
|
||||
```bash
|
||||
tmp_json=$(mktemp)
|
||||
npx jest --testPathPattern='tests/clinical' --ci --json --outputFile="$tmp_json" || true
|
||||
total=$(jq '.numTotalTests // 0' "$tmp_json")
|
||||
passed=$(jq '.numPassedTests // 0' "$tmp_json")
|
||||
if [ "$total" -eq 0 ]; then
|
||||
echo "No clinical tests found" >&2
|
||||
exit 1
|
||||
fi
|
||||
rate=$(echo "scale=2; $passed * 100 / $total" | bc)
|
||||
echo "Clinical pass rate: ${rate}% ($passed/$total)"
|
||||
```
|
||||
|
||||
**5. Integration Compliance (HIGH — 95%+ required)**
|
||||
|
||||
Tests external systems: HL7 message parsing (v2.x), FHIR validation, lab result mapping, malformed message handling.
|
||||
|
||||
```bash
|
||||
tmp_json=$(mktemp)
|
||||
npx jest --testPathPattern='tests/integration' --ci --json --outputFile="$tmp_json" || true
|
||||
total=$(jq '.numTotalTests // 0' "$tmp_json")
|
||||
passed=$(jq '.numPassedTests // 0' "$tmp_json")
|
||||
if [ "$total" -eq 0 ]; then
|
||||
echo "No integration tests found" >&2
|
||||
exit 1
|
||||
fi
|
||||
rate=$(echo "scale=2; $passed * 100 / $total" | bc)
|
||||
echo "Integration pass rate: ${rate}% ($passed/$total)"
|
||||
```
|
||||
|
||||
### Pass/Fail Matrix
|
||||
|
||||
| Category | Threshold | On Failure |
|
||||
|----------|-----------|------------|
|
||||
| CDSS Accuracy | 100% | **BLOCK deployment** |
|
||||
| PHI Exposure | 100% | **BLOCK deployment** |
|
||||
| Data Integrity | 100% | **BLOCK deployment** |
|
||||
| Clinical Workflow | 95%+ | WARN, allow with review |
|
||||
| Integration | 95%+ | WARN, allow with review |
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
```yaml
|
||||
name: Healthcare Safety Gate
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
safety-gate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
- run: npm ci
|
||||
|
||||
# CRITICAL gates — 100% required, bail on first failure
|
||||
- name: CDSS Accuracy
|
||||
run: npx jest --testPathPattern='tests/cdss' --bail --ci --coverage --coverageThreshold='{"global":{"branches":80,"functions":80,"lines":80}}'
|
||||
|
||||
- name: PHI Exposure Check
|
||||
run: npx jest --testPathPattern='tests/security/phi' --bail --ci
|
||||
|
||||
- name: Data Integrity
|
||||
run: npx jest --testPathPattern='tests/data-integrity' --bail --ci
|
||||
|
||||
# HIGH gates — 95%+ required, custom threshold check
|
||||
- name: Clinical Workflows
|
||||
run: |
|
||||
TMP_JSON=$(mktemp)
|
||||
npx jest --testPathPattern='tests/clinical' --ci --json --outputFile="$TMP_JSON" || true
|
||||
TOTAL=$(jq '.numTotalTests // 0' "$TMP_JSON")
|
||||
PASSED=$(jq '.numPassedTests // 0' "$TMP_JSON")
|
||||
if [ "$TOTAL" -eq 0 ]; then
|
||||
echo "::error::No clinical tests found"; exit 1
|
||||
fi
|
||||
RATE=$(echo "scale=2; $PASSED * 100 / $TOTAL" | bc)
|
||||
echo "Pass rate: ${RATE}% ($PASSED/$TOTAL)"
|
||||
if (( $(echo "$RATE < 95" | bc -l) )); then
|
||||
echo "::warning::Clinical pass rate ${RATE}% below 95%"
|
||||
fi
|
||||
|
||||
- name: Integration Compliance
|
||||
run: |
|
||||
TMP_JSON=$(mktemp)
|
||||
npx jest --testPathPattern='tests/integration' --ci --json --outputFile="$TMP_JSON" || true
|
||||
TOTAL=$(jq '.numTotalTests // 0' "$TMP_JSON")
|
||||
PASSED=$(jq '.numPassedTests // 0' "$TMP_JSON")
|
||||
if [ "$TOTAL" -eq 0 ]; then
|
||||
echo "::error::No integration tests found"; exit 1
|
||||
fi
|
||||
RATE=$(echo "scale=2; $PASSED * 100 / $TOTAL" | bc)
|
||||
echo "Pass rate: ${RATE}% ($PASSED/$TOTAL)"
|
||||
if (( $(echo "$RATE < 95" | bc -l) )); then
|
||||
echo "::warning::Integration pass rate ${RATE}% below 95%"
|
||||
fi
|
||||
```
|
||||
|
||||
### Anti-Patterns
|
||||
|
||||
- Skipping CDSS tests "because they passed last time"
|
||||
- Setting CRITICAL thresholds below 100%
|
||||
- Using `--no-bail` on CRITICAL test suites
|
||||
- Mocking the CDSS engine in integration tests (must test real logic)
|
||||
- Allowing deployments when safety gate is red
|
||||
- Running tests without `--coverage` on CDSS suites
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Run All Critical Gates Locally
|
||||
|
||||
```bash
|
||||
npx jest --testPathPattern='tests/cdss' --bail --ci --coverage && \
|
||||
npx jest --testPathPattern='tests/security/phi' --bail --ci && \
|
||||
npx jest --testPathPattern='tests/data-integrity' --bail --ci
|
||||
```
|
||||
|
||||
### Example 2: Check HIGH Gate Pass Rate
|
||||
|
||||
```bash
|
||||
tmp_json=$(mktemp)
|
||||
npx jest --testPathPattern='tests/clinical' --ci --json --outputFile="$tmp_json" || true
|
||||
jq '{
|
||||
passed: (.numPassedTests // 0),
|
||||
total: (.numTotalTests // 0),
|
||||
rate: (if (.numTotalTests // 0) == 0 then 0 else ((.numPassedTests // 0) / (.numTotalTests // 1) * 100) end)
|
||||
}' "$tmp_json"
|
||||
# Expected: { "passed": 21, "total": 22, "rate": 95.45 }
|
||||
```
|
||||
|
||||
### Example 3: Eval Report
|
||||
|
||||
```
|
||||
## Healthcare Eval: 2026-03-27 [commit abc1234]
|
||||
|
||||
### Patient Safety: PASS
|
||||
|
||||
| Category | Tests | Pass | Fail | Status |
|
||||
|----------|-------|------|------|--------|
|
||||
| CDSS Accuracy | 39 | 39 | 0 | PASS |
|
||||
| PHI Exposure | 8 | 8 | 0 | PASS |
|
||||
| Data Integrity | 12 | 12 | 0 | PASS |
|
||||
| Clinical Workflow | 22 | 21 | 1 | 95.5% PASS |
|
||||
| Integration | 6 | 6 | 0 | PASS |
|
||||
|
||||
### Coverage: 84% (target: 80%+)
|
||||
### Verdict: SAFE TO DEPLOY
|
||||
```
|
||||
145
skills/healthcare-phi-compliance/SKILL.md
Normal file
145
skills/healthcare-phi-compliance/SKILL.md
Normal file
@@ -0,0 +1,145 @@
|
||||
---
|
||||
name: healthcare-phi-compliance
|
||||
description: Protected Health Information (PHI) and Personally Identifiable Information (PII) compliance patterns for healthcare applications. Covers data classification, access control, audit trails, encryption, and common leak vectors.
|
||||
origin: Health1 Super Speciality Hospitals — contributed by Dr. Keyur Patel
|
||||
version: "1.0.0"
|
||||
---
|
||||
|
||||
# Healthcare PHI/PII Compliance Patterns
|
||||
|
||||
Patterns for protecting patient data, clinician data, and financial data in healthcare applications. Applicable to HIPAA (US), DISHA (India), GDPR (EU), and general healthcare data protection.
|
||||
|
||||
## When to Use
|
||||
|
||||
- Building any feature that touches patient records
|
||||
- Implementing access control or authentication for clinical systems
|
||||
- Designing database schemas for healthcare data
|
||||
- Building APIs that return patient or clinician data
|
||||
- Implementing audit trails or logging
|
||||
- Reviewing code for data exposure vulnerabilities
|
||||
- Setting up Row-Level Security (RLS) for multi-tenant healthcare systems
|
||||
|
||||
## How It Works
|
||||
|
||||
Healthcare data protection operates on three layers: **classification** (what is sensitive), **access control** (who can see it), and **audit** (who did see it).
|
||||
|
||||
### Data Classification
|
||||
|
||||
**PHI (Protected Health Information)** — any data that can identify a patient AND relates to their health: patient name, date of birth, address, phone, email, national ID numbers (SSN, Aadhaar, NHS number), medical record numbers, diagnoses, medications, lab results, imaging, insurance policy and claim details, appointment and admission records, or any combination of the above.
|
||||
|
||||
**PII (sensitive data that does not identify patients)** in healthcare systems: clinician/staff personal details, doctor fee structures and payout amounts, employee salary and bank details, vendor payment information.
|
||||
|
||||
### Access Control: Row-Level Security
|
||||
|
||||
```sql
|
||||
ALTER TABLE patients ENABLE ROW LEVEL SECURITY;
|
||||
|
||||
-- Scope access by facility
|
||||
CREATE POLICY "staff_read_own_facility"
|
||||
ON patients FOR SELECT TO authenticated
|
||||
USING (facility_id IN (
|
||||
SELECT facility_id FROM staff_assignments
|
||||
WHERE user_id = auth.uid() AND role IN ('doctor','nurse','lab_tech','admin')
|
||||
));
|
||||
|
||||
-- Audit log: insert-only (tamper-proof)
|
||||
CREATE POLICY "audit_insert_only" ON audit_log FOR INSERT
|
||||
TO authenticated WITH CHECK (user_id = auth.uid());
|
||||
CREATE POLICY "audit_no_modify" ON audit_log FOR UPDATE USING (false);
|
||||
CREATE POLICY "audit_no_delete" ON audit_log FOR DELETE USING (false);
|
||||
```
|
||||
|
||||
### Audit Trail
|
||||
|
||||
Every PHI access or modification must be logged:
|
||||
|
||||
```typescript
|
||||
interface AuditEntry {
|
||||
timestamp: string;
|
||||
user_id: string;
|
||||
patient_id: string;
|
||||
action: 'create' | 'read' | 'update' | 'delete' | 'print' | 'export';
|
||||
resource_type: string;
|
||||
resource_id: string;
|
||||
changes?: { before: object; after: object };
|
||||
ip_address: string;
|
||||
session_id: string;
|
||||
}
|
||||
```
|
||||
|
||||
### Common Leak Vectors
|
||||
|
||||
**Error messages:** Never include patient-identifying data in error messages thrown to the client. Log details server-side only.
|
||||
|
||||
**Console output:** Never log full patient objects. Use opaque internal record IDs (UUIDs) — not medical record numbers, national IDs, or names.
|
||||
|
||||
**URL parameters:** Never put patient-identifying data in query strings or path segments that could appear in logs or browser history. Use opaque UUIDs only.
|
||||
|
||||
**Browser storage:** Never store PHI in localStorage or sessionStorage. Keep PHI in memory only, fetch on demand.
|
||||
|
||||
**Service role keys:** Never use the service_role key in client-side code. Always use the anon/publishable key and let RLS enforce access.
|
||||
|
||||
**Logs and monitoring:** Never log full patient records. Use opaque record IDs only (not medical record numbers). Sanitize stack traces before sending to error tracking services.
|
||||
|
||||
### Database Schema Tagging
|
||||
|
||||
Mark PHI/PII columns at the schema level:
|
||||
|
||||
```sql
|
||||
COMMENT ON COLUMN patients.name IS 'PHI: patient_name';
|
||||
COMMENT ON COLUMN patients.dob IS 'PHI: date_of_birth';
|
||||
COMMENT ON COLUMN patients.aadhaar IS 'PHI: national_id';
|
||||
COMMENT ON COLUMN doctor_payouts.amount IS 'PII: financial';
|
||||
```
|
||||
|
||||
### Deployment Checklist
|
||||
|
||||
Before every deployment:
|
||||
- No PHI in error messages or stack traces
|
||||
- No PHI in console.log/console.error
|
||||
- No PHI in URL parameters
|
||||
- No PHI in browser storage
|
||||
- No service_role key in client code
|
||||
- RLS enabled on all PHI/PII tables
|
||||
- Audit trail for all data modifications
|
||||
- Session timeout configured
|
||||
- API authentication on all PHI endpoints
|
||||
- Cross-facility data isolation verified
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Safe vs Unsafe Error Handling
|
||||
|
||||
```typescript
|
||||
// BAD — leaks PHI in error
|
||||
throw new Error(`Patient ${patient.name} not found in ${patient.facility}`);
|
||||
|
||||
// GOOD — generic error, details logged server-side with opaque IDs only
|
||||
logger.error('Patient lookup failed', { recordId: patient.id, facilityId });
|
||||
throw new Error('Record not found');
|
||||
```
|
||||
|
||||
### Example 2: RLS Policy for Multi-Facility Isolation
|
||||
|
||||
```sql
|
||||
-- Doctor at Facility A cannot see Facility B patients
|
||||
CREATE POLICY "facility_isolation"
|
||||
ON patients FOR SELECT TO authenticated
|
||||
USING (facility_id IN (
|
||||
SELECT facility_id FROM staff_assignments WHERE user_id = auth.uid()
|
||||
));
|
||||
|
||||
-- Test: login as doctor-facility-a, query facility-b patients
|
||||
-- Expected: 0 rows returned
|
||||
```
|
||||
|
||||
### Example 3: Safe Logging
|
||||
|
||||
```typescript
|
||||
// BAD — logs identifiable patient data
|
||||
console.log('Processing patient:', patient);
|
||||
|
||||
// GOOD — logs only opaque internal record ID
|
||||
console.log('Processing record:', patient.id);
|
||||
// Note: even patient.id should be an opaque UUID, not a medical record number
|
||||
```
|
||||
133
skills/token-budget-advisor/SKILL.md
Normal file
133
skills/token-budget-advisor/SKILL.md
Normal file
@@ -0,0 +1,133 @@
|
||||
---
|
||||
name: token-budget-advisor
|
||||
description: >-
|
||||
Offers the user an informed choice about how much response depth to
|
||||
consume before answering. Use this skill when the user explicitly
|
||||
wants to control response length, depth, or token budget.
|
||||
TRIGGER when: "token budget", "token count", "token usage", "token limit",
|
||||
"response length", "answer depth", "short version", "brief answer",
|
||||
"detailed answer", "exhaustive answer", "respuesta corta vs larga",
|
||||
"cuántos tokens", "ahorrar tokens", "responde al 50%", "dame la versión
|
||||
corta", "quiero controlar cuánto usas", or clear variants where the
|
||||
user is explicitly asking to control answer size or depth.
|
||||
DO NOT TRIGGER when: user has already specified a level in the current
|
||||
session (maintain it), the request is clearly a one-word answer, or
|
||||
"token" refers to auth/session/payment tokens rather than response size.
|
||||
origin: community
|
||||
---
|
||||
|
||||
# Token Budget Advisor (TBA)
|
||||
|
||||
Intercept the response flow to offer the user a choice about response depth **before** Claude answers.
|
||||
|
||||
## When to Use
|
||||
|
||||
- User wants to control how long or detailed a response is
|
||||
- User mentions tokens, budget, depth, or response length
|
||||
- User says "short version", "tldr", "brief", "al 25%", "exhaustive", etc.
|
||||
- Any time the user wants to choose depth/detail level upfront
|
||||
|
||||
**Do not trigger** when: user already set a level this session (maintain it silently), or the answer is trivially one line.
|
||||
|
||||
## How It Works
|
||||
|
||||
### Step 1 — Estimate input tokens
|
||||
|
||||
Use the repository's canonical context-budget heuristics to estimate the prompt's token count mentally.
|
||||
|
||||
Use the same calibration guidance as [context-budget](../context-budget/SKILL.md):
|
||||
|
||||
- prose: `words × 1.3`
|
||||
- code-heavy or mixed/code blocks: `chars / 4`
|
||||
|
||||
For mixed content, use the dominant content type and keep the estimate heuristic.
|
||||
|
||||
### Step 2 — Estimate response size by complexity
|
||||
|
||||
Classify the prompt, then apply the multiplier range to get the full response window:
|
||||
|
||||
| Complexity | Multiplier range | Example prompts |
|
||||
|--------------|------------------|------------------------------------------------------|
|
||||
| Simple | 3× – 8× | "What is X?", yes/no, single fact |
|
||||
| Medium | 8× – 20× | "How does X work?" |
|
||||
| Medium-High | 10× – 25× | Code request with context |
|
||||
| Complex | 15× – 40× | Multi-part analysis, comparisons, architecture |
|
||||
| Creative | 10× – 30× | Stories, essays, narrative writing |
|
||||
|
||||
Response window = `input_tokens × mult_min` to `input_tokens × mult_max` (but don’t exceed your model’s configured output-token limit).
|
||||
|
||||
### Step 3 — Present depth options
|
||||
|
||||
Present this block **before** answering, using the actual estimated numbers:
|
||||
|
||||
```
|
||||
Analyzing your prompt...
|
||||
|
||||
Input: ~[N] tokens | Type: [type] | Complexity: [level] | Language: [lang]
|
||||
|
||||
Choose your depth level:
|
||||
|
||||
[1] Essential (25%) -> ~[tokens] Direct answer only, no preamble
|
||||
[2] Moderate (50%) -> ~[tokens] Answer + context + 1 example
|
||||
[3] Detailed (75%) -> ~[tokens] Full answer with alternatives
|
||||
[4] Exhaustive (100%) -> ~[tokens] Everything, no limits
|
||||
|
||||
Which level? (1-4 or say "25% depth", "50% depth", "75% depth", "100% depth")
|
||||
|
||||
Precision: heuristic estimate ~85-90% accuracy (±15%).
|
||||
```
|
||||
|
||||
Level token estimates (within the response window):
|
||||
- 25% → `min + (max - min) × 0.25`
|
||||
- 50% → `min + (max - min) × 0.50`
|
||||
- 75% → `min + (max - min) × 0.75`
|
||||
- 100% → `max`
|
||||
|
||||
### Step 4 — Respond at the chosen level
|
||||
|
||||
| Level | Target length | Include | Omit |
|
||||
|------------------|---------------------|-----------------------------------------------------|---------------------------------------------------|
|
||||
| 25% Essential | 2-4 sentences max | Direct answer, key conclusion | Context, examples, nuance, alternatives |
|
||||
| 50% Moderate | 1-3 paragraphs | Answer + necessary context + 1 example | Deep analysis, edge cases, references |
|
||||
| 75% Detailed | Structured response | Multiple examples, pros/cons, alternatives | Extreme edge cases, exhaustive references |
|
||||
| 100% Exhaustive | No restriction | Everything — full analysis, all code, all perspectives | Nothing |
|
||||
|
||||
## Shortcuts — skip the question
|
||||
|
||||
If the user already signals a level, respond at that level immediately without asking:
|
||||
|
||||
| What they say | Level |
|
||||
|----------------------------------------------------|-------|
|
||||
| "1" / "25% depth" / "short version" / "brief answer" / "tldr" | 25% |
|
||||
| "2" / "50% depth" / "moderate depth" / "balanced answer" | 50% |
|
||||
| "3" / "75% depth" / "detailed answer" / "thorough answer" | 75% |
|
||||
| "4" / "100% depth" / "exhaustive answer" / "full deep dive" | 100% |
|
||||
|
||||
If the user set a level earlier in the session, **maintain it silently** for subsequent responses unless they change it.
|
||||
|
||||
## Precision note
|
||||
|
||||
This skill uses heuristic estimation — no real tokenizer. Accuracy ~85-90%, variance ±15%. Always show the disclaimer.
|
||||
|
||||
## Examples
|
||||
|
||||
### Triggers
|
||||
|
||||
- "Give me the short version first."
|
||||
- "How many tokens will your answer use?"
|
||||
- "Respond at 50% depth."
|
||||
- "I want the exhaustive answer, not the summary."
|
||||
- "Dame la versión corta y luego la detallada."
|
||||
|
||||
### Does Not Trigger
|
||||
|
||||
- "What is a JWT token?"
|
||||
- "The checkout flow uses a payment token."
|
||||
- "Is this normal?"
|
||||
- "Complete the refactor."
|
||||
- Follow-up questions after the user already chose a depth for the session
|
||||
|
||||
## Source
|
||||
|
||||
Standalone skill from [TBA — Token Budget Advisor for Claude Code](https://github.com/Xabilimon1/Token-Budget-Advisor-Claude-Code-).
|
||||
Original project also ships a Python estimator script, but this repository keeps the skill self-contained and heuristic-only.
|
||||
@@ -25,6 +25,22 @@ const configPath = path.join(repoRoot, '.codex', 'config.toml');
|
||||
const config = fs.readFileSync(configPath, 'utf8');
|
||||
const codexAgentsDir = path.join(repoRoot, '.codex', 'agents');
|
||||
|
||||
function escapeRegExp(value) {
|
||||
return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
||||
}
|
||||
|
||||
function getTomlSection(text, sectionName) {
|
||||
const escapedSection = escapeRegExp(sectionName);
|
||||
const headerPattern = new RegExp(`^\\s*\\[${escapedSection}\\]\\s*$`, 'm');
|
||||
const headerMatch = headerPattern.exec(text);
|
||||
|
||||
assert.ok(headerMatch, `Expected TOML section to exist: [${sectionName}]`);
|
||||
|
||||
const afterHeader = text.slice(headerMatch.index + headerMatch[0].length);
|
||||
const nextHeaderIndex = afterHeader.search(/^\s*\[/m);
|
||||
return nextHeaderIndex === -1 ? afterHeader : afterHeader.slice(0, nextHeaderIndex);
|
||||
}
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
@@ -47,6 +63,37 @@ if (
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('reference config enables Codex multi-agent support', () => {
|
||||
assert.ok(
|
||||
/^\s*multi_agent\s*=\s*true\s*$/m.test(config),
|
||||
'Expected `.codex/config.toml` to opt into Codex multi-agent collaboration',
|
||||
);
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('reference config wires the sample Codex role files', () => {
|
||||
for (const roleFile of ['explorer.toml', 'reviewer.toml', 'docs-researcher.toml']) {
|
||||
const rolePath = path.join(codexAgentsDir, roleFile);
|
||||
const roleSection = roleFile.replace(/\.toml$/, '').replace(/-/g, '_');
|
||||
const sectionBody = getTomlSection(config, `agents.${roleSection}`);
|
||||
|
||||
assert.ok(fs.existsSync(rolePath), `Expected role config to exist: ${roleFile}`);
|
||||
assert.ok(
|
||||
new RegExp(`^\\s*config_file\\s*=\\s*"agents\\/${escapeRegExp(roleFile)}"\\s*$`, 'm').test(
|
||||
sectionBody,
|
||||
),
|
||||
`Expected \`.codex/config.toml\` to reference ${roleFile} inside [agents.${roleSection}]`,
|
||||
);
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('sample Codex role configs do not use o4-mini', () => {
|
||||
const roleFiles = fs.readdirSync(codexAgentsDir).filter(file => file.endsWith('.toml'));
|
||||
|
||||
@@ -29,11 +29,11 @@ function runScript(input) {
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing doc-file-warning.js ===\n');
|
||||
console.log('\n=== Testing doc-file-warning.js (denylist policy) ===\n');
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
// 1. Allowed standard doc files - no warning in stderr
|
||||
// 1. Standard doc filenames - never on denylist, no warning
|
||||
const standardFiles = [
|
||||
'README.md',
|
||||
'CLAUDE.md',
|
||||
@@ -53,10 +53,12 @@ function runTests() {
|
||||
}) ? passed++ : failed++);
|
||||
}
|
||||
|
||||
// 2. Allowed directory paths - no warning
|
||||
const allowedDirPaths = [
|
||||
// 2. Structured directory paths - no warning even for ad-hoc names
|
||||
const structuredDirPaths = [
|
||||
'docs/foo.md',
|
||||
'docs/guide/setup.md',
|
||||
'docs/TODO.md',
|
||||
'docs/specs/NOTES.md',
|
||||
'skills/bar.md',
|
||||
'skills/testing/tdd.md',
|
||||
'.history/session.md',
|
||||
@@ -64,9 +66,13 @@ function runTests() {
|
||||
'.claude/commands/deploy.md',
|
||||
'.claude/plans/roadmap.md',
|
||||
'.claude/projects/myproject.md',
|
||||
'.github/ISSUE_TEMPLATE/bug.md',
|
||||
'commands/triage.md',
|
||||
'benchmarks/test.md',
|
||||
'templates/DRAFT.md',
|
||||
];
|
||||
for (const file of allowedDirPaths) {
|
||||
(test(`allows directory path: ${file}`, () => {
|
||||
for (const file of structuredDirPaths) {
|
||||
(test(`allows structured directory path: ${file}`, () => {
|
||||
const { code, stderr } = runScript({ tool_input: { file_path: file } });
|
||||
assert.strictEqual(code, 0, `expected exit code 0, got ${code}`);
|
||||
assert.strictEqual(stderr, '', `expected no warning for ${file}, got: ${stderr}`);
|
||||
@@ -96,10 +102,42 @@ function runTests() {
|
||||
}) ? passed++ : failed++);
|
||||
}
|
||||
|
||||
// 5. Non-standard doc files - warning in stderr
|
||||
const nonStandardFiles = ['random-notes.md', 'TODO.md', 'notes.txt', 'scratch.md', 'ideas.txt'];
|
||||
for (const file of nonStandardFiles) {
|
||||
(test(`warns on non-standard doc file: ${file}`, () => {
|
||||
// 5. Lowercase, partial-match, and non-standard extension case - NOT on denylist
|
||||
const allowedNonDenylist = [
|
||||
'random-notes.md',
|
||||
'notes.txt',
|
||||
'scratch.md',
|
||||
'ideas.txt',
|
||||
'todo-list.md',
|
||||
'my-draft.md',
|
||||
'meeting-notes.txt',
|
||||
'TODO.MD',
|
||||
'NOTES.TXT',
|
||||
];
|
||||
for (const file of allowedNonDenylist) {
|
||||
(test(`allows non-denylist doc file: ${file}`, () => {
|
||||
const { code, stderr } = runScript({ tool_input: { file_path: file } });
|
||||
assert.strictEqual(code, 0);
|
||||
assert.strictEqual(stderr, '', `expected no warning for ${file}, got: ${stderr}`);
|
||||
}) ? passed++ : failed++);
|
||||
}
|
||||
|
||||
// 6. Ad-hoc denylist filenames at root/non-structured paths - SHOULD warn
|
||||
const deniedFiles = [
|
||||
'NOTES.md',
|
||||
'TODO.md',
|
||||
'SCRATCH.md',
|
||||
'TEMP.md',
|
||||
'DRAFT.txt',
|
||||
'BRAINSTORM.md',
|
||||
'SPIKE.md',
|
||||
'DEBUG.md',
|
||||
'WIP.txt',
|
||||
'src/NOTES.md',
|
||||
'lib/TODO.txt',
|
||||
];
|
||||
for (const file of deniedFiles) {
|
||||
(test(`warns on ad-hoc denylist file: ${file}`, () => {
|
||||
const { code, stderr } = runScript({ tool_input: { file_path: file } });
|
||||
assert.strictEqual(code, 0, 'should still exit 0 (warn only)');
|
||||
assert.ok(stderr.includes('WARNING'), `expected warning in stderr for ${file}, got: ${stderr}`);
|
||||
@@ -107,7 +145,20 @@ function runTests() {
|
||||
}) ? passed++ : failed++);
|
||||
}
|
||||
|
||||
// 6. Invalid/empty input - passes through without error
|
||||
// 7. Windows backslash paths - normalized correctly
|
||||
(test('allows ad-hoc name in structured dir with backslash path', () => {
|
||||
const { code, stderr } = runScript({ tool_input: { file_path: 'docs\\specs\\NOTES.md' } });
|
||||
assert.strictEqual(code, 0);
|
||||
assert.strictEqual(stderr, '', 'expected no warning for structured dir with backslash');
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('warns on ad-hoc name with backslash in non-structured dir', () => {
|
||||
const { code, stderr } = runScript({ tool_input: { file_path: 'src\\SCRATCH.md' } });
|
||||
assert.strictEqual(code, 0, 'should still exit 0');
|
||||
assert.ok(stderr.includes('WARNING'), 'expected warning for non-structured backslash path');
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
// 8. Invalid/empty input - passes through without error
|
||||
(test('handles empty object input without error', () => {
|
||||
const { code, stderr } = runScript({});
|
||||
assert.strictEqual(code, 0);
|
||||
@@ -126,7 +177,19 @@ function runTests() {
|
||||
assert.strictEqual(stderr, '', `expected no warning for empty file_path, got: ${stderr}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
// 7. Stdout always contains the original input (pass-through)
|
||||
// 9. Malformed input - passes through without error
|
||||
(test('handles non-JSON input without error', () => {
|
||||
const result = spawnSync('node', [script], {
|
||||
encoding: 'utf8',
|
||||
input: 'not-json',
|
||||
timeout: 10000,
|
||||
});
|
||||
assert.strictEqual(result.status || 0, 0);
|
||||
assert.strictEqual(result.stderr || '', '');
|
||||
assert.strictEqual(result.stdout, 'not-json');
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
// 10. Stdout always contains the original input (pass-through)
|
||||
(test('passes through input to stdout for allowed file', () => {
|
||||
const input = { tool_input: { file_path: 'README.md' } };
|
||||
const { stdout } = runScript(input);
|
||||
@@ -134,7 +197,7 @@ function runTests() {
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('passes through input to stdout for warned file', () => {
|
||||
const input = { tool_input: { file_path: 'random-notes.md' } };
|
||||
const input = { tool_input: { file_path: 'TODO.md' } };
|
||||
const { stdout } = runScript(input);
|
||||
assert.strictEqual(stdout, JSON.stringify(input));
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
@@ -1965,7 +1965,29 @@ async function runTests() {
|
||||
passed++;
|
||||
else failed++;
|
||||
if (
|
||||
test('script references use CLAUDE_PLUGIN_ROOT variable or safe SessionStart inline resolver', () => {
|
||||
test('Stop and SessionEnd hooks use safe inline resolvers when plugin root may be unset', () => {
|
||||
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
|
||||
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
|
||||
|
||||
const lifecycleHooks = [
|
||||
...(hooks.hooks.Stop || []).flatMap(entry => entry.hooks || []),
|
||||
...(hooks.hooks.SessionEnd || []).flatMap(entry => entry.hooks || []),
|
||||
].filter(hook => hook.type === 'command');
|
||||
|
||||
assert.ok(lifecycleHooks.length > 0, 'Should define Stop/SessionEnd command hooks');
|
||||
|
||||
for (const hook of lifecycleHooks) {
|
||||
assert.ok(hook.command.startsWith('node -e "'), `Expected inline node resolver: ${hook.command.substring(0, 80)}...`);
|
||||
assert.ok(hook.command.includes('run-with-flags.js'), 'Inline resolver should invoke run-with-flags.js');
|
||||
assert.ok(hook.command.includes('CLAUDE_PLUGIN_ROOT'), 'Inline resolver should consult CLAUDE_PLUGIN_ROOT');
|
||||
assert.ok(hook.command.includes('plugins'), 'Inline resolver should probe known plugin roots');
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
if (
|
||||
test('script references use CLAUDE_PLUGIN_ROOT variable or safe inline resolvers', () => {
|
||||
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
|
||||
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
|
||||
|
||||
@@ -1973,9 +1995,8 @@ async function runTests() {
|
||||
for (const entry of hookArray) {
|
||||
for (const hook of entry.hooks) {
|
||||
if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) {
|
||||
// Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command
|
||||
const isSessionStartInlineResolver = hook.command.startsWith('node -e') && hook.command.includes('session:start') && hook.command.includes('run-with-flags.js');
|
||||
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartInlineResolver;
|
||||
const isInlineResolver = hook.command.startsWith('node -e') && hook.command.includes('run-with-flags.js');
|
||||
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isInlineResolver;
|
||||
assert.ok(hasPluginRoot, `Script paths should use CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...`);
|
||||
}
|
||||
}
|
||||
|
||||
81
tests/hooks/pre-bash-commit-quality.test.js
Normal file
81
tests/hooks/pre-bash-commit-quality.test.js
Normal file
@@ -0,0 +1,81 @@
|
||||
/**
|
||||
* Tests for scripts/hooks/pre-bash-commit-quality.js
|
||||
*
|
||||
* Run with: node tests/hooks/pre-bash-commit-quality.test.js
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const hook = require('../../scripts/hooks/pre-bash-commit-quality');
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` ✓ ${name}`);
|
||||
return true;
|
||||
} catch (err) {
|
||||
console.log(` ✗ ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function inTempRepo(fn) {
|
||||
const prevCwd = process.cwd();
|
||||
const repoDir = fs.mkdtempSync(path.join(os.tmpdir(), 'pre-bash-commit-quality-'));
|
||||
|
||||
try {
|
||||
spawnSync('git', ['init'], { cwd: repoDir, stdio: 'pipe', encoding: 'utf8' });
|
||||
spawnSync('git', ['config', 'user.name', 'ECC Test'], { cwd: repoDir, stdio: 'pipe', encoding: 'utf8' });
|
||||
spawnSync('git', ['config', 'user.email', 'ecc@example.com'], { cwd: repoDir, stdio: 'pipe', encoding: 'utf8' });
|
||||
process.chdir(repoDir);
|
||||
return fn(repoDir);
|
||||
} finally {
|
||||
process.chdir(prevCwd);
|
||||
fs.rmSync(repoDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
console.log('\nPre-Bash Commit Quality Hook Tests');
|
||||
console.log('==================================\n');
|
||||
|
||||
if (test('evaluate blocks commits when staged snapshot contains debugger', () => {
|
||||
inTempRepo(repoDir => {
|
||||
const filePath = path.join(repoDir, 'index.js');
|
||||
fs.writeFileSync(filePath, 'function main() {\n debugger;\n}\n', 'utf8');
|
||||
spawnSync('git', ['add', 'index.js'], { cwd: repoDir, stdio: 'pipe', encoding: 'utf8' });
|
||||
|
||||
const input = JSON.stringify({ tool_input: { command: 'git commit -m "fix: test debugger hook"' } });
|
||||
const result = hook.evaluate(input);
|
||||
|
||||
assert.strictEqual(result.output, input, 'should preserve stdin payload');
|
||||
assert.strictEqual(result.exitCode, 2, 'should block commit when staged snapshot has debugger');
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('evaluate inspects staged snapshot instead of newer working tree content', () => {
|
||||
inTempRepo(repoDir => {
|
||||
const filePath = path.join(repoDir, 'index.js');
|
||||
fs.writeFileSync(filePath, 'function main() {\n return 1;\n}\n', 'utf8');
|
||||
spawnSync('git', ['add', 'index.js'], { cwd: repoDir, stdio: 'pipe', encoding: 'utf8' });
|
||||
|
||||
// Working tree diverges after staging; hook should still inspect staged content.
|
||||
fs.writeFileSync(filePath, 'function main() {\n debugger;\n return 1;\n}\n', 'utf8');
|
||||
|
||||
const input = JSON.stringify({ tool_input: { command: 'git commit -m "fix: staged snapshot only"' } });
|
||||
const result = hook.evaluate(input);
|
||||
|
||||
assert.strictEqual(result.output, input, 'should preserve stdin payload');
|
||||
assert.strictEqual(result.exitCode, 0, 'should ignore unstaged debugger in working tree');
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
@@ -179,6 +179,17 @@ function cleanupTestDir(testDir) {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function stagePluginRuntime(homeDir, relativePaths) {
|
||||
const pluginRoot = path.join(homeDir, '.claude', 'plugins', 'everything-claude-code');
|
||||
for (const relativePath of relativePaths) {
|
||||
const sourcePath = path.join(REPO_ROOT, relativePath);
|
||||
const destinationPath = path.join(pluginRoot, relativePath);
|
||||
fs.mkdirSync(path.dirname(destinationPath), { recursive: true });
|
||||
fs.copyFileSync(sourcePath, destinationPath);
|
||||
}
|
||||
return pluginRoot;
|
||||
}
|
||||
|
||||
function getHookCommandByDescription(hooks, lifecycle, descriptionText) {
|
||||
const hookGroup = hooks.hooks[lifecycle]?.find(
|
||||
entry => entry.description && entry.description.includes(descriptionText)
|
||||
@@ -267,6 +278,35 @@ async function runTests() {
|
||||
assert.strictEqual(payload.hookSpecificOutput.hookEventName, 'SessionStart');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('Stop hook resolves plugin root when CLAUDE_PLUGIN_ROOT is unset', async () => {
|
||||
const homeDir = createTestDir();
|
||||
try {
|
||||
stagePluginRuntime(homeDir, [
|
||||
path.join('scripts', 'hooks', 'run-with-flags.js'),
|
||||
path.join('scripts', 'hooks', 'check-console-log.js'),
|
||||
path.join('scripts', 'lib', 'hook-flags.js'),
|
||||
path.join('scripts', 'lib', 'utils.js'),
|
||||
]);
|
||||
|
||||
const hookCommand = getHookCommandByDescription(
|
||||
hooks,
|
||||
'Stop',
|
||||
'Check for console.log in modified files after each response'
|
||||
);
|
||||
|
||||
const result = await runHookCommand(hookCommand, {}, {
|
||||
HOME: homeDir,
|
||||
CLAUDE_PLUGIN_ROOT: '',
|
||||
});
|
||||
|
||||
assert.strictEqual(result.code, 0, `Expected stop hook to exit 0, got ${result.code}: ${result.stderr}`);
|
||||
assert.ok(!result.stderr.includes("Cannot find module '/scripts/hooks/run-with-flags.js'"), 'Should not resolve to the filesystem root');
|
||||
assert.ok(!result.stderr.includes('could not resolve ECC plugin root'), 'Should discover the plugin root from known install paths');
|
||||
} finally {
|
||||
cleanupTestDir(homeDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('PreCompact hook logs to stderr', async () => {
|
||||
const result = await runHookWithInput(path.join(scriptsDir, 'pre-compact.js'), {});
|
||||
assert.ok(result.stderr.includes('[PreCompact]'), 'Should output to stderr with prefix');
|
||||
|
||||
@@ -10,7 +10,8 @@ const { spawnSync } = require('child_process');
|
||||
|
||||
const repoRoot = path.join(__dirname, '..', '..');
|
||||
const installScript = path.join(repoRoot, 'scripts', 'codex', 'install-global-git-hooks.sh');
|
||||
const installSource = fs.readFileSync(installScript, 'utf8');
|
||||
const syncScript = path.join(repoRoot, 'scripts', 'sync-ecc-to-codex.sh');
|
||||
const checkScript = path.join(repoRoot, 'scripts', 'codex', 'check-codex-global-state.sh');
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
@@ -32,25 +33,15 @@ function cleanup(dirPath) {
|
||||
fs.rmSync(dirPath, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function toBashPath(filePath) {
|
||||
if (process.platform !== 'win32') {
|
||||
return filePath;
|
||||
}
|
||||
|
||||
return String(filePath)
|
||||
.replace(/^([A-Za-z]):/, (_, driveLetter) => `/${driveLetter.toLowerCase()}`)
|
||||
.replace(/\\/g, '/');
|
||||
}
|
||||
|
||||
function runBash(scriptPath, args = [], env = {}, cwd = repoRoot) {
|
||||
return spawnSync('bash', [toBashPath(scriptPath), ...args], {
|
||||
return spawnSync('bash', [scriptPath, ...args], {
|
||||
cwd,
|
||||
env: {
|
||||
...process.env,
|
||||
...env
|
||||
...env,
|
||||
},
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
});
|
||||
}
|
||||
|
||||
@@ -58,24 +49,14 @@ let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
if (
|
||||
test('install-global-git-hooks.sh does not use eval and executes argv directly', () => {
|
||||
assert.ok(!installSource.includes('eval "$*"'), 'Expected installer to avoid eval');
|
||||
assert.ok(installSource.includes(' "$@"'), 'Expected installer to execute argv directly');
|
||||
assert.ok(installSource.includes(`printf ' %q' "$@"`), 'Expected dry-run logging to shell-escape argv');
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('install-global-git-hooks.sh handles shell-sensitive hook paths without shell injection', () => {
|
||||
test('install-global-git-hooks.sh handles quoted hook paths without shell injection', () => {
|
||||
const homeDir = createTempDir('codex-hooks-home-');
|
||||
const weirdHooksDir = path.join(homeDir, "git-hooks 'quoted' & spaced");
|
||||
const weirdHooksDir = path.join(homeDir, 'git-hooks "quoted"');
|
||||
|
||||
try {
|
||||
const result = runBash(installScript, [], {
|
||||
HOME: toBashPath(homeDir),
|
||||
ECC_GLOBAL_HOOKS_DIR: toBashPath(weirdHooksDir)
|
||||
HOME: homeDir,
|
||||
ECC_GLOBAL_HOOKS_DIR: weirdHooksDir,
|
||||
});
|
||||
|
||||
assert.strictEqual(result.status, 0, result.stderr || result.stdout);
|
||||
@@ -89,6 +70,66 @@ if (
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('sync and global sanity checks accept the legacy context7 MCP section', () => {
|
||||
const homeDir = createTempDir('codex-sync-home-');
|
||||
const codexDir = path.join(homeDir, '.codex');
|
||||
const configPath = path.join(codexDir, 'config.toml');
|
||||
const config = [
|
||||
'approval_policy = "on-request"',
|
||||
'sandbox_mode = "workspace-write"',
|
||||
'web_search = "live"',
|
||||
'persistent_instructions = ""',
|
||||
'',
|
||||
'[features]',
|
||||
'multi_agent = true',
|
||||
'',
|
||||
'[profiles.strict]',
|
||||
'approval_policy = "on-request"',
|
||||
'sandbox_mode = "read-only"',
|
||||
'web_search = "cached"',
|
||||
'',
|
||||
'[profiles.yolo]',
|
||||
'approval_policy = "never"',
|
||||
'sandbox_mode = "workspace-write"',
|
||||
'web_search = "live"',
|
||||
'',
|
||||
'[mcp_servers.context7]',
|
||||
'command = "npx"',
|
||||
'args = ["-y", "@upstash/context7-mcp"]',
|
||||
'',
|
||||
'[mcp_servers.github]',
|
||||
'command = "npx"',
|
||||
'args = ["-y", "@modelcontextprotocol/server-github"]',
|
||||
'',
|
||||
'[mcp_servers.memory]',
|
||||
'command = "npx"',
|
||||
'args = ["-y", "@modelcontextprotocol/server-memory"]',
|
||||
'',
|
||||
'[mcp_servers.sequential-thinking]',
|
||||
'command = "npx"',
|
||||
'args = ["-y", "@modelcontextprotocol/server-sequential-thinking"]',
|
||||
'',
|
||||
].join('\n');
|
||||
|
||||
try {
|
||||
fs.mkdirSync(codexDir, { recursive: true });
|
||||
fs.writeFileSync(configPath, config);
|
||||
|
||||
const syncResult = runBash(syncScript, [], { HOME: homeDir, CODEX_HOME: codexDir });
|
||||
assert.strictEqual(syncResult.status, 0, syncResult.stderr || syncResult.stdout);
|
||||
|
||||
const checkResult = runBash(checkScript, [], { HOME: homeDir, CODEX_HOME: codexDir });
|
||||
assert.strictEqual(checkResult.status, 0, checkResult.stderr || checkResult.stdout);
|
||||
assert.match(checkResult.stdout, /MCP section \[mcp_servers\.context7\] or \[mcp_servers\.context7-mcp\] exists/);
|
||||
} finally {
|
||||
cleanup(homeDir);
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
Reference in New Issue
Block a user