mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-30 13:43:26 +08:00
feat: deliver v1.8.0 harness reliability and parity updates
This commit is contained in:
@@ -14,7 +14,7 @@
|
||||
"name": "everything-claude-code",
|
||||
"source": "./",
|
||||
"description": "The most comprehensive Claude Code plugin — 14+ agents, 56+ skills, 33+ commands, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
|
||||
"version": "1.7.0",
|
||||
"version": "1.8.0",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"email": "me@affaanmustafa.com"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"version": "1.7.0",
|
||||
"version": "1.8.0",
|
||||
"description": "Complete collection of battle-tested Claude Code configs from an Anthropic hackathon winner - agents, skills, hooks, and rules evolved over 10+ months of intensive daily use",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
|
||||
@@ -29,13 +29,14 @@ function transformToClaude(cursorInput, overrides = {}) {
|
||||
return {
|
||||
tool_input: {
|
||||
command: cursorInput.command || cursorInput.args?.command || '',
|
||||
file_path: cursorInput.path || cursorInput.file || '',
|
||||
file_path: cursorInput.path || cursorInput.file || cursorInput.args?.filePath || '',
|
||||
...overrides.tool_input,
|
||||
},
|
||||
tool_output: {
|
||||
output: cursorInput.output || cursorInput.result || '',
|
||||
...overrides.tool_output,
|
||||
},
|
||||
transcript_path: cursorInput.transcript_path || cursorInput.transcriptPath || cursorInput.session?.transcript_path || '',
|
||||
_cursor: {
|
||||
conversation_id: cursorInput.conversation_id,
|
||||
hook_event_name: cursorInput.hook_event_name,
|
||||
@@ -59,4 +60,22 @@ function runExistingHook(scriptName, stdinData) {
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { readStdin, getPluginRoot, transformToClaude, runExistingHook };
|
||||
function hookEnabled(hookId, allowedProfiles = ['standard', 'strict']) {
|
||||
const rawProfile = String(process.env.ECC_HOOK_PROFILE || 'standard').toLowerCase();
|
||||
const profile = ['minimal', 'standard', 'strict'].includes(rawProfile) ? rawProfile : 'standard';
|
||||
|
||||
const disabled = new Set(
|
||||
String(process.env.ECC_DISABLED_HOOKS || '')
|
||||
.split(',')
|
||||
.map(v => v.trim().toLowerCase())
|
||||
.filter(Boolean)
|
||||
);
|
||||
|
||||
if (disabled.has(String(hookId || '').toLowerCase())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return allowedProfiles.includes(profile);
|
||||
}
|
||||
|
||||
module.exports = { readStdin, getPluginRoot, transformToClaude, runExistingHook, hookEnabled };
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
#!/usr/bin/env node
|
||||
const { readStdin } = require('./adapter');
|
||||
const { readStdin, hookEnabled } = require('./adapter');
|
||||
|
||||
readStdin().then(raw => {
|
||||
try {
|
||||
const input = JSON.parse(raw);
|
||||
const cmd = input.command || '';
|
||||
const output = input.output || input.result || '';
|
||||
const input = JSON.parse(raw || '{}');
|
||||
const cmd = String(input.command || input.args?.command || '');
|
||||
const output = String(input.output || input.result || '');
|
||||
|
||||
// PR creation logging
|
||||
if (/gh pr create/.test(cmd)) {
|
||||
if (hookEnabled('post:bash:pr-created', ['standard', 'strict']) && /\bgh\s+pr\s+create\b/.test(cmd)) {
|
||||
const m = output.match(/https:\/\/github\.com\/[^/]+\/[^/]+\/pull\/\d+/);
|
||||
if (m) {
|
||||
console.error('[ECC] PR created: ' + m[0]);
|
||||
@@ -17,10 +17,12 @@ readStdin().then(raw => {
|
||||
}
|
||||
}
|
||||
|
||||
// Build completion notice
|
||||
if (/(npm run build|pnpm build|yarn build)/.test(cmd)) {
|
||||
if (hookEnabled('post:bash:build-complete', ['standard', 'strict']) && /(npm run build|pnpm build|yarn build)/.test(cmd)) {
|
||||
console.error('[ECC] Build completed');
|
||||
}
|
||||
} catch {}
|
||||
} catch {
|
||||
// noop
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
}).catch(() => process.exit(0));
|
||||
|
||||
@@ -1,27 +1,72 @@
|
||||
#!/usr/bin/env node
|
||||
const { readStdin } = require('./adapter');
|
||||
readStdin().then(raw => {
|
||||
try {
|
||||
const input = JSON.parse(raw);
|
||||
const cmd = input.command || '';
|
||||
const { readStdin, hookEnabled } = require('./adapter');
|
||||
|
||||
// 1. Block dev server outside tmux
|
||||
if (process.platform !== 'win32' && /(npm run dev\b|pnpm( run)? dev\b|yarn dev\b|bun run dev\b)/.test(cmd)) {
|
||||
console.error('[ECC] BLOCKED: Dev server must run in tmux for log access');
|
||||
console.error('[ECC] Use: tmux new-session -d -s dev "npm run dev"');
|
||||
process.exit(2);
|
||||
function splitShellSegments(command) {
|
||||
const segments = [];
|
||||
let current = '';
|
||||
let quote = null;
|
||||
|
||||
for (let i = 0; i < command.length; i++) {
|
||||
const ch = command[i];
|
||||
if (quote) {
|
||||
if (ch === quote) quote = null;
|
||||
current += ch;
|
||||
continue;
|
||||
}
|
||||
|
||||
// 2. Tmux reminder for long-running commands
|
||||
if (process.platform !== 'win32' && !process.env.TMUX &&
|
||||
/(npm (install|test)|pnpm (install|test)|yarn (install|test)?|bun (install|test)|cargo build|make\b|docker\b|pytest|vitest|playwright)/.test(cmd)) {
|
||||
if (ch === '"' || ch === "'") {
|
||||
quote = ch;
|
||||
current += ch;
|
||||
continue;
|
||||
}
|
||||
|
||||
const next = command[i + 1] || '';
|
||||
if (ch === ';' || (ch === '&' && next === '&') || (ch === '|' && next === '|') || (ch === '&' && next !== '&')) {
|
||||
if (current.trim()) segments.push(current.trim());
|
||||
current = '';
|
||||
if ((ch === '&' && next === '&') || (ch === '|' && next === '|')) i++;
|
||||
continue;
|
||||
}
|
||||
|
||||
current += ch;
|
||||
}
|
||||
|
||||
if (current.trim()) segments.push(current.trim());
|
||||
return segments;
|
||||
}
|
||||
|
||||
readStdin().then(raw => {
|
||||
try {
|
||||
const input = JSON.parse(raw || '{}');
|
||||
const cmd = String(input.command || input.args?.command || '');
|
||||
|
||||
if (hookEnabled('pre:bash:dev-server-block', ['standard', 'strict']) && process.platform !== 'win32') {
|
||||
const segments = splitShellSegments(cmd);
|
||||
const tmuxLauncher = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
|
||||
const devPattern = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn\s+dev|bun\s+run\s+dev)\b/;
|
||||
const hasBlockedDev = segments.some(segment => devPattern.test(segment) && !tmuxLauncher.test(segment));
|
||||
if (hasBlockedDev) {
|
||||
console.error('[ECC] BLOCKED: Dev server must run in tmux for log access');
|
||||
console.error('[ECC] Use: tmux new-session -d -s dev "npm run dev"');
|
||||
process.exit(2);
|
||||
}
|
||||
}
|
||||
|
||||
if (
|
||||
hookEnabled('pre:bash:tmux-reminder', ['strict']) &&
|
||||
process.platform !== 'win32' &&
|
||||
!process.env.TMUX &&
|
||||
/(npm (install|test)|pnpm (install|test)|yarn (install|test)?|bun (install|test)|cargo build|make\b|docker\b|pytest|vitest|playwright)/.test(cmd)
|
||||
) {
|
||||
console.error('[ECC] Consider running in tmux for session persistence');
|
||||
}
|
||||
|
||||
// 3. Git push review reminder
|
||||
if (/git push/.test(cmd)) {
|
||||
if (hookEnabled('pre:bash:git-push-reminder', ['strict']) && /\bgit\s+push\b/.test(cmd)) {
|
||||
console.error('[ECC] Review changes before push: git diff origin/main...HEAD');
|
||||
}
|
||||
} catch {}
|
||||
} catch {
|
||||
// noop
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
}).catch(() => process.exit(0));
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
#!/usr/bin/env node
|
||||
const { readStdin, runExistingHook, transformToClaude } = require('./adapter');
|
||||
const { readStdin, runExistingHook, transformToClaude, hookEnabled } = require('./adapter');
|
||||
readStdin().then(raw => {
|
||||
const input = JSON.parse(raw);
|
||||
const input = JSON.parse(raw || '{}');
|
||||
const claudeInput = transformToClaude(input);
|
||||
runExistingHook('session-end.js', claudeInput);
|
||||
runExistingHook('evaluate-session.js', claudeInput);
|
||||
if (hookEnabled('session:end:marker', ['minimal', 'standard', 'strict'])) {
|
||||
runExistingHook('session-end-marker.js', claudeInput);
|
||||
}
|
||||
process.stdout.write(raw);
|
||||
}).catch(() => process.exit(0));
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
#!/usr/bin/env node
|
||||
const { readStdin, runExistingHook, transformToClaude } = require('./adapter');
|
||||
const { readStdin, runExistingHook, transformToClaude, hookEnabled } = require('./adapter');
|
||||
readStdin().then(raw => {
|
||||
const input = JSON.parse(raw);
|
||||
const input = JSON.parse(raw || '{}');
|
||||
const claudeInput = transformToClaude(input);
|
||||
runExistingHook('session-start.js', claudeInput);
|
||||
if (hookEnabled('session:start', ['minimal', 'standard', 'strict'])) {
|
||||
runExistingHook('session-start.js', claudeInput);
|
||||
}
|
||||
process.stdout.write(raw);
|
||||
}).catch(() => process.exit(0));
|
||||
|
||||
@@ -1,7 +1,21 @@
|
||||
#!/usr/bin/env node
|
||||
const { readStdin, runExistingHook, transformToClaude } = require('./adapter');
|
||||
const { readStdin, runExistingHook, transformToClaude, hookEnabled } = require('./adapter');
|
||||
readStdin().then(raw => {
|
||||
const claudeInput = JSON.parse(raw || '{}');
|
||||
runExistingHook('check-console-log.js', transformToClaude(claudeInput));
|
||||
const input = JSON.parse(raw || '{}');
|
||||
const claudeInput = transformToClaude(input);
|
||||
|
||||
if (hookEnabled('stop:check-console-log', ['standard', 'strict'])) {
|
||||
runExistingHook('check-console-log.js', claudeInput);
|
||||
}
|
||||
if (hookEnabled('stop:session-end', ['minimal', 'standard', 'strict'])) {
|
||||
runExistingHook('session-end.js', claudeInput);
|
||||
}
|
||||
if (hookEnabled('stop:evaluate-session', ['minimal', 'standard', 'strict'])) {
|
||||
runExistingHook('evaluate-session.js', claudeInput);
|
||||
}
|
||||
if (hookEnabled('stop:cost-tracker', ['minimal', 'standard', 'strict'])) {
|
||||
runExistingHook('cost-tracker.js', claudeInput);
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
}).catch(() => process.exit(0));
|
||||
|
||||
20
.github/release.yml
vendored
Normal file
20
.github/release.yml
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
changelog:
|
||||
categories:
|
||||
- title: Core Harness
|
||||
labels:
|
||||
- enhancement
|
||||
- feature
|
||||
- title: Reliability & Bug Fixes
|
||||
labels:
|
||||
- bug
|
||||
- fix
|
||||
- title: Docs & Guides
|
||||
labels:
|
||||
- docs
|
||||
- title: Tooling & CI
|
||||
labels:
|
||||
- ci
|
||||
- chore
|
||||
exclude:
|
||||
labels:
|
||||
- skip-changelog
|
||||
38
.github/workflows/release.yml
vendored
38
.github/workflows/release.yml
vendored
@@ -37,23 +37,31 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Generate changelog
|
||||
id: changelog
|
||||
- name: Generate release highlights
|
||||
id: highlights
|
||||
env:
|
||||
TAG_NAME: ${{ github.ref_name }}
|
||||
run: |
|
||||
PREV_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
|
||||
if [ -z "$PREV_TAG" ]; then
|
||||
COMMITS=$(git log --pretty=format:"- %s" HEAD)
|
||||
else
|
||||
COMMITS=$(git log --pretty=format:"- %s" ${PREV_TAG}..HEAD)
|
||||
fi
|
||||
echo "commits<<EOF" >> $GITHUB_OUTPUT
|
||||
echo "$COMMITS" >> $GITHUB_OUTPUT
|
||||
echo "EOF" >> $GITHUB_OUTPUT
|
||||
TAG_VERSION="${TAG_NAME#v}"
|
||||
cat > release_body.md <<EOF
|
||||
## ECC ${TAG_VERSION}
|
||||
|
||||
### What This Release Focuses On
|
||||
- Harness reliability and hook stability across Claude Code, Cursor, OpenCode, and Codex
|
||||
- Stronger eval-driven workflows and quality gates
|
||||
- Better operator UX for autonomous loop execution
|
||||
|
||||
### Notable Changes
|
||||
- Session persistence and hook lifecycle fixes
|
||||
- Expanded skills and command coverage for harness performance work
|
||||
- Improved release-note generation and changelog hygiene
|
||||
|
||||
### Notes
|
||||
- For migration tips and compatibility notes, see README and CHANGELOG.
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
body: |
|
||||
## Changes
|
||||
${{ steps.changelog.outputs.commits }}
|
||||
generate_release_notes: false
|
||||
body_path: release_body.md
|
||||
generate_release_notes: true
|
||||
|
||||
29
.github/workflows/reusable-release.yml
vendored
29
.github/workflows/reusable-release.yml
vendored
@@ -34,26 +34,23 @@ jobs:
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Generate changelog
|
||||
id: changelog
|
||||
- name: Generate release highlights
|
||||
env:
|
||||
TAG_NAME: ${{ inputs.tag }}
|
||||
run: |
|
||||
PREV_TAG=$(git describe --tags --abbrev=0 HEAD^ 2>/dev/null || echo "")
|
||||
if [ -z "$PREV_TAG" ]; then
|
||||
COMMITS=$(git log --pretty=format:"- %s" HEAD)
|
||||
else
|
||||
COMMITS=$(git log --pretty=format:"- %s" ${PREV_TAG}..HEAD)
|
||||
fi
|
||||
# Use unique delimiter to prevent truncation if commit messages contain EOF
|
||||
DELIMITER="COMMITS_END_$(date +%s)"
|
||||
echo "commits<<${DELIMITER}" >> $GITHUB_OUTPUT
|
||||
echo "$COMMITS" >> $GITHUB_OUTPUT
|
||||
echo "${DELIMITER}" >> $GITHUB_OUTPUT
|
||||
TAG_VERSION="${TAG_NAME#v}"
|
||||
cat > release_body.md <<EOF
|
||||
## ECC ${TAG_VERSION}
|
||||
|
||||
### What This Release Focuses On
|
||||
- Harness reliability and cross-platform compatibility
|
||||
- Eval-driven quality improvements
|
||||
- Better workflow and operator ergonomics
|
||||
EOF
|
||||
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: ${{ inputs.tag }}
|
||||
body: |
|
||||
## Changes
|
||||
${{ steps.changelog.outputs.commits }}
|
||||
body_path: release_body.md
|
||||
generate_release_notes: ${{ inputs.generate-notes }}
|
||||
|
||||
@@ -67,7 +67,7 @@ opencode
|
||||
| go-build-resolver | Go build errors |
|
||||
| database-reviewer | Database optimization |
|
||||
|
||||
### Commands (24)
|
||||
### Commands (31)
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
@@ -97,6 +97,11 @@ opencode
|
||||
| `/evolve` | Cluster instincts |
|
||||
| `/promote` | Promote project instincts |
|
||||
| `/projects` | List known projects |
|
||||
| `/harness-audit` | Audit harness reliability and eval readiness |
|
||||
| `/loop-start` | Start controlled agentic loops |
|
||||
| `/loop-status` | Check loop state and checkpoints |
|
||||
| `/quality-gate` | Run quality gates on file/repo scope |
|
||||
| `/model-route` | Route tasks by model and budget |
|
||||
|
||||
### Plugin Hooks
|
||||
|
||||
@@ -130,6 +135,18 @@ OpenCode's plugin system maps to Claude Code hooks:
|
||||
|
||||
OpenCode has 20+ additional events not available in Claude Code.
|
||||
|
||||
### Hook Runtime Controls
|
||||
|
||||
OpenCode plugin hooks honor the same runtime controls used by Claude Code/Cursor:
|
||||
|
||||
```bash
|
||||
export ECC_HOOK_PROFILE=standard
|
||||
export ECC_DISABLED_HOOKS="pre:bash:tmux-reminder,post:edit:typecheck"
|
||||
```
|
||||
|
||||
- `ECC_HOOK_PROFILE`: `minimal`, `standard` (default), `strict`
|
||||
- `ECC_DISABLED_HOOKS`: comma-separated hook IDs to disable
|
||||
|
||||
## Skills
|
||||
|
||||
The default OpenCode config loads 11 curated ECC skills via the `instructions` array:
|
||||
|
||||
58
.opencode/commands/harness-audit.md
Normal file
58
.opencode/commands/harness-audit.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# Harness Audit Command
|
||||
|
||||
Audit the current repository's agent harness setup and return a prioritized scorecard.
|
||||
|
||||
## Usage
|
||||
|
||||
`/harness-audit [scope] [--format text|json]`
|
||||
|
||||
- `scope` (optional): `repo` (default), `hooks`, `skills`, `commands`, `agents`
|
||||
- `--format`: output style (`text` default, `json` for automation)
|
||||
|
||||
## What to Evaluate
|
||||
|
||||
Score each category from `0` to `10`:
|
||||
|
||||
1. Tool Coverage
|
||||
2. Context Efficiency
|
||||
3. Quality Gates
|
||||
4. Memory Persistence
|
||||
5. Eval Coverage
|
||||
6. Security Guardrails
|
||||
7. Cost Efficiency
|
||||
|
||||
## Output Contract
|
||||
|
||||
Return:
|
||||
|
||||
1. `overall_score` out of 70
|
||||
2. Category scores and concrete findings
|
||||
3. Top 3 actions with exact file paths
|
||||
4. Suggested ECC skills to apply next
|
||||
|
||||
## Checklist
|
||||
|
||||
- Inspect `hooks/hooks.json`, `scripts/hooks/`, and hook tests.
|
||||
- Inspect `skills/`, command coverage, and agent coverage.
|
||||
- Verify cross-harness parity for `.cursor/`, `.opencode/`, `.codex/`.
|
||||
- Flag broken or stale references.
|
||||
|
||||
## Example Result
|
||||
|
||||
```text
|
||||
Harness Audit (repo): 52/70
|
||||
- Quality Gates: 9/10
|
||||
- Eval Coverage: 6/10
|
||||
- Cost Efficiency: 4/10
|
||||
|
||||
Top 3 Actions:
|
||||
1) Add cost tracking hook in scripts/hooks/cost-tracker.js
|
||||
2) Add pass@k docs and templates in skills/eval-harness/SKILL.md
|
||||
3) Add command parity for /harness-audit in .opencode/commands/
|
||||
```
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `repo|hooks|skills|commands|agents` (optional scope)
|
||||
- `--format text|json` (optional output format)
|
||||
32
.opencode/commands/loop-start.md
Normal file
32
.opencode/commands/loop-start.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Loop Start Command
|
||||
|
||||
Start a managed autonomous loop pattern with safety defaults.
|
||||
|
||||
## Usage
|
||||
|
||||
`/loop-start [pattern] [--mode safe|fast]`
|
||||
|
||||
- `pattern`: `sequential`, `continuous-pr`, `rfc-dag`, `infinite`
|
||||
- `--mode`:
|
||||
- `safe` (default): strict quality gates and checkpoints
|
||||
- `fast`: reduced gates for speed
|
||||
|
||||
## Flow
|
||||
|
||||
1. Confirm repository state and branch strategy.
|
||||
2. Select loop pattern and model tier strategy.
|
||||
3. Enable required hooks/profile for the chosen mode.
|
||||
4. Create loop plan and write runbook under `.claude/plans/`.
|
||||
5. Print commands to start and monitor the loop.
|
||||
|
||||
## Required Safety Checks
|
||||
|
||||
- Verify tests pass before first loop iteration.
|
||||
- Ensure `ECC_HOOK_PROFILE` is not disabled globally.
|
||||
- Ensure loop has explicit stop condition.
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `<pattern>` optional (`sequential|continuous-pr|rfc-dag|infinite`)
|
||||
- `--mode safe|fast` optional
|
||||
24
.opencode/commands/loop-status.md
Normal file
24
.opencode/commands/loop-status.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Loop Status Command
|
||||
|
||||
Inspect active loop state, progress, and failure signals.
|
||||
|
||||
## Usage
|
||||
|
||||
`/loop-status [--watch]`
|
||||
|
||||
## What to Report
|
||||
|
||||
- active loop pattern
|
||||
- current phase and last successful checkpoint
|
||||
- failing checks (if any)
|
||||
- estimated time/cost drift
|
||||
- recommended intervention (continue/pause/stop)
|
||||
|
||||
## Watch Mode
|
||||
|
||||
When `--watch` is present, refresh status periodically and surface state changes.
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `--watch` optional
|
||||
26
.opencode/commands/model-route.md
Normal file
26
.opencode/commands/model-route.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# Model Route Command
|
||||
|
||||
Recommend the best model tier for the current task by complexity and budget.
|
||||
|
||||
## Usage
|
||||
|
||||
`/model-route [task-description] [--budget low|med|high]`
|
||||
|
||||
## Routing Heuristic
|
||||
|
||||
- `haiku`: deterministic, low-risk mechanical changes
|
||||
- `sonnet`: default for implementation and refactors
|
||||
- `opus`: architecture, deep review, ambiguous requirements
|
||||
|
||||
## Required Output
|
||||
|
||||
- recommended model
|
||||
- confidence level
|
||||
- why this model fits
|
||||
- fallback model if first attempt fails
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `[task-description]` optional free-text
|
||||
- `--budget low|med|high` optional
|
||||
29
.opencode/commands/quality-gate.md
Normal file
29
.opencode/commands/quality-gate.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Quality Gate Command
|
||||
|
||||
Run the ECC quality pipeline on demand for a file or project scope.
|
||||
|
||||
## Usage
|
||||
|
||||
`/quality-gate [path|.] [--fix] [--strict]`
|
||||
|
||||
- default target: current directory (`.`)
|
||||
- `--fix`: allow auto-format/fix where configured
|
||||
- `--strict`: fail on warnings where supported
|
||||
|
||||
## Pipeline
|
||||
|
||||
1. Detect language/tooling for target.
|
||||
2. Run formatter checks.
|
||||
3. Run lint/type checks when available.
|
||||
4. Produce a concise remediation list.
|
||||
|
||||
## Notes
|
||||
|
||||
This command mirrors hook behavior but is operator-invoked.
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `[path|.]` optional target path
|
||||
- `--fix` optional
|
||||
- `--strict` optional
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "ecc-universal",
|
||||
"version": "1.7.0",
|
||||
"version": "1.8.0",
|
||||
"description": "Everything Claude Code (ECC) plugin for OpenCode - agents, commands, hooks, and skills",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
|
||||
@@ -21,6 +21,8 @@ export const ECCHooksPlugin = async ({
|
||||
directory,
|
||||
worktree,
|
||||
}: PluginInput) => {
|
||||
type HookProfile = "minimal" | "standard" | "strict"
|
||||
|
||||
// Track files edited in current session for console.log audit
|
||||
const editedFiles = new Set<string>()
|
||||
|
||||
@@ -28,6 +30,40 @@ export const ECCHooksPlugin = async ({
|
||||
const log = (level: "debug" | "info" | "warn" | "error", message: string) =>
|
||||
client.app.log({ body: { service: "ecc", level, message } })
|
||||
|
||||
const normalizeProfile = (value: string | undefined): HookProfile => {
|
||||
if (value === "minimal" || value === "strict") return value
|
||||
return "standard"
|
||||
}
|
||||
|
||||
const currentProfile = normalizeProfile(process.env.ECC_HOOK_PROFILE)
|
||||
const disabledHooks = new Set(
|
||||
(process.env.ECC_DISABLED_HOOKS || "")
|
||||
.split(",")
|
||||
.map((item) => item.trim())
|
||||
.filter(Boolean)
|
||||
)
|
||||
|
||||
const profileOrder: Record<HookProfile, number> = {
|
||||
minimal: 0,
|
||||
standard: 1,
|
||||
strict: 2,
|
||||
}
|
||||
|
||||
const profileAllowed = (required: HookProfile | HookProfile[]): boolean => {
|
||||
if (Array.isArray(required)) {
|
||||
return required.some((entry) => profileOrder[currentProfile] >= profileOrder[entry])
|
||||
}
|
||||
return profileOrder[currentProfile] >= profileOrder[required]
|
||||
}
|
||||
|
||||
const hookEnabled = (
|
||||
hookId: string,
|
||||
requiredProfile: HookProfile | HookProfile[] = "standard"
|
||||
): boolean => {
|
||||
if (disabledHooks.has(hookId)) return false
|
||||
return profileAllowed(requiredProfile)
|
||||
}
|
||||
|
||||
return {
|
||||
/**
|
||||
* Prettier Auto-Format Hook
|
||||
@@ -41,7 +77,7 @@ export const ECCHooksPlugin = async ({
|
||||
editedFiles.add(event.path)
|
||||
|
||||
// Auto-format JS/TS files
|
||||
if (event.path.match(/\.(ts|tsx|js|jsx)$/)) {
|
||||
if (hookEnabled("post:edit:format", ["standard", "strict"]) && event.path.match(/\.(ts|tsx|js|jsx)$/)) {
|
||||
try {
|
||||
await $`prettier --write ${event.path} 2>/dev/null`
|
||||
log("info", `[ECC] Formatted: ${event.path}`)
|
||||
@@ -51,7 +87,7 @@ export const ECCHooksPlugin = async ({
|
||||
}
|
||||
|
||||
// Console.log warning check
|
||||
if (event.path.match(/\.(ts|tsx|js|jsx)$/)) {
|
||||
if (hookEnabled("post:edit:console-warn", ["standard", "strict"]) && event.path.match(/\.(ts|tsx|js|jsx)$/)) {
|
||||
try {
|
||||
const result = await $`grep -n "console\\.log" ${event.path} 2>/dev/null`.text()
|
||||
if (result.trim()) {
|
||||
@@ -80,6 +116,7 @@ export const ECCHooksPlugin = async ({
|
||||
) => {
|
||||
// Check if a TypeScript file was edited
|
||||
if (
|
||||
hookEnabled("post:edit:typecheck", ["standard", "strict"]) &&
|
||||
input.tool === "edit" &&
|
||||
input.args?.filePath?.match(/\.tsx?$/)
|
||||
) {
|
||||
@@ -98,7 +135,11 @@ export const ECCHooksPlugin = async ({
|
||||
}
|
||||
|
||||
// PR creation logging
|
||||
if (input.tool === "bash" && input.args?.toString().includes("gh pr create")) {
|
||||
if (
|
||||
hookEnabled("post:bash:pr-created", ["standard", "strict"]) &&
|
||||
input.tool === "bash" &&
|
||||
input.args?.toString().includes("gh pr create")
|
||||
) {
|
||||
log("info", "[ECC] PR created - check GitHub Actions status")
|
||||
}
|
||||
},
|
||||
@@ -115,6 +156,7 @@ export const ECCHooksPlugin = async ({
|
||||
) => {
|
||||
// Git push review reminder
|
||||
if (
|
||||
hookEnabled("pre:bash:git-push-reminder", "strict") &&
|
||||
input.tool === "bash" &&
|
||||
input.args?.toString().includes("git push")
|
||||
) {
|
||||
@@ -126,6 +168,7 @@ export const ECCHooksPlugin = async ({
|
||||
|
||||
// Block creation of unnecessary documentation files
|
||||
if (
|
||||
hookEnabled("pre:write:doc-file-warning", ["standard", "strict"]) &&
|
||||
input.tool === "write" &&
|
||||
input.args?.filePath &&
|
||||
typeof input.args.filePath === "string"
|
||||
@@ -146,7 +189,7 @@ export const ECCHooksPlugin = async ({
|
||||
}
|
||||
|
||||
// Long-running command reminder
|
||||
if (input.tool === "bash") {
|
||||
if (hookEnabled("pre:bash:tmux-reminder", "strict") && input.tool === "bash") {
|
||||
const cmd = String(input.args?.command || input.args || "")
|
||||
if (
|
||||
cmd.match(/^(npm|pnpm|yarn|bun)\s+(install|build|test|run)/) ||
|
||||
@@ -169,7 +212,9 @@ export const ECCHooksPlugin = async ({
|
||||
* Action: Loads context and displays welcome message
|
||||
*/
|
||||
"session.created": async () => {
|
||||
log("info", "[ECC] Session started - Everything Claude Code hooks active")
|
||||
if (!hookEnabled("session:start", ["minimal", "standard", "strict"])) return
|
||||
|
||||
log("info", `[ECC] Session started - profile=${currentProfile}`)
|
||||
|
||||
// Check for project-specific context files
|
||||
try {
|
||||
@@ -190,6 +235,7 @@ export const ECCHooksPlugin = async ({
|
||||
* Action: Runs console.log audit on all edited files
|
||||
*/
|
||||
"session.idle": async () => {
|
||||
if (!hookEnabled("stop:check-console-log", ["minimal", "standard", "strict"])) return
|
||||
if (editedFiles.size === 0) return
|
||||
|
||||
log("info", "[ECC] Session idle - running console.log audit")
|
||||
@@ -244,6 +290,7 @@ export const ECCHooksPlugin = async ({
|
||||
* Action: Final cleanup and state saving
|
||||
*/
|
||||
"session.deleted": async () => {
|
||||
if (!hookEnabled("session:end-marker", ["minimal", "standard", "strict"])) return
|
||||
log("info", "[ECC] Session ended - cleaning up")
|
||||
editedFiles.clear()
|
||||
},
|
||||
@@ -285,8 +332,10 @@ export const ECCHooksPlugin = async ({
|
||||
*/
|
||||
"shell.env": async () => {
|
||||
const env: Record<string, string> = {
|
||||
ECC_VERSION: "1.6.0",
|
||||
ECC_VERSION: "1.8.0",
|
||||
ECC_PLUGIN: "true",
|
||||
ECC_HOOK_PROFILE: currentProfile,
|
||||
ECC_DISABLED_HOOKS: process.env.ECC_DISABLED_HOOKS || "",
|
||||
PROJECT_ROOT: worktree || directory,
|
||||
}
|
||||
|
||||
@@ -343,7 +392,7 @@ export const ECCHooksPlugin = async ({
|
||||
const contextBlock = [
|
||||
"# ECC Context (preserve across compaction)",
|
||||
"",
|
||||
"## Active Plugin: Everything Claude Code v1.6.0",
|
||||
"## Active Plugin: Everything Claude Code v1.8.0",
|
||||
"- Hooks: file.edited, tool.execute.before/after, session.created/idle/deleted, shell.env, compacting, permission.ask",
|
||||
"- Tools: run-tests, check-coverage, security-audit, format-code, lint-check, git-summary",
|
||||
"- Agents: 13 specialized (planner, architect, tdd-guide, code-reviewer, security-reviewer, build-error-resolver, e2e-runner, refactor-cleaner, doc-updater, go-reviewer, go-build-resolver, database-reviewer, python-reviewer)",
|
||||
|
||||
@@ -1,66 +1,68 @@
|
||||
/**
|
||||
* ECC Custom Tool: Format Code
|
||||
*
|
||||
* Language-aware code formatter that auto-detects the project's formatter.
|
||||
* Supports: Biome/Prettier (JS/TS), Black (Python), gofmt (Go), rustfmt (Rust)
|
||||
* Returns the formatter command that should be run for a given file.
|
||||
* This avoids shell execution assumptions while still giving precise guidance.
|
||||
*/
|
||||
|
||||
import { tool } from "@opencode-ai/plugin"
|
||||
import { z } from "zod"
|
||||
import { tool } from "@opencode-ai/plugin/tool"
|
||||
import * as path from "path"
|
||||
import * as fs from "fs"
|
||||
|
||||
type Formatter = "biome" | "prettier" | "black" | "gofmt" | "rustfmt"
|
||||
|
||||
export default tool({
|
||||
name: "format-code",
|
||||
description: "Format a file using the project's configured formatter. Auto-detects Biome, Prettier, Black, gofmt, or rustfmt.",
|
||||
parameters: z.object({
|
||||
filePath: z.string().describe("Path to the file to format"),
|
||||
formatter: z.string().optional().describe("Override formatter: biome, prettier, black, gofmt, rustfmt (default: auto-detect)"),
|
||||
}),
|
||||
execute: async ({ filePath, formatter }, { $ }) => {
|
||||
const ext = filePath.split(".").pop()?.toLowerCase() || ""
|
||||
|
||||
// Auto-detect formatter based on file extension and config files
|
||||
let detected = formatter
|
||||
if (!detected) {
|
||||
if (["ts", "tsx", "js", "jsx", "json", "css", "scss"].includes(ext)) {
|
||||
// Check for Biome first, then Prettier
|
||||
try {
|
||||
await $`test -f biome.json || test -f biome.jsonc`
|
||||
detected = "biome"
|
||||
} catch {
|
||||
detected = "prettier"
|
||||
}
|
||||
} else if (["py", "pyi"].includes(ext)) {
|
||||
detected = "black"
|
||||
} else if (ext === "go") {
|
||||
detected = "gofmt"
|
||||
} else if (ext === "rs") {
|
||||
detected = "rustfmt"
|
||||
}
|
||||
}
|
||||
description:
|
||||
"Detect formatter for a file and return the exact command to run (Biome, Prettier, Black, gofmt, rustfmt).",
|
||||
args: {
|
||||
filePath: tool.schema.string().describe("Path to the file to format"),
|
||||
formatter: tool.schema
|
||||
.enum(["biome", "prettier", "black", "gofmt", "rustfmt"])
|
||||
.optional()
|
||||
.describe("Optional formatter override"),
|
||||
},
|
||||
async execute(args, context) {
|
||||
const cwd = context.worktree || context.directory
|
||||
const ext = args.filePath.split(".").pop()?.toLowerCase() || ""
|
||||
const detected = args.formatter || detectFormatter(cwd, ext)
|
||||
|
||||
if (!detected) {
|
||||
return { formatted: false, message: `No formatter detected for .${ext} files` }
|
||||
return JSON.stringify({
|
||||
success: false,
|
||||
message: `No formatter detected for .${ext} files`,
|
||||
})
|
||||
}
|
||||
|
||||
const commands: Record<string, string> = {
|
||||
biome: `npx @biomejs/biome format --write ${filePath}`,
|
||||
prettier: `npx prettier --write ${filePath}`,
|
||||
black: `black ${filePath}`,
|
||||
gofmt: `gofmt -w ${filePath}`,
|
||||
rustfmt: `rustfmt ${filePath}`,
|
||||
}
|
||||
|
||||
const cmd = commands[detected]
|
||||
if (!cmd) {
|
||||
return { formatted: false, message: `Unknown formatter: ${detected}` }
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await $`${cmd}`.text()
|
||||
return { formatted: true, formatter: detected, output: result }
|
||||
} catch (error: unknown) {
|
||||
const err = error as { stderr?: string }
|
||||
return { formatted: false, formatter: detected, error: err.stderr || "Format failed" }
|
||||
}
|
||||
const command = buildFormatterCommand(detected, args.filePath)
|
||||
return JSON.stringify({
|
||||
success: true,
|
||||
formatter: detected,
|
||||
command,
|
||||
instructions: `Run this command:\n\n${command}`,
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
function detectFormatter(cwd: string, ext: string): Formatter | null {
|
||||
if (["ts", "tsx", "js", "jsx", "json", "css", "scss", "md", "yaml", "yml"].includes(ext)) {
|
||||
if (fs.existsSync(path.join(cwd, "biome.json")) || fs.existsSync(path.join(cwd, "biome.jsonc"))) {
|
||||
return "biome"
|
||||
}
|
||||
return "prettier"
|
||||
}
|
||||
if (["py", "pyi"].includes(ext)) return "black"
|
||||
if (ext === "go") return "gofmt"
|
||||
if (ext === "rs") return "rustfmt"
|
||||
return null
|
||||
}
|
||||
|
||||
function buildFormatterCommand(formatter: Formatter, filePath: string): string {
|
||||
const commands: Record<Formatter, string> = {
|
||||
biome: `npx @biomejs/biome format --write ${filePath}`,
|
||||
prettier: `npx prettier --write ${filePath}`,
|
||||
black: `black ${filePath}`,
|
||||
gofmt: `gofmt -w ${filePath}`,
|
||||
rustfmt: `rustfmt ${filePath}`,
|
||||
}
|
||||
return commands[formatter]
|
||||
}
|
||||
|
||||
@@ -1,56 +1,54 @@
|
||||
/**
|
||||
* ECC Custom Tool: Git Summary
|
||||
*
|
||||
* Provides a comprehensive git status including branch info, status,
|
||||
* recent log, and diff against base branch.
|
||||
* Returns branch/status/log/diff details for the active repository.
|
||||
*/
|
||||
|
||||
import { tool } from "@opencode-ai/plugin"
|
||||
import { z } from "zod"
|
||||
import { tool } from "@opencode-ai/plugin/tool"
|
||||
import { execSync } from "child_process"
|
||||
|
||||
export default tool({
|
||||
name: "git-summary",
|
||||
description: "Get comprehensive git summary: branch, status, recent log, and diff against base branch.",
|
||||
parameters: z.object({
|
||||
depth: z.number().optional().describe("Number of recent commits to show (default: 5)"),
|
||||
includeDiff: z.boolean().optional().describe("Include diff against base branch (default: true)"),
|
||||
baseBranch: z.string().optional().describe("Base branch for comparison (default: main)"),
|
||||
}),
|
||||
execute: async ({ depth = 5, includeDiff = true, baseBranch = "main" }, { $ }) => {
|
||||
const results: Record<string, string> = {}
|
||||
description:
|
||||
"Generate git summary with branch, status, recent commits, and optional diff stats.",
|
||||
args: {
|
||||
depth: tool.schema
|
||||
.number()
|
||||
.optional()
|
||||
.describe("Number of recent commits to include (default: 5)"),
|
||||
includeDiff: tool.schema
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe("Include diff stats against base branch (default: true)"),
|
||||
baseBranch: tool.schema
|
||||
.string()
|
||||
.optional()
|
||||
.describe("Base branch for diff comparison (default: main)"),
|
||||
},
|
||||
async execute(args, context) {
|
||||
const cwd = context.worktree || context.directory
|
||||
const depth = args.depth ?? 5
|
||||
const includeDiff = args.includeDiff ?? true
|
||||
const baseBranch = args.baseBranch ?? "main"
|
||||
|
||||
try {
|
||||
results.branch = (await $`git branch --show-current`.text()).trim()
|
||||
} catch {
|
||||
results.branch = "unknown"
|
||||
}
|
||||
|
||||
try {
|
||||
results.status = (await $`git status --short`.text()).trim()
|
||||
} catch {
|
||||
results.status = "unable to get status"
|
||||
}
|
||||
|
||||
try {
|
||||
results.log = (await $`git log --oneline -${depth}`.text()).trim()
|
||||
} catch {
|
||||
results.log = "unable to get log"
|
||||
const result: Record<string, string> = {
|
||||
branch: run("git branch --show-current", cwd) || "unknown",
|
||||
status: run("git status --short", cwd) || "clean",
|
||||
log: run(`git log --oneline -${depth}`, cwd) || "no commits found",
|
||||
}
|
||||
|
||||
if (includeDiff) {
|
||||
try {
|
||||
results.stagedDiff = (await $`git diff --cached --stat`.text()).trim()
|
||||
} catch {
|
||||
results.stagedDiff = ""
|
||||
}
|
||||
|
||||
try {
|
||||
results.branchDiff = (await $`git diff ${baseBranch}...HEAD --stat`.text()).trim()
|
||||
} catch {
|
||||
results.branchDiff = `unable to diff against ${baseBranch}`
|
||||
}
|
||||
result.stagedDiff = run("git diff --cached --stat", cwd) || ""
|
||||
result.branchDiff = run(`git diff ${baseBranch}...HEAD --stat`, cwd) || `unable to diff against ${baseBranch}`
|
||||
}
|
||||
|
||||
return results
|
||||
return JSON.stringify(result)
|
||||
},
|
||||
})
|
||||
|
||||
function run(command: string, cwd: string): string {
|
||||
try {
|
||||
return execSync(command, { cwd, encoding: "utf-8", stdio: ["ignore", "pipe", "pipe"] }).trim()
|
||||
} catch {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,74 +1,85 @@
|
||||
/**
|
||||
* ECC Custom Tool: Lint Check
|
||||
*
|
||||
* Multi-language linter that auto-detects the project's linting tool.
|
||||
* Supports: ESLint/Biome (JS/TS), Pylint/Ruff (Python), golangci-lint (Go)
|
||||
* Detects the appropriate linter and returns a runnable lint command.
|
||||
*/
|
||||
|
||||
import { tool } from "@opencode-ai/plugin"
|
||||
import { z } from "zod"
|
||||
import { tool } from "@opencode-ai/plugin/tool"
|
||||
import * as path from "path"
|
||||
import * as fs from "fs"
|
||||
|
||||
type Linter = "biome" | "eslint" | "ruff" | "pylint" | "golangci-lint"
|
||||
|
||||
export default tool({
|
||||
name: "lint-check",
|
||||
description: "Run linter on files or directories. Auto-detects ESLint, Biome, Ruff, Pylint, or golangci-lint.",
|
||||
parameters: z.object({
|
||||
target: z.string().optional().describe("File or directory to lint (default: current directory)"),
|
||||
fix: z.boolean().optional().describe("Auto-fix issues if supported (default: false)"),
|
||||
linter: z.string().optional().describe("Override linter: eslint, biome, ruff, pylint, golangci-lint (default: auto-detect)"),
|
||||
}),
|
||||
execute: async ({ target = ".", fix = false, linter }, { $ }) => {
|
||||
// Auto-detect linter
|
||||
let detected = linter
|
||||
if (!detected) {
|
||||
try {
|
||||
await $`test -f biome.json || test -f biome.jsonc`
|
||||
detected = "biome"
|
||||
} catch {
|
||||
try {
|
||||
await $`test -f .eslintrc.json || test -f .eslintrc.js || test -f .eslintrc.cjs || test -f eslint.config.js || test -f eslint.config.mjs`
|
||||
detected = "eslint"
|
||||
} catch {
|
||||
try {
|
||||
await $`test -f pyproject.toml && grep -q "ruff" pyproject.toml`
|
||||
detected = "ruff"
|
||||
} catch {
|
||||
try {
|
||||
await $`test -f .golangci.yml || test -f .golangci.yaml`
|
||||
detected = "golangci-lint"
|
||||
} catch {
|
||||
// Fall back based on file extensions in target
|
||||
detected = "eslint"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
description:
|
||||
"Detect linter for a target path and return command for check/fix runs.",
|
||||
args: {
|
||||
target: tool.schema
|
||||
.string()
|
||||
.optional()
|
||||
.describe("File or directory to lint (default: current directory)"),
|
||||
fix: tool.schema
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe("Enable auto-fix mode"),
|
||||
linter: tool.schema
|
||||
.enum(["biome", "eslint", "ruff", "pylint", "golangci-lint"])
|
||||
.optional()
|
||||
.describe("Optional linter override"),
|
||||
},
|
||||
async execute(args, context) {
|
||||
const cwd = context.worktree || context.directory
|
||||
const target = args.target || "."
|
||||
const fix = args.fix ?? false
|
||||
const detected = args.linter || detectLinter(cwd)
|
||||
|
||||
const fixFlag = fix ? " --fix" : ""
|
||||
const commands: Record<string, string> = {
|
||||
biome: `npx @biomejs/biome lint${fix ? " --write" : ""} ${target}`,
|
||||
eslint: `npx eslint${fixFlag} ${target}`,
|
||||
ruff: `ruff check${fixFlag} ${target}`,
|
||||
pylint: `pylint ${target}`,
|
||||
"golangci-lint": `golangci-lint run${fixFlag} ${target}`,
|
||||
}
|
||||
|
||||
const cmd = commands[detected]
|
||||
if (!cmd) {
|
||||
return { success: false, message: `Unknown linter: ${detected}` }
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await $`${cmd}`.text()
|
||||
return { success: true, linter: detected, output: result, issues: 0 }
|
||||
} catch (error: unknown) {
|
||||
const err = error as { stdout?: string; stderr?: string }
|
||||
return {
|
||||
success: false,
|
||||
linter: detected,
|
||||
output: err.stdout || "",
|
||||
errors: err.stderr || "",
|
||||
}
|
||||
}
|
||||
const command = buildLintCommand(detected, target, fix)
|
||||
return JSON.stringify({
|
||||
success: true,
|
||||
linter: detected,
|
||||
command,
|
||||
instructions: `Run this command:\n\n${command}`,
|
||||
})
|
||||
},
|
||||
})
|
||||
|
||||
function detectLinter(cwd: string): Linter {
|
||||
if (fs.existsSync(path.join(cwd, "biome.json")) || fs.existsSync(path.join(cwd, "biome.jsonc"))) {
|
||||
return "biome"
|
||||
}
|
||||
|
||||
const eslintConfigs = [
|
||||
".eslintrc.json",
|
||||
".eslintrc.js",
|
||||
".eslintrc.cjs",
|
||||
"eslint.config.js",
|
||||
"eslint.config.mjs",
|
||||
]
|
||||
if (eslintConfigs.some((name) => fs.existsSync(path.join(cwd, name)))) {
|
||||
return "eslint"
|
||||
}
|
||||
|
||||
const pyprojectPath = path.join(cwd, "pyproject.toml")
|
||||
if (fs.existsSync(pyprojectPath)) {
|
||||
try {
|
||||
const content = fs.readFileSync(pyprojectPath, "utf-8")
|
||||
if (content.includes("ruff")) return "ruff"
|
||||
} catch {
|
||||
// ignore read errors and keep fallback logic
|
||||
}
|
||||
}
|
||||
|
||||
if (fs.existsSync(path.join(cwd, ".golangci.yml")) || fs.existsSync(path.join(cwd, ".golangci.yaml"))) {
|
||||
return "golangci-lint"
|
||||
}
|
||||
|
||||
return "eslint"
|
||||
}
|
||||
|
||||
function buildLintCommand(linter: Linter, target: string, fix: boolean): string {
|
||||
if (linter === "biome") return `npx @biomejs/biome lint${fix ? " --write" : ""} ${target}`
|
||||
if (linter === "eslint") return `npx eslint${fix ? " --fix" : ""} ${target}`
|
||||
if (linter === "ruff") return `ruff check${fix ? " --fix" : ""} ${target}`
|
||||
if (linter === "pylint") return `pylint ${target}`
|
||||
return `golangci-lint run ${target}`
|
||||
}
|
||||
|
||||
46
CHANGELOG.md
Normal file
46
CHANGELOG.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# Changelog
|
||||
|
||||
## 1.8.0 - 2026-03-04
|
||||
|
||||
### Highlights
|
||||
|
||||
- Harness-first release focused on reliability, eval discipline, and autonomous loop operations.
|
||||
- Hook runtime now supports profile-based control and targeted hook disabling.
|
||||
- NanoClaw v2 adds model routing, skill hot-load, branching, search, compaction, export, and metrics.
|
||||
|
||||
### Core
|
||||
|
||||
- Added new commands: `/harness-audit`, `/loop-start`, `/loop-status`, `/quality-gate`, `/model-route`.
|
||||
- Added new skills:
|
||||
- `agent-harness-construction`
|
||||
- `agentic-engineering`
|
||||
- `ralphinho-rfc-pipeline`
|
||||
- `ai-first-engineering`
|
||||
- `enterprise-agent-ops`
|
||||
- `nanoclaw-repl`
|
||||
- `continuous-agent-loop`
|
||||
- Added new agents:
|
||||
- `harness-optimizer`
|
||||
- `loop-operator`
|
||||
|
||||
### Hook Reliability
|
||||
|
||||
- Fixed SessionStart root resolution with robust fallback search.
|
||||
- Moved session summary persistence to `Stop` where transcript payload is available.
|
||||
- Added quality-gate and cost-tracker hooks.
|
||||
- Replaced fragile inline hook one-liners with dedicated script files.
|
||||
- Added `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` controls.
|
||||
|
||||
### Cross-Platform
|
||||
|
||||
- Improved Windows-safe path handling in doc warning logic.
|
||||
- Hardened observer loop behavior to avoid non-interactive hangs.
|
||||
|
||||
### Notes
|
||||
|
||||
- `autonomous-loops` is kept as a compatibility alias for one release; `continuous-agent-loop` is the canonical name.
|
||||
|
||||
### Credits
|
||||
|
||||
- inspired by [zarazhangrui](https://github.com/zarazhangrui)
|
||||
- homunculus-inspired by [humanplane](https://github.com/humanplane)
|
||||
69
README.md
69
README.md
@@ -71,6 +71,16 @@ This repo is the raw code only. The guides explain everything.
|
||||
|
||||
## What's New
|
||||
|
||||
### v1.8.0 — Harness Performance System (Mar 2026)
|
||||
|
||||
- **Harness-first release** — ECC is now explicitly framed as an agent harness performance system, not just a config pack.
|
||||
- **Hook reliability overhaul** — SessionStart root fallback, Stop-phase session summaries, and script-based hooks replacing fragile inline one-liners.
|
||||
- **Hook runtime controls** — `ECC_HOOK_PROFILE=minimal|standard|strict` and `ECC_DISABLED_HOOKS=...` for runtime gating without editing hook files.
|
||||
- **New harness commands** — `/harness-audit`, `/loop-start`, `/loop-status`, `/quality-gate`, `/model-route`.
|
||||
- **NanoClaw v2** — model routing, skill hot-load, session branch/search/export/compact/metrics.
|
||||
- **Cross-harness parity** — behavior tightened across Claude Code, Cursor, OpenCode, and Codex app/CLI.
|
||||
- **997 internal tests passing** — full suite green after hook/runtime refactor and compatibility updates.
|
||||
|
||||
### v1.7.0 — Cross-Platform Expansion & Presentation Builder (Feb 2026)
|
||||
|
||||
- **Codex app + CLI support** — Direct `AGENTS.md`-based Codex support, installer targeting, and Codex docs
|
||||
@@ -164,7 +174,7 @@ For manual install instructions see the README in the `rules/` folder.
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
✨ **That's it!** You now have access to 13 agents, 56 skills, and 32 commands.
|
||||
✨ **That's it!** You now have access to 16 agents, 65 skills, and 40 commands.
|
||||
|
||||
---
|
||||
|
||||
@@ -201,6 +211,18 @@ node scripts/setup-package-manager.js --detect
|
||||
|
||||
Or use the `/setup-pm` command in Claude Code.
|
||||
|
||||
### Hook Runtime Controls
|
||||
|
||||
Use runtime flags to tune strictness or disable specific hooks temporarily:
|
||||
|
||||
```bash
|
||||
# Hook strictness profile (default: standard)
|
||||
export ECC_HOOK_PROFILE=standard
|
||||
|
||||
# Comma-separated hook IDs to disable
|
||||
export ECC_DISABLED_HOOKS="pre:bash:tmux-reminder,post:edit:typecheck"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 📦 What's Inside
|
||||
@@ -750,7 +772,7 @@ Each component is fully independent.
|
||||
Yes. ECC is cross-platform:
|
||||
- **Cursor**: Pre-translated configs in `.cursor/`. See [Cursor IDE Support](#cursor-ide-support).
|
||||
- **OpenCode**: Full plugin support in `.opencode/`. See [OpenCode Support](#-opencode-support).
|
||||
- **Codex**: First-class support with adapter drift guards and SessionStart fallback. See PR [#257](https://github.com/affaan-m/everything-claude-code/pull/257).
|
||||
- **Codex**: First-class support for both macOS app and CLI, with adapter drift guards and SessionStart fallback. See PR [#257](https://github.com/affaan-m/everything-claude-code/pull/257).
|
||||
- **Claude Code**: Native — this is the primary target.
|
||||
</details>
|
||||
|
||||
@@ -858,20 +880,25 @@ alwaysApply: false
|
||||
|
||||
---
|
||||
|
||||
## Codex CLI Support
|
||||
## Codex macOS App + CLI Support
|
||||
|
||||
ECC provides **first-class Codex CLI support** with a reference configuration, Codex-specific AGENTS.md supplement, and 16 ported skills.
|
||||
ECC provides **first-class Codex support** for both the macOS app and CLI, with a reference configuration, Codex-specific AGENTS.md supplement, and shared skills.
|
||||
|
||||
### Quick Start (Codex)
|
||||
### Quick Start (Codex App + CLI)
|
||||
|
||||
```bash
|
||||
# Copy the reference config to your home directory
|
||||
cp .codex/config.toml ~/.codex/config.toml
|
||||
|
||||
# Run Codex in the repo — AGENTS.md is auto-detected
|
||||
# Run Codex CLI in the repo — AGENTS.md is auto-detected
|
||||
codex
|
||||
```
|
||||
|
||||
Codex macOS app:
|
||||
- Open this repository as your workspace.
|
||||
- The root `AGENTS.md` is auto-detected.
|
||||
- Optional: copy `.codex/config.toml` to `~/.codex/config.toml` for CLI/app behavior consistency.
|
||||
|
||||
### What's Included
|
||||
|
||||
| Component | Count | Details |
|
||||
@@ -907,7 +934,7 @@ Skills at `.agents/skills/` are auto-loaded by Codex:
|
||||
|
||||
### Key Limitation
|
||||
|
||||
Codex CLI does **not yet support hooks** (OpenAI Codex Issue #2109, 430+ upvotes). Security enforcement is instruction-based via `persistent_instructions` in config.toml and the sandbox permission system.
|
||||
Codex does **not yet provide Claude-style hook execution parity**. ECC enforcement there is instruction-based via `AGENTS.md` and `persistent_instructions`, plus sandbox permissions.
|
||||
|
||||
---
|
||||
|
||||
@@ -931,9 +958,9 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
|
||||
| Feature | Claude Code | OpenCode | Status |
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | ✅ 13 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Commands | ✅ 33 commands | ✅ 24 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 50+ skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Agents | ✅ 16 agents | ✅ 12 agents | **Claude Code leads** |
|
||||
| Commands | ✅ 40 commands | ✅ 31 commands | **Claude Code leads** |
|
||||
| Skills | ✅ 65 skills | ✅ 37 skills | **Claude Code leads** |
|
||||
| Hooks | ✅ 8 event types | ✅ 11 events | **OpenCode has more!** |
|
||||
| Rules | ✅ 29 rules | ✅ 13 instructions | **Claude Code leads** |
|
||||
| MCP Servers | ✅ 14 servers | ✅ Full | **Full parity** |
|
||||
@@ -953,7 +980,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
|
||||
|
||||
**Additional OpenCode events**: `file.edited`, `file.watcher.updated`, `message.updated`, `lsp.client.diagnostics`, `tui.toast.show`, and more.
|
||||
|
||||
### Available Commands (32)
|
||||
### Available Commands (31+)
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
@@ -991,6 +1018,11 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
|
||||
| `/projects` | List known projects and instinct stats |
|
||||
| `/learn-eval` | Extract and evaluate patterns before saving |
|
||||
| `/setup-pm` | Configure package manager |
|
||||
| `/harness-audit` | Audit harness reliability, eval readiness, and risk posture |
|
||||
| `/loop-start` | Start controlled agentic loop execution pattern |
|
||||
| `/loop-status` | Inspect active loop status and checkpoints |
|
||||
| `/quality-gate` | Run quality gate checks for paths or entire repo |
|
||||
| `/model-route` | Route tasks to models by complexity and budget |
|
||||
|
||||
### Plugin Installation
|
||||
|
||||
@@ -1027,11 +1059,11 @@ ECC is the **first plugin to maximize every major AI coding tool**. Here's how e
|
||||
|
||||
| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |
|
||||
|---------|------------|------------|-----------|----------|
|
||||
| **Agents** | 13 | Shared (AGENTS.md) | Shared (AGENTS.md) | 12 |
|
||||
| **Commands** | 33 | Shared | Instruction-based | 24 |
|
||||
| **Skills** | 50+ | Shared | 10 (native format) | 37 |
|
||||
| **Agents** | 16 | Shared (AGENTS.md) | Shared (AGENTS.md) | 12 |
|
||||
| **Commands** | 40 | Shared | Instruction-based | 31 |
|
||||
| **Skills** | 65 | Shared | 10 (native format) | 37 |
|
||||
| **Hook Events** | 8 types | 15 types | None yet | 11 types |
|
||||
| **Hook Scripts** | 9 scripts | 16 scripts (DRY adapter) | N/A | Plugin hooks |
|
||||
| **Hook Scripts** | 20+ scripts | 16 scripts (DRY adapter) | N/A | Plugin hooks |
|
||||
| **Rules** | 29 (common + lang) | 29 (YAML frontmatter) | Instruction-based | 13 instructions |
|
||||
| **Custom Tools** | Via hooks | Via hooks | N/A | 6 native tools |
|
||||
| **MCP Servers** | 14 | Shared (mcp.json) | 4 (command-based) | Full |
|
||||
@@ -1039,7 +1071,7 @@ ECC is the **first plugin to maximize every major AI coding tool**. Here's how e
|
||||
| **Context File** | CLAUDE.md + AGENTS.md | AGENTS.md | AGENTS.md | AGENTS.md |
|
||||
| **Secret Detection** | Hook-based | beforeSubmitPrompt hook | Sandbox-based | Hook-based |
|
||||
| **Auto-Format** | PostToolUse hook | afterFileEdit hook | N/A | file.edited hook |
|
||||
| **Version** | Plugin | Plugin | Reference config | 1.6.0 |
|
||||
| **Version** | Plugin | Plugin | Reference config | 1.8.0 |
|
||||
|
||||
**Key architectural decisions:**
|
||||
- **AGENTS.md** at root is the universal cross-tool file (read by all 4 tools)
|
||||
@@ -1055,6 +1087,11 @@ I've been using Claude Code since the experimental rollout. Won the Anthropic x
|
||||
|
||||
These configs are battle-tested across multiple production applications.
|
||||
|
||||
## Inspiration Credits
|
||||
|
||||
- inspired by [zarazhangrui](https://github.com/zarazhangrui)
|
||||
- homunculus-inspired by [humanplane](https://github.com/humanplane)
|
||||
|
||||
---
|
||||
|
||||
## Token Optimization
|
||||
|
||||
@@ -222,3 +222,16 @@ When available, also check project-specific conventions from `CLAUDE.md` or proj
|
||||
- State management conventions (Zustand, Redux, Context)
|
||||
|
||||
Adapt your review to the project's established patterns. When in doubt, match what the rest of the codebase does.
|
||||
|
||||
## v1.8 AI-Generated Code Review Addendum
|
||||
|
||||
When reviewing AI-generated changes, prioritize:
|
||||
|
||||
1. Behavioral regressions and edge-case handling
|
||||
2. Security assumptions and trust boundaries
|
||||
3. Hidden coupling or accidental architecture drift
|
||||
4. Unnecessary model-cost-inducing complexity
|
||||
|
||||
Cost-awareness check:
|
||||
- Flag workflows that escalate to higher-cost models without clear reasoning need.
|
||||
- Recommend defaulting to lower-cost tiers for deterministic refactors.
|
||||
|
||||
35
agents/harness-optimizer.md
Normal file
35
agents/harness-optimizer.md
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
name: harness-optimizer
|
||||
description: Analyze and improve the local agent harness configuration for reliability, cost, and throughput.
|
||||
tools: ["Read", "Grep", "Glob", "Bash", "Edit"]
|
||||
model: sonnet
|
||||
color: teal
|
||||
---
|
||||
|
||||
You are the harness optimizer.
|
||||
|
||||
## Mission
|
||||
|
||||
Raise agent completion quality by improving harness configuration, not by rewriting product code.
|
||||
|
||||
## Workflow
|
||||
|
||||
1. Run `/harness-audit` and collect baseline score.
|
||||
2. Identify top 3 leverage areas (hooks, evals, routing, context, safety).
|
||||
3. Propose minimal, reversible configuration changes.
|
||||
4. Apply changes and run validation.
|
||||
5. Report before/after deltas.
|
||||
|
||||
## Constraints
|
||||
|
||||
- Prefer small changes with measurable effect.
|
||||
- Preserve cross-platform behavior.
|
||||
- Avoid introducing fragile shell quoting.
|
||||
- Keep compatibility across Claude Code, Cursor, OpenCode, and Codex.
|
||||
|
||||
## Output
|
||||
|
||||
- baseline scorecard
|
||||
- applied changes
|
||||
- measured improvements
|
||||
- remaining risks
|
||||
36
agents/loop-operator.md
Normal file
36
agents/loop-operator.md
Normal file
@@ -0,0 +1,36 @@
|
||||
---
|
||||
name: loop-operator
|
||||
description: Operate autonomous agent loops, monitor progress, and intervene safely when loops stall.
|
||||
tools: ["Read", "Grep", "Glob", "Bash", "Edit"]
|
||||
model: sonnet
|
||||
color: orange
|
||||
---
|
||||
|
||||
You are the loop operator.
|
||||
|
||||
## Mission
|
||||
|
||||
Run autonomous loops safely with clear stop conditions, observability, and recovery actions.
|
||||
|
||||
## Workflow
|
||||
|
||||
1. Start loop from explicit pattern and mode.
|
||||
2. Track progress checkpoints.
|
||||
3. Detect stalls and retry storms.
|
||||
4. Pause and reduce scope when failure repeats.
|
||||
5. Resume only after verification passes.
|
||||
|
||||
## Required Checks
|
||||
|
||||
- quality gates are active
|
||||
- eval baseline exists
|
||||
- rollback path exists
|
||||
- branch/worktree isolation is configured
|
||||
|
||||
## Escalation
|
||||
|
||||
Escalate when any condition is true:
|
||||
- no progress across two consecutive checkpoints
|
||||
- repeated failures with identical stack traces
|
||||
- cost drift outside budget window
|
||||
- merge conflicts blocking queue advancement
|
||||
@@ -78,3 +78,14 @@ npm run test:coverage
|
||||
- [ ] Coverage is 80%+
|
||||
|
||||
For detailed mocking patterns and framework-specific examples, see `skill: tdd-workflow`.
|
||||
|
||||
## v1.8 Eval-Driven TDD Addendum
|
||||
|
||||
Integrate eval-driven development into TDD flow:
|
||||
|
||||
1. Define capability + regression evals before implementation.
|
||||
2. Run baseline and capture failure signatures.
|
||||
3. Implement minimum passing change.
|
||||
4. Re-run tests and evals; report pass@1 and pass@3.
|
||||
|
||||
Release-critical paths should target pass^3 stability before merge.
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
---
|
||||
description: Start the NanoClaw agent REPL — a persistent, session-aware AI assistant powered by the claude CLI.
|
||||
description: Start NanoClaw v2 — ECC's persistent, zero-dependency REPL with model routing, skill hot-load, branching, compaction, export, and metrics.
|
||||
---
|
||||
|
||||
# Claw Command
|
||||
|
||||
Start an interactive AI agent session that persists conversation history to disk and optionally loads ECC skill context.
|
||||
Start an interactive AI agent session with persistent markdown history and operational controls.
|
||||
|
||||
## Usage
|
||||
|
||||
@@ -23,57 +23,29 @@ npm run claw
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `CLAW_SESSION` | `default` | Session name (alphanumeric + hyphens) |
|
||||
| `CLAW_SKILLS` | *(empty)* | Comma-separated skill names to load as system context |
|
||||
| `CLAW_SKILLS` | *(empty)* | Comma-separated skills loaded at startup |
|
||||
| `CLAW_MODEL` | `sonnet` | Default model for the session |
|
||||
|
||||
## REPL Commands
|
||||
|
||||
Inside the REPL, type these commands directly at the prompt:
|
||||
|
||||
```
|
||||
/clear Clear current session history
|
||||
/history Print full conversation history
|
||||
/sessions List all saved sessions
|
||||
/help Show available commands
|
||||
exit Quit the REPL
|
||||
```text
|
||||
/help Show help
|
||||
/clear Clear current session history
|
||||
/history Print full conversation history
|
||||
/sessions List saved sessions
|
||||
/model [name] Show/set model
|
||||
/load <skill-name> Hot-load a skill into context
|
||||
/branch <session-name> Branch current session
|
||||
/search <query> Search query across sessions
|
||||
/compact Compact old turns, keep recent context
|
||||
/export <md|json|txt> [path] Export session
|
||||
/metrics Show session metrics
|
||||
exit Quit
|
||||
```
|
||||
|
||||
## How It Works
|
||||
## Notes
|
||||
|
||||
1. Reads `CLAW_SESSION` env var to select a named session (default: `default`)
|
||||
2. Loads conversation history from `~/.claude/claw/{session}.md`
|
||||
3. Optionally loads ECC skill context from `CLAW_SKILLS` env var
|
||||
4. Enters a blocking prompt loop — each user message is sent to `claude -p` with full history
|
||||
5. Responses are appended to the session file for persistence across restarts
|
||||
|
||||
## Session Storage
|
||||
|
||||
Sessions are stored as Markdown files in `~/.claude/claw/`:
|
||||
|
||||
```
|
||||
~/.claude/claw/default.md
|
||||
~/.claude/claw/my-project.md
|
||||
```
|
||||
|
||||
Each turn is formatted as:
|
||||
|
||||
```markdown
|
||||
### [2025-01-15T10:30:00.000Z] User
|
||||
What does this function do?
|
||||
---
|
||||
### [2025-01-15T10:30:05.000Z] Assistant
|
||||
This function calculates...
|
||||
---
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
```bash
|
||||
# Start default session
|
||||
node scripts/claw.js
|
||||
|
||||
# Named session
|
||||
CLAW_SESSION=my-project node scripts/claw.js
|
||||
|
||||
# With skill context
|
||||
CLAW_SKILLS=tdd-workflow,security-review node scripts/claw.js
|
||||
```
|
||||
- NanoClaw remains zero-dependency.
|
||||
- Sessions are stored at `~/.claude/claw/<session>.md`.
|
||||
- Compaction keeps the most recent turns and writes a compaction header.
|
||||
- Export supports markdown, JSON turns, and plain text.
|
||||
|
||||
58
commands/harness-audit.md
Normal file
58
commands/harness-audit.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# Harness Audit Command
|
||||
|
||||
Audit the current repository's agent harness setup and return a prioritized scorecard.
|
||||
|
||||
## Usage
|
||||
|
||||
`/harness-audit [scope] [--format text|json]`
|
||||
|
||||
- `scope` (optional): `repo` (default), `hooks`, `skills`, `commands`, `agents`
|
||||
- `--format`: output style (`text` default, `json` for automation)
|
||||
|
||||
## What to Evaluate
|
||||
|
||||
Score each category from `0` to `10`:
|
||||
|
||||
1. Tool Coverage
|
||||
2. Context Efficiency
|
||||
3. Quality Gates
|
||||
4. Memory Persistence
|
||||
5. Eval Coverage
|
||||
6. Security Guardrails
|
||||
7. Cost Efficiency
|
||||
|
||||
## Output Contract
|
||||
|
||||
Return:
|
||||
|
||||
1. `overall_score` out of 70
|
||||
2. Category scores and concrete findings
|
||||
3. Top 3 actions with exact file paths
|
||||
4. Suggested ECC skills to apply next
|
||||
|
||||
## Checklist
|
||||
|
||||
- Inspect `hooks/hooks.json`, `scripts/hooks/`, and hook tests.
|
||||
- Inspect `skills/`, command coverage, and agent coverage.
|
||||
- Verify cross-harness parity for `.cursor/`, `.opencode/`, `.codex/`.
|
||||
- Flag broken or stale references.
|
||||
|
||||
## Example Result
|
||||
|
||||
```text
|
||||
Harness Audit (repo): 52/70
|
||||
- Quality Gates: 9/10
|
||||
- Eval Coverage: 6/10
|
||||
- Cost Efficiency: 4/10
|
||||
|
||||
Top 3 Actions:
|
||||
1) Add cost tracking hook in scripts/hooks/cost-tracker.js
|
||||
2) Add pass@k docs and templates in skills/eval-harness/SKILL.md
|
||||
3) Add command parity for /harness-audit in .opencode/commands/
|
||||
```
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `repo|hooks|skills|commands|agents` (optional scope)
|
||||
- `--format text|json` (optional output format)
|
||||
32
commands/loop-start.md
Normal file
32
commands/loop-start.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Loop Start Command
|
||||
|
||||
Start a managed autonomous loop pattern with safety defaults.
|
||||
|
||||
## Usage
|
||||
|
||||
`/loop-start [pattern] [--mode safe|fast]`
|
||||
|
||||
- `pattern`: `sequential`, `continuous-pr`, `rfc-dag`, `infinite`
|
||||
- `--mode`:
|
||||
- `safe` (default): strict quality gates and checkpoints
|
||||
- `fast`: reduced gates for speed
|
||||
|
||||
## Flow
|
||||
|
||||
1. Confirm repository state and branch strategy.
|
||||
2. Select loop pattern and model tier strategy.
|
||||
3. Enable required hooks/profile for the chosen mode.
|
||||
4. Create loop plan and write runbook under `.claude/plans/`.
|
||||
5. Print commands to start and monitor the loop.
|
||||
|
||||
## Required Safety Checks
|
||||
|
||||
- Verify tests pass before first loop iteration.
|
||||
- Ensure `ECC_HOOK_PROFILE` is not disabled globally.
|
||||
- Ensure loop has explicit stop condition.
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `<pattern>` optional (`sequential|continuous-pr|rfc-dag|infinite`)
|
||||
- `--mode safe|fast` optional
|
||||
24
commands/loop-status.md
Normal file
24
commands/loop-status.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Loop Status Command
|
||||
|
||||
Inspect active loop state, progress, and failure signals.
|
||||
|
||||
## Usage
|
||||
|
||||
`/loop-status [--watch]`
|
||||
|
||||
## What to Report
|
||||
|
||||
- active loop pattern
|
||||
- current phase and last successful checkpoint
|
||||
- failing checks (if any)
|
||||
- estimated time/cost drift
|
||||
- recommended intervention (continue/pause/stop)
|
||||
|
||||
## Watch Mode
|
||||
|
||||
When `--watch` is present, refresh status periodically and surface state changes.
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `--watch` optional
|
||||
26
commands/model-route.md
Normal file
26
commands/model-route.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# Model Route Command
|
||||
|
||||
Recommend the best model tier for the current task by complexity and budget.
|
||||
|
||||
## Usage
|
||||
|
||||
`/model-route [task-description] [--budget low|med|high]`
|
||||
|
||||
## Routing Heuristic
|
||||
|
||||
- `haiku`: deterministic, low-risk mechanical changes
|
||||
- `sonnet`: default for implementation and refactors
|
||||
- `opus`: architecture, deep review, ambiguous requirements
|
||||
|
||||
## Required Output
|
||||
|
||||
- recommended model
|
||||
- confidence level
|
||||
- why this model fits
|
||||
- fallback model if first attempt fails
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `[task-description]` optional free-text
|
||||
- `--budget low|med|high` optional
|
||||
29
commands/quality-gate.md
Normal file
29
commands/quality-gate.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Quality Gate Command
|
||||
|
||||
Run the ECC quality pipeline on demand for a file or project scope.
|
||||
|
||||
## Usage
|
||||
|
||||
`/quality-gate [path|.] [--fix] [--strict]`
|
||||
|
||||
- default target: current directory (`.`)
|
||||
- `--fix`: allow auto-format/fix where configured
|
||||
- `--strict`: fail on warnings where supported
|
||||
|
||||
## Pipeline
|
||||
|
||||
1. Detect language/tooling for target.
|
||||
2. Run formatter checks.
|
||||
3. Run lint/type checks when available.
|
||||
4. Produce a concise remediation list.
|
||||
|
||||
## Notes
|
||||
|
||||
This command mirrors hook behavior but is operator-invoked.
|
||||
|
||||
## Arguments
|
||||
|
||||
$ARGUMENTS:
|
||||
- `[path|.]` optional target path
|
||||
- `--fix` optional
|
||||
- `--strict` optional
|
||||
14
docs/continuous-learning-v2-spec.md
Normal file
14
docs/continuous-learning-v2-spec.md
Normal file
@@ -0,0 +1,14 @@
|
||||
# Continuous Learning v2 Spec
|
||||
|
||||
This document captures the v2 continuous-learning architecture:
|
||||
|
||||
1. Hook-based observation capture
|
||||
2. Background observer analysis loop
|
||||
3. Instinct scoring and persistence
|
||||
4. Evolution of instincts into reusable skills/commands
|
||||
|
||||
Primary implementation lives in:
|
||||
- `skills/continuous-learning-v2/`
|
||||
- `scripts/hooks/`
|
||||
|
||||
Use this file as the stable reference path for docs and translations.
|
||||
@@ -107,4 +107,4 @@ Homunculus v2はより洗練されたアプローチを採用:
|
||||
4. **ドメインタグ付け** - コードスタイル、テスト、git、デバッグなど
|
||||
5. **進化パス** - 関連する本能をスキル/コマンドにクラスタ化
|
||||
|
||||
詳細: `/Users/affoon/Documents/tasks/12-continuous-learning-v2.md`を参照。
|
||||
詳細: `docs/continuous-learning-v2-spec.md`を参照。
|
||||
|
||||
13
docs/releases/1.8.0/linkedin-post.md
Normal file
13
docs/releases/1.8.0/linkedin-post.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# LinkedIn Draft - ECC v1.8.0
|
||||
|
||||
ECC v1.8.0 is now focused on harness performance at the system level.
|
||||
|
||||
This release improves:
|
||||
- hook reliability and lifecycle behavior
|
||||
- eval-driven engineering workflows
|
||||
- operator tooling for autonomous loops
|
||||
- cross-platform support for Claude Code, Cursor, OpenCode, and Codex
|
||||
|
||||
We also shipped NanoClaw v2 with stronger session operations for real workflow usage.
|
||||
|
||||
If your AI coding workflow feels inconsistent, start by treating the harness as a first-class engineering system.
|
||||
16
docs/releases/1.8.0/reference-attribution.md
Normal file
16
docs/releases/1.8.0/reference-attribution.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# Reference Attribution and Licensing Notes
|
||||
|
||||
ECC v1.8.0 references research and workflow inspiration from:
|
||||
|
||||
- `plankton`
|
||||
- `ralphinho`
|
||||
- `infinite-agentic-loop`
|
||||
- `continuous-claude`
|
||||
- public profiles: [zarazhangrui](https://github.com/zarazhangrui), [humanplane](https://github.com/humanplane)
|
||||
|
||||
## Policy
|
||||
|
||||
1. No direct code copying from unlicensed or incompatible sources.
|
||||
2. ECC implementations are re-authored for this repository’s architecture and licensing model.
|
||||
3. Referenced material is used for ideas, patterns, and conceptual framing only unless licensing explicitly permits reuse.
|
||||
4. Any future direct reuse requires explicit license verification and source attribution in-file and in release notes.
|
||||
19
docs/releases/1.8.0/release-notes.md
Normal file
19
docs/releases/1.8.0/release-notes.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# ECC v1.8.0 Release Notes
|
||||
|
||||
## Positioning
|
||||
|
||||
ECC v1.8.0 positions the project as an agent harness performance system, not just a config bundle.
|
||||
|
||||
## Key Improvements
|
||||
|
||||
- Stabilized hooks and lifecycle behavior.
|
||||
- Expanded eval and loop operations surface.
|
||||
- Upgraded NanoClaw for operational use.
|
||||
- Improved cross-harness parity (Claude Code, Cursor, OpenCode, Codex).
|
||||
|
||||
## Upgrade Focus
|
||||
|
||||
1. Validate hook profile defaults in your environment.
|
||||
2. Run `/harness-audit` to baseline your project.
|
||||
3. Use `/quality-gate` and updated eval workflows to enforce consistency.
|
||||
4. Review attribution and licensing notes for referenced ecosystems: [reference-attribution.md](./reference-attribution.md).
|
||||
5
docs/releases/1.8.0/x-quote-eval-skills.md
Normal file
5
docs/releases/1.8.0/x-quote-eval-skills.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# X Quote Draft - Eval Skills Post
|
||||
|
||||
Strong eval skills are now built deeper into ECC.
|
||||
|
||||
v1.8.0 expands eval-harness patterns, pass@k guidance, and release-level verification loops so teams can measure reliability, not guess it.
|
||||
5
docs/releases/1.8.0/x-quote-plankton-deslop.md
Normal file
5
docs/releases/1.8.0/x-quote-plankton-deslop.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# X Quote Draft - Plankton / De-slop Workflow
|
||||
|
||||
The quality gate model matters.
|
||||
|
||||
In v1.8.0 we pushed harder on write-time quality enforcement, deterministic checks, and cleaner loop recovery so agents converge faster with less noise.
|
||||
11
docs/releases/1.8.0/x-thread.md
Normal file
11
docs/releases/1.8.0/x-thread.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# X Thread Draft - ECC v1.8.0
|
||||
|
||||
1/ ECC v1.8.0 is live. This release is about one thing: better agent harness performance.
|
||||
|
||||
2/ We shipped hook reliability fixes, loop operations commands, and stronger eval workflows.
|
||||
|
||||
3/ NanoClaw v2 now supports model routing, skill hot-load, branching, search, compaction, export, and metrics.
|
||||
|
||||
4/ If your agents are underperforming, start with `/harness-audit` and tighten quality gates.
|
||||
|
||||
5/ Cross-harness parity remains a priority: Claude Code, Cursor, OpenCode, Codex.
|
||||
@@ -117,4 +117,4 @@ Homunculus v2 采用了更复杂的方法:
|
||||
4. **领域标记** - 代码风格、测试、git、调试等
|
||||
5. **演进路径** - 将相关本能聚类为技能/命令
|
||||
|
||||
完整规格请参见:`/Users/affoon/Documents/tasks/12-continuous-learning-v2.md`
|
||||
完整规格请参见:`docs/continuous-learning-v2-spec.md`
|
||||
|
||||
@@ -107,4 +107,4 @@ Homunculus v2 採用更複雜的方法:
|
||||
4. **領域標記** - code-style、testing、git、debugging 等
|
||||
5. **演化路徑** - 將相關本能聚類為技能/指令
|
||||
|
||||
參見:`/Users/affoon/Documents/tasks/12-continuous-learning-v2.md` 完整規格。
|
||||
參見:`docs/continuous-learning-v2-spec.md` 完整規格。
|
||||
|
||||
@@ -32,6 +32,7 @@ User request → Claude picks a tool → PreToolUse hook runs → Tool executes
|
||||
|------|---------|-------------|
|
||||
| **PR logger** | `Bash` | Logs PR URL and review command after `gh pr create` |
|
||||
| **Build analysis** | `Bash` | Background analysis after build commands (async, non-blocking) |
|
||||
| **Quality gate** | `Edit\|Write\|MultiEdit` | Runs fast quality checks after edits |
|
||||
| **Prettier format** | `Edit` | Auto-formats JS/TS files with Prettier after edits |
|
||||
| **TypeScript check** | `Edit` | Runs `tsc --noEmit` after editing `.ts`/`.tsx` files |
|
||||
| **console.log warning** | `Edit` | Warns about `console.log` statements in edited files |
|
||||
@@ -43,8 +44,10 @@ User request → Claude picks a tool → PreToolUse hook runs → Tool executes
|
||||
| **Session start** | `SessionStart` | Loads previous context and detects package manager |
|
||||
| **Pre-compact** | `PreCompact` | Saves state before context compaction |
|
||||
| **Console.log audit** | `Stop` | Checks all modified files for `console.log` after each response |
|
||||
| **Session end** | `SessionEnd` | Persists session state for next session |
|
||||
| **Pattern extraction** | `SessionEnd` | Evaluates session for extractable patterns (continuous learning) |
|
||||
| **Session summary** | `Stop` | Persists session state when transcript path is available |
|
||||
| **Pattern extraction** | `Stop` | Evaluates session for extractable patterns (continuous learning) |
|
||||
| **Cost tracker** | `Stop` | Emits lightweight run-cost telemetry markers |
|
||||
| **Session end marker** | `SessionEnd` | Lifecycle marker and cleanup log |
|
||||
|
||||
## Customizing Hooks
|
||||
|
||||
@@ -66,6 +69,23 @@ Remove or comment out the hook entry in `hooks.json`. If installed as a plugin,
|
||||
}
|
||||
```
|
||||
|
||||
### Runtime Hook Controls (Recommended)
|
||||
|
||||
Use environment variables to control hook behavior without editing `hooks.json`:
|
||||
|
||||
```bash
|
||||
# minimal | standard | strict (default: standard)
|
||||
export ECC_HOOK_PROFILE=standard
|
||||
|
||||
# Disable specific hook IDs (comma-separated)
|
||||
export ECC_DISABLED_HOOKS="pre:bash:tmux-reminder,post:edit:typecheck"
|
||||
```
|
||||
|
||||
Profiles:
|
||||
- `minimal` — keep essential lifecycle and safety hooks only.
|
||||
- `standard` — default; balanced quality + safety checks.
|
||||
- `strict` — enables additional reminders and stricter guardrails.
|
||||
|
||||
### Writing Your Own Hook
|
||||
|
||||
Hooks are shell commands that receive tool input as JSON on stdin and must output JSON on stdout.
|
||||
@@ -189,7 +209,7 @@ Async hooks run in the background. They cannot block tool execution.
|
||||
|
||||
## Cross-Platform Notes
|
||||
|
||||
All hooks in this plugin use Node.js (`node -e` or `node script.js`) for maximum compatibility across Windows, macOS, and Linux. Avoid bash-specific syntax in hooks.
|
||||
Hook logic is implemented in Node.js scripts for cross-platform behavior on Windows, macOS, and Linux. A small number of shell wrappers are retained for continuous-learning observer hooks; those wrappers are profile-gated and have Windows-safe fallback behavior.
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(process.platform!=='win32'&&/(npm run dev\\b|pnpm( run)? dev\\b|yarn dev\\b|bun run dev\\b)/.test(cmd)){console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');console.error('[Hook] Use: tmux new-session -d -s dev \\\"npm run dev\\\"');console.error('[Hook] Then: tmux attach -t dev');process.exit(2)}}catch{}console.log(d)})\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:bash:dev-server-block\" \"scripts/hooks/pre-bash-dev-server-block.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Block dev servers outside tmux - ensures you can access logs"
|
||||
@@ -17,7 +17,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(process.platform!=='win32'&&!process.env.TMUX&&/(npm (install|test)|pnpm (install|test)|yarn (install|test)?|bun (install|test)|cargo build|make\\b|docker\\b|pytest|vitest|playwright)/.test(cmd)){console.error('[Hook] Consider running in tmux for session persistence');console.error('[Hook] tmux new -s dev | tmux attach -t dev')}}catch{}console.log(d)})\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:bash:tmux-reminder\" \"scripts/hooks/pre-bash-tmux-reminder.js\" \"strict\""
|
||||
}
|
||||
],
|
||||
"description": "Reminder to use tmux for long-running commands"
|
||||
@@ -27,7 +27,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(/git push/.test(cmd)){console.error('[Hook] Review changes before push...');console.error('[Hook] Continuing with push (remove this hook to add interactive review)')}}catch{}console.log(d)})\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:bash:git-push-reminder\" \"scripts/hooks/pre-bash-git-push-reminder.js\" \"strict\""
|
||||
}
|
||||
],
|
||||
"description": "Reminder before git push to review changes"
|
||||
@@ -37,7 +37,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/doc-file-warning.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:write:doc-file-warning\" \"scripts/hooks/doc-file-warning.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Doc file warning: warn about non-standard documentation files (exit code 0; warns only)"
|
||||
@@ -47,7 +47,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/suggest-compact.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:edit-write:suggest-compact\" \"scripts/hooks/suggest-compact.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Suggest manual compaction at logical intervals"
|
||||
@@ -57,7 +57,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/skills/continuous-learning-v2/hooks/observe.sh",
|
||||
"command": "bash \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags-shell.sh\" \"pre:observe\" \"skills/continuous-learning-v2/hooks/observe.sh\" \"standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -71,7 +71,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/pre-compact.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:compact\" \"scripts/hooks/pre-compact.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Save state before context compaction"
|
||||
@@ -83,7 +83,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/session-start.js\""
|
||||
"command": "bash -lc 'input=$(cat); for root in \"${CLAUDE_PLUGIN_ROOT:-}\" \"$HOME/.claude/plugins/everything-claude-code\" \"$HOME/.claude/plugins/everything-claude-code@everything-claude-code\" \"$HOME/.claude/plugins/marketplace/everything-claude-code\"; do if [ -n \"$root\" ] && [ -f \"$root/scripts/hooks/run-with-flags.js\" ]; then printf \"%s\" \"$input\" | node \"$root/scripts/hooks/run-with-flags.js\" \"session:start\" \"scripts/hooks/session-start.js\" \"minimal,standard,strict\"; exit $?; fi; done; for parent in \"$HOME/.claude/plugins\" \"$HOME/.claude/plugins/marketplace\"; do if [ -d \"$parent\" ]; then candidate=$(find \"$parent\" -maxdepth 2 -type f -path \"*/scripts/hooks/run-with-flags.js\" 2>/dev/null | head -n 1); if [ -n \"$candidate\" ]; then root=$(dirname \"$(dirname \"$(dirname \"$candidate\")\")\"); printf \"%s\" \"$input\" | node \"$root/scripts/hooks/run-with-flags.js\" \"session:start\" \"scripts/hooks/session-start.js\" \"minimal,standard,strict\"; exit $?; fi; fi; done; echo \"[SessionStart] WARNING: could not resolve ECC plugin root; skipping session-start hook\" >&2; printf \"%s\" \"$input\"; exit 0'"
|
||||
}
|
||||
],
|
||||
"description": "Load previous context and detect package manager on new session"
|
||||
@@ -95,7 +95,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(/gh pr create/.test(cmd)){const out=i.tool_output?.output||'';const m=out.match(/https:\\/\\/github.com\\/[^/]+\\/[^/]+\\/pull\\/\\d+/);if(m){console.error('[Hook] PR created: '+m[0]);const repo=m[0].replace(/https:\\/\\/github.com\\/([^/]+\\/[^/]+)\\/pull\\/\\d+/,'$1');const pr=m[0].replace(/.+\\/pull\\/(\\d+)/,'$1');console.error('[Hook] To review: gh pr review '+pr+' --repo '+repo)}}}catch{}console.log(d)})\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"post:bash:pr-created\" \"scripts/hooks/post-bash-pr-created.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Log PR URL and provide review command after PR creation"
|
||||
@@ -105,19 +105,31 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const cmd=i.tool_input?.command||'';if(/(npm run build|pnpm build|yarn build)/.test(cmd)){console.error('[Hook] Build completed - async analysis running in background')}}catch{}console.log(d)})\"",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"post:bash:build-complete\" \"scripts/hooks/post-bash-build-complete.js\" \"standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 30
|
||||
}
|
||||
],
|
||||
"description": "Example: async hook for build analysis (runs in background without blocking)"
|
||||
},
|
||||
{
|
||||
"matcher": "Edit|Write|MultiEdit",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"post:quality-gate\" \"scripts/hooks/quality-gate.js\" \"standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 30
|
||||
}
|
||||
],
|
||||
"description": "Run quality gate checks after file edits"
|
||||
},
|
||||
{
|
||||
"matcher": "Edit",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/post-edit-format.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"post:edit:format\" \"scripts/hooks/post-edit-format.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Auto-format JS/TS files after edits (auto-detects Biome or Prettier)"
|
||||
@@ -127,7 +139,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/post-edit-typecheck.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"post:edit:typecheck\" \"scripts/hooks/post-edit-typecheck.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "TypeScript check after editing .ts/.tsx files"
|
||||
@@ -137,7 +149,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/post-edit-console-warn.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"post:edit:console-warn\" \"scripts/hooks/post-edit-console-warn.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Warn about console.log statements after edits"
|
||||
@@ -147,7 +159,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "${CLAUDE_PLUGIN_ROOT}/skills/continuous-learning-v2/hooks/observe.sh",
|
||||
"command": "bash \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags-shell.sh\" \"post:observe\" \"skills/continuous-learning-v2/hooks/observe.sh\" \"standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -161,10 +173,46 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/check-console-log.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:check-console-log\" \"scripts/hooks/check-console-log.js\" \"standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Check for console.log in modified files after each response"
|
||||
},
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:session-end\" \"scripts/hooks/session-end.js\" \"minimal,standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
],
|
||||
"description": "Persist session state after each response (Stop carries transcript_path)"
|
||||
},
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:evaluate-session\" \"scripts/hooks/evaluate-session.js\" \"minimal,standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
],
|
||||
"description": "Evaluate session for extractable patterns"
|
||||
},
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"stop:cost-tracker\" \"scripts/hooks/cost-tracker.js\" \"minimal,standard,strict\"",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
],
|
||||
"description": "Track token and cost metrics per session"
|
||||
}
|
||||
],
|
||||
"SessionEnd": [
|
||||
@@ -173,20 +221,10 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/session-end.js\""
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"session:end:marker\" \"scripts/hooks/session-end-marker.js\" \"minimal,standard,strict\""
|
||||
}
|
||||
],
|
||||
"description": "Persist session state on end"
|
||||
},
|
||||
{
|
||||
"matcher": "*",
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/evaluate-session.js\""
|
||||
}
|
||||
],
|
||||
"description": "Evaluate session for extractable patterns"
|
||||
"description": "Session end lifecycle marker (non-blocking)"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
4
package-lock.json
generated
4
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "ecc-universal",
|
||||
"version": "1.4.1",
|
||||
"version": "1.8.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "ecc-universal",
|
||||
"version": "1.4.1",
|
||||
"version": "1.8.0",
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "ecc-universal",
|
||||
"version": "1.7.0",
|
||||
"version": "1.8.0",
|
||||
"description": "Complete collection of battle-tested Claude Code configs — agents, skills, hooks, commands, and rules evolved over 10+ months of intensive daily use by an Anthropic hackathon winner",
|
||||
"keywords": [
|
||||
"claude-code",
|
||||
@@ -83,7 +83,7 @@
|
||||
"postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'",
|
||||
"lint": "eslint . && markdownlint '**/*.md' --ignore node_modules",
|
||||
"claw": "node scripts/claw.js",
|
||||
"test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node tests/run-all.js"
|
||||
"test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.2",
|
||||
|
||||
63
scripts/ci/validate-no-personal-paths.js
Executable file
63
scripts/ci/validate-no-personal-paths.js
Executable file
@@ -0,0 +1,63 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Prevent shipping user-specific absolute paths in public docs/skills/commands.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const ROOT = path.join(__dirname, '../..');
|
||||
const TARGETS = [
|
||||
'README.md',
|
||||
'skills',
|
||||
'commands',
|
||||
'agents',
|
||||
'docs',
|
||||
'.opencode/commands',
|
||||
];
|
||||
|
||||
const BLOCK_PATTERNS = [
|
||||
/\/Users\/affoon\b/g,
|
||||
/C:\\Users\\affoon\b/gi,
|
||||
];
|
||||
|
||||
function collectFiles(targetPath, out) {
|
||||
if (!fs.existsSync(targetPath)) return;
|
||||
const stat = fs.statSync(targetPath);
|
||||
if (stat.isFile()) {
|
||||
out.push(targetPath);
|
||||
return;
|
||||
}
|
||||
|
||||
for (const entry of fs.readdirSync(targetPath)) {
|
||||
if (entry === 'node_modules' || entry === '.git') continue;
|
||||
collectFiles(path.join(targetPath, entry), out);
|
||||
}
|
||||
}
|
||||
|
||||
const files = [];
|
||||
for (const target of TARGETS) {
|
||||
collectFiles(path.join(ROOT, target), files);
|
||||
}
|
||||
|
||||
let failures = 0;
|
||||
for (const file of files) {
|
||||
if (!/\.(md|json|js|ts|sh|toml|yml|yaml)$/i.test(file)) continue;
|
||||
const content = fs.readFileSync(file, 'utf8');
|
||||
for (const pattern of BLOCK_PATTERNS) {
|
||||
const match = content.match(pattern);
|
||||
if (match) {
|
||||
console.error(`ERROR: personal path detected in ${path.relative(ROOT, file)}`);
|
||||
failures += match.length;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (failures > 0) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log('Validated: no personal absolute paths in shipped docs/skills/commands');
|
||||
@@ -8,14 +8,47 @@ const path = require('path');
|
||||
|
||||
const RULES_DIR = path.join(__dirname, '../../rules');
|
||||
|
||||
/**
|
||||
* Recursively collect markdown rule files.
|
||||
* Uses explicit traversal for portability across Node versions.
|
||||
* @param {string} dir - Directory to scan
|
||||
* @returns {string[]} Relative file paths from RULES_DIR
|
||||
*/
|
||||
function collectRuleFiles(dir) {
|
||||
const files = [];
|
||||
|
||||
let entries;
|
||||
try {
|
||||
entries = fs.readdirSync(dir, { withFileTypes: true });
|
||||
} catch {
|
||||
return files;
|
||||
}
|
||||
|
||||
for (const entry of entries) {
|
||||
const absolute = path.join(dir, entry.name);
|
||||
|
||||
if (entry.isDirectory()) {
|
||||
files.push(...collectRuleFiles(absolute));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (entry.name.endsWith('.md')) {
|
||||
files.push(path.relative(RULES_DIR, absolute));
|
||||
}
|
||||
|
||||
// Non-markdown files are ignored.
|
||||
}
|
||||
|
||||
return files;
|
||||
}
|
||||
|
||||
function validateRules() {
|
||||
if (!fs.existsSync(RULES_DIR)) {
|
||||
console.log('No rules directory found, skipping validation');
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const files = fs.readdirSync(RULES_DIR, { recursive: true })
|
||||
.filter(f => f.endsWith('.md'));
|
||||
const files = collectRuleFiles(RULES_DIR);
|
||||
let hasErrors = false;
|
||||
let validatedCount = 0;
|
||||
|
||||
|
||||
406
scripts/claw.js
406
scripts/claw.js
@@ -1,14 +1,8 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* NanoClaw — Barebones Agent REPL for Everything Claude Code
|
||||
* NanoClaw v2 — Barebones Agent REPL for Everything Claude Code
|
||||
*
|
||||
* A persistent, session-aware AI agent loop that delegates to `claude -p`.
|
||||
* Zero external dependencies. Markdown-as-database. Synchronous REPL.
|
||||
*
|
||||
* Usage:
|
||||
* node scripts/claw.js
|
||||
* CLAW_SESSION=my-project node scripts/claw.js
|
||||
* CLAW_SKILLS=tdd-workflow,security-review node scripts/claw.js
|
||||
* Zero external dependencies. Session-aware REPL around `claude -p`.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
@@ -19,29 +13,25 @@ const os = require('os');
|
||||
const { spawnSync } = require('child_process');
|
||||
const readline = require('readline');
|
||||
|
||||
// ─── Session name validation ────────────────────────────────────────────────
|
||||
|
||||
const SESSION_NAME_RE = /^[a-zA-Z0-9][-a-zA-Z0-9]*$/;
|
||||
const DEFAULT_MODEL = process.env.CLAW_MODEL || 'sonnet';
|
||||
const DEFAULT_COMPACT_KEEP_TURNS = 20;
|
||||
|
||||
function isValidSessionName(name) {
|
||||
return typeof name === 'string' && name.length > 0 && SESSION_NAME_RE.test(name);
|
||||
}
|
||||
|
||||
// ─── Storage Adapter (Markdown-as-Database) ─────────────────────────────────
|
||||
|
||||
function getClawDir() {
|
||||
return path.join(os.homedir(), '.claude', 'claw');
|
||||
}
|
||||
|
||||
function getSessionPath(name) {
|
||||
return path.join(getClawDir(), name + '.md');
|
||||
return path.join(getClawDir(), `${name}.md`);
|
||||
}
|
||||
|
||||
function listSessions(dir) {
|
||||
const clawDir = dir || getClawDir();
|
||||
if (!fs.existsSync(clawDir)) {
|
||||
return [];
|
||||
}
|
||||
if (!fs.existsSync(clawDir)) return [];
|
||||
return fs.readdirSync(clawDir)
|
||||
.filter(f => f.endsWith('.md'))
|
||||
.map(f => f.replace(/\.md$/, ''));
|
||||
@@ -50,7 +40,7 @@ function listSessions(dir) {
|
||||
function loadHistory(filePath) {
|
||||
try {
|
||||
return fs.readFileSync(filePath, 'utf8');
|
||||
} catch (_err) {
|
||||
} catch {
|
||||
return '';
|
||||
}
|
||||
}
|
||||
@@ -58,29 +48,27 @@ function loadHistory(filePath) {
|
||||
function appendTurn(filePath, role, content, timestamp) {
|
||||
const ts = timestamp || new Date().toISOString();
|
||||
const entry = `### [${ts}] ${role}\n${content}\n---\n`;
|
||||
const dir = path.dirname(filePath);
|
||||
fs.mkdirSync(dir, { recursive: true });
|
||||
fs.mkdirSync(path.dirname(filePath), { recursive: true });
|
||||
fs.appendFileSync(filePath, entry, 'utf8');
|
||||
}
|
||||
|
||||
// ─── Context & Delegation Pipeline ──────────────────────────────────────────
|
||||
function normalizeSkillList(raw) {
|
||||
if (!raw) return [];
|
||||
if (Array.isArray(raw)) return raw.map(s => String(s).trim()).filter(Boolean);
|
||||
return String(raw).split(',').map(s => s.trim()).filter(Boolean);
|
||||
}
|
||||
|
||||
function loadECCContext(skillList) {
|
||||
const raw = skillList !== undefined ? skillList : (process.env.CLAW_SKILLS || '');
|
||||
if (!raw.trim()) {
|
||||
return '';
|
||||
}
|
||||
const requested = normalizeSkillList(skillList !== undefined ? skillList : process.env.CLAW_SKILLS || '');
|
||||
if (requested.length === 0) return '';
|
||||
|
||||
const names = raw.split(',').map(s => s.trim()).filter(Boolean);
|
||||
const chunks = [];
|
||||
|
||||
for (const name of names) {
|
||||
for (const name of requested) {
|
||||
const skillPath = path.join(process.cwd(), 'skills', name, 'SKILL.md');
|
||||
try {
|
||||
const content = fs.readFileSync(skillPath, 'utf8');
|
||||
chunks.push(content);
|
||||
} catch (_err) {
|
||||
// Gracefully skip missing skills
|
||||
chunks.push(fs.readFileSync(skillPath, 'utf8'));
|
||||
} catch {
|
||||
// Skip missing skills silently to keep REPL usable.
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,38 +77,158 @@ function loadECCContext(skillList) {
|
||||
|
||||
function buildPrompt(systemPrompt, history, userMessage) {
|
||||
const parts = [];
|
||||
if (systemPrompt) {
|
||||
parts.push('=== SYSTEM CONTEXT ===\n' + systemPrompt + '\n');
|
||||
}
|
||||
if (history) {
|
||||
parts.push('=== CONVERSATION HISTORY ===\n' + history + '\n');
|
||||
}
|
||||
parts.push('=== USER MESSAGE ===\n' + userMessage);
|
||||
if (systemPrompt) parts.push(`=== SYSTEM CONTEXT ===\n${systemPrompt}\n`);
|
||||
if (history) parts.push(`=== CONVERSATION HISTORY ===\n${history}\n`);
|
||||
parts.push(`=== USER MESSAGE ===\n${userMessage}`);
|
||||
return parts.join('\n');
|
||||
}
|
||||
|
||||
// Invoke the `claude` CLI synchronously with the assembled prompt.
// `model` is optional and backward-compatible: when provided it is
// forwarded via --model. Returns trimmed stdout, or a bracketed
// "[Error: ...]" string when the process fails to start or exits non-zero
// with stderr output.
function askClaude(systemPrompt, history, userMessage, model) {
  const fullPrompt = buildPrompt(systemPrompt, history, userMessage);

  const cliArgs = model ? ['--model', model, '-p', fullPrompt] : ['-p', fullPrompt];

  const result = spawnSync('claude', cliArgs, {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    // Clear CLAUDECODE so the child does not think it is nested in a session.
    env: { ...process.env, CLAUDECODE: '' },
    timeout: 300000, // 5 minutes
  });

  if (result.error) {
    return `[Error: ${result.error.message}]`;
  }

  if (result.status !== 0 && result.stderr) {
    return `[Error: claude exited with code ${result.status}: ${result.stderr.trim()}]`;
  }

  return (result.stdout || '').trim();
}
|
||||
|
||||
// ─── REPL Commands ──────────────────────────────────────────────────────────
|
||||
// Parse serialized session history into turn records.
// Each turn is stored as: "### [<timestamp>] <role>\n<content>\n---\n".
function parseTurns(history) {
  const turnPattern = /### \[([^\]]+)\] ([^\n]+)\n([\s\S]*?)\n---\n/g;
  const turns = [];
  for (let m = turnPattern.exec(history); m !== null; m = turnPattern.exec(history)) {
    turns.push({ timestamp: m[1], role: m[2], content: m[3] });
  }
  return turns;
}
|
||||
|
||||
// Rough token estimate using the ~4-characters-per-token heuristic.
// Null/undefined input counts as empty text.
function estimateTokenCount(text) {
  const length = (text || '').length;
  return Math.ceil(length / 4);
}
|
||||
|
||||
// Summarize a session file: turn counts (total and per role), raw
// character count, and an estimated token count for the whole history.
function getSessionMetrics(filePath) {
  const history = loadHistory(filePath);
  const turns = parseTurns(history);

  let userTurns = 0;
  let assistantTurns = 0;
  for (const turn of turns) {
    if (turn.role === 'User') userTurns += 1;
    if (turn.role === 'Assistant') assistantTurns += 1;
  }

  return {
    turns: turns.length,
    userTurns,
    assistantTurns,
    charCount: history.length,
    tokenEstimate: estimateTokenCount(history),
  };
}
|
||||
|
||||
// Case-insensitive substring search across all saved sessions.
// Returns [{ session, snippet }] where snippet is ~80 characters around
// the first match with newlines flattened to spaces. Empty/blank queries
// return no results.
function searchSessions(query, dir) {
  const needle = String(query || '').toLowerCase().trim();
  if (!needle) return [];

  const hits = [];
  for (const sessionName of listSessions(dir)) {
    const text = loadHistory(getSessionPath(sessionName));
    if (!text) continue;

    const at = text.toLowerCase().indexOf(needle);
    if (at < 0) continue;

    const from = Math.max(0, at - 40);
    const to = Math.min(text.length, at + needle.length + 40);
    hits.push({
      session: sessionName,
      snippet: text.slice(from, to).replace(/\n/g, ' '),
    });
  }
  return hits;
}
|
||||
|
||||
// Rewrite a session file so only the most recent `keepTurns` turns remain,
// prefixed with a compaction header. Returns true when the file changed,
// false when there was nothing to compact.
function compactSession(filePath, keepTurns = DEFAULT_COMPACT_KEEP_TURNS) {
  const history = loadHistory(filePath);
  if (!history) return false;

  const turns = parseTurns(history);
  if (turns.length <= keepTurns) return false;

  const kept = turns.slice(-keepTurns);
  const header =
    '# NanoClaw Compaction\n' +
    `Compacted at: ${new Date().toISOString()}\n` +
    `Retained turns: ${keepTurns}/${turns.length}\n\n---\n`;
  const body = kept
    .map(turn => `### [${turn.timestamp}] ${turn.role}\n${turn.content}\n---\n`)
    .join('');

  fs.writeFileSync(filePath, header + body, 'utf8');
  return true;
}
|
||||
|
||||
// Export a session's history to markdown, JSON, or plain text.
// `outputPath` is optional; defaults to "<session>.export.<ext>" beside the
// session file. Returns { ok: true, path } on success or
// { ok: false, message } on empty history / unsupported format.
function exportSession(filePath, format, outputPath) {
  const history = loadHistory(filePath);
  const sessionName = path.basename(filePath, '.md');
  const fmt = String(format || 'md').toLowerCase();

  if (!history) {
    return { ok: false, message: 'No session history to export.' };
  }

  let target = outputPath;
  if (!target) {
    const ext = fmt === 'markdown' ? 'md' : fmt;
    target = path.join(path.dirname(filePath), `${sessionName}.export.${ext}`);
  }

  switch (fmt) {
    case 'md':
    case 'markdown':
      fs.writeFileSync(target, history, 'utf8');
      return { ok: true, path: target };

    case 'json': {
      const turns = parseTurns(history);
      fs.writeFileSync(target, JSON.stringify({ session: sessionName, turns }, null, 2), 'utf8');
      return { ok: true, path: target };
    }

    case 'txt':
    case 'text': {
      const rendered = parseTurns(history)
        .map(t => `[${t.timestamp}] ${t.role}:\n${t.content}\n`)
        .join('\n');
      fs.writeFileSync(target, rendered, 'utf8');
      return { ok: true, path: target };
    }

    default:
      return { ok: false, message: `Unsupported export format: ${format}` };
  }
}
|
||||
|
||||
// Fork the current session: copy its history into a new named session.
// Returns { ok: true, path, session } or { ok: false, message } when the
// target name is invalid.
function branchSession(currentSessionPath, newSessionName) {
  if (!isValidSessionName(newSessionName)) {
    return { ok: false, message: `Invalid branch session name: ${newSessionName}` };
  }

  const targetPath = getSessionPath(newSessionName);
  fs.mkdirSync(path.dirname(targetPath), { recursive: true });

  const history = loadHistory(currentSessionPath);
  fs.writeFileSync(targetPath, history, 'utf8');

  return { ok: true, path: targetPath, session: newSessionName };
}
|
||||
|
||||
// True when <cwd>/skills/<skillName>/SKILL.md exists on disk.
function skillExists(skillName) {
  const skillFile = path.join(process.cwd(), 'skills', skillName, 'SKILL.md');
  return fs.existsSync(skillFile);
}
|
||||
|
||||
function handleClear(sessionPath) {
|
||||
fs.mkdirSync(path.dirname(sessionPath), { recursive: true });
|
||||
@@ -132,72 +240,73 @@ function handleHistory(sessionPath) {
|
||||
const history = loadHistory(sessionPath);
|
||||
if (!history) {
|
||||
console.log('(no history)');
|
||||
} else {
|
||||
console.log(history);
|
||||
return;
|
||||
}
|
||||
console.log(history);
|
||||
}
|
||||
|
||||
// Print the list of saved sessions, or a placeholder when none exist.
function handleSessions(dir) {
  const sessions = listSessions(dir);
  if (sessions.length === 0) {
    console.log('(no sessions)');
    return;
  }
  console.log('Sessions:');
  sessions.forEach(s => console.log(` - ${s}`));
}
|
||||
|
||||
// Print the REPL command reference to stdout.
function handleHelp() {
  const lines = [
    'NanoClaw REPL Commands:',
    '  /help                  Show this help',
    '  /clear                 Clear current session history',
    '  /history               Print full conversation history',
    '  /sessions              List saved sessions',
    '  /model [name]          Show/set model',
    '  /load <skill-name>     Load a skill into active context',
    '  /branch <session-name> Branch current session into a new session',
    '  /search <query>        Search query across sessions',
    '  /compact               Keep recent turns, compact older context',
    '  /export <md|json|txt> [path] Export current session',
    '  /metrics               Show session metrics',
    '  exit                   Quit the REPL',
  ];
  for (const line of lines) {
    console.log(line);
  }
}
|
||||
|
||||
// ─── Main REPL ──────────────────────────────────────────────────────────────
|
||||
|
||||
function main() {
|
||||
const sessionName = process.env.CLAW_SESSION || 'default';
|
||||
|
||||
if (!isValidSessionName(sessionName)) {
|
||||
console.error('Error: Invalid session name "' + sessionName + '". Use alphanumeric characters and hyphens only.');
|
||||
const initialSessionName = process.env.CLAW_SESSION || 'default';
|
||||
if (!isValidSessionName(initialSessionName)) {
|
||||
console.error(`Error: Invalid session name "${initialSessionName}". Use alphanumeric characters and hyphens only.`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const clawDir = getClawDir();
|
||||
fs.mkdirSync(clawDir, { recursive: true });
|
||||
fs.mkdirSync(getClawDir(), { recursive: true });
|
||||
|
||||
const sessionPath = getSessionPath(sessionName);
|
||||
const eccContext = loadECCContext();
|
||||
const state = {
|
||||
sessionName: initialSessionName,
|
||||
sessionPath: getSessionPath(initialSessionName),
|
||||
model: DEFAULT_MODEL,
|
||||
skills: normalizeSkillList(process.env.CLAW_SKILLS || ''),
|
||||
};
|
||||
|
||||
const requestedSkills = (process.env.CLAW_SKILLS || '').split(',').map(s => s.trim()).filter(Boolean);
|
||||
const loadedCount = requestedSkills.filter(name =>
|
||||
fs.existsSync(path.join(process.cwd(), 'skills', name, 'SKILL.md'))
|
||||
).length;
|
||||
let eccContext = loadECCContext(state.skills);
|
||||
|
||||
console.log('NanoClaw v1.0 — Session: ' + sessionName);
|
||||
const loadedCount = state.skills.filter(skillExists).length;
|
||||
|
||||
console.log(`NanoClaw v2 — Session: ${state.sessionName}`);
|
||||
console.log(`Model: ${state.model}`);
|
||||
if (loadedCount > 0) {
|
||||
console.log('Loaded ' + loadedCount + ' skill(s) as context.');
|
||||
console.log(`Loaded ${loadedCount} skill(s) as context.`);
|
||||
}
|
||||
console.log('Type /help for commands, exit to quit.\n');
|
||||
|
||||
const rl = readline.createInterface({
|
||||
input: process.stdin,
|
||||
output: process.stdout
|
||||
});
|
||||
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
|
||||
|
||||
const prompt = () => {
|
||||
rl.question('claw> ', (input) => {
|
||||
const line = input.trim();
|
||||
|
||||
if (!line) {
|
||||
prompt();
|
||||
return;
|
||||
}
|
||||
if (!line) return prompt();
|
||||
|
||||
if (line === 'exit') {
|
||||
console.log('Goodbye.');
|
||||
@@ -205,37 +314,123 @@ function main() {
|
||||
return;
|
||||
}
|
||||
|
||||
if (line === '/help') {
|
||||
handleHelp();
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line === '/clear') {
|
||||
handleClear(sessionPath);
|
||||
prompt();
|
||||
return;
|
||||
handleClear(state.sessionPath);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line === '/history') {
|
||||
handleHistory(sessionPath);
|
||||
prompt();
|
||||
return;
|
||||
handleHistory(state.sessionPath);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line === '/sessions') {
|
||||
handleSessions();
|
||||
prompt();
|
||||
return;
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line === '/help') {
|
||||
handleHelp();
|
||||
prompt();
|
||||
return;
|
||||
if (line.startsWith('/model')) {
|
||||
const model = line.replace('/model', '').trim();
|
||||
if (!model) {
|
||||
console.log(`Current model: ${state.model}`);
|
||||
} else {
|
||||
state.model = model;
|
||||
console.log(`Model set to: ${state.model}`);
|
||||
}
|
||||
return prompt();
|
||||
}
|
||||
|
||||
// Regular message — send to Claude
|
||||
const history = loadHistory(sessionPath);
|
||||
appendTurn(sessionPath, 'User', line);
|
||||
const response = askClaude(eccContext, history, line);
|
||||
console.log('\n' + response + '\n');
|
||||
appendTurn(sessionPath, 'Assistant', response);
|
||||
if (line.startsWith('/load ')) {
|
||||
const skill = line.replace('/load', '').trim();
|
||||
if (!skill) {
|
||||
console.log('Usage: /load <skill-name>');
|
||||
return prompt();
|
||||
}
|
||||
if (!skillExists(skill)) {
|
||||
console.log(`Skill not found: ${skill}`);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (!state.skills.includes(skill)) {
|
||||
state.skills.push(skill);
|
||||
}
|
||||
eccContext = loadECCContext(state.skills);
|
||||
console.log(`Loaded skill: ${skill}`);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line.startsWith('/branch ')) {
|
||||
const target = line.replace('/branch', '').trim();
|
||||
const result = branchSession(state.sessionPath, target);
|
||||
if (!result.ok) {
|
||||
console.log(result.message);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
state.sessionName = result.session;
|
||||
state.sessionPath = result.path;
|
||||
console.log(`Branched to session: ${state.sessionName}`);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line.startsWith('/search ')) {
|
||||
const query = line.replace('/search', '').trim();
|
||||
const matches = searchSessions(query);
|
||||
if (matches.length === 0) {
|
||||
console.log('(no matches)');
|
||||
return prompt();
|
||||
}
|
||||
console.log(`Found ${matches.length} match(es):`);
|
||||
for (const match of matches) {
|
||||
console.log(`- ${match.session}: ${match.snippet}`);
|
||||
}
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line === '/compact') {
|
||||
const changed = compactSession(state.sessionPath);
|
||||
console.log(changed ? 'Session compacted.' : 'No compaction needed.');
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line.startsWith('/export ')) {
|
||||
const parts = line.split(/\s+/).filter(Boolean);
|
||||
const format = parts[1];
|
||||
const outputPath = parts[2];
|
||||
if (!format) {
|
||||
console.log('Usage: /export <md|json|txt> [path]');
|
||||
return prompt();
|
||||
}
|
||||
const result = exportSession(state.sessionPath, format, outputPath);
|
||||
if (!result.ok) {
|
||||
console.log(result.message);
|
||||
} else {
|
||||
console.log(`Exported: ${result.path}`);
|
||||
}
|
||||
return prompt();
|
||||
}
|
||||
|
||||
if (line === '/metrics') {
|
||||
const m = getSessionMetrics(state.sessionPath);
|
||||
console.log(`Session: ${state.sessionName}`);
|
||||
console.log(`Model: ${state.model}`);
|
||||
console.log(`Turns: ${m.turns} (user ${m.userTurns}, assistant ${m.assistantTurns})`);
|
||||
console.log(`Chars: ${m.charCount}`);
|
||||
console.log(`Estimated tokens: ${m.tokenEstimate}`);
|
||||
return prompt();
|
||||
}
|
||||
|
||||
// Regular message
|
||||
const history = loadHistory(state.sessionPath);
|
||||
appendTurn(state.sessionPath, 'User', line);
|
||||
const response = askClaude(eccContext, history, line, state.model);
|
||||
console.log(`\n${response}\n`);
|
||||
appendTurn(state.sessionPath, 'Assistant', response);
|
||||
prompt();
|
||||
});
|
||||
};
|
||||
@@ -243,8 +438,6 @@ function main() {
|
||||
prompt();
|
||||
}
|
||||
|
||||
// ─── Exports & CLI Entry ────────────────────────────────────────────────────
|
||||
|
||||
module.exports = {
|
||||
getClawDir,
|
||||
getSessionPath,
|
||||
@@ -252,14 +445,21 @@ module.exports = {
|
||||
loadHistory,
|
||||
appendTurn,
|
||||
loadECCContext,
|
||||
askClaude,
|
||||
buildPrompt,
|
||||
askClaude,
|
||||
isValidSessionName,
|
||||
handleClear,
|
||||
handleHistory,
|
||||
handleSessions,
|
||||
handleHelp,
|
||||
main
|
||||
parseTurns,
|
||||
estimateTokenCount,
|
||||
getSessionMetrics,
|
||||
searchSessions,
|
||||
compactSession,
|
||||
exportSession,
|
||||
branchSession,
|
||||
main,
|
||||
};
|
||||
|
||||
if (require.main === module) {
|
||||
|
||||
12
scripts/hooks/check-hook-enabled.js
Executable file
12
scripts/hooks/check-hook-enabled.js
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/usr/bin/env node
/**
 * CLI helper: prints "yes" or "no" depending on whether the given hook id
 * is enabled for the supplied comma-separated profile list.
 *
 * Usage: check-hook-enabled.js <hookId> [profilesCsv]
 * A missing hook id defaults to "yes" (nothing to gate).
 */
'use strict';

const { isHookEnabled } = require('../lib/hook-flags');

const hookId = process.argv[2];
const profilesCsv = process.argv[3];

if (!hookId) {
  process.stdout.write('yes');
  process.exit(0);
}

const enabled = isHookEnabled(hookId, { profiles: profilesCsv });
process.stdout.write(enabled ? 'yes' : 'no');
|
||||
78
scripts/hooks/cost-tracker.js
Executable file
78
scripts/hooks/cost-tracker.js
Executable file
@@ -0,0 +1,78 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Cost Tracker Hook
|
||||
*
|
||||
* Appends lightweight session usage metrics to ~/.claude/metrics/costs.jsonl.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const path = require('path');
|
||||
const {
|
||||
ensureDir,
|
||||
appendFile,
|
||||
getClaudeDir,
|
||||
} = require('../lib/utils');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let raw = '';
|
||||
|
||||
// Coerce to a finite number; anything non-finite (NaN, ±Infinity, bad
// strings) becomes 0 so downstream arithmetic stays safe.
function toNumber(value) {
  const parsed = Number(value);
  if (!Number.isFinite(parsed)) {
    return 0;
  }
  return parsed;
}
|
||||
|
||||
// Estimate USD cost from token counts using approximate per-1M-token rates.
// Model family is inferred by substring match; unknown models fall back to
// sonnet rates (opus wins if a name somehow matches both haiku and opus,
// matching the original check order). Result is rounded to micro-dollars.
function estimateCost(model, inputTokens, outputTokens) {
  const RATES = {
    haiku: { in: 0.8, out: 4.0 },
    sonnet: { in: 3.0, out: 15.0 },
    opus: { in: 15.0, out: 75.0 },
  };

  const id = String(model || '').toLowerCase();
  let family = 'sonnet';
  if (id.includes('haiku')) family = 'haiku';
  if (id.includes('opus')) family = 'opus';

  const { in: inRate, out: outRate } = RATES[family];
  const usd = (inputTokens / 1_000_000) * inRate + (outputTokens / 1_000_000) * outRate;
  return Math.round(usd * 1e6) / 1e6;
}
|
||||
|
||||
// Accumulate stdin (bounded by MAX_STDIN), then append one JSONL usage row
// to ~/.claude/metrics/costs.jsonl and pass the payload through unchanged.
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  const room = MAX_STDIN - raw.length;
  if (room > 0) {
    raw += chunk.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const input = raw.trim() ? JSON.parse(raw) : {};

    // Accept both Claude-style and OpenAI-style usage field names.
    const usage = input.usage || input.token_usage || {};
    const inputTokens = toNumber(usage.input_tokens || usage.prompt_tokens || 0);
    const outputTokens = toNumber(usage.output_tokens || usage.completion_tokens || 0);

    const model = String(input.model || input._cursor?.model || process.env.CLAUDE_MODEL || 'unknown');
    const sessionId = String(process.env.CLAUDE_SESSION_ID || 'default');

    const metricsDir = path.join(getClaudeDir(), 'metrics');
    ensureDir(metricsDir);

    const row = {
      timestamp: new Date().toISOString(),
      session_id: sessionId,
      model,
      input_tokens: inputTokens,
      output_tokens: outputTokens,
      estimated_cost_usd: estimateCost(model, inputTokens, outputTokens),
    };
    appendFile(path.join(metricsDir, 'costs.jsonl'), `${JSON.stringify(row)}\n`);
  } catch {
    // Malformed input must never block the hook pipeline.
  }

  process.stdout.write(raw);
});
|
||||
@@ -1,28 +1,63 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Doc file warning hook (PreToolUse - Write)
|
||||
* Warns about non-standard documentation files.
|
||||
* Exit code 0 always (warns only, never blocks).
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const path = require('path');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let data = '';
|
||||
process.stdin.on('data', c => (data += c));
|
||||
|
||||
// Decide whether a written file is an "allowed" documentation location.
// Returns true for: non-doc files (anything but .md/.txt), standard
// top-level doc names, files under .claude/{commands,plans,projects}/,
// files under docs/, skills/, .history/, or memory/, and *.plan.md files.
// Everything else is flagged (caller warns; never blocks).
function isAllowedDocPath(filePath) {
  // Normalize Windows separators so the directory checks work on any host OS.
  const normalized = filePath.replace(/\\/g, '/');

  // BUGFIX: derive the basename from the normalized path. path.basename()
  // only strips the host platform's separator, so a Windows-style path
  // (e.g. "C:\\repo\\README.md") checked on POSIX kept "repo\\" glued to the
  // filename and failed the allow-list regex, producing a false warning.
  const basename = normalized.slice(normalized.lastIndexOf('/') + 1);

  // Only .md/.txt files are policed at all.
  if (!/\.(md|txt)$/i.test(filePath)) return true;

  // Standard top-level documentation names are always fine.
  if (/^(README|CLAUDE|AGENTS|CONTRIBUTING|CHANGELOG|LICENSE|SKILL|MEMORY|WORKLOG)\.md$/i.test(basename)) {
    return true;
  }

  // Claude-managed directories.
  if (/\.claude\/(commands|plans|projects)\//.test(normalized)) {
    return true;
  }

  // Conventional documentation/skill/history directories.
  if (/(^|\/)(docs|skills|\.history|memory)\//.test(normalized)) {
    return true;
  }

  // Plan files may live anywhere.
  if (/\.plan\.md$/i.test(basename)) {
    return true;
  }

  return false;
}
|
||||
|
||||
// Stream stdin (bounded by MAX_STDIN), warn on stderr when the written
// file looks like a non-standard documentation file, then always pass the
// payload through on stdout.
process.stdin.setEncoding('utf8');
process.stdin.on('data', c => {
  const room = MAX_STDIN - data.length;
  if (room > 0) {
    data += c.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const input = JSON.parse(data);
    const filePath = String(input.tool_input?.file_path || '');

    if (filePath && !isAllowedDocPath(filePath)) {
      console.error('[Hook] WARNING: Non-standard documentation file detected');
      console.error(`[Hook] File: ${filePath}`);
      console.error('[Hook] Consider consolidating into README.md or docs/ directory');
    }
  } catch {
    // ignore parse errors
  }

  process.stdout.write(data);
});
|
||||
|
||||
27
scripts/hooks/post-bash-build-complete.js
Executable file
27
scripts/hooks/post-bash-build-complete.js
Executable file
@@ -0,0 +1,27 @@
|
||||
#!/usr/bin/env node
/**
 * PostToolUse hook: announce on stderr when an npm/pnpm/yarn build command
 * completes. Never blocks; the tool payload always passes through stdout.
 */
'use strict';

const MAX_STDIN = 1024 * 1024; // 1MB input cap

let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  const room = MAX_STDIN - raw.length;
  if (room > 0) {
    raw += chunk.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(raw);
    const command = String(payload.tool_input?.command || '');
    if (/(npm run build|pnpm build|yarn build)/.test(command)) {
      console.error('[Hook] Build completed - async analysis running in background');
    }
  } catch {
    // ignore parse errors and pass through
  }

  process.stdout.write(raw);
});
|
||||
36
scripts/hooks/post-bash-pr-created.js
Executable file
36
scripts/hooks/post-bash-pr-created.js
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env node
/**
 * PostToolUse hook: after `gh pr create`, surface the new PR URL and a
 * ready-made `gh pr review` command on stderr. Never blocks; the payload
 * always passes through stdout.
 */
'use strict';

const MAX_STDIN = 1024 * 1024; // 1MB input cap

let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  const room = MAX_STDIN - raw.length;
  if (room > 0) {
    raw += chunk.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(raw);
    const command = String(payload.tool_input?.command || '');

    if (/\bgh\s+pr\s+create\b/.test(command)) {
      const output = String(payload.tool_output?.output || '');
      const found = output.match(/https:\/\/github\.com\/[^/]+\/[^/]+\/pull\/\d+/);
      if (found) {
        const prUrl = found[0];
        // Pull "owner/repo" and the PR number back out of the URL.
        const repo = prUrl.replace(/https:\/\/github\.com\/([^/]+\/[^/]+)\/pull\/\d+/, '$1');
        const prNum = prUrl.replace(/.+\/pull\/(\d+)/, '$1');
        console.error(`[Hook] PR created: ${prUrl}`);
        console.error(`[Hook] To review: gh pr review ${prNum} --repo ${repo}`);
      }
    }
  } catch {
    // ignore parse errors and pass through
  }

  process.stdout.write(raw);
});
|
||||
73
scripts/hooks/pre-bash-dev-server-block.js
Executable file
73
scripts/hooks/pre-bash-dev-server-block.js
Executable file
@@ -0,0 +1,73 @@
|
||||
#!/usr/bin/env node
|
||||
'use strict';
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
|
||||
// Split a shell command line into trimmed segments at unquoted `;`, `&&`,
// `||`, and single `&` separators. Pipes (`|`) do NOT split — a piped dev
// command stays in one segment. Single- and double-quoted spans are kept
// intact (no escape handling).
function splitShellSegments(command) {
  const segments = [];
  let buffer = '';
  let activeQuote = null;

  const flush = () => {
    const trimmed = buffer.trim();
    if (trimmed) segments.push(trimmed);
    buffer = '';
  };

  let i = 0;
  while (i < command.length) {
    const ch = command[i];

    if (activeQuote) {
      if (ch === activeQuote) activeQuote = null;
      buffer += ch;
      i += 1;
      continue;
    }

    if (ch === '"' || ch === "'") {
      activeQuote = ch;
      buffer += ch;
      i += 1;
      continue;
    }

    const pair = command.slice(i, i + 2);
    if (pair === '&&' || pair === '||') {
      flush();
      i += 2;
      continue;
    }
    if (ch === ';' || ch === '&') {
      flush();
      i += 1;
      continue;
    }

    buffer += ch;
    i += 1;
  }

  flush();
  return segments;
}
|
||||
|
||||
// Read the hook payload (bounded by MAX_STDIN). On POSIX hosts, block dev
// server launches that are not wrapped in a tmux launcher (exit code 2);
// otherwise pass the payload through unchanged.
let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  const room = MAX_STDIN - raw.length;
  if (room > 0) {
    raw += chunk.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(raw);
    const command = String(payload.tool_input?.command || '');

    // tmux is unavailable on Windows, so only enforce elsewhere.
    if (process.platform !== 'win32') {
      const tmuxLauncher = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
      const devPattern = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn\s+dev|bun\s+run\s+dev)\b/;

      // A segment is blocked when it starts a dev server without tmux.
      const blocked = splitShellSegments(command).some(
        segment => devPattern.test(segment) && !tmuxLauncher.test(segment)
      );

      if (blocked) {
        console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');
        console.error('[Hook] Use: tmux new-session -d -s dev "npm run dev"');
        console.error('[Hook] Then: tmux attach -t dev');
        process.exit(2);
      }
    }
  } catch {
    // ignore parse errors and pass through
  }

  process.stdout.write(raw);
});
|
||||
28
scripts/hooks/pre-bash-git-push-reminder.js
Executable file
28
scripts/hooks/pre-bash-git-push-reminder.js
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/usr/bin/env node
/**
 * PreToolUse hook: emit a stderr reminder before `git push`.
 * Warn-only — never blocks; the payload always passes through stdout.
 */
'use strict';

const MAX_STDIN = 1024 * 1024; // 1MB input cap

let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  const room = MAX_STDIN - raw.length;
  if (room > 0) {
    raw += chunk.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const payload = JSON.parse(raw);
    const command = String(payload.tool_input?.command || '');
    if (/\bgit\s+push\b/.test(command)) {
      console.error('[Hook] Review changes before push...');
      console.error('[Hook] Continuing with push (remove this hook to add interactive review)');
    }
  } catch {
    // ignore parse errors and pass through
  }

  process.stdout.write(raw);
});
|
||||
33
scripts/hooks/pre-bash-tmux-reminder.js
Executable file
33
scripts/hooks/pre-bash-tmux-reminder.js
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env node
/**
 * PreToolUse hook: suggest running long-lived commands (installs, tests,
 * builds, docker, etc.) inside tmux when not already in a tmux session.
 * Warn-only: always exits 0 and passes the payload through stdout.
 */
'use strict';

const MAX_STDIN = 1024 * 1024; // 1MB input cap

// Commands that benefit from session persistence.
// BUGFIX: the pattern previously used `yarn (install|test)?` — the optional
// group made ANY `yarn ` command match (e.g. `yarn dev`), inconsistent with
// the npm/pnpm/bun alternatives. The stray `?` is removed so only
// install/test trigger the reminder.
const LONG_RUNNING =
  /(npm (install|test)|pnpm (install|test)|yarn (install|test)|bun (install|test)|cargo build|make\b|docker\b|pytest|vitest|playwright)/;

let raw = '';
process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  const room = MAX_STDIN - raw.length;
  if (room > 0) {
    raw += chunk.substring(0, room);
  }
});

process.stdin.on('end', () => {
  try {
    const input = JSON.parse(raw);
    const cmd = String(input.tool_input?.command || '');

    // Skip on Windows (no tmux) and when already inside tmux ($TMUX is set).
    if (process.platform !== 'win32' && !process.env.TMUX && LONG_RUNNING.test(cmd)) {
      console.error('[Hook] Consider running in tmux for session persistence');
      console.error('[Hook] tmux new -s dev | tmux attach -t dev');
    }
  } catch {
    // ignore parse errors and pass through
  }

  process.stdout.write(raw);
});
|
||||
@@ -1,61 +1,9 @@
|
||||
#!/usr/bin/env node
/**
 * Backward-compatible entrypoint for the documentation-file warning hook.
 * Retained for consumers that still invoke pre-write-doc-warn.js directly;
 * the actual logic now lives in doc-file-warning.js.
 */
'use strict';

require('./doc-file-warning.js');
|
||||
|
||||
98
scripts/hooks/quality-gate.js
Executable file
98
scripts/hooks/quality-gate.js
Executable file
@@ -0,0 +1,98 @@
|
||||
#!/usr/bin/env node
/**
 * Quality Gate Hook
 *
 * Runs lightweight quality checks after file edits.
 * - Targets one file when file_path is provided
 * - Falls back to no-op when language/tooling is unavailable
 *
 * Env flags:
 *   ECC_QUALITY_GATE_FIX=true    rewrite the file in place instead of check-only
 *   ECC_QUALITY_GATE_STRICT=true report check failures on stderr
 *
 * Always passes its stdin payload through on stdout so the hook chain keeps
 * working regardless of check outcomes.
 */

'use strict';

const fs = require('fs');
const path = require('path');
const { spawnSync } = require('child_process');

// Cap buffered stdin so an oversized payload cannot exhaust memory.
const MAX_STDIN = 1024 * 1024;
let raw = '';

/**
 * Run a command synchronously in `cwd`, capturing utf8 output.
 * @param {string} command - Executable name.
 * @param {string[]} args - Argument vector.
 * @param {string} [cwd=process.cwd()] - Working directory.
 * @returns {object} spawnSync result (status, stdout, stderr).
 */
function run(command, args, cwd = process.cwd()) {
  return spawnSync(command, args, {
    cwd,
    encoding: 'utf8',
    env: process.env,
  });
}

/** Log to stderr; stdout is reserved for the passthrough payload. */
function log(msg) {
  process.stderr.write(`${msg}\n`);
}

/**
 * Check (or fix) a JS/TS/JSON/MD file: biome when a biome config exists in
 * the working directory, otherwise prettier via npx.
 */
function checkWithWebTooling(filePath, fix, strict) {
  const cwd = process.cwd();
  const hasBiomeConfig =
    fs.existsSync(path.join(cwd, 'biome.json')) ||
    fs.existsSync(path.join(cwd, 'biome.jsonc'));

  if (hasBiomeConfig) {
    const args = ['biome', 'check', filePath];
    if (fix) args.push('--write');
    const result = run('npx', args);
    if (result.status !== 0 && strict) {
      log(`[QualityGate] Biome check failed for ${filePath}`);
    }
    return;
  }

  // Fallback to prettier when installed. Build the argument list up front
  // instead of patching a positional slot after construction (the original
  // `prettierArgs[1] = '--write'` was fragile against reordering).
  const prettier = run('npx', ['prettier', fix ? '--write' : '--check', filePath]);
  if (prettier.status !== 0 && strict) {
    log(`[QualityGate] Prettier check failed for ${filePath}`);
  }
}

/**
 * Dispatch a quality check based on the edited file's extension.
 * Unknown extensions and missing files are a silent no-op.
 * @param {string} filePath - Path reported by the edit event.
 */
function maybeRunQualityGate(filePath) {
  if (!filePath || !fs.existsSync(filePath)) {
    return;
  }

  const ext = path.extname(filePath).toLowerCase();
  const fix = String(process.env.ECC_QUALITY_GATE_FIX || '').toLowerCase() === 'true';
  const strict = String(process.env.ECC_QUALITY_GATE_STRICT || '').toLowerCase() === 'true';

  if (['.ts', '.tsx', '.js', '.jsx', '.json', '.md'].includes(ext)) {
    checkWithWebTooling(filePath, fix, strict);
    return;
  }

  // Go: only rewrite when fixing; there is no check-only branch here.
  if (ext === '.go' && fix) {
    run('gofmt', ['-w', filePath]);
    return;
  }

  if (ext === '.py') {
    const args = ['format'];
    if (!fix) args.push('--check');
    args.push(filePath);
    const r = run('ruff', args);
    if (r.status !== 0 && strict) {
      log(`[QualityGate] Ruff check failed for ${filePath}`);
    }
  }
}

process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  if (raw.length < MAX_STDIN) {
    const remaining = MAX_STDIN - raw.length;
    raw += chunk.substring(0, remaining);
  }
});

process.stdin.on('end', () => {
  try {
    const input = JSON.parse(raw);
    const filePath = String(input.tool_input?.file_path || '');
    maybeRunQualityGate(filePath);
  } catch {
    // Malformed payload: skip checks but still pass stdin through below.
  }

  process.stdout.write(raw);
});
|
||||
30
scripts/hooks/run-with-flags-shell.sh
Executable file
30
scripts/hooks/run-with-flags-shell.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
# Gate a hook script behind ECC hook profile flags (shell variant).
#
# Usage: run-with-flags-shell.sh <hookId> <relScriptPath> [profilesCsv]
#
# Reads the hook payload on stdin and passes it through untouched whenever
# the hook is disabled, arguments are missing, or the wrapped script cannot
# be found — the wrapper must never swallow the payload.
set -euo pipefail

HOOK_ID="${1:-}"
REL_SCRIPT_PATH="${2:-}"
PROFILES_CSV="${3:-standard,strict}"

# CLAUDE_PLUGIN_ROOT may be unset. Under `set -u` an unguarded expansion of
# it (as in the node invocation below) aborts the whole script before the
# fail-open passthrough can run, so capture it with a safe default first.
PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-}"

# Preserve stdin for passthrough or script execution
INPUT="$(cat)"

if [[ -z "$HOOK_ID" || -z "$REL_SCRIPT_PATH" || -z "$PLUGIN_ROOT" ]]; then
  printf '%s' "$INPUT"
  exit 0
fi

# Ask Node helper if this hook is enabled; fail open (enabled) on any error.
ENABLED="$(node "${PLUGIN_ROOT}/scripts/hooks/check-hook-enabled.js" "$HOOK_ID" "$PROFILES_CSV" 2>/dev/null || echo yes)"
if [[ "$ENABLED" != "yes" ]]; then
  printf '%s' "$INPUT"
  exit 0
fi

SCRIPT_PATH="${PLUGIN_ROOT}/${REL_SCRIPT_PATH}"
if [[ ! -f "$SCRIPT_PATH" ]]; then
  echo "[Hook] Script not found for ${HOOK_ID}: ${SCRIPT_PATH}" >&2
  printf '%s' "$INPUT"
  exit 0
fi

printf '%s' "$INPUT" | "$SCRIPT_PATH"
|
||||
80
scripts/hooks/run-with-flags.js
Executable file
80
scripts/hooks/run-with-flags.js
Executable file
@@ -0,0 +1,80 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Executes a hook script only when enabled by ECC hook profile flags.
|
||||
*
|
||||
* Usage:
|
||||
* node run-with-flags.js <hookId> <scriptRelativePath> [profilesCsv]
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
const { isHookEnabled } = require('../lib/hook-flags');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
|
||||
/**
 * Read all of stdin as utf8, capped at MAX_STDIN characters.
 * Resolves with whatever was collected so far even if the stream errors,
 * so the caller can always fall back to passthrough.
 * @returns {Promise<string>}
 */
function readStdinRaw() {
  return new Promise(resolve => {
    const pieces = [];
    let total = 0;

    process.stdin.setEncoding('utf8');
    process.stdin.on('data', chunk => {
      if (total >= MAX_STDIN) return;
      const piece = chunk.substring(0, MAX_STDIN - total);
      pieces.push(piece);
      total += piece.length;
    });

    const finish = () => resolve(pieces.join(''));
    process.stdin.on('end', finish);
    process.stdin.on('error', finish);
  });
}
|
||||
|
||||
/**
 * Resolve the plugin root directory.
 * A non-blank CLAUDE_PLUGIN_ROOT env var wins; otherwise the root is two
 * levels above this script (scripts/hooks/ -> plugin root).
 * @returns {string}
 */
function getPluginRoot() {
  const override = process.env.CLAUDE_PLUGIN_ROOT;
  if (override && override.trim()) {
    return override;
  }
  return path.resolve(__dirname, '..', '..');
}
|
||||
|
||||
/**
 * Entry point: run the hook script named on argv only when the hook id is
 * enabled for the current profile; otherwise echo stdin back unchanged.
 * Exit code mirrors the wrapped script so callers see its result.
 */
async function main() {
  const [, , hookId, relScriptPath, profilesCsv] = process.argv;
  const raw = await readStdinRaw();

  // Missing arguments: nothing to gate — pass the payload through.
  if (!hookId || !relScriptPath) {
    process.stdout.write(raw);
    process.exit(0);
  }

  // Disabled by profile or ECC_DISABLED_HOOKS — pass through untouched.
  if (!isHookEnabled(hookId, { profiles: profilesCsv })) {
    process.stdout.write(raw);
    process.exit(0);
  }

  const pluginRoot = getPluginRoot();
  const scriptPath = path.join(pluginRoot, relScriptPath);

  // Misconfigured path: warn on stderr but stay fail-open on stdout.
  if (!fs.existsSync(scriptPath)) {
    process.stderr.write(`[Hook] Script not found for ${hookId}: ${scriptPath}\n`);
    process.stdout.write(raw);
    process.exit(0);
  }

  // Run the wrapped hook synchronously, feeding it the captured payload.
  const result = spawnSync('node', [scriptPath], {
    input: raw,
    encoding: 'utf8',
    env: process.env,
    cwd: process.cwd(),
  });

  if (result.stdout) process.stdout.write(result.stdout);
  if (result.stderr) process.stderr.write(result.stderr);

  // Spawn failures leave status null; treat those as success (exit 0).
  const code = Number.isInteger(result.status) ? result.status : 0;
  process.exit(code);
}

// Never let a wrapper error block the hook pipeline: log and exit success.
main().catch(err => {
  process.stderr.write(`[Hook] run-with-flags error: ${err.message}\n`);
  process.exit(0);
});
|
||||
15
scripts/hooks/session-end-marker.js
Executable file
15
scripts/hooks/session-end-marker.js
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env node
// Session-end marker hook: a pure passthrough. Buffers stdin (capped at
// 1 MiB) and echoes it back on stdout once the stream ends.
'use strict';

const MAX_STDIN = 1024 * 1024;
const chunks = [];
let total = 0;

process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  if (total >= MAX_STDIN) return;
  const piece = chunk.substring(0, MAX_STDIN - total);
  chunks.push(piece);
  total += piece.length;
});
process.stdin.on('end', () => {
  process.stdout.write(chunks.join(''));
});
|
||||
@@ -1,12 +1,12 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Stop Hook (Session End) - Persist learnings when session ends
|
||||
* Stop Hook (Session End) - Persist learnings during active sessions
|
||||
*
|
||||
* Cross-platform (Windows, macOS, Linux)
|
||||
*
|
||||
* Runs when Claude session ends. Extracts a meaningful summary from
|
||||
* the session transcript (via stdin JSON transcript_path) and saves it
|
||||
* to a session file for cross-session continuity.
|
||||
* Runs on Stop events (after each response). Extracts a meaningful summary
|
||||
* from the session transcript (via stdin JSON transcript_path) and updates a
|
||||
* session file for cross-session continuity.
|
||||
*/
|
||||
|
||||
const path = require('path');
|
||||
@@ -23,6 +23,9 @@ const {
|
||||
log
|
||||
} = require('../lib/utils');
|
||||
|
||||
const SUMMARY_START_MARKER = '<!-- ECC:SUMMARY:START -->';
|
||||
const SUMMARY_END_MARKER = '<!-- ECC:SUMMARY:END -->';
|
||||
|
||||
/**
|
||||
* Extract a meaningful summary from the session transcript.
|
||||
* Reads the JSONL transcript and pulls out key information:
|
||||
@@ -167,16 +170,28 @@ async function main() {
|
||||
log(`[SessionEnd] Failed to update timestamp in ${sessionFile}`);
|
||||
}
|
||||
|
||||
// If we have a new summary, update the session file content
|
||||
// If we have a new summary, update only the generated summary block.
|
||||
// This keeps repeated Stop invocations idempotent and preserves
|
||||
// user-authored sections in the same session file.
|
||||
if (summary) {
|
||||
const existing = readFile(sessionFile);
|
||||
if (existing) {
|
||||
// Use a flexible regex that matches both "## Session Summary" and "## Current State"
|
||||
// Match to end-of-string to avoid duplicate ### Stats sections
|
||||
const updatedContent = existing.replace(
|
||||
/## (?:Session Summary|Current State)[\s\S]*?$/ ,
|
||||
buildSummarySection(summary).trim() + '\n'
|
||||
);
|
||||
const summaryBlock = buildSummaryBlock(summary);
|
||||
let updatedContent = existing;
|
||||
|
||||
if (existing.includes(SUMMARY_START_MARKER) && existing.includes(SUMMARY_END_MARKER)) {
|
||||
updatedContent = existing.replace(
|
||||
new RegExp(`${escapeRegExp(SUMMARY_START_MARKER)}[\\s\\S]*?${escapeRegExp(SUMMARY_END_MARKER)}`),
|
||||
summaryBlock
|
||||
);
|
||||
} else {
|
||||
// Migration path for files created before summary markers existed.
|
||||
updatedContent = existing.replace(
|
||||
/## (?:Session Summary|Current State)[\s\S]*?$/,
|
||||
`${summaryBlock}\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n`
|
||||
);
|
||||
}
|
||||
|
||||
writeFile(sessionFile, updatedContent);
|
||||
}
|
||||
}
|
||||
@@ -185,7 +200,7 @@ async function main() {
|
||||
} else {
|
||||
// Create new session file
|
||||
const summarySection = summary
|
||||
? buildSummarySection(summary)
|
||||
? `${buildSummaryBlock(summary)}\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\``
|
||||
: `## Current State\n\n[Session context goes here]\n\n### Completed\n- [ ]\n\n### In Progress\n- [ ]\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\``;
|
||||
|
||||
const template = `# Session: ${today}
|
||||
@@ -234,3 +249,10 @@ function buildSummarySection(summary) {
|
||||
return section;
|
||||
}
|
||||
|
||||
function buildSummaryBlock(summary) {
|
||||
return `${SUMMARY_START_MARKER}\n${buildSummarySection(summary).trim()}\n${SUMMARY_END_MARKER}`;
|
||||
}
|
||||
|
||||
function escapeRegExp(value) {
|
||||
return String(value).replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
|
||||
}
|
||||
|
||||
74
scripts/lib/hook-flags.js
Normal file
74
scripts/lib/hook-flags.js
Normal file
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Shared hook enable/disable controls.
|
||||
*
|
||||
* Controls:
|
||||
* - ECC_HOOK_PROFILE=minimal|standard|strict (default: standard)
|
||||
* - ECC_DISABLED_HOOKS=comma,separated,hook,ids
|
||||
*/
|
||||
|
||||
'use strict';

// Profiles a hook may opt into; anything else resolves to "standard".
const VALID_PROFILES = new Set(['minimal', 'standard', 'strict']);

/**
 * Normalize a hook id for comparison: trimmed, lowercase string.
 * @param {*} value - Raw id; null/undefined become ''.
 * @returns {string}
 */
function normalizeId(value) {
  return String(value || '').trim().toLowerCase();
}

/**
 * Resolve the active hook profile from ECC_HOOK_PROFILE.
 * Unknown or empty values fall back to 'standard'.
 * @returns {string} One of 'minimal' | 'standard' | 'strict'.
 */
function getHookProfile() {
  const raw = String(process.env.ECC_HOOK_PROFILE || 'standard').trim().toLowerCase();
  return VALID_PROFILES.has(raw) ? raw : 'standard';
}

/**
 * Parse ECC_DISABLED_HOOKS (comma-separated ids) into normalized ids.
 * @returns {Set<string>} Empty set when the variable is unset/blank.
 */
function getDisabledHookIds() {
  const raw = String(process.env.ECC_DISABLED_HOOKS || '');
  if (!raw.trim()) return new Set();

  return new Set(
    raw
      .split(',')
      .map(v => normalizeId(v))
      .filter(Boolean)
  );
}

/**
 * Parse a profile list (CSV string or array) into valid profile names.
 * Invalid entries are dropped; an empty result yields a copy of `fallback`.
 * @param {string|string[]|undefined} rawProfiles
 * @param {string[]} [fallback=['standard','strict']]
 * @returns {string[]}
 */
function parseProfiles(rawProfiles, fallback = ['standard', 'strict']) {
  if (!rawProfiles) return [...fallback];

  // Single normalization pipeline for both the array and CSV-string forms.
  // (Previously duplicated across two branches, which risked them drifting.)
  const values = Array.isArray(rawProfiles) ? rawProfiles : String(rawProfiles).split(',');
  const parsed = values
    .map(v => String(v || '').trim().toLowerCase())
    .filter(v => VALID_PROFILES.has(v));

  return parsed.length > 0 ? parsed : [...fallback];
}

/**
 * Decide whether a hook should run under the current environment.
 * Order: blank ids are always enabled; explicitly disabled ids lose;
 * otherwise the active profile must be in the hook's allowed profiles.
 * @param {string} hookId
 * @param {{profiles?: string|string[]}} [options]
 * @returns {boolean}
 */
function isHookEnabled(hookId, options = {}) {
  const id = normalizeId(hookId);
  if (!id) return true; // unidentified hooks are never blocked

  const disabled = getDisabledHookIds();
  if (disabled.has(id)) {
    return false;
  }

  const profile = getHookProfile();
  const allowedProfiles = parseProfiles(options.profiles);
  return allowedProfiles.includes(profile);
}
|
||||
|
||||
// Public API for hook gating: profile resolution plus per-hook enable checks.
module.exports = {
  VALID_PROFILES,
  normalizeId,
  getHookProfile,
  getDisabledHookIds,
  parseProfiles,
  isHookEnabled,
};
|
||||
@@ -16,10 +16,12 @@ const {
|
||||
log
|
||||
} = require('./utils');
|
||||
|
||||
// Session filename pattern: YYYY-MM-DD-[short-id]-session.tmp
|
||||
// The short-id is optional (old format) and can be 8+ alphanumeric characters
|
||||
// Matches: "2026-02-01-session.tmp" or "2026-02-01-a1b2c3d4-session.tmp"
|
||||
const SESSION_FILENAME_REGEX = /^(\d{4}-\d{2}-\d{2})(?:-([a-z0-9]{8,}))?-session\.tmp$/;
|
||||
// Session filename pattern: YYYY-MM-DD-[session-id]-session.tmp
|
||||
// The session-id is optional (old format) and can include lowercase
|
||||
// alphanumeric characters and hyphens, with a minimum length of 8.
|
||||
// Matches: "2026-02-01-session.tmp", "2026-02-01-a1b2c3d4-session.tmp",
|
||||
// and "2026-02-01-frontend-worktree-1-session.tmp"
|
||||
const SESSION_FILENAME_REGEX = /^(\d{4}-\d{2}-\d{2})(?:-([a-z0-9-]{8,}))?-session\.tmp$/;
|
||||
|
||||
/**
|
||||
* Parse session filename to extract metadata
|
||||
|
||||
73
skills/agent-harness-construction/SKILL.md
Normal file
73
skills/agent-harness-construction/SKILL.md
Normal file
@@ -0,0 +1,73 @@
|
||||
---
|
||||
name: agent-harness-construction
|
||||
description: Design and optimize AI agent action spaces, tool definitions, and observation formatting for higher completion rates.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Agent Harness Construction
|
||||
|
||||
Use this skill when you are improving how an agent plans, calls tools, recovers from errors, and converges on completion.
|
||||
|
||||
## Core Model
|
||||
|
||||
Agent output quality is constrained by:
|
||||
1. Action space quality
|
||||
2. Observation quality
|
||||
3. Recovery quality
|
||||
4. Context budget quality
|
||||
|
||||
## Action Space Design
|
||||
|
||||
1. Use stable, explicit tool names.
|
||||
2. Keep inputs schema-first and narrow.
|
||||
3. Return deterministic output shapes.
|
||||
4. Avoid catch-all tools unless isolation is impossible.
|
||||
|
||||
## Granularity Rules
|
||||
|
||||
- Use micro-tools for high-risk operations (deploy, migration, permissions).
|
||||
- Use medium tools for common edit/read/search loops.
|
||||
- Use macro-tools only when round-trip overhead is the dominant cost.
|
||||
|
||||
## Observation Design
|
||||
|
||||
Every tool response should include:
|
||||
- `status`: success|warning|error
|
||||
- `summary`: one-line result
|
||||
- `next_actions`: actionable follow-ups
|
||||
- `artifacts`: file paths / IDs
|
||||
|
||||
## Error Recovery Contract
|
||||
|
||||
For every error path, include:
|
||||
- root cause hint
|
||||
- safe retry instruction
|
||||
- explicit stop condition
|
||||
|
||||
## Context Budgeting
|
||||
|
||||
1. Keep system prompt minimal and invariant.
|
||||
2. Move large guidance into skills loaded on demand.
|
||||
3. Prefer references to files over inlining long documents.
|
||||
4. Compact at phase boundaries, not arbitrary token thresholds.
|
||||
|
||||
## Architecture Pattern Guidance
|
||||
|
||||
- ReAct: best for exploratory tasks with uncertain path.
|
||||
- Function-calling: best for structured deterministic flows.
|
||||
- Hybrid (recommended): ReAct planning + typed tool execution.
|
||||
|
||||
## Benchmarking
|
||||
|
||||
Track:
|
||||
- completion rate
|
||||
- retries per task
|
||||
- pass@1 and pass@3
|
||||
- cost per successful task
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
- Too many tools with overlapping semantics.
|
||||
- Opaque tool output with no recovery hints.
|
||||
- Error-only output without next steps.
|
||||
- Context overloading with irrelevant references.
|
||||
63
skills/agentic-engineering/SKILL.md
Normal file
63
skills/agentic-engineering/SKILL.md
Normal file
@@ -0,0 +1,63 @@
|
||||
---
|
||||
name: agentic-engineering
|
||||
description: Operate as an agentic engineer using eval-first execution, decomposition, and cost-aware model routing.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Agentic Engineering
|
||||
|
||||
Use this skill for engineering workflows where AI agents perform most implementation work and humans enforce quality and risk controls.
|
||||
|
||||
## Operating Principles
|
||||
|
||||
1. Define completion criteria before execution.
|
||||
2. Decompose work into agent-sized units.
|
||||
3. Route model tiers by task complexity.
|
||||
4. Measure with evals and regression checks.
|
||||
|
||||
## Eval-First Loop
|
||||
|
||||
1. Define capability eval and regression eval.
|
||||
2. Run baseline and capture failure signatures.
|
||||
3. Execute implementation.
|
||||
4. Re-run evals and compare deltas.
|
||||
|
||||
## Task Decomposition
|
||||
|
||||
Apply the 15-minute unit rule:
|
||||
- each unit should be independently verifiable
|
||||
- each unit should have a single dominant risk
|
||||
- each unit should expose a clear done condition
|
||||
|
||||
## Model Routing
|
||||
|
||||
- Haiku: classification, boilerplate transforms, narrow edits
|
||||
- Sonnet: implementation and refactors
|
||||
- Opus: architecture, root-cause analysis, multi-file invariants
|
||||
|
||||
## Session Strategy
|
||||
|
||||
- Continue session for closely-coupled units.
|
||||
- Start fresh session after major phase transitions.
|
||||
- Compact after milestone completion, not during active debugging.
|
||||
|
||||
## Review Focus for AI-Generated Code
|
||||
|
||||
Prioritize:
|
||||
- invariants and edge cases
|
||||
- error boundaries
|
||||
- security and auth assumptions
|
||||
- hidden coupling and rollout risk
|
||||
|
||||
Do not waste review cycles on style-only disagreements when automated format/lint already enforce style.
|
||||
|
||||
## Cost Discipline
|
||||
|
||||
Track per task:
|
||||
- model
|
||||
- token estimate
|
||||
- retries
|
||||
- wall-clock time
|
||||
- success/failure
|
||||
|
||||
Escalate model tier only when lower tier fails with a clear reasoning gap.
|
||||
51
skills/ai-first-engineering/SKILL.md
Normal file
51
skills/ai-first-engineering/SKILL.md
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
name: ai-first-engineering
|
||||
description: Engineering operating model for teams where AI agents generate a large share of implementation output.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# AI-First Engineering
|
||||
|
||||
Use this skill when designing process, reviews, and architecture for teams shipping with AI-assisted code generation.
|
||||
|
||||
## Process Shifts
|
||||
|
||||
1. Planning quality matters more than typing speed.
|
||||
2. Eval coverage matters more than anecdotal confidence.
|
||||
3. Review focus shifts from syntax to system behavior.
|
||||
|
||||
## Architecture Requirements
|
||||
|
||||
Prefer architectures that are agent-friendly:
|
||||
- explicit boundaries
|
||||
- stable contracts
|
||||
- typed interfaces
|
||||
- deterministic tests
|
||||
|
||||
Avoid implicit behavior spread across hidden conventions.
|
||||
|
||||
## Code Review in AI-First Teams
|
||||
|
||||
Review for:
|
||||
- behavior regressions
|
||||
- security assumptions
|
||||
- data integrity
|
||||
- failure handling
|
||||
- rollout safety
|
||||
|
||||
Minimize time spent on style issues already covered by automation.
|
||||
|
||||
## Hiring and Evaluation Signals
|
||||
|
||||
Strong AI-first engineers:
|
||||
- decompose ambiguous work cleanly
|
||||
- define measurable acceptance criteria
|
||||
- produce high-signal prompts and evals
|
||||
- enforce risk controls under delivery pressure
|
||||
|
||||
## Testing Standard
|
||||
|
||||
Raise testing bar for generated code:
|
||||
- required regression coverage for touched domains
|
||||
- explicit edge-case assertions
|
||||
- integration checks for interface boundaries
|
||||
@@ -6,6 +6,11 @@ origin: ECC
|
||||
|
||||
# Autonomous Loops Skill
|
||||
|
||||
> Compatibility note (v1.8.0): `autonomous-loops` is retained for one release.
|
||||
> The canonical skill name is now `continuous-agent-loop`. New loop guidance
|
||||
> should be authored there, while this skill remains available to avoid
|
||||
> breaking existing workflows.
|
||||
|
||||
Patterns, architectures, and reference implementations for running Claude Code autonomously in loops. Covers everything from simple `claude -p` pipelines to full RFC-driven multi-agent DAG orchestration.
|
||||
|
||||
## When to Use
|
||||
|
||||
45
skills/continuous-agent-loop/SKILL.md
Normal file
45
skills/continuous-agent-loop/SKILL.md
Normal file
@@ -0,0 +1,45 @@
|
||||
---
|
||||
name: continuous-agent-loop
|
||||
description: Patterns for continuous autonomous agent loops with quality gates, evals, and recovery controls.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Continuous Agent Loop
|
||||
|
||||
This is the v1.8+ canonical loop skill name. It supersedes `autonomous-loops` while keeping compatibility for one release.
|
||||
|
||||
## Loop Selection Flow
|
||||
|
||||
```text
|
||||
Start
|
||||
|
|
||||
+-- Need strict CI/PR control? -- yes --> continuous-pr
|
||||
|
|
||||
+-- Need RFC decomposition? -- yes --> rfc-dag
|
||||
|
|
||||
+-- Need exploratory parallel generation? -- yes --> infinite
|
||||
|
|
||||
+-- default --> sequential
|
||||
```
|
||||
|
||||
## Combined Pattern
|
||||
|
||||
Recommended production stack:
|
||||
1. RFC decomposition (`ralphinho-rfc-pipeline`)
|
||||
2. quality gates (`plankton-code-quality` + `/quality-gate`)
|
||||
3. eval loop (`eval-harness`)
|
||||
4. session persistence (`nanoclaw-repl`)
|
||||
|
||||
## Failure Modes
|
||||
|
||||
- loop churn without measurable progress
|
||||
- repeated retries with same root cause
|
||||
- merge queue stalls
|
||||
- cost drift from unbounded escalation
|
||||
|
||||
## Recovery
|
||||
|
||||
- freeze loop
|
||||
- run `/harness-audit`
|
||||
- reduce scope to failing unit
|
||||
- replay with explicit acceptance criteria
|
||||
133
skills/continuous-learning-v2/agents/observer-loop.sh
Executable file
133
skills/continuous-learning-v2/agents/observer-loop.sh
Executable file
@@ -0,0 +1,133 @@
|
||||
#!/usr/bin/env bash
# Continuous Learning v2 - Observer background loop
#
# Long-running daemon started by the observer control script. Expects these
# env vars from its launcher: PID_FILE, LOG_FILE, OBSERVATIONS_FILE,
# INSTINCTS_DIR, PROJECT_DIR, PROJECT_NAME, PROJECT_ID, MIN_OBSERVATIONS,
# OBSERVER_INTERVAL_SECONDS, and optionally CLV2_IS_WINDOWS,
# ECC_OBSERVER_ALLOW_WINDOWS, ECC_OBSERVER_TIMEOUT_SECONDS.
# NOTE(review): none of these are validated here — assumed set by launcher.

# Best-effort daemon: keep looping even when individual steps fail.
set +e
# Ensure the spawned `claude` CLI does not think it is nested inside a session.
unset CLAUDECODE

SLEEP_PID=""
USR1_FIRED=0

# On TERM/INT: kill any pending sleep and remove the PID file, but only if
# the PID file still belongs to this process (another observer may own it).
cleanup() {
  [ -n "$SLEEP_PID" ] && kill "$SLEEP_PID" 2>/dev/null
  if [ -f "$PID_FILE" ] && [ "$(cat "$PID_FILE" 2>/dev/null)" = "$$" ]; then
    rm -f "$PID_FILE"
  fi
  exit 0
}
trap cleanup TERM INT

# Analyze accumulated observations once enough have been collected, then
# archive the processed observations file. All output goes to LOG_FILE.
analyze_observations() {
  if [ ! -f "$OBSERVATIONS_FILE" ]; then
    return
  fi

  obs_count=$(wc -l < "$OBSERVATIONS_FILE" 2>/dev/null || echo 0)
  if [ "$obs_count" -lt "$MIN_OBSERVATIONS" ]; then
    return
  fi

  echo "[$(date)] Analyzing $obs_count observations for project ${PROJECT_NAME}..." >> "$LOG_FILE"

  # Skip the CLI on Windows/Git-Bash unless explicitly overridden (issue #295:
  # non-interactive `claude` can hang there).
  if [ "${CLV2_IS_WINDOWS:-false}" = "true" ] && [ "${ECC_OBSERVER_ALLOW_WINDOWS:-false}" != "true" ]; then
    echo "[$(date)] Skipping claude analysis on Windows due to known non-interactive hang issue (#295). Set ECC_OBSERVER_ALLOW_WINDOWS=true to override." >> "$LOG_FILE"
    return
  fi

  if ! command -v claude >/dev/null 2>&1; then
    echo "[$(date)] claude CLI not found, skipping analysis" >> "$LOG_FILE"
    return
  fi

  # Write the analysis prompt to a temp file so no shell-special characters
  # in paths/names can break quoting when it is fed to the CLI.
  prompt_file="$(mktemp "${TMPDIR:-/tmp}/ecc-observer-prompt.XXXXXX")"
  cat > "$prompt_file" <<PROMPT
Read ${OBSERVATIONS_FILE} and identify patterns for the project ${PROJECT_NAME} (user corrections, error resolutions, repeated workflows, tool preferences).
If you find 3+ occurrences of the same pattern, create an instinct file in ${INSTINCTS_DIR}/<id>.md.

CRITICAL: Every instinct file MUST use this exact format:

---
id: kebab-case-name
trigger: when <specific condition>
confidence: <0.3-0.85 based on frequency: 3-5 times=0.5, 6-10=0.7, 11+=0.85>
domain: <one of: code-style, testing, git, debugging, workflow, file-patterns>
source: session-observation
scope: project
project_id: ${PROJECT_ID}
project_name: ${PROJECT_NAME}
---

# Title

## Action
<what to do, one clear sentence>

## Evidence
- Observed N times in session <id>
- Pattern: <description>
- Last observed: <date>

Rules:
- Be conservative, only clear patterns with 3+ observations
- Use narrow, specific triggers
- Never include actual code snippets, only describe patterns
- If a similar instinct already exists in ${INSTINCTS_DIR}/, update it instead of creating a duplicate
- The YAML frontmatter (between --- markers) with id field is MANDATORY
- If a pattern seems universal (not project-specific), set scope to global instead of project
- Examples of global patterns: always validate user input, prefer explicit error handling
- Examples of project patterns: use React functional components, follow Django REST framework conventions
PROMPT

  timeout_seconds="${ECC_OBSERVER_TIMEOUT_SECONDS:-120}"
  exit_code=0

  # Run the analysis in the background with a watchdog subshell that kills it
  # after timeout_seconds, so a hung CLI cannot wedge the observer loop.
  claude --model haiku --max-turns 3 --print < "$prompt_file" >> "$LOG_FILE" 2>&1 &
  claude_pid=$!

  (
    sleep "$timeout_seconds"
    if kill -0 "$claude_pid" 2>/dev/null; then
      echo "[$(date)] Claude analysis timed out after ${timeout_seconds}s; terminating process" >> "$LOG_FILE"
      kill "$claude_pid" 2>/dev/null || true
    fi
  ) &
  watchdog_pid=$!

  wait "$claude_pid"
  exit_code=$?
  # Analysis finished (or was killed): the watchdog is no longer needed.
  kill "$watchdog_pid" 2>/dev/null || true
  rm -f "$prompt_file"

  if [ "$exit_code" -ne 0 ]; then
    echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE"
  fi

  # Archive processed observations so the next cycle starts fresh.
  if [ -f "$OBSERVATIONS_FILE" ]; then
    archive_dir="${PROJECT_DIR}/observations.archive"
    mkdir -p "$archive_dir"
    mv "$OBSERVATIONS_FILE" "$archive_dir/processed-$(date +%Y%m%d-%H%M%S)-$$.jsonl" 2>/dev/null || true
  fi
}

# USR1 triggers an immediate analysis: cancel the pending sleep, remember the
# trigger so the main loop skips its own (now redundant) scheduled run.
on_usr1() {
  [ -n "$SLEEP_PID" ] && kill "$SLEEP_PID" 2>/dev/null
  SLEEP_PID=""
  USR1_FIRED=1
  analyze_observations
}
trap on_usr1 USR1

echo "$$" > "$PID_FILE"
echo "[$(date)] Observer started for ${PROJECT_NAME} (PID: $$)" >> "$LOG_FILE"

# Main loop: interruptible sleep (background sleep + wait) so signal traps
# fire immediately instead of after a full interval.
while true; do
  sleep "$OBSERVER_INTERVAL_SECONDS" &
  SLEEP_PID=$!
  wait "$SLEEP_PID" 2>/dev/null
  SLEEP_PID=""

  # Skip the scheduled analysis when USR1 already ran one this cycle.
  if [ "$USR1_FIRED" -eq 1 ]; then
    USR1_FIRED=0
  else
    analyze_observations
  fi
done
|
||||
@@ -23,6 +23,7 @@ set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
SKILL_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
OBSERVER_LOOP_SCRIPT="${SCRIPT_DIR}/observer-loop.sh"
|
||||
|
||||
# Source shared project detection helper
|
||||
# This sets: PROJECT_ID, PROJECT_NAME, PROJECT_ROOT, PROJECT_DIR
|
||||
@@ -74,6 +75,13 @@ OBSERVER_INTERVAL_SECONDS=$((OBSERVER_INTERVAL_MINUTES * 60))
|
||||
echo "Project: ${PROJECT_NAME} (${PROJECT_ID})"
|
||||
echo "Storage: ${PROJECT_DIR}"
|
||||
|
||||
# Windows/Git-Bash detection (Issue #295)
|
||||
UNAME_LOWER="$(uname -s 2>/dev/null | tr '[:upper:]' '[:lower:]')"
|
||||
IS_WINDOWS=false
|
||||
case "$UNAME_LOWER" in
|
||||
*mingw*|*msys*|*cygwin*) IS_WINDOWS=true ;;
|
||||
esac
|
||||
|
||||
case "${1:-start}" in
|
||||
stop)
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
@@ -135,8 +143,13 @@ case "${1:-start}" in
|
||||
|
||||
echo "Starting observer agent for ${PROJECT_NAME}..."
|
||||
|
||||
if [ ! -x "$OBSERVER_LOOP_SCRIPT" ]; then
|
||||
echo "Observer loop script not found or not executable: $OBSERVER_LOOP_SCRIPT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# The observer loop — fully detached with nohup, IO redirected to log.
|
||||
# Variables passed safely via env to avoid shell injection from special chars in paths.
|
||||
# Variables are passed via env; observer-loop.sh handles analysis/retry flow.
|
||||
nohup env \
|
||||
CONFIG_DIR="$CONFIG_DIR" \
|
||||
PID_FILE="$PID_FILE" \
|
||||
@@ -148,116 +161,8 @@ case "${1:-start}" in
|
||||
PROJECT_ID="$PROJECT_ID" \
|
||||
MIN_OBSERVATIONS="$MIN_OBSERVATIONS" \
|
||||
OBSERVER_INTERVAL_SECONDS="$OBSERVER_INTERVAL_SECONDS" \
|
||||
/bin/bash -c '
|
||||
set +e
|
||||
unset CLAUDECODE
|
||||
|
||||
SLEEP_PID=""
|
||||
USR1_FIRED=0
|
||||
|
||||
cleanup() {
|
||||
[ -n "$SLEEP_PID" ] && kill "$SLEEP_PID" 2>/dev/null
|
||||
# Only remove PID file if it still belongs to this process
|
||||
if [ -f "$PID_FILE" ] && [ "$(cat "$PID_FILE" 2>/dev/null)" = "$$" ]; then
|
||||
rm -f "$PID_FILE"
|
||||
fi
|
||||
exit 0
|
||||
}
|
||||
trap cleanup TERM INT
|
||||
|
||||
analyze_observations() {
|
||||
if [ ! -f "$OBSERVATIONS_FILE" ]; then
|
||||
return
|
||||
fi
|
||||
obs_count=$(wc -l < "$OBSERVATIONS_FILE" 2>/dev/null || echo 0)
|
||||
if [ "$obs_count" -lt "$MIN_OBSERVATIONS" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
echo "[$(date)] Analyzing $obs_count observations for project ${PROJECT_NAME}..." >> "$LOG_FILE"
|
||||
|
||||
# Use Claude Code with Haiku to analyze observations
|
||||
# The prompt specifies project-scoped instinct creation
|
||||
if command -v claude &> /dev/null; then
|
||||
exit_code=0
|
||||
claude --model haiku --max-turns 3 --print \
|
||||
"Read $OBSERVATIONS_FILE and identify patterns for the project '${PROJECT_NAME}' (user corrections, error resolutions, repeated workflows, tool preferences).
|
||||
If you find 3+ occurrences of the same pattern, create an instinct file in $INSTINCTS_DIR/<id>.md.
|
||||
|
||||
CRITICAL: Every instinct file MUST use this exact format:
|
||||
|
||||
---
|
||||
id: kebab-case-name
|
||||
trigger: \"when <specific condition>\"
|
||||
confidence: <0.3-0.85 based on frequency: 3-5 times=0.5, 6-10=0.7, 11+=0.85>
|
||||
domain: <one of: code-style, testing, git, debugging, workflow, file-patterns>
|
||||
source: session-observation
|
||||
scope: project
|
||||
project_id: ${PROJECT_ID}
|
||||
project_name: ${PROJECT_NAME}
|
||||
---
|
||||
|
||||
# Title
|
||||
|
||||
## Action
|
||||
<what to do, one clear sentence>
|
||||
|
||||
## Evidence
|
||||
- Observed N times in session <id>
|
||||
- Pattern: <description>
|
||||
- Last observed: <date>
|
||||
|
||||
Rules:
|
||||
- Be conservative, only clear patterns with 3+ observations
|
||||
- Use narrow, specific triggers
|
||||
- Never include actual code snippets, only describe patterns
|
||||
- If a similar instinct already exists in $INSTINCTS_DIR/, update it instead of creating a duplicate
|
||||
- The YAML frontmatter (between --- markers) with id field is MANDATORY
|
||||
- If a pattern seems universal (not project-specific), set scope to 'global' instead of 'project'
|
||||
- Examples of global patterns: 'always validate user input', 'prefer explicit error handling'
|
||||
- Examples of project patterns: 'use React functional components', 'follow Django REST framework conventions'" \
|
||||
>> "$LOG_FILE" 2>&1 || exit_code=$?
|
||||
if [ "$exit_code" -ne 0 ]; then
|
||||
echo "[$(date)] Claude analysis failed (exit $exit_code)" >> "$LOG_FILE"
|
||||
fi
|
||||
else
|
||||
echo "[$(date)] claude CLI not found, skipping analysis" >> "$LOG_FILE"
|
||||
fi
|
||||
|
||||
if [ -f "$OBSERVATIONS_FILE" ]; then
|
||||
archive_dir="${PROJECT_DIR}/observations.archive"
|
||||
mkdir -p "$archive_dir"
|
||||
mv "$OBSERVATIONS_FILE" "$archive_dir/processed-$(date +%Y%m%d-%H%M%S)-$$.jsonl" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
on_usr1() {
|
||||
# Kill pending sleep to avoid leak, then analyze
|
||||
[ -n "$SLEEP_PID" ] && kill "$SLEEP_PID" 2>/dev/null
|
||||
SLEEP_PID=""
|
||||
USR1_FIRED=1
|
||||
analyze_observations
|
||||
}
|
||||
trap on_usr1 USR1
|
||||
|
||||
echo "$$" > "$PID_FILE"
|
||||
echo "[$(date)] Observer started for ${PROJECT_NAME} (PID: $$)" >> "$LOG_FILE"
|
||||
|
||||
while true; do
|
||||
# Interruptible sleep — allows USR1 trap to fire immediately
|
||||
sleep "$OBSERVER_INTERVAL_SECONDS" &
|
||||
SLEEP_PID=$!
|
||||
wait $SLEEP_PID 2>/dev/null
|
||||
SLEEP_PID=""
|
||||
|
||||
# Skip scheduled analysis if USR1 already ran it
|
||||
if [ "$USR1_FIRED" -eq 1 ]; then
|
||||
USR1_FIRED=0
|
||||
else
|
||||
analyze_observations
|
||||
fi
|
||||
done
|
||||
' >> "$LOG_FILE" 2>&1 &
|
||||
CLV2_IS_WINDOWS="$IS_WINDOWS" \
|
||||
"$OBSERVER_LOOP_SCRIPT" >> "$LOG_FILE" 2>&1 &
|
||||
|
||||
# Wait for PID file
|
||||
sleep 2
|
||||
|
||||
@@ -116,4 +116,4 @@ Homunculus v2 takes a more sophisticated approach:
|
||||
4. **Domain tagging** - code-style, testing, git, debugging, etc.
|
||||
5. **Evolution path** - Cluster related instincts into skills/commands
|
||||
|
||||
See: `/Users/affoon/Documents/tasks/12-continuous-learning-v2.md` for full spec.
|
||||
See: `docs/continuous-learning-v2-spec.md` for full spec.
|
||||
|
||||
50
skills/enterprise-agent-ops/SKILL.md
Normal file
50
skills/enterprise-agent-ops/SKILL.md
Normal file
@@ -0,0 +1,50 @@
|
||||
---
|
||||
name: enterprise-agent-ops
|
||||
description: Operate long-lived agent workloads with observability, security boundaries, and lifecycle management.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Enterprise Agent Ops
|
||||
|
||||
Use this skill for cloud-hosted or continuously running agent systems that need operational controls beyond single CLI sessions.
|
||||
|
||||
## Operational Domains
|
||||
|
||||
1. runtime lifecycle (start, pause, stop, restart)
|
||||
2. observability (logs, metrics, traces)
|
||||
3. safety controls (scopes, permissions, kill switches)
|
||||
4. change management (rollout, rollback, audit)
|
||||
|
||||
## Baseline Controls
|
||||
|
||||
- immutable deployment artifacts
|
||||
- least-privilege credentials
|
||||
- environment-level secret injection
|
||||
- hard timeout and retry budgets
|
||||
- audit log for high-risk actions
|
||||
|
||||
## Metrics to Track
|
||||
|
||||
- success rate
|
||||
- mean retries per task
|
||||
- time to recovery
|
||||
- cost per successful task
|
||||
- failure class distribution
|
||||
|
||||
## Incident Pattern
|
||||
|
||||
When failure spikes:
|
||||
1. freeze new rollout
|
||||
2. capture representative traces
|
||||
3. isolate failing route
|
||||
4. patch with smallest safe change
|
||||
5. run regression + security checks
|
||||
6. resume gradually
|
||||
|
||||
## Deployment Integrations
|
||||
|
||||
This skill pairs with:
|
||||
- PM2 workflows
|
||||
- systemd services
|
||||
- container orchestrators
|
||||
- CI/CD gates
|
||||
@@ -234,3 +234,37 @@ Capability: 5/5 passed (pass@3: 100%)
|
||||
Regression: 3/3 passed (pass^3: 100%)
|
||||
Status: SHIP IT
|
||||
```
|
||||
|
||||
## Product Evals (v1.8)
|
||||
|
||||
Use product evals when behavior quality cannot be captured by unit tests alone.
|
||||
|
||||
### Grader Types
|
||||
|
||||
1. Code grader (deterministic assertions)
|
||||
2. Rule grader (regex/schema constraints)
|
||||
3. Model grader (LLM-as-judge rubric)
|
||||
4. Human grader (manual adjudication for ambiguous outputs)
|
||||
|
||||
### pass@k Guidance
|
||||
|
||||
- `pass@1`: direct reliability
|
||||
- `pass@3`: practical reliability under controlled retries
|
||||
- `pass^3`: stability test (all 3 runs must pass)
|
||||
|
||||
Recommended thresholds:
|
||||
- Capability evals: pass@3 >= 0.90
|
||||
- Regression evals: pass^3 = 1.00 for release-critical paths
|
||||
|
||||
### Eval Anti-Patterns
|
||||
|
||||
- Overfitting prompts to known eval examples
|
||||
- Measuring only happy-path outputs
|
||||
- Ignoring cost and latency drift while chasing pass rates
|
||||
- Allowing flaky graders in release gates
|
||||
|
||||
### Minimal Eval Artifact Layout
|
||||
|
||||
- `.claude/evals/<feature>.md` definition
|
||||
- `.claude/evals/<feature>.log` run history
|
||||
- `docs/releases/<version>/eval-summary.md` release snapshot
|
||||
|
||||
33
skills/nanoclaw-repl/SKILL.md
Normal file
33
skills/nanoclaw-repl/SKILL.md
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
name: nanoclaw-repl
|
||||
description: Operate and extend NanoClaw v2, ECC's zero-dependency session-aware REPL built on claude -p.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# NanoClaw REPL
|
||||
|
||||
Use this skill when running or extending `scripts/claw.js`.
|
||||
|
||||
## Capabilities
|
||||
|
||||
- persistent markdown-backed sessions
|
||||
- model switching with `/model`
|
||||
- dynamic skill loading with `/load`
|
||||
- session branching with `/branch`
|
||||
- cross-session search with `/search`
|
||||
- history compaction with `/compact`
|
||||
- export to md/json/txt with `/export`
|
||||
- session metrics with `/metrics`
|
||||
|
||||
## Operating Guidance
|
||||
|
||||
1. Keep sessions task-focused.
|
||||
2. Branch before high-risk changes.
|
||||
3. Compact after major milestones.
|
||||
4. Export before sharing or archival.
|
||||
|
||||
## Extension Rules
|
||||
|
||||
- keep zero external runtime dependencies
|
||||
- preserve markdown-as-database compatibility
|
||||
- keep command handlers deterministic and local
|
||||
@@ -194,3 +194,46 @@ Plankton's `.claude/hooks/config.json` controls all behavior:
|
||||
- Plankton (credit: @alxfazio)
|
||||
- Plankton REFERENCE.md — Full architecture documentation (credit: @alxfazio)
|
||||
- Plankton SETUP.md — Detailed installation guide (credit: @alxfazio)
|
||||
|
||||
## ECC v1.8 Additions
|
||||
|
||||
### Copyable Hook Profile
|
||||
|
||||
Set strict quality behavior:
|
||||
|
||||
```bash
|
||||
export ECC_HOOK_PROFILE=strict
|
||||
export ECC_QUALITY_GATE_FIX=true
|
||||
export ECC_QUALITY_GATE_STRICT=true
|
||||
```
|
||||
|
||||
### Language Gate Table
|
||||
|
||||
- TypeScript/JavaScript: Biome preferred, Prettier fallback
|
||||
- Python: Ruff format/check
|
||||
- Go: gofmt
|
||||
|
||||
### Config Tamper Guard
|
||||
|
||||
During quality enforcement, flag changes to config files in same iteration:
|
||||
|
||||
- `biome.json`, `.eslintrc*`, `prettier.config*`, `tsconfig.json`, `pyproject.toml`
|
||||
|
||||
If config is changed to suppress violations, require explicit review before merge.
|
||||
|
||||
### CI Integration Pattern
|
||||
|
||||
Use the same commands in CI as local hooks:
|
||||
|
||||
1. run formatter checks
|
||||
2. run lint/type checks
|
||||
3. fail fast on strict mode
|
||||
4. publish remediation summary
|
||||
|
||||
### Health Metrics
|
||||
|
||||
Track:
|
||||
- edits flagged by gates
|
||||
- average remediation time
|
||||
- repeat violations by category
|
||||
- merge blocks due to gate failures
|
||||
|
||||
67
skills/ralphinho-rfc-pipeline/SKILL.md
Normal file
67
skills/ralphinho-rfc-pipeline/SKILL.md
Normal file
@@ -0,0 +1,67 @@
|
||||
---
|
||||
name: ralphinho-rfc-pipeline
|
||||
description: RFC-driven multi-agent DAG execution pattern with quality gates, merge queues, and work unit orchestration.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Ralphinho RFC Pipeline
|
||||
|
||||
Inspired by [humanplane](https://github.com/humanplane) style RFC decomposition patterns and multi-unit orchestration workflows.
|
||||
|
||||
Use this skill when a feature is too large for a single agent pass and must be split into independently verifiable work units.
|
||||
|
||||
## Pipeline Stages
|
||||
|
||||
1. RFC intake
|
||||
2. DAG decomposition
|
||||
3. Unit assignment
|
||||
4. Unit implementation
|
||||
5. Unit validation
|
||||
6. Merge queue and integration
|
||||
7. Final system verification
|
||||
|
||||
## Unit Spec Template
|
||||
|
||||
Each work unit should include:
|
||||
- `id`
|
||||
- `depends_on`
|
||||
- `scope`
|
||||
- `acceptance_tests`
|
||||
- `risk_level`
|
||||
- `rollback_plan`
|
||||
|
||||
## Complexity Tiers
|
||||
|
||||
- Tier 1: isolated file edits, deterministic tests
|
||||
- Tier 2: multi-file behavior changes, moderate integration risk
|
||||
- Tier 3: schema/auth/perf/security changes
|
||||
|
||||
## Quality Pipeline per Unit
|
||||
|
||||
1. research
|
||||
2. implementation plan
|
||||
3. implementation
|
||||
4. tests
|
||||
5. review
|
||||
6. merge-ready report
|
||||
|
||||
## Merge Queue Rules
|
||||
|
||||
- Never merge a unit with unresolved dependency failures.
|
||||
- Always rebase unit branches on latest integration branch.
|
||||
- Re-run integration tests after each queued merge.
|
||||
|
||||
## Recovery
|
||||
|
||||
If a unit stalls:
|
||||
- evict from active queue
|
||||
- snapshot findings
|
||||
- regenerate narrowed unit scope
|
||||
- retry with updated constraints
|
||||
|
||||
## Outputs
|
||||
|
||||
- RFC execution log
|
||||
- unit scorecards
|
||||
- dependency graph snapshot
|
||||
- integration risk summary
|
||||
@@ -1183,7 +1183,7 @@ async function runTests() {
|
||||
assert.ok(hooks.hooks.PreCompact, 'Should have PreCompact hooks');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('all hook commands use node or are skill shell scripts', () => {
|
||||
if (test('all hook commands use node or approved shell wrappers', () => {
|
||||
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
|
||||
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
|
||||
|
||||
@@ -1196,9 +1196,11 @@ async function runTests() {
|
||||
/^(bash|sh)\s/.test(hook.command) ||
|
||||
hook.command.startsWith('${CLAUDE_PLUGIN_ROOT}/skills/')
|
||||
);
|
||||
const isHookShellWrapper = /^(bash|sh)\s+["']?\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/hooks\/run-with-flags-shell\.sh/.test(hook.command);
|
||||
const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js');
|
||||
assert.ok(
|
||||
isNode || isSkillScript,
|
||||
`Hook command should start with 'node' or be a skill shell script: ${hook.command.substring(0, 80)}...`
|
||||
isNode || isSkillScript || isHookShellWrapper || isSessionStartFallback,
|
||||
`Hook command should use node or approved shell wrapper: ${hook.command.substring(0, 100)}...`
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1210,7 +1212,7 @@ async function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('script references use CLAUDE_PLUGIN_ROOT variable', () => {
|
||||
if (test('script references use CLAUDE_PLUGIN_ROOT variable (except SessionStart fallback)', () => {
|
||||
const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json');
|
||||
const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8'));
|
||||
|
||||
@@ -1219,7 +1221,8 @@ async function runTests() {
|
||||
for (const hook of entry.hooks) {
|
||||
if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) {
|
||||
// Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command
|
||||
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}');
|
||||
const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js');
|
||||
const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartFallback;
|
||||
assert.ok(
|
||||
hasPluginRoot,
|
||||
`Script paths should use CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...`
|
||||
|
||||
@@ -11,6 +11,7 @@ const path = require('path');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const { spawn } = require('child_process');
|
||||
const REPO_ROOT = path.join(__dirname, '..', '..');
|
||||
|
||||
// Test helper
|
||||
function _test(name, fn) {
|
||||
@@ -90,22 +91,20 @@ function runHookWithInput(scriptPath, input = {}, env = {}, timeoutMs = 10000) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Run an inline hook command (like those in hooks.json)
|
||||
* @param {string} command - The node -e "..." command
|
||||
* Run a hook command string exactly as declared in hooks.json.
|
||||
* Supports wrapped node script commands and shell wrappers.
|
||||
* @param {string} command - Hook command from hooks.json
|
||||
* @param {object} input - Hook input object
|
||||
* @param {object} env - Environment variables
|
||||
*/
|
||||
function _runInlineHook(command, input = {}, env = {}, timeoutMs = 10000) {
|
||||
function runHookCommand(command, input = {}, env = {}, timeoutMs = 10000) {
|
||||
return new Promise((resolve, reject) => {
|
||||
// Extract the code from node -e "..."
|
||||
const match = command.match(/^node -e "(.+)"$/s);
|
||||
if (!match) {
|
||||
reject(new Error('Invalid inline hook command format'));
|
||||
return;
|
||||
}
|
||||
const isWindows = process.platform === 'win32';
|
||||
const shell = isWindows ? 'cmd' : 'bash';
|
||||
const shellArgs = isWindows ? ['/d', '/s', '/c', command] : ['-lc', command];
|
||||
|
||||
const proc = spawn('node', ['-e', match[1]], {
|
||||
env: { ...process.env, ...env },
|
||||
const proc = spawn(shell, shellArgs, {
|
||||
env: { ...process.env, CLAUDE_PLUGIN_ROOT: REPO_ROOT, ...env },
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
@@ -116,9 +115,9 @@ function _runInlineHook(command, input = {}, env = {}, timeoutMs = 10000) {
|
||||
proc.stdout.on('data', data => stdout += data);
|
||||
proc.stderr.on('data', data => stderr += data);
|
||||
|
||||
// Ignore EPIPE errors (process may exit before we finish writing)
|
||||
// Ignore EPIPE/EOF errors (process may exit before we finish writing)
|
||||
proc.stdin.on('error', (err) => {
|
||||
if (err.code !== 'EPIPE') {
|
||||
if (err.code !== 'EPIPE' && err.code !== 'EOF') {
|
||||
if (timer) clearTimeout(timer);
|
||||
reject(err);
|
||||
}
|
||||
@@ -130,8 +129,8 @@ function _runInlineHook(command, input = {}, env = {}, timeoutMs = 10000) {
|
||||
proc.stdin.end();
|
||||
|
||||
timer = setTimeout(() => {
|
||||
proc.kill('SIGKILL');
|
||||
reject(new Error(`Inline hook timed out after ${timeoutMs}ms`));
|
||||
proc.kill(isWindows ? undefined : 'SIGKILL');
|
||||
reject(new Error(`Hook command timed out after ${timeoutMs}ms`));
|
||||
}, timeoutMs);
|
||||
|
||||
proc.on('close', code => {
|
||||
@@ -239,35 +238,16 @@ async function runTests() {
|
||||
if (await asyncTest('blocking hooks output BLOCKED message', async () => {
|
||||
// Test the dev server blocking hook — must send a matching command
|
||||
const blockingCommand = hooks.hooks.PreToolUse[0].hooks[0].command;
|
||||
const match = blockingCommand.match(/^node -e "(.+)"$/s);
|
||||
|
||||
const proc = spawn('node', ['-e', match[1]], {
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
let stderr = '';
|
||||
let code = null;
|
||||
proc.stderr.on('data', data => stderr += data);
|
||||
|
||||
// Send a dev server command so the hook triggers the block
|
||||
proc.stdin.write(JSON.stringify({
|
||||
const result = await runHookCommand(blockingCommand, {
|
||||
tool_input: { command: 'npm run dev' }
|
||||
}));
|
||||
proc.stdin.end();
|
||||
|
||||
await new Promise(resolve => {
|
||||
proc.on('close', (c) => {
|
||||
code = c;
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Hook only blocks on non-Windows platforms (tmux is Unix-only)
|
||||
if (process.platform === 'win32') {
|
||||
assert.strictEqual(code, 0, 'On Windows, hook should not block (exit 0)');
|
||||
assert.strictEqual(result.code, 0, 'On Windows, hook should not block (exit 0)');
|
||||
} else {
|
||||
assert.ok(stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
|
||||
assert.strictEqual(code, 2, 'Blocking hook should exit with code 2');
|
||||
assert.ok(result.stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
|
||||
assert.strictEqual(result.code, 2, 'Blocking hook should exit with code 2');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
@@ -284,30 +264,15 @@ async function runTests() {
|
||||
if (await asyncTest('blocking hooks exit with code 2', async () => {
|
||||
// The dev server blocker blocks when a dev server command is detected
|
||||
const blockingCommand = hooks.hooks.PreToolUse[0].hooks[0].command;
|
||||
const match = blockingCommand.match(/^node -e "(.+)"$/s);
|
||||
|
||||
const proc = spawn('node', ['-e', match[1]], {
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
let code = null;
|
||||
proc.stdin.write(JSON.stringify({
|
||||
const result = await runHookCommand(blockingCommand, {
|
||||
tool_input: { command: 'yarn dev' }
|
||||
}));
|
||||
proc.stdin.end();
|
||||
|
||||
await new Promise(resolve => {
|
||||
proc.on('close', (c) => {
|
||||
code = c;
|
||||
resolve();
|
||||
});
|
||||
});
|
||||
|
||||
// Hook only blocks on non-Windows platforms (tmux is Unix-only)
|
||||
if (process.platform === 'win32') {
|
||||
assert.strictEqual(code, 0, 'On Windows, hook should not block (exit 0)');
|
||||
assert.strictEqual(result.code, 0, 'On Windows, hook should not block (exit 0)');
|
||||
} else {
|
||||
assert.strictEqual(code, 2, 'Blocking hook should exit 2');
|
||||
assert.strictEqual(result.code, 2, 'Blocking hook should exit 2');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
@@ -391,26 +356,13 @@ async function runTests() {
|
||||
|
||||
assert.ok(prHook, 'PR hook should exist');
|
||||
|
||||
const match = prHook.hooks[0].command.match(/^node -e "(.+)"$/s);
|
||||
|
||||
const proc = spawn('node', ['-e', match[1]], {
|
||||
stdio: ['pipe', 'pipe', 'pipe']
|
||||
});
|
||||
|
||||
let stderr = '';
|
||||
proc.stderr.on('data', data => stderr += data);
|
||||
|
||||
// Simulate gh pr create output
|
||||
proc.stdin.write(JSON.stringify({
|
||||
const result = await runHookCommand(prHook.hooks[0].command, {
|
||||
tool_input: { command: 'gh pr create --title "Test"' },
|
||||
tool_output: { output: 'Creating pull request...\nhttps://github.com/owner/repo/pull/123' }
|
||||
}));
|
||||
proc.stdin.end();
|
||||
|
||||
await new Promise(resolve => proc.on('close', resolve));
|
||||
});
|
||||
|
||||
assert.ok(
|
||||
stderr.includes('PR created') || stderr.includes('github.com'),
|
||||
result.stderr.includes('PR created') || result.stderr.includes('github.com'),
|
||||
'Should extract and log PR URL'
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
@@ -678,8 +630,18 @@ async function runTests() {
|
||||
assert.strictEqual(typeof asyncHook.hooks[0].timeout, 'number', 'Timeout should be a number');
|
||||
assert.ok(asyncHook.hooks[0].timeout > 0, 'Timeout should be positive');
|
||||
|
||||
const match = asyncHook.hooks[0].command.match(/^node -e "(.+)"$/s);
|
||||
assert.ok(match, 'Async hook command should be node -e format');
|
||||
const command = asyncHook.hooks[0].command;
|
||||
const isNodeInline = command.startsWith('node -e');
|
||||
const isNodeScript = command.startsWith('node "');
|
||||
const isShellWrapper =
|
||||
command.startsWith('bash "') ||
|
||||
command.startsWith('sh "') ||
|
||||
command.startsWith('bash -lc ') ||
|
||||
command.startsWith('sh -c ');
|
||||
assert.ok(
|
||||
isNodeInline || isNodeScript || isShellWrapper,
|
||||
`Async hook command should be runnable (node -e, node script, or shell wrapper), got: ${command.substring(0, 80)}`
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (await asyncTest('all hook commands in hooks.json are valid format', async () => {
|
||||
@@ -692,11 +654,16 @@ async function runTests() {
|
||||
|
||||
const isInline = hook.command.startsWith('node -e');
|
||||
const isFilePath = hook.command.startsWith('node "');
|
||||
const isShellScript = hook.command.endsWith('.sh');
|
||||
const isShellWrapper =
|
||||
hook.command.startsWith('bash "') ||
|
||||
hook.command.startsWith('sh "') ||
|
||||
hook.command.startsWith('bash -lc ') ||
|
||||
hook.command.startsWith('sh -c ');
|
||||
const isShellScriptPath = hook.command.endsWith('.sh');
|
||||
|
||||
assert.ok(
|
||||
isInline || isFilePath || isShellScript,
|
||||
`Hook command in ${hookType} should be inline (node -e), file path (node "), or shell script (.sh), got: ${hook.command.substring(0, 80)}`
|
||||
isInline || isFilePath || isShellWrapper || isShellScriptPath,
|
||||
`Hook command in ${hookType} should be node -e, node script, or shell wrapper/script, got: ${hook.command.substring(0, 80)}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -583,9 +583,10 @@ src/main.ts
|
||||
assert.strictEqual(result, null, 'Uppercase letters should be rejected');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects filenames with extra segments', () => {
|
||||
if (test('accepts hyphenated short IDs (extra segments)', () => {
|
||||
const result = sessionManager.parseSessionFilename('2026-02-01-abc12345-extra-session.tmp');
|
||||
assert.strictEqual(result, null, 'Extra segments should be rejected');
|
||||
assert.ok(result, 'Hyphenated short IDs should be accepted');
|
||||
assert.strictEqual(result.shortId, 'abc12345-extra');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects impossible month (13)', () => {
|
||||
|
||||
@@ -21,7 +21,12 @@ const {
|
||||
buildPrompt,
|
||||
askClaude,
|
||||
isValidSessionName,
|
||||
handleClear
|
||||
handleClear,
|
||||
getSessionMetrics,
|
||||
searchSessions,
|
||||
branchSession,
|
||||
exportSession,
|
||||
compactSession
|
||||
} = require(path.join(__dirname, '..', '..', 'scripts', 'claw.js'));
|
||||
|
||||
// Test helper — matches ECC's custom test pattern
|
||||
@@ -229,6 +234,92 @@ function runTests() {
|
||||
assert.strictEqual(isValidSessionName(undefined), false);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nNanoClaw v2:');
|
||||
|
||||
if (test('getSessionMetrics returns non-zero token estimate for populated history', () => {
|
||||
const tmpDir = makeTmpDir();
|
||||
const filePath = path.join(tmpDir, 'metrics.md');
|
||||
try {
|
||||
appendTurn(filePath, 'User', 'Implement auth');
|
||||
appendTurn(filePath, 'Assistant', 'Working on it');
|
||||
const metrics = getSessionMetrics(filePath);
|
||||
assert.strictEqual(metrics.turns, 2);
|
||||
assert.ok(metrics.tokenEstimate > 0);
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('searchSessions finds query in saved session', () => {
|
||||
const tmpDir = makeTmpDir();
|
||||
const originalHome = process.env.HOME;
|
||||
process.env.HOME = tmpDir;
|
||||
try {
|
||||
const sessionPath = path.join(tmpDir, '.claude', 'claw', 'alpha.md');
|
||||
fs.mkdirSync(path.dirname(sessionPath), { recursive: true });
|
||||
appendTurn(sessionPath, 'User', 'Need oauth migration');
|
||||
const results = searchSessions('oauth');
|
||||
assert.strictEqual(results.length, 1);
|
||||
assert.strictEqual(results[0].session, 'alpha');
|
||||
} finally {
|
||||
process.env.HOME = originalHome;
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('branchSession copies history into new branch session', () => {
|
||||
const tmpDir = makeTmpDir();
|
||||
const originalHome = process.env.HOME;
|
||||
process.env.HOME = tmpDir;
|
||||
try {
|
||||
const source = path.join(tmpDir, '.claude', 'claw', 'base.md');
|
||||
fs.mkdirSync(path.dirname(source), { recursive: true });
|
||||
appendTurn(source, 'User', 'base content');
|
||||
const result = branchSession(source, 'feature-branch');
|
||||
assert.strictEqual(result.ok, true);
|
||||
const branched = fs.readFileSync(result.path, 'utf8');
|
||||
assert.ok(branched.includes('base content'));
|
||||
} finally {
|
||||
process.env.HOME = originalHome;
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('exportSession writes JSON export', () => {
|
||||
const tmpDir = makeTmpDir();
|
||||
const filePath = path.join(tmpDir, 'export.md');
|
||||
const outPath = path.join(tmpDir, 'export.json');
|
||||
try {
|
||||
appendTurn(filePath, 'User', 'hello');
|
||||
appendTurn(filePath, 'Assistant', 'world');
|
||||
const result = exportSession(filePath, 'json', outPath);
|
||||
assert.strictEqual(result.ok, true);
|
||||
const exported = JSON.parse(fs.readFileSync(outPath, 'utf8'));
|
||||
assert.strictEqual(Array.isArray(exported.turns), true);
|
||||
assert.strictEqual(exported.turns.length, 2);
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('compactSession reduces long histories', () => {
|
||||
const tmpDir = makeTmpDir();
|
||||
const filePath = path.join(tmpDir, 'compact.md');
|
||||
try {
|
||||
for (let i = 0; i < 30; i++) {
|
||||
appendTurn(filePath, i % 2 ? 'Assistant' : 'User', `turn-${i}`);
|
||||
}
|
||||
const changed = compactSession(filePath, 10);
|
||||
assert.strictEqual(changed, true);
|
||||
const content = fs.readFileSync(filePath, 'utf8');
|
||||
assert.ok(content.includes('NanoClaw Compaction'));
|
||||
assert.ok(!content.includes('turn-0'));
|
||||
assert.ok(content.includes('turn-29'));
|
||||
} finally {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// ── Summary ───────────────────────────────────────────────────────────
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
|
||||
Reference in New Issue
Block a user