From 440178d6970e1187e4882b427074ec5beaa5c567 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Mon, 9 Mar 2026 21:07:42 -0700 Subject: [PATCH 01/42] fix: harden hook portability and plugin docs --- commands/e2e.md | 6 +- commands/plan.md | 6 +- commands/tdd.md | 10 +- scripts/hooks/post-edit-format.js | 91 ++++++--- scripts/hooks/pre-bash-dev-server-block.js | 144 +++++++++++++- scripts/hooks/run-with-flags-shell.sh | 6 +- .../agents/start-observer.sh | 29 +-- .../continuous-learning-v2/hooks/observe.sh | 38 +++- .../scripts/detect-project.sh | 44 ++++- skills/iterative-retrieval/SKILL.md | 2 +- tests/hooks/hooks.test.js | 176 ++++++++++++++++++ 11 files changed, 490 insertions(+), 62 deletions(-) diff --git a/commands/e2e.md b/commands/e2e.md index f0f4a5b7..8caf086d 100644 --- a/commands/e2e.md +++ b/commands/e2e.md @@ -337,8 +337,10 @@ For PMX, prioritize these E2E tests: ## Related Agents -This command invokes the `e2e-runner` agent located at: -`~/.claude/agents/e2e-runner.md` +This command invokes the `e2e-runner` agent provided by ECC. + +For manual installs, the source file lives at: +`agents/e2e-runner.md` ## Quick Commands diff --git a/commands/plan.md b/commands/plan.md index b7e9905d..198ea5a4 100644 --- a/commands/plan.md +++ b/commands/plan.md @@ -109,5 +109,7 @@ After planning: ## Related Agents -This command invokes the `planner` agent located at: -`~/.claude/agents/planner.md` +This command invokes the `planner` agent provided by ECC. + +For manual installs, the source file lives at: +`agents/planner.md` diff --git a/commands/tdd.md b/commands/tdd.md index 3f7b02b5..f98cb58b 100644 --- a/commands/tdd.md +++ b/commands/tdd.md @@ -319,8 +319,10 @@ Never skip the RED phase. Never write code before tests. ## Related Agents -This command invokes the `tdd-guide` agent located at: -`~/.claude/agents/tdd-guide.md` +This command invokes the `tdd-guide` agent provided by ECC. 
-And can reference the `tdd-workflow` skill at: -`~/.claude/skills/tdd-workflow/` +The related `tdd-workflow` skill is also bundled with ECC. + +For manual installs, the source files live at: +- `agents/tdd-guide.md` +- `skills/tdd-workflow/SKILL.md` diff --git a/scripts/hooks/post-edit-format.js b/scripts/hooks/post-edit-format.js index 2f1d0334..d8a1754c 100644 --- a/scripts/hooks/post-edit-format.js +++ b/scripts/hooks/post-edit-format.js @@ -13,8 +13,31 @@ const { execFileSync } = require('child_process'); const fs = require('fs'); const path = require('path'); +const { getPackageManager } = require('../lib/package-manager'); const MAX_STDIN = 1024 * 1024; // 1MB limit +const BIOME_CONFIGS = ['biome.json', 'biome.jsonc']; +const PRETTIER_CONFIGS = [ + '.prettierrc', + '.prettierrc.json', + '.prettierrc.json5', + '.prettierrc.js', + '.prettierrc.cjs', + '.prettierrc.mjs', + '.prettierrc.ts', + '.prettierrc.cts', + '.prettierrc.mts', + '.prettierrc.yml', + '.prettierrc.yaml', + '.prettierrc.toml', + 'prettier.config.js', + 'prettier.config.cjs', + 'prettier.config.mjs', + 'prettier.config.ts', + 'prettier.config.cts', + 'prettier.config.mts', +]; +const PROJECT_ROOT_MARKERS = ['package.json', ...BIOME_CONFIGS, ...PRETTIER_CONFIGS]; let data = ''; process.stdin.setEncoding('utf8'); @@ -27,46 +50,66 @@ process.stdin.on('data', chunk => { function findProjectRoot(startDir) { let dir = startDir; - while (dir !== path.dirname(dir)) { - if (fs.existsSync(path.join(dir, 'package.json'))) return dir; - dir = path.dirname(dir); + + while (true) { + if (PROJECT_ROOT_MARKERS.some(marker => fs.existsSync(path.join(dir, marker)))) { + return dir; + } + + const parentDir = path.dirname(dir); + if (parentDir === dir) break; + dir = parentDir; } + return startDir; } function detectFormatter(projectRoot) { - const biomeConfigs = ['biome.json', 'biome.jsonc']; - for (const cfg of biomeConfigs) { + for (const cfg of BIOME_CONFIGS) { if (fs.existsSync(path.join(projectRoot, cfg))) 
return 'biome'; } - const prettierConfigs = [ - '.prettierrc', - '.prettierrc.json', - '.prettierrc.js', - '.prettierrc.cjs', - '.prettierrc.mjs', - '.prettierrc.yml', - '.prettierrc.yaml', - '.prettierrc.toml', - 'prettier.config.js', - 'prettier.config.cjs', - 'prettier.config.mjs', - ]; - for (const cfg of prettierConfigs) { + for (const cfg of PRETTIER_CONFIGS) { if (fs.existsSync(path.join(projectRoot, cfg))) return 'prettier'; } return null; } -function getFormatterCommand(formatter, filePath) { - const npxBin = process.platform === 'win32' ? 'npx.cmd' : 'npx'; +function getRunnerBin(bin) { + if (process.platform !== 'win32') return bin; + if (bin === 'npx') return 'npx.cmd'; + if (bin === 'pnpm') return 'pnpm.cmd'; + if (bin === 'yarn') return 'yarn.cmd'; + if (bin === 'bunx') return 'bunx.cmd'; + return bin; +} + +function getFormatterRunner(projectRoot) { + const pm = getPackageManager({ projectDir: projectRoot }); + const execCmd = pm?.config?.execCmd || 'npx'; + const [bin = 'npx', ...prefix] = execCmd.split(/\s+/).filter(Boolean); + + return { + bin: getRunnerBin(bin), + prefix + }; +} + +function getFormatterCommand(formatter, filePath, projectRoot) { + const runner = getFormatterRunner(projectRoot); + if (formatter === 'biome') { - return { bin: npxBin, args: ['@biomejs/biome', 'format', '--write', filePath] }; + return { + bin: runner.bin, + args: [...runner.prefix, '@biomejs/biome', 'format', '--write', filePath] + }; } if (formatter === 'prettier') { - return { bin: npxBin, args: ['prettier', '--write', filePath] }; + return { + bin: runner.bin, + args: [...runner.prefix, 'prettier', '--write', filePath] + }; } return null; } @@ -80,7 +123,7 @@ process.stdin.on('end', () => { try { const projectRoot = findProjectRoot(path.dirname(path.resolve(filePath))); const formatter = detectFormatter(projectRoot); - const cmd = getFormatterCommand(formatter, filePath); + const cmd = getFormatterCommand(formatter, filePath, projectRoot); if (cmd) { 
execFileSync(cmd.bin, cmd.args, { diff --git a/scripts/hooks/pre-bash-dev-server-block.js b/scripts/hooks/pre-bash-dev-server-block.js index 26b2a555..01c76c1f 100755 --- a/scripts/hooks/pre-bash-dev-server-block.js +++ b/scripts/hooks/pre-bash-dev-server-block.js @@ -4,6 +4,142 @@ const MAX_STDIN = 1024 * 1024; const { splitShellSegments } = require('../lib/shell-split'); +const DEV_COMMAND_WORDS = new Set([ + 'npm', + 'pnpm', + 'yarn', + 'bun', + 'npx', + 'bash', + 'sh', + 'zsh', + 'fish', + 'tmux' +]); +const SKIPPABLE_PREFIX_WORDS = new Set(['env', 'command', 'builtin', 'exec', 'noglob', 'sudo']); +const PREFIX_OPTION_VALUE_WORDS = { + env: new Set(['-u', '-C', '-S', '--unset', '--chdir', '--split-string']), + sudo: new Set([ + '-u', + '-g', + '-h', + '-p', + '-r', + '-t', + '-C', + '--user', + '--group', + '--host', + '--prompt', + '--role', + '--type', + '--close-from' + ]) +}; + +function readToken(input, startIndex) { + let index = startIndex; + while (index < input.length && /\s/.test(input[index])) index += 1; + if (index >= input.length) return null; + + let token = ''; + let quote = null; + + while (index < input.length) { + const ch = input[index]; + + if (quote) { + if (ch === quote) { + quote = null; + index += 1; + continue; + } + + if (ch === '\\' && quote === '"' && index + 1 < input.length) { + token += input[index + 1]; + index += 2; + continue; + } + + token += ch; + index += 1; + continue; + } + + if (ch === '"' || ch === "'") { + quote = ch; + index += 1; + continue; + } + + if (/\s/.test(ch)) break; + + if (ch === '\\' && index + 1 < input.length) { + token += input[index + 1]; + index += 2; + continue; + } + + token += ch; + index += 1; + } + + return { token, end: index }; +} + +function shouldSkipOptionValue(wrapper, optionToken) { + if (!wrapper || !optionToken || optionToken.includes('=')) return false; + const optionSet = PREFIX_OPTION_VALUE_WORDS[wrapper]; + return Boolean(optionSet && optionSet.has(optionToken)); +} + +function 
isOptionToken(token) { + return token.startsWith('-') && token.length > 1; +} + +function getLeadingCommandWord(segment) { + let index = 0; + let activeWrapper = null; + let skipNextValue = false; + + while (index < segment.length) { + const parsed = readToken(segment, index); + if (!parsed) return null; + index = parsed.end; + + const token = parsed.token; + if (!token) continue; + + if (skipNextValue) { + skipNextValue = false; + continue; + } + + if (token === '--') { + activeWrapper = null; + continue; + } + + if (/^[A-Za-z_][A-Za-z0-9_]*=.*/.test(token)) continue; + + if (SKIPPABLE_PREFIX_WORDS.has(token)) { + activeWrapper = token; + continue; + } + + if (activeWrapper && isOptionToken(token)) { + if (shouldSkipOptionValue(activeWrapper, token)) { + skipNextValue = true; + } + continue; + } + + return token; + } + + return null; +} + let raw = ''; process.stdin.setEncoding('utf8'); process.stdin.on('data', chunk => { @@ -23,7 +159,13 @@ process.stdin.on('end', () => { const tmuxLauncher = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/; const devPattern = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn\s+dev|bun\s+run\s+dev)\b/; - const hasBlockedDev = segments.some(segment => devPattern.test(segment) && !tmuxLauncher.test(segment)); + const hasBlockedDev = segments.some(segment => { + const commandWord = getLeadingCommandWord(segment); + if (!commandWord || !DEV_COMMAND_WORDS.has(commandWord)) { + return false; + } + return devPattern.test(segment) && !tmuxLauncher.test(segment); + }); if (hasBlockedDev) { console.error('[Hook] BLOCKED: Dev server must run in tmux for log access'); diff --git a/scripts/hooks/run-with-flags-shell.sh b/scripts/hooks/run-with-flags-shell.sh index 21c65ebb..4b064c32 100755 --- a/scripts/hooks/run-with-flags-shell.sh +++ b/scripts/hooks/run-with-flags-shell.sh @@ -4,6 +4,8 @@ set -euo pipefail HOOK_ID="${1:-}" REL_SCRIPT_PATH="${2:-}" PROFILES_CSV="${3:-standard,strict}" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 
&& pwd)" +PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-$(cd "${SCRIPT_DIR}/../.." && pwd)}" # Preserve stdin for passthrough or script execution INPUT="$(cat)" @@ -14,13 +16,13 @@ if [[ -z "$HOOK_ID" || -z "$REL_SCRIPT_PATH" ]]; then fi # Ask Node helper if this hook is enabled -ENABLED="$(node "${CLAUDE_PLUGIN_ROOT}/scripts/hooks/check-hook-enabled.js" "$HOOK_ID" "$PROFILES_CSV" 2>/dev/null || echo yes)" +ENABLED="$(node "${PLUGIN_ROOT}/scripts/hooks/check-hook-enabled.js" "$HOOK_ID" "$PROFILES_CSV" 2>/dev/null || echo yes)" if [[ "$ENABLED" != "yes" ]]; then printf '%s' "$INPUT" exit 0 fi -SCRIPT_PATH="${CLAUDE_PLUGIN_ROOT}/${REL_SCRIPT_PATH}" +SCRIPT_PATH="${PLUGIN_ROOT}/${REL_SCRIPT_PATH}" if [[ ! -f "$SCRIPT_PATH" ]]; then echo "[Hook] Script not found for ${HOOK_ID}: ${SCRIPT_PATH}" >&2 printf '%s' "$INPUT" diff --git a/skills/continuous-learning-v2/agents/start-observer.sh b/skills/continuous-learning-v2/agents/start-observer.sh index ba9994de..c6be18a8 100755 --- a/skills/continuous-learning-v2/agents/start-observer.sh +++ b/skills/continuous-learning-v2/agents/start-observer.sh @@ -28,6 +28,7 @@ OBSERVER_LOOP_SCRIPT="${SCRIPT_DIR}/observer-loop.sh" # Source shared project detection helper # This sets: PROJECT_ID, PROJECT_NAME, PROJECT_ROOT, PROJECT_DIR source "${SKILL_ROOT}/scripts/detect-project.sh" +PYTHON_CMD="${CLV2_PYTHON_CMD:-}" # ───────────────────────────────────────────── # Configuration @@ -46,7 +47,10 @@ OBSERVER_INTERVAL_MINUTES=5 MIN_OBSERVATIONS=20 OBSERVER_ENABLED=false if [ -f "$CONFIG_FILE" ]; then - _config=$(CLV2_CONFIG="$CONFIG_FILE" python3 -c " + if [ -z "$PYTHON_CMD" ]; then + echo "No python interpreter found; using built-in observer defaults." 
>&2 + else + _config=$(CLV2_CONFIG="$CONFIG_FILE" "$PYTHON_CMD" -c " import json, os with open(os.environ['CLV2_CONFIG']) as f: cfg = json.load(f) @@ -57,17 +61,18 @@ print(str(obs.get('enabled', False)).lower()) " 2>/dev/null || echo "5 20 false") - _interval=$(echo "$_config" | sed -n '1p') - _min_obs=$(echo "$_config" | sed -n '2p') - _enabled=$(echo "$_config" | sed -n '3p') - if [ "$_interval" -gt 0 ] 2>/dev/null; then - OBSERVER_INTERVAL_MINUTES="$_interval" - fi - if [ "$_min_obs" -gt 0 ] 2>/dev/null; then - MIN_OBSERVATIONS="$_min_obs" - fi - if [ "$_enabled" = "true" ]; then - OBSERVER_ENABLED=true + _interval=$(echo "$_config" | sed -n '1p') + _min_obs=$(echo "$_config" | sed -n '2p') + _enabled=$(echo "$_config" | sed -n '3p') + if [ "$_interval" -gt 0 ] 2>/dev/null; then + OBSERVER_INTERVAL_MINUTES="$_interval" + fi + if [ "$_min_obs" -gt 0 ] 2>/dev/null; then + MIN_OBSERVATIONS="$_min_obs" + fi + if [ "$_enabled" = "true" ]; then + OBSERVER_ENABLED=true + fi fi fi OBSERVER_INTERVAL_SECONDS=$((OBSERVER_INTERVAL_MINUTES * 60)) diff --git a/skills/continuous-learning-v2/hooks/observe.sh b/skills/continuous-learning-v2/hooks/observe.sh index 307cde67..ea44386b 100755 --- a/skills/continuous-learning-v2/hooks/observe.sh +++ b/skills/continuous-learning-v2/hooks/observe.sh @@ -27,13 +27,38 @@ if [ -z "$INPUT_JSON" ]; then exit 0 fi +resolve_python_cmd() { + if [ -n "${CLV2_PYTHON_CMD:-}" ] && command -v "$CLV2_PYTHON_CMD" >/dev/null 2>&1; then + printf '%s\n' "$CLV2_PYTHON_CMD" + return 0 + fi + + if command -v python3 >/dev/null 2>&1; then + printf '%s\n' python3 + return 0 + fi + + if command -v python >/dev/null 2>&1; then + printf '%s\n' python + return 0 + fi + + return 1 +} + +PYTHON_CMD="$(resolve_python_cmd 2>/dev/null || true)" +if [ -z "$PYTHON_CMD" ]; then + echo "[observe] No python interpreter found, skipping observation" >&2 + exit 0 +fi + # ───────────────────────────────────────────── # Extract cwd from stdin for project detection # 
───────────────────────────────────────────── # Extract cwd from the hook JSON to use for project detection. # This avoids spawning a separate git subprocess when cwd is available. -STDIN_CWD=$(echo "$INPUT_JSON" | python3 -c ' +STDIN_CWD=$(echo "$INPUT_JSON" | "$PYTHON_CMD" -c ' import json, sys try: data = json.load(sys.stdin) @@ -58,6 +83,7 @@ SKILL_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" # Source shared project detection helper # This sets: PROJECT_ID, PROJECT_NAME, PROJECT_ROOT, PROJECT_DIR source "${SKILL_ROOT}/scripts/detect-project.sh" +PYTHON_CMD="${CLV2_PYTHON_CMD:-$PYTHON_CMD}" # ───────────────────────────────────────────── # Configuration @@ -79,9 +105,9 @@ if [ ! -f "$PURGE_MARKER" ] || [ "$(find "$PURGE_MARKER" -mtime +1 2>/dev/null)" touch "$PURGE_MARKER" 2>/dev/null || true fi -# Parse using python via stdin pipe (safe for all JSON payloads) +# Parse using Python via stdin pipe (safe for all JSON payloads) # Pass HOOK_PHASE via env var since Claude Code does not include hook type in stdin JSON -PARSED=$(echo "$INPUT_JSON" | HOOK_PHASE="$HOOK_PHASE" python3 -c ' +PARSED=$(echo "$INPUT_JSON" | HOOK_PHASE="$HOOK_PHASE" "$PYTHON_CMD" -c ' import json import sys import os @@ -129,13 +155,13 @@ except Exception as e: ') # Check if parsing succeeded -PARSED_OK=$(echo "$PARSED" | python3 -c "import json,sys; print(json.load(sys.stdin).get('parsed', False))" 2>/dev/null || echo "False") +PARSED_OK=$(echo "$PARSED" | "$PYTHON_CMD" -c "import json,sys; print(json.load(sys.stdin).get('parsed', False))" 2>/dev/null || echo "False") if [ "$PARSED_OK" != "True" ]; then # Fallback: log raw input for debugging (scrub secrets before persisting) timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ") export TIMESTAMP="$timestamp" - echo "$INPUT_JSON" | python3 -c ' + echo "$INPUT_JSON" | "$PYTHON_CMD" -c ' import json, sys, os, re _SECRET_RE = re.compile( @@ -170,7 +196,7 @@ export PROJECT_ID_ENV="$PROJECT_ID" export PROJECT_NAME_ENV="$PROJECT_NAME" export TIMESTAMP="$timestamp" 
-echo "$PARSED" | python3 -c ' +echo "$PARSED" | "$PYTHON_CMD" -c ' import json, sys, os, re parsed = json.load(sys.stdin) diff --git a/skills/continuous-learning-v2/scripts/detect-project.sh b/skills/continuous-learning-v2/scripts/detect-project.sh index a4bbfb32..b2677538 100755 --- a/skills/continuous-learning-v2/scripts/detect-project.sh +++ b/skills/continuous-learning-v2/scripts/detect-project.sh @@ -23,6 +23,28 @@ _CLV2_HOMUNCULUS_DIR="${HOME}/.claude/homunculus" _CLV2_PROJECTS_DIR="${_CLV2_HOMUNCULUS_DIR}/projects" _CLV2_REGISTRY_FILE="${_CLV2_HOMUNCULUS_DIR}/projects.json" +_clv2_resolve_python_cmd() { + if [ -n "${CLV2_PYTHON_CMD:-}" ] && command -v "$CLV2_PYTHON_CMD" >/dev/null 2>&1; then + printf '%s\n' "$CLV2_PYTHON_CMD" + return 0 + fi + + if command -v python3 >/dev/null 2>&1; then + printf '%s\n' python3 + return 0 + fi + + if command -v python >/dev/null 2>&1; then + printf '%s\n' python + return 0 + fi + + return 1 +} + +_CLV2_PYTHON_CMD="$(_clv2_resolve_python_cmd 2>/dev/null || true)" +export CLV2_PYTHON_CMD + _clv2_detect_project() { local project_root="" local project_name="" @@ -73,10 +95,12 @@ _clv2_detect_project() { fi local hash_input="${remote_url:-$project_root}" - # Use SHA256 via python3 (portable across macOS/Linux, no shasum/sha256sum divergence) - project_id=$(printf '%s' "$hash_input" | python3 -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null) + # Prefer Python for consistent SHA256 behavior across shells/platforms. + if [ -n "$_CLV2_PYTHON_CMD" ]; then + project_id=$(printf '%s' "$hash_input" | "$_CLV2_PYTHON_CMD" -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null) + fi - # Fallback if python3 failed + # Fallback if Python is unavailable or hash generation failed. 
if [ -z "$project_id" ]; then project_id=$(printf '%s' "$hash_input" | shasum -a 256 2>/dev/null | cut -c1-12 || \ printf '%s' "$hash_input" | sha256sum 2>/dev/null | cut -c1-12 || \ @@ -85,9 +109,9 @@ _clv2_detect_project() { # Backward compatibility: if credentials were stripped and the hash changed, # check if a project dir exists under the legacy hash and reuse it - if [ "$legacy_hash_input" != "$hash_input" ]; then - local legacy_id - legacy_id=$(printf '%s' "$legacy_hash_input" | python3 -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null) + if [ "$legacy_hash_input" != "$hash_input" ] && [ -n "$_CLV2_PYTHON_CMD" ]; then + local legacy_id="" + legacy_id=$(printf '%s' "$legacy_hash_input" | "$_CLV2_PYTHON_CMD" -c "import sys,hashlib; print(hashlib.sha256(sys.stdin.buffer.read()).hexdigest()[:12])" 2>/dev/null) if [ -n "$legacy_id" ] && [ -d "${_CLV2_PROJECTS_DIR}/${legacy_id}" ] && [ ! -d "${_CLV2_PROJECTS_DIR}/${project_id}" ]; then # Migrate legacy directory to new hash mv "${_CLV2_PROJECTS_DIR}/${legacy_id}" "${_CLV2_PROJECTS_DIR}/${project_id}" 2>/dev/null || project_id="$legacy_id" @@ -120,14 +144,18 @@ _clv2_update_project_registry() { mkdir -p "$(dirname "$_CLV2_REGISTRY_FILE")" + if [ -z "$_CLV2_PYTHON_CMD" ]; then + return 0 + fi + # Pass values via env vars to avoid shell→python injection. - # python3 reads them with os.environ, which is safe for any string content. + # Python reads them with os.environ, which is safe for any string content. 
_CLV2_REG_PID="$pid" \ _CLV2_REG_PNAME="$pname" \ _CLV2_REG_PROOT="$proot" \ _CLV2_REG_PREMOTE="$premote" \ _CLV2_REG_FILE="$_CLV2_REGISTRY_FILE" \ - python3 -c ' + "$_CLV2_PYTHON_CMD" -c ' import json, os from datetime import datetime, timezone diff --git a/skills/iterative-retrieval/SKILL.md b/skills/iterative-retrieval/SKILL.md index 27760f9a..0a24a6dd 100644 --- a/skills/iterative-retrieval/SKILL.md +++ b/skills/iterative-retrieval/SKILL.md @@ -208,4 +208,4 @@ When retrieving context for this task: - [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Subagent orchestration section - `continuous-learning` skill - For patterns that improve over time -- Agent definitions in `~/.claude/agents/` +- Agent definitions bundled with ECC (manual install path: `agents/`) diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index 2c873204..6cc2f45b 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -75,6 +75,35 @@ function cleanupTestDir(testDir) { fs.rmSync(testDir, { recursive: true, force: true }); } +function createCommandShim(binDir, baseName, logFile) { + fs.mkdirSync(binDir, { recursive: true }); + + const shimJs = path.join(binDir, `${baseName}-shim.js`); + fs.writeFileSync(shimJs, [ + 'const fs = require(\'fs\');', + `fs.appendFileSync(${JSON.stringify(logFile)}, JSON.stringify({ bin: ${JSON.stringify(baseName)}, args: process.argv.slice(2), cwd: process.cwd() }) + '\\n');` + ].join('\n')); + + if (process.platform === 'win32') { + const shimCmd = path.join(binDir, `${baseName}.cmd`); + fs.writeFileSync(shimCmd, `@echo off\r\nnode "${shimJs}" %*\r\n`); + return shimCmd; + } + + const shimPath = path.join(binDir, baseName); + fs.writeFileSync(shimPath, `#!/usr/bin/env node\nrequire(${JSON.stringify(shimJs)});\n`); + fs.chmodSync(shimPath, 0o755); + return shimPath; +} + +function readCommandLog(logFile) { + if (!fs.existsSync(logFile)) return []; + return fs.readFileSync(logFile, 'utf8') + 
.split('\n') + .filter(Boolean) + .map(line => JSON.parse(line)); +} + // Test suite async function runTests() { console.log('\n=== Testing Hook Scripts ===\n'); @@ -701,6 +730,131 @@ async function runTests() { assert.ok(result.stdout.includes('tool_input'), 'Should pass through original data'); })) passed++; else failed++; + if (await asyncTest('finds formatter config in parent dirs without package.json', async () => { + const testDir = createTestDir(); + const rootDir = path.join(testDir, 'config-only-repo'); + const nestedDir = path.join(rootDir, 'src', 'nested'); + const filePath = path.join(nestedDir, 'component.ts'); + const binDir = path.join(testDir, 'bin'); + const logFile = path.join(testDir, 'formatter.log'); + + fs.mkdirSync(nestedDir, { recursive: true }); + fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); + fs.writeFileSync(filePath, 'export const value = 1;\n'); + createCommandShim(binDir, 'npx', logFile); + + const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, { + PATH: `${binDir}${path.delimiter}${process.env.PATH || ''}` + }); + + assert.strictEqual(result.code, 0, 'Should exit 0 for config-only repo'); + const logEntries = readCommandLog(logFile); + assert.strictEqual(logEntries.length, 1, 'Should invoke formatter once'); + assert.strictEqual( + fs.realpathSync(logEntries[0].cwd), + fs.realpathSync(rootDir), + 'Should run formatter from config root' + ); + assert.deepStrictEqual( + logEntries[0].args, + ['prettier', '--write', filePath], + 'Should use the formatter on the nested file' + ); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (await asyncTest('respects CLAUDE_PACKAGE_MANAGER for formatter fallback runner', async () => { + const testDir = createTestDir(); + const rootDir = path.join(testDir, 'pnpm-repo'); + const filePath = path.join(rootDir, 'index.ts'); + const binDir = path.join(testDir, 'bin'); 
+ const logFile = path.join(testDir, 'pnpm.log'); + + fs.mkdirSync(rootDir, { recursive: true }); + fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); + fs.writeFileSync(filePath, 'export const value = 1;\n'); + createCommandShim(binDir, 'pnpm', logFile); + + const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, { + PATH: `${binDir}${path.delimiter}${process.env.PATH || ''}`, + CLAUDE_PACKAGE_MANAGER: 'pnpm' + }); + + assert.strictEqual(result.code, 0, 'Should exit 0 when pnpm fallback is used'); + const logEntries = readCommandLog(logFile); + assert.strictEqual(logEntries.length, 1, 'Should invoke pnpm fallback runner once'); + assert.strictEqual(logEntries[0].bin, 'pnpm', 'Should use pnpm runner'); + assert.deepStrictEqual( + logEntries[0].args, + ['dlx', 'prettier', '--write', filePath], + 'Should use pnpm dlx for fallback formatter execution' + ); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (await asyncTest('respects project package-manager config for formatter fallback runner', async () => { + const testDir = createTestDir(); + const rootDir = path.join(testDir, 'bun-repo'); + const filePath = path.join(rootDir, 'index.ts'); + const binDir = path.join(testDir, 'bin'); + const logFile = path.join(testDir, 'bun.log'); + + fs.mkdirSync(path.join(rootDir, '.claude'), { recursive: true }); + fs.writeFileSync(path.join(rootDir, '.claude', 'package-manager.json'), JSON.stringify({ packageManager: 'bun' })); + fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); + fs.writeFileSync(filePath, 'export const value = 1;\n'); + createCommandShim(binDir, 'bunx', logFile); + + const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, { + PATH: `${binDir}${path.delimiter}${process.env.PATH || ''}` + }); + + 
assert.strictEqual(result.code, 0, 'Should exit 0 when project config selects bun'); + const logEntries = readCommandLog(logFile); + assert.strictEqual(logEntries.length, 1, 'Should invoke bunx fallback runner once'); + assert.strictEqual(logEntries[0].bin, 'bunx', 'Should use bunx runner'); + assert.deepStrictEqual( + logEntries[0].args, + ['prettier', '--write', filePath], + 'Should use bunx for fallback formatter execution' + ); + cleanupTestDir(testDir); + })) passed++; else failed++; + + console.log('\npre-bash-dev-server-block.js:'); + + if (await asyncTest('allows non-dev commands whose heredoc text mentions npm run dev', async () => { + const command = [ + 'gh pr create --title "fix: docs" --body "$(cat <<\'EOF\'', + '## Test plan', + '- run npm run dev to verify the site starts', + 'EOF', + ')"' + ].join('\n'); + const stdinJson = JSON.stringify({ tool_input: { command } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + assert.strictEqual(result.code, 0, 'Non-dev commands should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Should preserve original input'); + assert.ok(!result.stderr.includes('BLOCKED'), 'Should not emit a block message'); + })) passed++; else failed++; + + if (await asyncTest('blocks bare npm run dev outside tmux on non-Windows platforms', async () => { + const stdinJson = JSON.stringify({ tool_input: { command: 'npm run dev' } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + if (process.platform === 'win32') { + assert.strictEqual(result.code, 0, 'Windows path should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); + } else { + assert.strictEqual(result.code, 2, 'Unix path should block bare dev servers'); + assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); + } + })) passed++; else failed++; + // 
post-edit-typecheck.js tests console.log('\npost-edit-typecheck.js:'); @@ -1516,6 +1670,28 @@ async function runTests() { assert.ok(typecheckSource.includes('npx.cmd'), 'Should use npx.cmd for Windows cross-platform safety'); })) passed++; else failed++; + console.log('\nShell wrapper portability:'); + + if (test('run-with-flags-shell resolves plugin root when CLAUDE_PLUGIN_ROOT is unset', () => { + const wrapperSource = fs.readFileSync(path.join(scriptsDir, 'run-with-flags-shell.sh'), 'utf8'); + assert.ok( + wrapperSource.includes('PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-'), + 'Shell wrapper should derive PLUGIN_ROOT from its own script path' + ); + })) passed++; else failed++; + + if (test('continuous-learning shell scripts use resolved Python command instead of hardcoded python3 invocations', () => { + const observeSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh'), 'utf8'); + const startObserverSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'agents', 'start-observer.sh'), 'utf8'); + const detectProjectSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'scripts', 'detect-project.sh'), 'utf8'); + + assert.ok(!/python3\s+-c/.test(observeSource), 'observe.sh should not invoke python3 directly'); + assert.ok(!/python3\s+-c/.test(startObserverSource), 'start-observer.sh should not invoke python3 directly'); + assert.ok(observeSource.includes('PYTHON_CMD'), 'observe.sh should resolve Python dynamically'); + assert.ok(startObserverSource.includes('CLV2_PYTHON_CMD'), 'start-observer.sh should reuse detected Python command'); + assert.ok(detectProjectSource.includes('_clv2_resolve_python_cmd'), 'detect-project.sh should provide shared Python resolution'); + })) passed++; else failed++; + if (await asyncTest('matches .tsx extension for type checking', async () => { const testDir = createTestDir(); const testFile = 
path.join(testDir, 'component.tsx'); From d66bd6439b1f5f726291654e00718061025b425a Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Mon, 9 Mar 2026 21:10:46 -0700 Subject: [PATCH 02/42] docs: clarify opencode npm plugin scope --- .opencode/MIGRATION.md | 10 ++++++++++ .opencode/README.md | 11 ++++++++++- .opencode/index.ts | 11 +++++++---- README.md | 7 +++++++ 4 files changed, 34 insertions(+), 5 deletions(-) diff --git a/.opencode/MIGRATION.md b/.opencode/MIGRATION.md index 2277d7ae..afb2507b 100644 --- a/.opencode/MIGRATION.md +++ b/.opencode/MIGRATION.md @@ -297,6 +297,15 @@ Then in your `opencode.json`: } ``` +This only loads the published ECC OpenCode plugin module (hooks/events and exported plugin tools). +It does **not** automatically inject ECC's full `agent`, `command`, or `instructions` config into your project. + +If you want the full ECC OpenCode workflow surface, use the repository's bundled `.opencode/opencode.json` as your base config or copy these pieces into your project: +- `.opencode/commands/` +- `.opencode/prompts/` +- `.opencode/instructions/INSTRUCTIONS.md` +- the `agent` and `command` sections from `.opencode/opencode.json` + ## Troubleshooting ### Configuration Not Loading @@ -322,6 +331,7 @@ Then in your `opencode.json`: 1. Verify the command is defined in `opencode.json` or as `.md` file in `.opencode/commands/` 2. Check the referenced agent exists 3. Ensure the template uses `$ARGUMENTS` for user input +4. 
If you installed only `plugin: ["ecc-universal"]`, note that npm plugin install does not auto-add ECC commands or agents to your project config ## Best Practices diff --git a/.opencode/README.md b/.opencode/README.md index 60d901aa..eaaa73a5 100644 --- a/.opencode/README.md +++ b/.opencode/README.md @@ -32,7 +32,16 @@ Add to your `opencode.json`: "plugin": ["ecc-universal"] } ``` -After installation, the `ecc-install` CLI becomes available: + +This loads the ECC OpenCode plugin module from npm: +- hook/event integrations +- bundled custom tools exported by the plugin + +It does **not** auto-register the full ECC command/agent/instruction catalog in your project config. For the full OpenCode setup, either: +- run OpenCode inside this repository, or +- copy the relevant `.opencode/commands/`, `.opencode/prompts/`, `.opencode/instructions/`, and `agent` / `command` config entries into your own project + +After installation, the `ecc-install` CLI is also available: ```bash npx ecc-install typescript diff --git a/.opencode/index.ts b/.opencode/index.ts index 4b686715..c8736525 100644 --- a/.opencode/index.ts +++ b/.opencode/index.ts @@ -1,12 +1,10 @@ /** * Everything Claude Code (ECC) Plugin for OpenCode * - * This package provides a complete OpenCode plugin with: - * - 13 specialized agents (planner, architect, code-reviewer, etc.) - * - 31 commands (/plan, /tdd, /code-review, etc.) + * This package provides the published ECC OpenCode plugin module: * - Plugin hooks (auto-format, TypeScript check, console.log warning, env injection, etc.) * - Custom tools (run-tests, check-coverage, security-audit, format-code, lint-check, git-summary) - * - 37 skills (coding-standards, security-review, tdd-workflow, etc.) + * - Bundled reference config/assets for the wider ECC OpenCode setup * * Usage: * @@ -22,6 +20,10 @@ * } * ``` * + * That enables the published plugin module only. 
For ECC commands, agents, + * prompts, and instructions, use this repository's `.opencode/opencode.json` + * as a base or copy the bundled `.opencode/` assets into your project. + * * Option 2: Clone and use directly * ```bash * git clone https://github.com/affaan-m/everything-claude-code @@ -51,6 +53,7 @@ export const metadata = { agents: 13, commands: 31, skills: 37, + configAssets: true, hookEvents: [ "file.edited", "tool.execute.before", diff --git a/README.md b/README.md index 05e33b99..0426ed6c 100644 --- a/README.md +++ b/README.md @@ -1050,6 +1050,13 @@ Then add to your `opencode.json`: } ``` +That npm plugin entry enables ECC's published OpenCode plugin module (hooks/events and plugin tools). +It does **not** automatically add ECC's full command/agent/instruction catalog to your project config. + +For the full ECC OpenCode setup, either: +- run OpenCode inside this repository, or +- copy the bundled `.opencode/` config assets into your project and wire the `agent` / `command` entries in `opencode.json` + ### Documentation - **Migration Guide**: `.opencode/MIGRATION.md` From 1c5e07ff77b170149196a23567b253236d1aa834 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Mon, 9 Mar 2026 21:13:11 -0700 Subject: [PATCH 03/42] test: fix windows path shims for formatter hooks --- tests/hooks/hooks.test.js | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index 6cc2f45b..09be038c 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -104,6 +104,19 @@ function readCommandLog(logFile) { .map(line => JSON.parse(line)); } +function withPrependedPath(binDir, env = {}) { + const pathKey = Object.keys(process.env).find(key => key.toLowerCase() === 'path') + || (process.platform === 'win32' ? 
'Path' : 'PATH'); + const currentPath = process.env[pathKey] || process.env.PATH || ''; + const nextPath = `${binDir}${path.delimiter}${currentPath}`; + + return { + ...env, + [pathKey]: nextPath, + PATH: nextPath + }; +} + // Test suite async function runTests() { console.log('\n=== Testing Hook Scripts ===\n'); @@ -744,9 +757,11 @@ async function runTests() { createCommandShim(binDir, 'npx', logFile); const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, { - PATH: `${binDir}${path.delimiter}${process.env.PATH || ''}` - }); + const result = await runScript( + path.join(scriptsDir, 'post-edit-format.js'), + stdinJson, + withPrependedPath(binDir) + ); assert.strictEqual(result.code, 0, 'Should exit 0 for config-only repo'); const logEntries = readCommandLog(logFile); @@ -777,10 +792,11 @@ async function runTests() { createCommandShim(binDir, 'pnpm', logFile); const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, { - PATH: `${binDir}${path.delimiter}${process.env.PATH || ''}`, - CLAUDE_PACKAGE_MANAGER: 'pnpm' - }); + const result = await runScript( + path.join(scriptsDir, 'post-edit-format.js'), + stdinJson, + withPrependedPath(binDir, { CLAUDE_PACKAGE_MANAGER: 'pnpm' }) + ); assert.strictEqual(result.code, 0, 'Should exit 0 when pnpm fallback is used'); const logEntries = readCommandLog(logFile); @@ -808,9 +824,11 @@ async function runTests() { createCommandShim(binDir, 'bunx', logFile); const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, { - PATH: `${binDir}${path.delimiter}${process.env.PATH || ''}` - }); + const result = await runScript( + path.join(scriptsDir, 'post-edit-format.js'), + stdinJson, + withPrependedPath(binDir) + ); 
assert.strictEqual(result.code, 0, 'Should exit 0 when project config selects bun'); const logEntries = readCommandLog(logFile); From af51fcacb70d42ed52ba8111026d9b0f0e49b364 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Mon, 9 Mar 2026 22:05:35 -0700 Subject: [PATCH 04/42] fix: resolve PR 371 portability regressions --- .opencode/README.md | 2 +- README.md | 2 +- schemas/plugin.schema.json | 21 +++++- scripts/hooks/post-edit-format.js | 44 +++++++++--- scripts/hooks/pre-bash-dev-server-block.js | 21 +++--- .../scripts/detect-project.sh | 1 + tests/hooks/hooks.test.js | 71 ++++++++++++++++++- 7 files changed, 141 insertions(+), 21 deletions(-) diff --git a/.opencode/README.md b/.opencode/README.md index eaaa73a5..e8737e81 100644 --- a/.opencode/README.md +++ b/.opencode/README.md @@ -39,7 +39,7 @@ This loads the ECC OpenCode plugin module from npm: It does **not** auto-register the full ECC command/agent/instruction catalog in your project config. For the full OpenCode setup, either: - run OpenCode inside this repository, or -- copy the relevant `.opencode/commands/`, `.opencode/prompts/`, `.opencode/instructions/`, and `agent` / `command` config entries into your own project +- copy the relevant `.opencode/commands/`, `.opencode/prompts/`, `.opencode/instructions/`, and the `instructions`, `agent`, and `command` config entries into your own project After installation, the `ecc-install` CLI is also available: diff --git a/README.md b/README.md index 0426ed6c..09267ca5 100644 --- a/README.md +++ b/README.md @@ -1055,7 +1055,7 @@ It does **not** automatically add ECC's full command/agent/instruction catalog t For the full ECC OpenCode setup, either: - run OpenCode inside this repository, or -- copy the bundled `.opencode/` config assets into your project and wire the `agent` / `command` entries in `opencode.json` +- copy the bundled `.opencode/` config assets into your project and wire the `instructions`, `agent`, and `command` entries in `opencode.json` ### 
Documentation diff --git a/schemas/plugin.schema.json b/schemas/plugin.schema.json index 834bb984..326a5a3b 100644 --- a/schemas/plugin.schema.json +++ b/schemas/plugin.schema.json @@ -34,6 +34,25 @@ "agents": { "type": "array", "items": { "type": "string" } + }, + "features": { + "type": "object", + "properties": { + "agents": { "type": "integer", "minimum": 0 }, + "commands": { "type": "integer", "minimum": 0 }, + "skills": { "type": "integer", "minimum": 0 }, + "configAssets": { "type": "boolean" }, + "hookEvents": { + "type": "array", + "items": { "type": "string" } + }, + "customTools": { + "type": "array", + "items": { "type": "string" } + } + }, + "additionalProperties": false } - } + }, + "additionalProperties": false } diff --git a/scripts/hooks/post-edit-format.js b/scripts/hooks/post-edit-format.js index d8a1754c..08afad09 100644 --- a/scripts/hooks/post-edit-format.js +++ b/scripts/hooks/post-edit-format.js @@ -10,7 +10,7 @@ * Fails silently if no formatter is found or installed. 
*/ -const { execFileSync } = require('child_process'); +const { execFileSync, spawnSync } = require('child_process'); const fs = require('fs'); const path = require('path'); const { getPackageManager } = require('../lib/package-manager'); @@ -50,18 +50,23 @@ process.stdin.on('data', chunk => { function findProjectRoot(startDir) { let dir = startDir; + let fallbackDir = null; while (true) { - if (PROJECT_ROOT_MARKERS.some(marker => fs.existsSync(path.join(dir, marker)))) { + if (detectFormatter(dir)) { return dir; } + if (!fallbackDir && PROJECT_ROOT_MARKERS.some(marker => fs.existsSync(path.join(dir, marker)))) { + fallbackDir = dir; + } + const parentDir = path.dirname(dir); if (parentDir === dir) break; dir = parentDir; } - return startDir; + return fallbackDir || startDir; } function detectFormatter(projectRoot) { @@ -114,6 +119,33 @@ function getFormatterCommand(formatter, filePath, projectRoot) { return null; } +function runFormatterCommand(cmd, projectRoot) { + if (process.platform === 'win32' && cmd.bin.endsWith('.cmd')) { + const result = spawnSync(cmd.bin, cmd.args, { + cwd: projectRoot, + shell: true, + stdio: 'pipe', + timeout: 15000 + }); + + if (result.error) { + throw result.error; + } + + if (typeof result.status === 'number' && result.status !== 0) { + throw new Error(result.stderr?.toString() || `Formatter exited with status ${result.status}`); + } + + return; + } + + execFileSync(cmd.bin, cmd.args, { + cwd: projectRoot, + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 15000 + }); +} + process.stdin.on('end', () => { try { const input = JSON.parse(data); @@ -126,11 +158,7 @@ process.stdin.on('end', () => { const cmd = getFormatterCommand(formatter, filePath, projectRoot); if (cmd) { - execFileSync(cmd.bin, cmd.args, { - cwd: projectRoot, - stdio: ['pipe', 'pipe', 'pipe'], - timeout: 15000 - }); + runFormatterCommand(cmd, projectRoot); } } catch { // Formatter not installed, file missing, or failed — non-blocking diff --git 
a/scripts/hooks/pre-bash-dev-server-block.js b/scripts/hooks/pre-bash-dev-server-block.js index 01c76c1f..9c0861b8 100755 --- a/scripts/hooks/pre-bash-dev-server-block.js +++ b/scripts/hooks/pre-bash-dev-server-block.js @@ -2,6 +2,7 @@ 'use strict'; const MAX_STDIN = 1024 * 1024; +const path = require('path'); const { splitShellSegments } = require('../lib/shell-split'); const DEV_COMMAND_WORDS = new Set([ @@ -10,13 +11,9 @@ const DEV_COMMAND_WORDS = new Set([ 'yarn', 'bun', 'npx', - 'bash', - 'sh', - 'zsh', - 'fish', 'tmux' ]); -const SKIPPABLE_PREFIX_WORDS = new Set(['env', 'command', 'builtin', 'exec', 'noglob', 'sudo']); +const SKIPPABLE_PREFIX_WORDS = new Set(['env', 'command', 'builtin', 'exec', 'noglob', 'sudo', 'nohup']); const PREFIX_OPTION_VALUE_WORDS = { env: new Set(['-u', '-C', '-S', '--unset', '--chdir', '--split-string']), sudo: new Set([ @@ -97,6 +94,12 @@ function isOptionToken(token) { return token.startsWith('-') && token.length > 1; } +function normalizeCommandWord(token) { + if (!token) return ''; + const base = path.basename(token).toLowerCase(); + return base.replace(/\.(cmd|exe|bat)$/i, ''); +} + function getLeadingCommandWord(segment) { let index = 0; let activeWrapper = null; @@ -122,8 +125,10 @@ function getLeadingCommandWord(segment) { if (/^[A-Za-z_][A-Za-z0-9_]*=.*/.test(token)) continue; - if (SKIPPABLE_PREFIX_WORDS.has(token)) { - activeWrapper = token; + const normalizedToken = normalizeCommandWord(token); + + if (SKIPPABLE_PREFIX_WORDS.has(normalizedToken)) { + activeWrapper = normalizedToken; continue; } @@ -134,7 +139,7 @@ function getLeadingCommandWord(segment) { continue; } - return token; + return normalizedToken; } return null; diff --git a/skills/continuous-learning-v2/scripts/detect-project.sh b/skills/continuous-learning-v2/scripts/detect-project.sh index b2677538..a44a0264 100755 --- a/skills/continuous-learning-v2/scripts/detect-project.sh +++ b/skills/continuous-learning-v2/scripts/detect-project.sh @@ -43,6 +43,7 @@ 
_clv2_resolve_python_cmd() { } _CLV2_PYTHON_CMD="$(_clv2_resolve_python_cmd 2>/dev/null || true)" +CLV2_PYTHON_CMD="$_CLV2_PYTHON_CMD" export CLV2_PYTHON_CMD _clv2_detect_project() { diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index 09be038c..a5ecaaa0 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -101,7 +101,14 @@ function readCommandLog(logFile) { return fs.readFileSync(logFile, 'utf8') .split('\n') .filter(Boolean) - .map(line => JSON.parse(line)); + .map(line => { + try { + return JSON.parse(line); + } catch { + return null; + } + }) + .filter(Boolean); } function withPrependedPath(binDir, env = {}) { @@ -873,6 +880,32 @@ async function runTests() { } })) passed++; else failed++; + if (await asyncTest('blocks env-wrapped npm run dev outside tmux on non-Windows platforms', async () => { + const stdinJson = JSON.stringify({ tool_input: { command: '/usr/bin/env npm run dev' } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + if (process.platform === 'win32') { + assert.strictEqual(result.code, 0, 'Windows path should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); + } else { + assert.strictEqual(result.code, 2, 'Unix path should block wrapped dev servers'); + assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); + } + })) passed++; else failed++; + + if (await asyncTest('blocks nohup-wrapped npm run dev outside tmux on non-Windows platforms', async () => { + const stdinJson = JSON.stringify({ tool_input: { command: 'nohup npm run dev >/tmp/dev.log 2>&1 &' } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + if (process.platform === 'win32') { + assert.strictEqual(result.code, 0, 'Windows path should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original 
input'); + } else { + assert.strictEqual(result.code, 2, 'Unix path should block wrapped dev servers'); + assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); + } + })) passed++; else failed++; + // post-edit-typecheck.js tests console.log('\npost-edit-typecheck.js:'); @@ -1659,7 +1692,14 @@ async function runTests() { const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); // Strip comments to avoid matching "shell: true" in comment text const codeOnly = formatSource.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); - assert.ok(!codeOnly.includes('shell:'), 'post-edit-format.js should not pass shell option in code'); + assert.ok( + !/execFileSync\([^)]*shell\s*:/.test(codeOnly), + 'post-edit-format.js should not pass shell option to execFileSync' + ); + assert.ok( + codeOnly.includes("process.platform === 'win32' && cmd.bin.endsWith('.cmd')"), + 'Windows shell execution must stay gated to .cmd shims' + ); assert.ok(formatSource.includes('npx.cmd'), 'Should use npx.cmd for Windows cross-platform safety'); })) passed++; else failed++; @@ -1710,6 +1750,33 @@ async function runTests() { assert.ok(detectProjectSource.includes('_clv2_resolve_python_cmd'), 'detect-project.sh should provide shared Python resolution'); })) passed++; else failed++; + if (await asyncTest('detect-project exports the resolved Python command for downstream scripts', async () => { + const detectProjectPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'scripts', 'detect-project.sh'); + const shellCommand = [ + `source "${detectProjectPath}" >/dev/null 2>&1`, + 'printf "%s\\n" "${CLV2_PYTHON_CMD:-}"' + ].join('; '); + + const shell = process.platform === 'win32' ? 
'bash' : 'bash'; + const proc = spawn(shell, ['-lc', shellCommand], { + env: process.env, + stdio: ['ignore', 'pipe', 'pipe'] + }); + + let stdout = ''; + let stderr = ''; + proc.stdout.on('data', data => stdout += data); + proc.stderr.on('data', data => stderr += data); + + const code = await new Promise((resolve, reject) => { + proc.on('close', resolve); + proc.on('error', reject); + }); + + assert.strictEqual(code, 0, `detect-project.sh should source cleanly, stderr: ${stderr}`); + assert.ok(stdout.trim().length > 0, 'CLV2_PYTHON_CMD should export a resolved interpreter path'); + })) passed++; else failed++; + if (await asyncTest('matches .tsx extension for type checking', async () => { const testDir = createTestDir(); const testFile = path.join(testDir, 'component.tsx'); From f331d3ecc9b029198f1545576104a19fd8e0c33c Mon Sep 17 00:00:00 2001 From: Jonghyeok Park Date: Sun, 8 Mar 2026 16:46:35 +0900 Subject: [PATCH 05/42] feat(hooks): add shared resolve-formatter utility with caching Extract project-root discovery, formatter detection, and binary resolution into a reusable module. Caches results per-process to avoid redundant filesystem lookups on every Edit hook invocation. This is the foundation for eliminating npx overhead in format hooks. --- scripts/lib/resolve-formatter.js | 156 ++++++++++++++++++++++ tests/lib/resolve-formatter.test.js | 198 ++++++++++++++++++++++++++++ 2 files changed, 354 insertions(+) create mode 100644 scripts/lib/resolve-formatter.js create mode 100644 tests/lib/resolve-formatter.test.js diff --git a/scripts/lib/resolve-formatter.js b/scripts/lib/resolve-formatter.js new file mode 100644 index 00000000..727a0734 --- /dev/null +++ b/scripts/lib/resolve-formatter.js @@ -0,0 +1,156 @@ +/** + * Shared formatter resolution utilities with caching. 
+ * + * Extracts project-root discovery, formatter detection, and binary + * resolution into a single module so that post-edit-format.js and + * quality-gate.js avoid duplicating work and filesystem lookups. + */ + +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +// ── Caches (per-process, cleared on next hook invocation) ─────────── +const projectRootCache = new Map(); +const formatterCache = new Map(); +const binCache = new Map(); + +// ── Public helpers ────────────────────────────────────────────────── + +/** + * Walk up from `startDir` until a directory containing package.json is found. + * Returns `startDir` as fallback when no package.json exists above it. + * + * @param {string} startDir - Absolute directory path to start from + * @returns {string} Absolute path to the project root + */ +function findProjectRoot(startDir) { + if (projectRootCache.has(startDir)) return projectRootCache.get(startDir); + + let dir = startDir; + while (dir !== path.dirname(dir)) { + if (fs.existsSync(path.join(dir, 'package.json'))) { + projectRootCache.set(startDir, dir); + return dir; + } + dir = path.dirname(dir); + } + + projectRootCache.set(startDir, startDir); + return startDir; +} + +/** + * Detect the formatter configured in the project. + * Biome takes priority over Prettier. 
+ * + * @param {string} projectRoot - Absolute path to the project root + * @returns {'biome' | 'prettier' | null} + */ +function detectFormatter(projectRoot) { + if (formatterCache.has(projectRoot)) return formatterCache.get(projectRoot); + + const biomeConfigs = ['biome.json', 'biome.jsonc']; + for (const cfg of biomeConfigs) { + if (fs.existsSync(path.join(projectRoot, cfg))) { + formatterCache.set(projectRoot, 'biome'); + return 'biome'; + } + } + + const prettierConfigs = [ + '.prettierrc', + '.prettierrc.json', + '.prettierrc.js', + '.prettierrc.cjs', + '.prettierrc.mjs', + '.prettierrc.yml', + '.prettierrc.yaml', + '.prettierrc.toml', + 'prettier.config.js', + 'prettier.config.cjs', + 'prettier.config.mjs', + ]; + for (const cfg of prettierConfigs) { + if (fs.existsSync(path.join(projectRoot, cfg))) { + formatterCache.set(projectRoot, 'prettier'); + return 'prettier'; + } + } + + formatterCache.set(projectRoot, null); + return null; +} + +/** + * Resolve the formatter binary, preferring the local node_modules/.bin + * installation over npx to avoid package-resolution overhead. + * + * @param {string} projectRoot - Absolute path to the project root + * @param {'biome' | 'prettier'} formatter - Detected formatter name + * @returns {{ bin: string, prefix: string[] }} + * `bin` – executable path (absolute local path or npx/npx.cmd) + * `prefix` – extra args to prepend (e.g. ['@biomejs/biome'] when using npx) + */ +function resolveFormatterBin(projectRoot, formatter) { + const cacheKey = `${projectRoot}:${formatter}`; + if (binCache.has(cacheKey)) return binCache.get(cacheKey); + + const isWin = process.platform === 'win32'; + const npxBin = isWin ? 'npx.cmd' : 'npx'; + + if (formatter === 'biome') { + const localBin = path.join( + projectRoot, + 'node_modules', + '.bin', + isWin ? 
'biome.cmd' : 'biome', + ); + if (fs.existsSync(localBin)) { + const result = { bin: localBin, prefix: [] }; + binCache.set(cacheKey, result); + return result; + } + const result = { bin: npxBin, prefix: ['@biomejs/biome'] }; + binCache.set(cacheKey, result); + return result; + } + + if (formatter === 'prettier') { + const localBin = path.join( + projectRoot, + 'node_modules', + '.bin', + isWin ? 'prettier.cmd' : 'prettier', + ); + if (fs.existsSync(localBin)) { + const result = { bin: localBin, prefix: [] }; + binCache.set(cacheKey, result); + return result; + } + const result = { bin: npxBin, prefix: ['prettier'] }; + binCache.set(cacheKey, result); + return result; + } + + const result = { bin: npxBin, prefix: [] }; + binCache.set(cacheKey, result); + return result; +} + +/** + * Clear all caches. Useful for testing. + */ +function clearCaches() { + projectRootCache.clear(); + formatterCache.clear(); + binCache.clear(); +} + +module.exports = { + findProjectRoot, + detectFormatter, + resolveFormatterBin, + clearCaches, +}; \ No newline at end of file diff --git a/tests/lib/resolve-formatter.test.js b/tests/lib/resolve-formatter.test.js new file mode 100644 index 00000000..92741f84 --- /dev/null +++ b/tests/lib/resolve-formatter.test.js @@ -0,0 +1,198 @@ +/** + * Tests for scripts/lib/resolve-formatter.js + * + * Run with: node tests/lib/resolve-formatter.test.js + */ + +const assert = require('assert'); +const path = require('path'); +const fs = require('fs'); +const os = require('os'); + +const { + findProjectRoot, + detectFormatter, + resolveFormatterBin, + clearCaches, +} = require('../../scripts/lib/resolve-formatter'); + +// Test helper +function test(name, fn) { + try { + fn(); + console.log(` ✓ ${name}`); + return true; + } catch (err) { + console.log(` ✗ ${name}`); + console.log(` Error: ${err.message}`); + return false; + } +} + +function makeTmpDir() { + return fs.mkdtempSync(path.join(os.tmpdir(), 'resolve-fmt-')); +} + +function runTests() { + 
console.log('\n=== Testing resolve-formatter.js ===\n'); + + let passed = 0; + let failed = 0; + + function run(name, fn) { + clearCaches(); + if (test(name, fn)) passed++; + else failed++; + } + + // ── findProjectRoot ─────────────────────────────────────────── + + run('findProjectRoot: finds package.json in parent dir', () => { + const root = makeTmpDir(); + const sub = path.join(root, 'src', 'lib'); + fs.mkdirSync(sub, { recursive: true }); + fs.writeFileSync(path.join(root, 'package.json'), '{}'); + + assert.strictEqual(findProjectRoot(sub), root); + }); + + run('findProjectRoot: returns startDir when no package.json', () => { + const root = makeTmpDir(); + const sub = path.join(root, 'deep'); + fs.mkdirSync(sub, { recursive: true }); + + // No package.json anywhere in tmp → falls back to startDir + assert.strictEqual(findProjectRoot(sub), sub); + }); + + run('findProjectRoot: caches result for same startDir', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'package.json'), '{}'); + + const first = findProjectRoot(root); + // Remove package.json — cache should still return the old result + fs.unlinkSync(path.join(root, 'package.json')); + const second = findProjectRoot(root); + + assert.strictEqual(first, second); + }); + + // ── detectFormatter ─────────────────────────────────────────── + + run('detectFormatter: detects biome.json', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'biome.json'), '{}'); + assert.strictEqual(detectFormatter(root), 'biome'); + }); + + run('detectFormatter: detects biome.jsonc', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'biome.jsonc'), '{}'); + assert.strictEqual(detectFormatter(root), 'biome'); + }); + + run('detectFormatter: detects .prettierrc', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, '.prettierrc'), '{}'); + assert.strictEqual(detectFormatter(root), 'prettier'); + }); + + run('detectFormatter: detects 
prettier.config.js', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'prettier.config.js'), 'module.exports = {}'); + assert.strictEqual(detectFormatter(root), 'prettier'); + }); + + run('detectFormatter: biome takes priority over prettier', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'biome.json'), '{}'); + fs.writeFileSync(path.join(root, '.prettierrc'), '{}'); + assert.strictEqual(detectFormatter(root), 'biome'); + }); + + run('detectFormatter: returns null when no config found', () => { + const root = makeTmpDir(); + assert.strictEqual(detectFormatter(root), null); + }); + + // ── resolveFormatterBin ─────────────────────────────────────── + + run('resolveFormatterBin: uses local biome binary when available', () => { + const root = makeTmpDir(); + const binDir = path.join(root, 'node_modules', '.bin'); + fs.mkdirSync(binDir, { recursive: true }); + const binName = process.platform === 'win32' ? 'biome.cmd' : 'biome'; + fs.writeFileSync(path.join(binDir, binName), ''); + + const result = resolveFormatterBin(root, 'biome'); + assert.strictEqual(result.bin, path.join(binDir, binName)); + assert.deepStrictEqual(result.prefix, []); + }); + + run('resolveFormatterBin: falls back to npx for biome', () => { + const root = makeTmpDir(); + const result = resolveFormatterBin(root, 'biome'); + const expectedBin = process.platform === 'win32' ? 'npx.cmd' : 'npx'; + assert.strictEqual(result.bin, expectedBin); + assert.deepStrictEqual(result.prefix, ['@biomejs/biome']); + }); + + run('resolveFormatterBin: uses local prettier binary when available', () => { + const root = makeTmpDir(); + const binDir = path.join(root, 'node_modules', '.bin'); + fs.mkdirSync(binDir, { recursive: true }); + const binName = process.platform === 'win32' ? 
'prettier.cmd' : 'prettier'; + fs.writeFileSync(path.join(binDir, binName), ''); + + const result = resolveFormatterBin(root, 'prettier'); + assert.strictEqual(result.bin, path.join(binDir, binName)); + assert.deepStrictEqual(result.prefix, []); + }); + + run('resolveFormatterBin: falls back to npx for prettier', () => { + const root = makeTmpDir(); + const result = resolveFormatterBin(root, 'prettier'); + const expectedBin = process.platform === 'win32' ? 'npx.cmd' : 'npx'; + assert.strictEqual(result.bin, expectedBin); + assert.deepStrictEqual(result.prefix, ['prettier']); + }); + + run('resolveFormatterBin: caches resolved binary', () => { + const root = makeTmpDir(); + const binDir = path.join(root, 'node_modules', '.bin'); + fs.mkdirSync(binDir, { recursive: true }); + const binName = process.platform === 'win32' ? 'biome.cmd' : 'biome'; + fs.writeFileSync(path.join(binDir, binName), ''); + + const first = resolveFormatterBin(root, 'biome'); + fs.unlinkSync(path.join(binDir, binName)); + const second = resolveFormatterBin(root, 'biome'); + + assert.strictEqual(first.bin, second.bin); + }); + + // ── clearCaches ─────────────────────────────────────────────── + + run('clearCaches: clears all cached values', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'package.json'), '{}'); + fs.writeFileSync(path.join(root, 'biome.json'), '{}'); + + findProjectRoot(root); + detectFormatter(root); + resolveFormatterBin(root, 'biome'); + + clearCaches(); + + // After clearing, removing config should change detection + fs.unlinkSync(path.join(root, 'biome.json')); + assert.strictEqual(detectFormatter(root), null); + }); + + // ── Summary ─────────────────────────────────────────────────── + + console.log(`\n ${passed} passed, ${failed} failed\n`); + process.exit(failed > 0 ? 
1 : 0); +} + +runTests(); From e5d02000c3d60f3259e4a4186ccc11516ed0b5b4 Mon Sep 17 00:00:00 2001 From: Jonghyeok Park Date: Sun, 8 Mar 2026 16:52:39 +0900 Subject: [PATCH 06/42] perf(hooks): eliminate npx overhead and merge biome invocations - Use local node_modules/.bin/biome binary instead of npx (~200-500ms savings) - Change post-edit-format from `biome format --write` to `biome check --write` (format + lint in one pass) - Skip redundant biome check in quality-gate for JS/TS files already handled by post-edit-format - Fix quality-gate to use findProjectRoot instead of process.cwd() - Export run() function from both hooks for direct invocation - Update tests to match shared resolve-formatter module usage --- scripts/hooks/post-edit-format.js | 202 +- scripts/hooks/quality-gate.js | 95 +- tests/hooks/hooks.test.js | 7272 ++++++++++++++++------------- 3 files changed, 4106 insertions(+), 3463 deletions(-) diff --git a/scripts/hooks/post-edit-format.js b/scripts/hooks/post-edit-format.js index 08afad09..89643830 100644 --- a/scripts/hooks/post-edit-format.js +++ b/scripts/hooks/post-edit-format.js @@ -7,159 +7,59 @@ * Runs after Edit tool use. If the edited file is a JS/TS file, * auto-detects the project formatter (Biome or Prettier) by looking * for config files, then formats accordingly. + * + * For Biome, uses `check --write` (format + lint in one pass) to + * avoid a redundant second invocation from quality-gate.js. + * + * Prefers the local node_modules/.bin binary over npx to skip + * package-resolution overhead (~200-500ms savings per invocation). + * * Fails silently if no formatter is found or installed. 
*/ -const { execFileSync, spawnSync } = require('child_process'); -const fs = require('fs'); +const { execFileSync } = require('child_process'); const path = require('path'); -const { getPackageManager } = require('../lib/package-manager'); + +const { + findProjectRoot, + detectFormatter, + resolveFormatterBin, +} = require('../lib/resolve-formatter'); const MAX_STDIN = 1024 * 1024; // 1MB limit -const BIOME_CONFIGS = ['biome.json', 'biome.jsonc']; -const PRETTIER_CONFIGS = [ - '.prettierrc', - '.prettierrc.json', - '.prettierrc.json5', - '.prettierrc.js', - '.prettierrc.cjs', - '.prettierrc.mjs', - '.prettierrc.ts', - '.prettierrc.cts', - '.prettierrc.mts', - '.prettierrc.yml', - '.prettierrc.yaml', - '.prettierrc.toml', - 'prettier.config.js', - 'prettier.config.cjs', - 'prettier.config.mjs', - 'prettier.config.ts', - 'prettier.config.cts', - 'prettier.config.mts', -]; -const PROJECT_ROOT_MARKERS = ['package.json', ...BIOME_CONFIGS, ...PRETTIER_CONFIGS]; -let data = ''; -process.stdin.setEncoding('utf8'); -process.stdin.on('data', chunk => { - if (data.length < MAX_STDIN) { - const remaining = MAX_STDIN - data.length; - data += chunk.substring(0, remaining); - } -}); - -function findProjectRoot(startDir) { - let dir = startDir; - let fallbackDir = null; - - while (true) { - if (detectFormatter(dir)) { - return dir; - } - - if (!fallbackDir && PROJECT_ROOT_MARKERS.some(marker => fs.existsSync(path.join(dir, marker)))) { - fallbackDir = dir; - } - - const parentDir = path.dirname(dir); - if (parentDir === dir) break; - dir = parentDir; - } - - return fallbackDir || startDir; -} - -function detectFormatter(projectRoot) { - for (const cfg of BIOME_CONFIGS) { - if (fs.existsSync(path.join(projectRoot, cfg))) return 'biome'; - } - - for (const cfg of PRETTIER_CONFIGS) { - if (fs.existsSync(path.join(projectRoot, cfg))) return 'prettier'; - } - - return null; -} - -function getRunnerBin(bin) { - if (process.platform !== 'win32') return bin; - if (bin === 'npx') return 
'npx.cmd'; - if (bin === 'pnpm') return 'pnpm.cmd'; - if (bin === 'yarn') return 'yarn.cmd'; - if (bin === 'bunx') return 'bunx.cmd'; - return bin; -} - -function getFormatterRunner(projectRoot) { - const pm = getPackageManager({ projectDir: projectRoot }); - const execCmd = pm?.config?.execCmd || 'npx'; - const [bin = 'npx', ...prefix] = execCmd.split(/\s+/).filter(Boolean); - - return { - bin: getRunnerBin(bin), - prefix - }; -} - -function getFormatterCommand(formatter, filePath, projectRoot) { - const runner = getFormatterRunner(projectRoot); - - if (formatter === 'biome') { - return { - bin: runner.bin, - args: [...runner.prefix, '@biomejs/biome', 'format', '--write', filePath] - }; - } - if (formatter === 'prettier') { - return { - bin: runner.bin, - args: [...runner.prefix, 'prettier', '--write', filePath] - }; - } - return null; -} - -function runFormatterCommand(cmd, projectRoot) { - if (process.platform === 'win32' && cmd.bin.endsWith('.cmd')) { - const result = spawnSync(cmd.bin, cmd.args, { - cwd: projectRoot, - shell: true, - stdio: 'pipe', - timeout: 15000 - }); - - if (result.error) { - throw result.error; - } - - if (typeof result.status === 'number' && result.status !== 0) { - throw new Error(result.stderr?.toString() || `Formatter exited with status ${result.status}`); - } - - return; - } - - execFileSync(cmd.bin, cmd.args, { - cwd: projectRoot, - stdio: ['pipe', 'pipe', 'pipe'], - timeout: 15000 - }); -} - -process.stdin.on('end', () => { +/** + * Core logic — exported so run-with-flags.js can call directly + * without spawning a child process. 
+ * + * @param {string} rawInput - Raw JSON string from stdin + * @returns {string} The original input (pass-through) + */ +function run(rawInput) { try { - const input = JSON.parse(data); + const input = JSON.parse(rawInput); const filePath = input.tool_input?.file_path; if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) { try { const projectRoot = findProjectRoot(path.dirname(path.resolve(filePath))); const formatter = detectFormatter(projectRoot); - const cmd = getFormatterCommand(formatter, filePath, projectRoot); + if (!formatter) return rawInput; - if (cmd) { - runFormatterCommand(cmd, projectRoot); - } + const resolved = resolveFormatterBin(projectRoot, formatter); + + // Biome: `check --write` = format + lint in one pass + // Prettier: `--write` = format only + const args = + formatter === 'biome' + ? [...resolved.prefix, 'check', '--write', filePath] + : [...resolved.prefix, '--write', filePath]; + + execFileSync(resolved.bin, args, { + cwd: projectRoot, + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 15000, + }); } catch { // Formatter not installed, file missing, or failed — non-blocking } @@ -168,6 +68,26 @@ process.stdin.on('end', () => { // Invalid input — pass through } - process.stdout.write(data); - process.exit(0); -}); + return rawInput; +} + +// ── stdin entry point (backwards-compatible) ──────────────────── +if (require.main === module) { + let data = ''; + process.stdin.setEncoding('utf8'); + + process.stdin.on('data', (chunk) => { + if (data.length < MAX_STDIN) { + const remaining = MAX_STDIN - data.length; + data += chunk.substring(0, remaining); + } + }); + + process.stdin.on('end', () => { + const result = run(data); + process.stdout.write(result); + process.exit(0); + }); +} + +module.exports = { run }; diff --git a/scripts/hooks/quality-gate.js b/scripts/hooks/quality-gate.js index a6f180f0..aee94b27 100755 --- a/scripts/hooks/quality-gate.js +++ b/scripts/hooks/quality-gate.js @@ -5,6 +5,11 @@ * Runs lightweight quality checks after 
file edits. * - Targets one file when file_path is provided * - Falls back to no-op when language/tooling is unavailable + * + * For JS/TS files with Biome, this hook is skipped because + * post-edit-format.js already runs `biome check --write`. + * This hook still handles .json/.md files for Biome, and all + * Prettier / Go / Python checks. */ 'use strict'; @@ -13,10 +18,15 @@ const fs = require('fs'); const path = require('path'); const { spawnSync } = require('child_process'); -const MAX_STDIN = 1024 * 1024; -let raw = ''; +const { + findProjectRoot, + detectFormatter, + resolveFormatterBin, +} = require('../lib/resolve-formatter'); -function run(command, args, cwd = process.cwd()) { +const MAX_STDIN = 1024 * 1024; + +function exec(command, args, cwd = process.cwd()) { return spawnSync(command, args, { cwd, encoding: 'utf8', @@ -38,31 +48,42 @@ function maybeRunQualityGate(filePath) { const strict = String(process.env.ECC_QUALITY_GATE_STRICT || '').toLowerCase() === 'true'; if (['.ts', '.tsx', '.js', '.jsx', '.json', '.md'].includes(ext)) { - // Prefer biome if present - if (fs.existsSync(path.join(process.cwd(), 'biome.json')) || fs.existsSync(path.join(process.cwd(), 'biome.jsonc'))) { - const args = ['biome', 'check', filePath]; + const projectRoot = findProjectRoot(path.dirname(path.resolve(filePath))); + const formatter = detectFormatter(projectRoot); + + if (formatter === 'biome') { + // JS/TS already handled by post-edit-format via `biome check --write` + if (['.ts', '.tsx', '.js', '.jsx'].includes(ext)) { + return; + } + + // .json / .md — still need quality gate + const resolved = resolveFormatterBin(projectRoot, 'biome'); + const args = [...resolved.prefix, 'check', filePath]; if (fix) args.push('--write'); - const result = run('npx', args); + const result = exec(resolved.bin, args, projectRoot); if (result.status !== 0 && strict) { log(`[QualityGate] Biome check failed for ${filePath}`); } return; } - // Fallback to prettier when installed - const 
prettierArgs = ['prettier', '--check', filePath]; - if (fix) { - prettierArgs[1] = '--write'; - } - const prettier = run('npx', prettierArgs); - if (prettier.status !== 0 && strict) { - log(`[QualityGate] Prettier check failed for ${filePath}`); + if (formatter === 'prettier') { + const resolved = resolveFormatterBin(projectRoot, 'prettier'); + const args = [...resolved.prefix, fix ? '--write' : '--check', filePath]; + const result = exec(resolved.bin, args, projectRoot); + if (result.status !== 0 && strict) { + log(`[QualityGate] Prettier check failed for ${filePath}`); + } + return; } + + // No formatter configured — skip return; } if (ext === '.go' && fix) { - run('gofmt', ['-w', filePath]); + exec('gofmt', ['-w', filePath]); return; } @@ -70,29 +91,45 @@ function maybeRunQualityGate(filePath) { const args = ['format']; if (!fix) args.push('--check'); args.push(filePath); - const r = run('ruff', args); + const r = exec('ruff', args); if (r.status !== 0 && strict) { log(`[QualityGate] Ruff check failed for ${filePath}`); } } } -process.stdin.setEncoding('utf8'); -process.stdin.on('data', chunk => { - if (raw.length < MAX_STDIN) { - const remaining = MAX_STDIN - raw.length; - raw += chunk.substring(0, remaining); - } -}); - -process.stdin.on('end', () => { +/** + * Core logic — exported so run-with-flags.js can call directly. + * + * @param {string} rawInput - Raw JSON string from stdin + * @returns {string} The original input (pass-through) + */ +function run(rawInput) { try { - const input = JSON.parse(raw); + const input = JSON.parse(rawInput); const filePath = String(input.tool_input?.file_path || ''); maybeRunQualityGate(filePath); } catch { // Ignore parse errors. 
} + return rawInput; +} - process.stdout.write(raw); -}); +// ── stdin entry point (backwards-compatible) ──────────────────── +if (require.main === module) { + let raw = ''; + process.stdin.setEncoding('utf8'); + process.stdin.on('data', (chunk) => { + if (raw.length < MAX_STDIN) { + const remaining = MAX_STDIN - raw.length; + raw += chunk.substring(0, remaining); + } + }); + + process.stdin.on('end', () => { + const result = run(raw); + process.stdout.write(result); + }); +} + +module.exports = { run }; diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index a5ecaaa0..81766ea2 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -47,8 +47,8 @@ function runScript(scriptPath, input = '', env = {}) { let stdout = ''; let stderr = ''; - proc.stdout.on('data', data => stdout += data); - proc.stderr.on('data', data => stderr += data); + proc.stdout.on('data', data => (stdout += data)); + proc.stderr.on('data', data => (stderr += data)); if (input) { proc.stdin.write(input); @@ -79,10 +79,12 @@ function createCommandShim(binDir, baseName, logFile) { fs.mkdirSync(binDir, { recursive: true }); const shimJs = path.join(binDir, `${baseName}-shim.js`); - fs.writeFileSync(shimJs, [ - 'const fs = require(\'fs\');', - `fs.appendFileSync(${JSON.stringify(logFile)}, JSON.stringify({ bin: ${JSON.stringify(baseName)}, args: process.argv.slice(2), cwd: process.cwd() }) + '\\n');` - ].join('\n')); + fs.writeFileSync( + shimJs, + ["const fs = require('fs');", `fs.appendFileSync(${JSON.stringify(logFile)}, JSON.stringify({ bin: ${JSON.stringify(baseName)}, args: process.argv.slice(2), cwd: process.cwd() }) + '\\n');`].join( + '\n' + ) + ); if (process.platform === 'win32') { const shimCmd = path.join(binDir, `${baseName}.cmd`); @@ -98,7 +100,8 @@ function createCommandShim(binDir, baseName, logFile) { function readCommandLog(logFile) { if (!fs.existsSync(logFile)) return []; - return fs.readFileSync(logFile, 'utf8') + return fs + 
.readFileSync(logFile, 'utf8') .split('\n') .filter(Boolean) .map(line => { @@ -112,8 +115,7 @@ function readCommandLog(logFile) { } function withPrependedPath(binDir, env = {}) { - const pathKey = Object.keys(process.env).find(key => key.toLowerCase() === 'path') - || (process.platform === 'win32' ? 'Path' : 'PATH'); + const pathKey = Object.keys(process.env).find(key => key.toLowerCase() === 'path') || (process.platform === 'win32' ? 'Path' : 'PATH'); const currentPath = process.env[pathKey] || process.env.PATH || ''; const nextPath = `${binDir}${path.delimiter}${currentPath}`; @@ -136,1658 +138,1951 @@ async function runTests() { // session-start.js tests console.log('session-start.js:'); - if (await asyncTest('runs without error', async () => { - const result = await runScript(path.join(scriptsDir, 'session-start.js')); - assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); - })) passed++; else failed++; + if ( + await asyncTest('runs without error', async () => { + const result = await runScript(path.join(scriptsDir, 'session-start.js')); + assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); + }) + ) + passed++; + else failed++; - if (await asyncTest('outputs session info to stderr', async () => { - const result = await runScript(path.join(scriptsDir, 'session-start.js')); - assert.ok( - result.stderr.includes('[SessionStart]') || - result.stderr.includes('Package manager'), - 'Should output session info' - ); - })) passed++; else failed++; + if ( + await asyncTest('outputs session info to stderr', async () => { + const result = await runScript(path.join(scriptsDir, 'session-start.js')); + assert.ok(result.stderr.includes('[SessionStart]') || result.stderr.includes('Package manager'), 'Should output session info'); + }) + ) + passed++; + else failed++; // session-start.js edge cases console.log('\nsession-start.js (edge cases):'); - if (await asyncTest('exits 0 even with isolated empty HOME', async () => 
{ - const isoHome = path.join(os.tmpdir(), `ecc-iso-start-${Date.now()}`); - fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + if ( + await asyncTest('exits 0 even with isolated empty HOME', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-iso-start-${Date.now()}`); + fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; - if (await asyncTest('reports package manager detection', async () => { - const result = await runScript(path.join(scriptsDir, 'session-start.js')); - assert.ok( - result.stderr.includes('Package manager') || result.stderr.includes('[SessionStart]'), - 'Should report package manager info' - ); - })) passed++; else failed++; + if ( + await asyncTest('reports package manager detection', async () => { + const result = await runScript(path.join(scriptsDir, 'session-start.js')); + assert.ok(result.stderr.includes('Package manager') || result.stderr.includes('[SessionStart]'), 'Should report package manager info'); + }) + ) + passed++; + else failed++; - if (await asyncTest('skips template session content', async () => { - const isoHome = path.join(os.tmpdir(), 
`ecc-tpl-start-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('skips template session content', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-tpl-start-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - // Create a session file with template placeholder - const sessionFile = path.join(sessionsDir, '2026-02-11-abcd1234-session.tmp'); - fs.writeFileSync(sessionFile, '## Current State\n\n[Session context goes here]\n'); + // Create a session file with template placeholder + const sessionFile = path.join(sessionsDir, '2026-02-11-abcd1234-session.tmp'); + fs.writeFileSync(sessionFile, '## Current State\n\n[Session context goes here]\n'); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - // stdout should NOT contain the template content - assert.ok( - !result.stdout.includes('Previous session summary'), - 'Should not inject template session content' - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + // stdout should NOT contain the template content + assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject template session content'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; - if (await asyncTest('injects real session content', async () => { - 
const isoHome = path.join(os.tmpdir(), `ecc-real-start-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('injects real session content', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-real-start-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - // Create a real session file - const sessionFile = path.join(sessionsDir, '2026-02-11-efgh5678-session.tmp'); - fs.writeFileSync(sessionFile, '# Real Session\n\nI worked on authentication refactor.\n'); + // Create a real session file + const sessionFile = path.join(sessionsDir, '2026-02-11-efgh5678-session.tmp'); + fs.writeFileSync(sessionFile, '# Real Session\n\nI worked on authentication refactor.\n'); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - assert.ok( - result.stdout.includes('Previous session summary'), - 'Should inject real session content' - ); - assert.ok( - result.stdout.includes('authentication refactor'), - 'Should include session content text' - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Previous session summary'), 'Should inject real session content'); + assert.ok(result.stdout.includes('authentication refactor'), 'Should include session content text'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) 
+ passed++; + else failed++; - if (await asyncTest('reports learned skills count', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-skills-start-${Date.now()}`); - const learnedDir = path.join(isoHome, '.claude', 'skills', 'learned'); - fs.mkdirSync(learnedDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); + if ( + await asyncTest('reports learned skills count', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-skills-start-${Date.now()}`); + const learnedDir = path.join(isoHome, '.claude', 'skills', 'learned'); + fs.mkdirSync(learnedDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); - // Create learned skill files - fs.writeFileSync(path.join(learnedDir, 'testing-patterns.md'), '# Testing'); - fs.writeFileSync(path.join(learnedDir, 'debugging.md'), '# Debugging'); + // Create learned skill files + fs.writeFileSync(path.join(learnedDir, 'testing-patterns.md'), '# Testing'); + fs.writeFileSync(path.join(learnedDir, 'debugging.md'), '# Debugging'); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - assert.ok( - result.stderr.includes('2 learned skill(s)'), - `Should report 2 learned skills, stderr: ${result.stderr}` - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('2 learned skill(s)'), `Should report 2 learned skills, stderr: ${result.stderr}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // check-console-log.js tests console.log('\ncheck-console-log.js:'); - if (await 
asyncTest('passes through stdin data to stdout', async () => { - const stdinData = JSON.stringify({ tool_name: 'Write', tool_input: {} }); - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('tool_name'), 'Should pass through stdin data'); - })) passed++; else failed++; + if ( + await asyncTest('passes through stdin data to stdout', async () => { + const stdinData = JSON.stringify({ tool_name: 'Write', tool_input: {} }); + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('tool_name'), 'Should pass through stdin data'); + }) + ) + passed++; + else failed++; - if (await asyncTest('exits 0 with empty stdin', async () => { - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), ''); - assert.strictEqual(result.code, 0); - })) passed++; else failed++; + if ( + await asyncTest('exits 0 with empty stdin', async () => { + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), ''); + assert.strictEqual(result.code, 0); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles invalid JSON stdin gracefully', async () => { - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), 'not valid json'); - assert.strictEqual(result.code, 0, 'Should exit 0 on invalid JSON'); - // Should still pass through the data - assert.ok(result.stdout.includes('not valid json'), 'Should pass through invalid data'); - })) passed++; else failed++; + if ( + await asyncTest('handles invalid JSON stdin gracefully', async () => { + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), 'not valid json'); + assert.strictEqual(result.code, 0, 'Should exit 0 on invalid JSON'); + // Should still pass through the data + assert.ok(result.stdout.includes('not valid json'), 'Should 
pass through invalid data'); + }) + ) + passed++; + else failed++; // session-end.js tests console.log('\nsession-end.js:'); - if (await asyncTest('runs without error', async () => { - const result = await runScript(path.join(scriptsDir, 'session-end.js')); - assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); - })) passed++; else failed++; + if ( + await asyncTest('runs without error', async () => { + const result = await runScript(path.join(scriptsDir, 'session-end.js')); + assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); + }) + ) + passed++; + else failed++; - if (await asyncTest('creates or updates session file', async () => { - // Run the script - await runScript(path.join(scriptsDir, 'session-end.js')); + if ( + await asyncTest('creates or updates session file', async () => { + // Run the script + await runScript(path.join(scriptsDir, 'session-end.js')); - // Check if session file was created - // Note: Without CLAUDE_SESSION_ID, falls back to project name (not 'default') - // Use local time to match the script's getDateString() function - const sessionsDir = path.join(os.homedir(), '.claude', 'sessions'); - const now = new Date(); - const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; + // Check if session file was created + // Note: Without CLAUDE_SESSION_ID, falls back to project name (not 'default') + // Use local time to match the script's getDateString() function + const sessionsDir = path.join(os.homedir(), '.claude', 'sessions'); + const now = new Date(); + const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; - // Get the expected session ID (project name fallback) - const utils = require('../../scripts/lib/utils'); - const expectedId = utils.getSessionIdShort(); - const sessionFile = path.join(sessionsDir, `${today}-${expectedId}-session.tmp`); 
+ // Get the expected session ID (project name fallback) + const utils = require('../../scripts/lib/utils'); + const expectedId = utils.getSessionIdShort(); + const sessionFile = path.join(sessionsDir, `${today}-${expectedId}-session.tmp`); - assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`); - })) passed++; else failed++; + assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`); + }) + ) + passed++; + else failed++; - if (await asyncTest('includes session ID in filename', async () => { - const testSessionId = 'test-session-abc12345'; - const expectedShortId = 'abc12345'; // Last 8 chars + if ( + await asyncTest('includes session ID in filename', async () => { + const testSessionId = 'test-session-abc12345'; + const expectedShortId = 'abc12345'; // Last 8 chars - // Run with custom session ID - await runScript(path.join(scriptsDir, 'session-end.js'), '', { - CLAUDE_SESSION_ID: testSessionId - }); + // Run with custom session ID + await runScript(path.join(scriptsDir, 'session-end.js'), '', { + CLAUDE_SESSION_ID: testSessionId + }); - // Check if session file was created with session ID - // Use local time to match the script's getDateString() function - const sessionsDir = path.join(os.homedir(), '.claude', 'sessions'); - const now = new Date(); - const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; - const sessionFile = path.join(sessionsDir, `${today}-${expectedShortId}-session.tmp`); + // Check if session file was created with session ID + // Use local time to match the script's getDateString() function + const sessionsDir = path.join(os.homedir(), '.claude', 'sessions'); + const now = new Date(); + const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; + const sessionFile = path.join(sessionsDir, `${today}-${expectedShortId}-session.tmp`); - 
assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`); - })) passed++; else failed++; + assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`); + }) + ) + passed++; + else failed++; // pre-compact.js tests console.log('\npre-compact.js:'); - if (await asyncTest('runs without error', async () => { - const result = await runScript(path.join(scriptsDir, 'pre-compact.js')); - assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); - })) passed++; else failed++; + if ( + await asyncTest('runs without error', async () => { + const result = await runScript(path.join(scriptsDir, 'pre-compact.js')); + assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); + }) + ) + passed++; + else failed++; - if (await asyncTest('outputs PreCompact message', async () => { - const result = await runScript(path.join(scriptsDir, 'pre-compact.js')); - assert.ok(result.stderr.includes('[PreCompact]'), 'Should output PreCompact message'); - })) passed++; else failed++; + if ( + await asyncTest('outputs PreCompact message', async () => { + const result = await runScript(path.join(scriptsDir, 'pre-compact.js')); + assert.ok(result.stderr.includes('[PreCompact]'), 'Should output PreCompact message'); + }) + ) + passed++; + else failed++; - if (await asyncTest('creates compaction log', async () => { - await runScript(path.join(scriptsDir, 'pre-compact.js')); - const logFile = path.join(os.homedir(), '.claude', 'sessions', 'compaction-log.txt'); - assert.ok(fs.existsSync(logFile), 'Compaction log should exist'); - })) passed++; else failed++; - - if (await asyncTest('annotates active session file with compaction marker', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-compact-annotate-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - - // Create an active .tmp session file - const sessionFile = 
path.join(sessionsDir, '2026-02-11-test-session.tmp'); - fs.writeFileSync(sessionFile, '# Session: 2026-02-11\n**Started:** 10:00\n'); - - try { - await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - - const content = fs.readFileSync(sessionFile, 'utf8'); - assert.ok( - content.includes('Compaction occurred'), - 'Should annotate the session file with compaction marker' - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; - - if (await asyncTest('compaction log contains timestamp', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-compact-ts-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - - try { - await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - - const logFile = path.join(sessionsDir, 'compaction-log.txt'); + if ( + await asyncTest('creates compaction log', async () => { + await runScript(path.join(scriptsDir, 'pre-compact.js')); + const logFile = path.join(os.homedir(), '.claude', 'sessions', 'compaction-log.txt'); assert.ok(fs.existsSync(logFile), 'Compaction log should exist'); - const content = fs.readFileSync(logFile, 'utf8'); - // Should have a timestamp like [2026-02-11 14:30:00] - assert.ok( - /\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\]/.test(content), - `Log should contain timestamped entry, got: ${content.substring(0, 100)}` - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('annotates active session file with compaction marker', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-compact-annotate-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + + // Create an active .tmp session 
file + const sessionFile = path.join(sessionsDir, '2026-02-11-test-session.tmp'); + fs.writeFileSync(sessionFile, '# Session: 2026-02-11\n**Started:** 10:00\n'); + + try { + await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + + const content = fs.readFileSync(sessionFile, 'utf8'); + assert.ok(content.includes('Compaction occurred'), 'Should annotate the session file with compaction marker'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('compaction log contains timestamp', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-compact-ts-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + + try { + await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + + const logFile = path.join(sessionsDir, 'compaction-log.txt'); + assert.ok(fs.existsSync(logFile), 'Compaction log should exist'); + const content = fs.readFileSync(logFile, 'utf8'); + // Should have a timestamp like [2026-02-11 14:30:00] + assert.ok(/\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\]/.test(content), `Log should contain timestamped entry, got: ${content.substring(0, 100)}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // suggest-compact.js tests console.log('\nsuggest-compact.js:'); - if (await asyncTest('runs without error', async () => { - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: 'test-session-' + Date.now() - }); - assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); - })) passed++; else failed++; - - if (await asyncTest('increments counter on each call', async () => { - const sessionId = 'test-counter-' + Date.now(); - - // Run multiple times - for (let i = 0; i 
< 3; i++) { - await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - } - - // Check counter file - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(count, 3, `Counter should be 3, got ${count}`); - - // Cleanup - fs.unlinkSync(counterFile); - })) passed++; else failed++; - - if (await asyncTest('suggests compact at threshold', async () => { - const sessionId = 'test-threshold-' + Date.now(); - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - - // Set counter to threshold - 1 - fs.writeFileSync(counterFile, '49'); - - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '50' - }); - - assert.ok( - result.stderr.includes('50 tool calls reached'), - 'Should suggest compact at threshold' - ); - - // Cleanup - fs.unlinkSync(counterFile); - })) passed++; else failed++; - - if (await asyncTest('does not suggest below threshold', async () => { - const sessionId = 'test-below-' + Date.now(); - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - - fs.writeFileSync(counterFile, '10'); - - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '50' - }); - - assert.ok( - !result.stderr.includes('tool calls'), - 'Should not suggest compact below threshold' - ); - - fs.unlinkSync(counterFile); - })) passed++; else failed++; - - if (await asyncTest('suggests at regular intervals after threshold', async () => { - const sessionId = 'test-interval-' + Date.now(); - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - - // Set counter to 74 (next will be 75, which is >50 and 75%25==0) - fs.writeFileSync(counterFile, '74'); - - const result = await 
runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '50' - }); - - assert.ok( - result.stderr.includes('75 tool calls'), - 'Should suggest at 25-call intervals after threshold' - ); - - fs.unlinkSync(counterFile); - })) passed++; else failed++; - - if (await asyncTest('handles corrupted counter file', async () => { - const sessionId = 'test-corrupt-' + Date.now(); - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - - fs.writeFileSync(counterFile, 'not-a-number'); - - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - - assert.strictEqual(result.code, 0, 'Should handle corrupted counter gracefully'); - - // Counter should be reset to 1 - const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(newCount, 1, 'Should reset counter to 1 on corrupt data'); - - fs.unlinkSync(counterFile); - })) passed++; else failed++; - - if (await asyncTest('uses default session ID when no env var', async () => { - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: '' // Empty, should use 'default' - }); - - assert.strictEqual(result.code, 0, 'Should work with default session ID'); - - // Cleanup the default counter file - const counterFile = path.join(os.tmpdir(), 'claude-tool-count-default'); - if (fs.existsSync(counterFile)) fs.unlinkSync(counterFile); - })) passed++; else failed++; - - if (await asyncTest('validates threshold bounds', async () => { - const sessionId = 'test-bounds-' + Date.now(); - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - - // Invalid threshold should fall back to 50 - fs.writeFileSync(counterFile, '49'); - - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '-5' // Invalid: negative - }); - - 
assert.ok( - result.stderr.includes('50 tool calls'), - 'Should use default threshold (50) for invalid value' - ); - - fs.unlinkSync(counterFile); - })) passed++; else failed++; - - // evaluate-session.js tests - console.log('\nevaluate-session.js:'); - - if (await asyncTest('runs without error when no transcript', async () => { - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js')); - assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); - })) passed++; else failed++; - - if (await asyncTest('skips short sessions', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Create a short transcript (less than 10 user messages) - const transcript = Array(5).fill('{"type":"user","content":"test"}\n').join(''); - fs.writeFileSync(transcriptPath, transcript); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); - - assert.ok( - result.stderr.includes('Session too short'), - 'Should indicate session is too short' - ); - - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('processes sessions with enough messages', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Create a longer transcript (more than 10 user messages) - const transcript = Array(15).fill('{"type":"user","content":"test"}\n').join(''); - fs.writeFileSync(transcriptPath, transcript); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); - - assert.ok( - result.stderr.includes('15 messages'), - 'Should report message count' - ); - - cleanupTestDir(testDir); - })) passed++; else failed++; - - // evaluate-session.js: whitespace tolerance regression test - if (await 
asyncTest('counts user messages with whitespace in JSON (regression)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Create transcript with whitespace around colons (pretty-printed style) - const lines = []; - for (let i = 0; i < 15; i++) { - lines.push('{ "type" : "user", "content": "message ' + i + '" }'); - } - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); - - assert.ok( - result.stderr.includes('15 messages'), - 'Should count user messages with whitespace in JSON, got: ' + result.stderr.trim() - ); - - cleanupTestDir(testDir); - })) passed++; else failed++; - - // session-end.js: content array with null elements regression test - if (await asyncTest('handles transcript with null content array elements (regression)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Create transcript with null elements in content array - const lines = [ - '{"type":"user","content":[null,{"text":"hello"},null,{"text":"world"}]}', - '{"type":"user","content":"simple string message"}', - '{"type":"user","content":[{"text":"normal"},{"text":"array"}]}', - '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/test.js"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - - // Should not crash (exit 0) - assert.strictEqual(result.code, 0, 'Should handle null content elements without crash'); - })) passed++; else failed++; - - // post-edit-console-warn.js tests - console.log('\npost-edit-console-warn.js:'); - - if (await asyncTest('warns about console.log in JS files', async () => { - const 
testDir = createTestDir(); - const testFile = path.join(testDir, 'test.js'); - fs.writeFileSync(testFile, 'const x = 1;\nconsole.log(x);\nreturn x;'); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.ok(result.stderr.includes('console.log'), 'Should warn about console.log'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('does not warn for non-JS files', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'test.md'); - fs.writeFileSync(testFile, 'Use console.log for debugging'); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.ok(!result.stderr.includes('console.log'), 'Should not warn for non-JS files'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('does not warn for clean JS files', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'clean.ts'); - fs.writeFileSync(testFile, 'const x = 1;\nreturn x;'); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.ok(!result.stderr.includes('WARNING'), 'Should not warn for clean files'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('handles missing file gracefully', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.ts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.strictEqual(result.code, 0, 'Should not crash on missing file'); - })) passed++; else failed++; - - if (await asyncTest('limits console.log output to 5 matches', async () => 
{ - const testDir = createTestDir(); - const testFile = path.join(testDir, 'many-logs.js'); - // Create a file with 8 console.log statements - const lines = []; - for (let i = 1; i <= 8; i++) { - lines.push(`console.log('debug ${i}');`); - } - fs.writeFileSync(testFile, lines.join('\n')); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.ok(result.stderr.includes('console.log'), 'Should warn about console.log'); - // Count how many "debug N" lines appear in stderr (the line-number output) - const debugLines = result.stderr.split('\n').filter(l => /^\d+:/.test(l.trim())); - assert.ok(debugLines.length <= 5, `Should show at most 5 matches, got ${debugLines.length}`); - // Should include debug 1 but not debug 8 (sliced) - assert.ok(result.stderr.includes('debug 1'), 'Should include first match'); - assert.ok(!result.stderr.includes('debug 8'), 'Should not include 8th match'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('ignores console.warn and console.error (only flags console.log)', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'other-console.ts'); - fs.writeFileSync(testFile, [ - 'console.warn("this is a warning");', - 'console.error("this is an error");', - 'console.debug("this is debug");', - 'console.info("this is info");', - ].join('\n')); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.ok(!result.stderr.includes('WARNING'), 'Should NOT warn about console.warn/error/debug/info'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('passes through original data on stdout', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/test.py' } }); - const result = 
await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - - assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); - })) passed++; else failed++; - - // post-edit-format.js tests - console.log('\npost-edit-format.js:'); - - if (await asyncTest('runs without error on empty stdin', async () => { - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js')); - assert.strictEqual(result.code, 0, 'Should exit 0 on empty stdin'); - })) passed++; else failed++; - - if (await asyncTest('skips non-JS/TS files', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/test.py' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for non-JS files'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); - })) passed++; else failed++; - - if (await asyncTest('passes through data for invalid JSON', async () => { - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), 'not json'); - assert.strictEqual(result.code, 0, 'Should exit 0 for invalid JSON'); - })) passed++; else failed++; - - if (await asyncTest('handles null tool_input gracefully', async () => { - const stdinJson = JSON.stringify({ tool_input: null }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for null tool_input'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); - })) passed++; else failed++; - - if (await asyncTest('handles missing file_path in tool_input', async () => { - const stdinJson = JSON.stringify({ tool_input: {} }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for missing file_path'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass 
through data'); - })) passed++; else failed++; - - if (await asyncTest('exits 0 and passes data when prettier is unavailable', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/path/file.ts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 even when prettier fails'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through original data'); - })) passed++; else failed++; - - if (await asyncTest('finds formatter config in parent dirs without package.json', async () => { - const testDir = createTestDir(); - const rootDir = path.join(testDir, 'config-only-repo'); - const nestedDir = path.join(rootDir, 'src', 'nested'); - const filePath = path.join(nestedDir, 'component.ts'); - const binDir = path.join(testDir, 'bin'); - const logFile = path.join(testDir, 'formatter.log'); - - fs.mkdirSync(nestedDir, { recursive: true }); - fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); - fs.writeFileSync(filePath, 'export const value = 1;\n'); - createCommandShim(binDir, 'npx', logFile); - - const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); - const result = await runScript( - path.join(scriptsDir, 'post-edit-format.js'), - stdinJson, - withPrependedPath(binDir) - ); - - assert.strictEqual(result.code, 0, 'Should exit 0 for config-only repo'); - const logEntries = readCommandLog(logFile); - assert.strictEqual(logEntries.length, 1, 'Should invoke formatter once'); - assert.strictEqual( - fs.realpathSync(logEntries[0].cwd), - fs.realpathSync(rootDir), - 'Should run formatter from config root' - ); - assert.deepStrictEqual( - logEntries[0].args, - ['prettier', '--write', filePath], - 'Should use the formatter on the nested file' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('respects CLAUDE_PACKAGE_MANAGER for formatter fallback runner', async () => { - const 
testDir = createTestDir(); - const rootDir = path.join(testDir, 'pnpm-repo'); - const filePath = path.join(rootDir, 'index.ts'); - const binDir = path.join(testDir, 'bin'); - const logFile = path.join(testDir, 'pnpm.log'); - - fs.mkdirSync(rootDir, { recursive: true }); - fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); - fs.writeFileSync(filePath, 'export const value = 1;\n'); - createCommandShim(binDir, 'pnpm', logFile); - - const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); - const result = await runScript( - path.join(scriptsDir, 'post-edit-format.js'), - stdinJson, - withPrependedPath(binDir, { CLAUDE_PACKAGE_MANAGER: 'pnpm' }) - ); - - assert.strictEqual(result.code, 0, 'Should exit 0 when pnpm fallback is used'); - const logEntries = readCommandLog(logFile); - assert.strictEqual(logEntries.length, 1, 'Should invoke pnpm fallback runner once'); - assert.strictEqual(logEntries[0].bin, 'pnpm', 'Should use pnpm runner'); - assert.deepStrictEqual( - logEntries[0].args, - ['dlx', 'prettier', '--write', filePath], - 'Should use pnpm dlx for fallback formatter execution' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('respects project package-manager config for formatter fallback runner', async () => { - const testDir = createTestDir(); - const rootDir = path.join(testDir, 'bun-repo'); - const filePath = path.join(rootDir, 'index.ts'); - const binDir = path.join(testDir, 'bin'); - const logFile = path.join(testDir, 'bun.log'); - - fs.mkdirSync(path.join(rootDir, '.claude'), { recursive: true }); - fs.writeFileSync(path.join(rootDir, '.claude', 'package-manager.json'), JSON.stringify({ packageManager: 'bun' })); - fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); - fs.writeFileSync(filePath, 'export const value = 1;\n'); - createCommandShim(binDir, 'bunx', logFile); - - const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); - const result = await runScript( - 
path.join(scriptsDir, 'post-edit-format.js'), - stdinJson, - withPrependedPath(binDir) - ); - - assert.strictEqual(result.code, 0, 'Should exit 0 when project config selects bun'); - const logEntries = readCommandLog(logFile); - assert.strictEqual(logEntries.length, 1, 'Should invoke bunx fallback runner once'); - assert.strictEqual(logEntries[0].bin, 'bunx', 'Should use bunx runner'); - assert.deepStrictEqual( - logEntries[0].args, - ['prettier', '--write', filePath], - 'Should use bunx for fallback formatter execution' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; - - console.log('\npre-bash-dev-server-block.js:'); - - if (await asyncTest('allows non-dev commands whose heredoc text mentions npm run dev', async () => { - const command = [ - 'gh pr create --title "fix: docs" --body "$(cat <<\'EOF\'', - '## Test plan', - '- run npm run dev to verify the site starts', - 'EOF', - ')"' - ].join('\n'); - const stdinJson = JSON.stringify({ tool_input: { command } }); - const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); - - assert.strictEqual(result.code, 0, 'Non-dev commands should pass through'); - assert.strictEqual(result.stdout, stdinJson, 'Should preserve original input'); - assert.ok(!result.stderr.includes('BLOCKED'), 'Should not emit a block message'); - })) passed++; else failed++; - - if (await asyncTest('blocks bare npm run dev outside tmux on non-Windows platforms', async () => { - const stdinJson = JSON.stringify({ tool_input: { command: 'npm run dev' } }); - const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); - - if (process.platform === 'win32') { - assert.strictEqual(result.code, 0, 'Windows path should pass through'); - assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); - } else { - assert.strictEqual(result.code, 2, 'Unix path should block bare dev servers'); - 
assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); - } - })) passed++; else failed++; - - if (await asyncTest('blocks env-wrapped npm run dev outside tmux on non-Windows platforms', async () => { - const stdinJson = JSON.stringify({ tool_input: { command: '/usr/bin/env npm run dev' } }); - const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); - - if (process.platform === 'win32') { - assert.strictEqual(result.code, 0, 'Windows path should pass through'); - assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); - } else { - assert.strictEqual(result.code, 2, 'Unix path should block wrapped dev servers'); - assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); - } - })) passed++; else failed++; - - if (await asyncTest('blocks nohup-wrapped npm run dev outside tmux on non-Windows platforms', async () => { - const stdinJson = JSON.stringify({ tool_input: { command: 'nohup npm run dev >/tmp/dev.log 2>&1 &' } }); - const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); - - if (process.platform === 'win32') { - assert.strictEqual(result.code, 0, 'Windows path should pass through'); - assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); - } else { - assert.strictEqual(result.code, 2, 'Unix path should block wrapped dev servers'); - assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); - } - })) passed++; else failed++; - - // post-edit-typecheck.js tests - console.log('\npost-edit-typecheck.js:'); - - if (await asyncTest('runs without error on empty stdin', async () => { - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js')); - assert.strictEqual(result.code, 0, 'Should exit 0 on empty stdin'); - })) passed++; else failed++; - - if (await asyncTest('skips non-TypeScript 
files', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/test.js' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for non-TS files'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); - })) passed++; else failed++; - - if (await asyncTest('handles nonexistent TS file gracefully', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.ts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for missing file'); - })) passed++; else failed++; - - if (await asyncTest('handles TS file with no tsconfig gracefully', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'test.ts'); - fs.writeFileSync(testFile, 'const x: number = 1;'); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 when no tsconfig found'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('stops tsconfig walk at max depth (20)', async () => { - // Create a deeply nested directory (>20 levels) with no tsconfig anywhere - const testDir = createTestDir(); - let deepDir = testDir; - for (let i = 0; i < 25; i++) { - deepDir = path.join(deepDir, `d${i}`); - } - fs.mkdirSync(deepDir, { recursive: true }); - const testFile = path.join(deepDir, 'deep.ts'); - fs.writeFileSync(testFile, 'const x: number = 1;'); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const startTime = Date.now(); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - const elapsed = Date.now() - startTime; - - 
assert.strictEqual(result.code, 0, 'Should not hang at depth limit'); - assert.ok(elapsed < 5000, `Should complete quickly at depth limit, took ${elapsed}ms`); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('passes through stdin data on stdout (post-edit-typecheck)', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'test.ts'); - fs.writeFileSync(testFile, 'const x: number = 1;'); - - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data on stdout'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - // session-end.js extractSessionSummary tests - console.log('\nsession-end.js (extractSessionSummary):'); - - if (await asyncTest('extracts user messages from transcript', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const lines = [ - '{"type":"user","content":"Fix the login bug"}', - '{"type":"assistant","content":"I will fix it"}', - '{"type":"user","content":"Also add tests"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('handles transcript with array content fields', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const lines = [ - '{"type":"user","content":[{"text":"Part 1"},{"text":"Part 2"}]}', - '{"type":"user","content":"Simple message"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson 
= JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle array content without crash'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('extracts tool names and file paths from transcript', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const lines = [ - '{"type":"user","content":"Edit the file"}', - '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/main.ts"}}', - '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/utils.ts"}}', - '{"type":"tool_use","tool_name":"Write","tool_input":{"file_path":"/src/new.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0); - // Session file should contain summary with tools used - assert.ok( - result.stderr.includes('Created session file') || result.stderr.includes('Updated session file'), - 'Should create/update session file' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('handles transcript with malformed JSON lines', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const lines = [ - '{"type":"user","content":"Valid message"}', - 'NOT VALID JSON', - '{"broken json', - '{"type":"user","content":"Another valid"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should skip malformed lines gracefully'); - assert.ok( - 
result.stderr.includes('unparseable') || result.stderr.includes('Skipped'), - `Should report parse errors, got: ${result.stderr.substring(0, 200)}` - ); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('handles empty transcript (no user messages)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Only tool_use entries, no user messages - const lines = [ - '{"type":"tool_use","tool_name":"Read","tool_input":{}}', - '{"type":"assistant","content":"done"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle transcript with no user messages'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('truncates long user messages to 200 chars', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const longMsg = 'x'.repeat(500); - const lines = [ - `{"type":"user","content":"${longMsg}"}`, - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle and truncate long messages'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('uses CLAUDE_TRANSCRIPT_PATH env var as fallback', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const lines = [ - '{"type":"user","content":"Fallback test message"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - // Send invalid JSON to stdin so it falls back to env var - const result = await 
runScript(path.join(scriptsDir, 'session-end.js'), 'not json', { - CLAUDE_TRANSCRIPT_PATH: transcriptPath - }); - assert.strictEqual(result.code, 0, 'Should use env var fallback'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('escapes backticks in user messages in session file', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // User messages with backticks that could break markdown - const lines = [ - '{"type":"user","content":"Fix the `handleAuth` function in `auth.ts`"}', - '{"type":"user","content":"Run `npm test` to verify"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0, 'Should handle backticks without crash'); - - // Find the session file in the temp HOME - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Backticks should be escaped in the output - assert.ok(content.includes('\\`'), 'Should escape backticks in session file'); - assert.ok(!content.includes('`handleAuth`'), 'Raw backticks should be escaped'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('session file contains tools used and files modified', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - const lines = [ - '{"type":"user","content":"Edit the config"}', - '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/config.ts"}}', - '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/utils.ts"}}', 
- '{"type":"tool_use","tool_name":"Write","tool_input":{"file_path":"/src/new-file.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Should contain files modified (Edit and Write, not Read) - assert.ok(content.includes('/src/config.ts'), 'Should list edited file'); - assert.ok(content.includes('/src/new-file.ts'), 'Should list written file'); - // Should contain tools used - assert.ok(content.includes('Edit'), 'Should list Edit tool'); - assert.ok(content.includes('Read'), 'Should list Read tool'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('omits Tools Used and Files Modified sections when empty', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Only user messages, no tool_use entries - const lines = [ - '{"type":"user","content":"Just chatting"}', - '{"type":"user","content":"No tools used at all"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 
'utf8'); - assert.ok(content.includes('### Tasks'), 'Should have Tasks section'); - assert.ok(!content.includes('### Files Modified'), 'Should NOT have Files Modified when empty'); - assert.ok(!content.includes('### Tools Used'), 'Should NOT have Tools Used when empty'); - assert.ok(content.includes('Total user messages: 2'), 'Should show correct message count'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('slices user messages to last 10', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // 15 user messages — should keep only last 10 - const lines = []; - for (let i = 1; i <= 15; i++) { - lines.push(`{"type":"user","content":"UserMsg_${i}"}`); - } - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Should NOT contain first 5 messages (sliced to last 10) - assert.ok(!content.includes('UserMsg_1\n'), 'Should not include first message (sliced)'); - assert.ok(!content.includes('UserMsg_5\n'), 'Should not include 5th message (sliced)'); - // Should contain messages 6-15 - assert.ok(content.includes('UserMsg_6'), 'Should include 6th message'); - assert.ok(content.includes('UserMsg_15'), 'Should include last message'); - assert.ok(content.includes('Total user messages: 15'), 'Should show total of 15'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('slices tools to first 20', async () => { - const testDir = 
createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // 25 unique tools — should keep only first 20 - const lines = ['{"type":"user","content":"Do stuff"}']; - for (let i = 1; i <= 25; i++) { - lines.push(`{"type":"tool_use","tool_name":"Tool${i}","tool_input":{}}`); - } - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Should contain Tool1 through Tool20 - assert.ok(content.includes('Tool1'), 'Should include Tool1'); - assert.ok(content.includes('Tool20'), 'Should include Tool20'); - // Should NOT contain Tool21-25 (sliced) - assert.ok(!content.includes('Tool21'), 'Should not include Tool21 (sliced to 20)'); - assert.ok(!content.includes('Tool25'), 'Should not include Tool25 (sliced to 20)'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('slices files modified to first 30', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // 35 unique files via Edit — should keep only first 30 - const lines = ['{"type":"user","content":"Edit all the things"}']; - for (let i = 1; i <= 35; i++) { - lines.push(`{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/file${i}.ts"}}`); - } - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - 
assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Should contain file1 through file30 - assert.ok(content.includes('/src/file1.ts'), 'Should include file1'); - assert.ok(content.includes('/src/file30.ts'), 'Should include file30'); - // Should NOT contain file31-35 (sliced) - assert.ok(!content.includes('/src/file31.ts'), 'Should not include file31 (sliced to 30)'); - assert.ok(!content.includes('/src/file35.ts'), 'Should not include file35 (sliced to 30)'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('parses Claude Code JSONL format (entry.message.content)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Claude Code v2.1.41+ JSONL format: user messages nested in entry.message - const lines = [ - '{"type":"user","message":{"role":"user","content":"Fix the build error"}}', - '{"type":"user","message":{"role":"user","content":[{"type":"text","text":"Also update tests"}]}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - assert.ok(content.includes('Fix the build error'), 'Should extract string content from message'); - assert.ok(content.includes('Also update tests'), 'Should extract array 
content from message'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('extracts tool_use from assistant message content blocks', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - - // Claude Code JSONL: tool uses nested in assistant message content array - const lines = [ - '{"type":"user","content":"Edit the config"}', - JSON.stringify({ - type: 'assistant', - message: { - role: 'assistant', - content: [ - { type: 'text', text: 'I will edit the file.' }, - { type: 'tool_use', name: 'Edit', input: { file_path: '/src/app.ts' } }, - { type: 'tool_use', name: 'Write', input: { file_path: '/src/new.ts' } }, - ] - } - }), - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - assert.ok(content.includes('Edit'), 'Should extract Edit tool from content blocks'); - assert.ok(content.includes('/src/app.ts'), 'Should extract file path from Edit block'); - assert.ok(content.includes('/src/new.ts'), 'Should extract file path from Write block'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - // hooks.json validation - console.log('\nhooks.json Validation:'); - - if (test('hooks.json is valid JSON', () => { - const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); - const content = fs.readFileSync(hooksPath, 'utf8'); - JSON.parse(content); // Will throw if invalid - })) passed++; else failed++; - - if (test('hooks.json has required event 
types', () => { - const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); - const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8')); - - assert.ok(hooks.hooks.PreToolUse, 'Should have PreToolUse hooks'); - assert.ok(hooks.hooks.PostToolUse, 'Should have PostToolUse hooks'); - assert.ok(hooks.hooks.SessionStart, 'Should have SessionStart hooks'); - assert.ok(hooks.hooks.SessionEnd, 'Should have SessionEnd hooks'); - assert.ok(hooks.hooks.Stop, 'Should have Stop hooks'); - assert.ok(hooks.hooks.PreCompact, 'Should have PreCompact hooks'); - })) passed++; else failed++; - - if (test('all hook commands use node or approved shell wrappers', () => { - const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); - const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8')); - - const checkHooks = (hookArray) => { - for (const entry of hookArray) { - for (const hook of entry.hooks) { - if (hook.type === 'command') { - const isNode = hook.command.startsWith('node'); - const isSkillScript = hook.command.includes('/skills/') && ( - /^(bash|sh)\s/.test(hook.command) || - hook.command.startsWith('${CLAUDE_PLUGIN_ROOT}/skills/') - ); - const isHookShellWrapper = /^(bash|sh)\s+["']?\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/hooks\/run-with-flags-shell\.sh/.test(hook.command); - const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js'); - assert.ok( - isNode || isSkillScript || isHookShellWrapper || isSessionStartFallback, - `Hook command should use node or approved shell wrapper: ${hook.command.substring(0, 100)}...` - ); - } - } - } - }; - - for (const [, hookArray] of Object.entries(hooks.hooks)) { - checkHooks(hookArray); - } - })) passed++; else failed++; - - if (test('script references use CLAUDE_PLUGIN_ROOT variable (except SessionStart fallback)', () => { - const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); - const hooks = JSON.parse(fs.readFileSync(hooksPath, 
'utf8')); - - const checkHooks = (hookArray) => { - for (const entry of hookArray) { - for (const hook of entry.hooks) { - if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) { - // Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command - const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js'); - const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartFallback; - assert.ok( - hasPluginRoot, - `Script paths should use CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...` - ); - } - } - } - }; - - for (const [, hookArray] of Object.entries(hooks.hooks)) { - checkHooks(hookArray); - } - })) passed++; else failed++; - - // plugin.json validation - console.log('\nplugin.json Validation:'); - - if (test('plugin.json does NOT have explicit hooks declaration', () => { - // Claude Code automatically loads hooks/hooks.json by convention. - // Explicitly declaring it in plugin.json causes a duplicate detection error. 
- // See: https://github.com/affaan-m/everything-claude-code/issues/103 - const pluginPath = path.join(__dirname, '..', '..', '.claude-plugin', 'plugin.json'); - const plugin = JSON.parse(fs.readFileSync(pluginPath, 'utf8')); - - assert.ok( - !plugin.hooks, - 'plugin.json should NOT have "hooks" field - Claude Code auto-loads hooks/hooks.json' - ); - })) passed++; else failed++; - - // ─── evaluate-session.js tests ─── - console.log('\nevaluate-session.js:'); - - if (await asyncTest('skips when no transcript_path in stdin', async () => { - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), '{}'); - assert.strictEqual(result.code, 0, 'Should exit 0 (non-blocking)'); - })) passed++; else failed++; - - if (await asyncTest('skips when transcript file does not exist', async () => { - const stdinJson = JSON.stringify({ transcript_path: '/tmp/nonexistent-transcript-12345.jsonl' }); - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 when file missing'); - })) passed++; else failed++; - - if (await asyncTest('skips short sessions (< 10 user messages)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'short.jsonl'); - // Only 3 user messages — below the default threshold of 10 - const lines = [ - '{"type":"user","content":"msg1"}', - '{"type":"user","content":"msg2"}', - '{"type":"user","content":"msg3"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('too short'), 'Should log "too short" message'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('evaluates long sessions (>= 10 user messages)', async () => { - const testDir = 
createTestDir(); - const transcriptPath = path.join(testDir, 'long.jsonl'); - // 12 user messages — above the default threshold - const lines = []; - for (let i = 0; i < 12; i++) { - lines.push(`{"type":"user","content":"message ${i}"}`); - } - fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('12 messages'), 'Should report message count'); - assert.ok(result.stderr.includes('evaluate'), 'Should signal evaluation'); - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('handles malformed stdin JSON (falls back to env var)', async () => { - const result = await runScript( - path.join(scriptsDir, 'evaluate-session.js'), - 'not json at all', - { CLAUDE_TRANSCRIPT_PATH: '' } - ); - // No valid transcript path from either source → exit 0 - assert.strictEqual(result.code, 0); - })) passed++; else failed++; - - // ─── suggest-compact.js tests ─── - console.log('\nsuggest-compact.js:'); - - if (await asyncTest('increments tool counter on each invocation', async () => { - const sessionId = `test-counter-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // First invocation → count = 1 - await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - let val = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(val, 1, 'First call should write count 1'); - - // Second invocation → count = 2 - await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - val = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(val, 2, 'Second call should write count 2'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* 
ignore */ } - } - })) passed++; else failed++; - - if (await asyncTest('suggests compact at exact threshold', async () => { - const sessionId = `test-threshold-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // Pre-seed counter at threshold - 1 so next call hits threshold - fs.writeFileSync(counterFile, '4'); + if ( + await asyncTest('runs without error', async () => { const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '5' + CLAUDE_SESSION_ID: 'test-session-' + Date.now() }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('5 tool calls reached'), 'Should suggest compact at threshold'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; + assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); + }) + ) + passed++; + else failed++; - if (await asyncTest('suggests at periodic intervals after threshold', async () => { - const sessionId = `test-periodic-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // Pre-seed at 29 so next call = 30 (threshold 5 + 25 = 30) - // (30 - 5) % 25 === 0 → should trigger periodic suggestion - fs.writeFileSync(counterFile, '29'); - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '5' - }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('30 tool calls'), 'Should suggest at threshold + 25n intervals'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; + if ( + await asyncTest('increments counter on each call', async () => { + const sessionId = 'test-counter-' + Date.now(); + + // Run multiple times + for (let i = 0; i < 3; i++) { + await runScript(path.join(scriptsDir, 
'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId + }); + } + + // Check counter file + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(count, 3, `Counter should be 3, got ${count}`); + + // Cleanup + fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('suggests compact at threshold', async () => { + const sessionId = 'test-threshold-' + Date.now(); + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + + // Set counter to threshold - 1 + fs.writeFileSync(counterFile, '49'); - if (await asyncTest('does not suggest below threshold', async () => { - const sessionId = `test-below-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - fs.writeFileSync(counterFile, '2'); const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '50' }); - assert.strictEqual(result.code, 0); - assert.ok(!result.stderr.includes('tool calls reached'), 'Should not suggest below threshold'); - assert.ok(!result.stderr.includes('checkpoint'), 'Should not suggest checkpoint'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; - if (await asyncTest('resets counter when file contains huge overflow number', async () => { - const sessionId = `test-overflow-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // Write a value that passes Number.isFinite() but exceeds 1000000 clamp - fs.writeFileSync(counterFile, '999999999999'); + assert.ok(result.stderr.includes('50 tool calls reached'), 'Should suggest compact at threshold'); + + // Cleanup + fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('does not suggest below 
threshold', async () => { + const sessionId = 'test-below-' + Date.now(); + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + + fs.writeFileSync(counterFile, '10'); + + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '50' + }); + + assert.ok(!result.stderr.includes('tool calls'), 'Should not suggest compact below threshold'); + + fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('suggests at regular intervals after threshold', async () => { + const sessionId = 'test-interval-' + Date.now(); + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + + // Set counter to 74 (next will be 75, which is >50 and 75%25==0) + fs.writeFileSync(counterFile, '74'); + + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '50' + }); + + assert.ok(result.stderr.includes('75 tool calls'), 'Should suggest at 25-call intervals after threshold'); + + fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles corrupted counter file', async () => { + const sessionId = 'test-corrupt-' + Date.now(); + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + + fs.writeFileSync(counterFile, 'not-a-number'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { CLAUDE_SESSION_ID: sessionId }); - assert.strictEqual(result.code, 0); - // Should reset to 1 because 999999999999 > 1000000 - const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(newCount, 1, 'Should reset to 1 on overflow value'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; - if (await asyncTest('resets counter when file contains negative number', async () => { - const 
sessionId = `test-negative-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - fs.writeFileSync(counterFile, '-42'); + assert.strictEqual(result.code, 0, 'Should handle corrupted counter gracefully'); + + // Counter should be reset to 1 + const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(newCount, 1, 'Should reset counter to 1 on corrupt data'); + + fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('uses default session ID when no env var', async () => { const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId + CLAUDE_SESSION_ID: '' // Empty, should use 'default' }); - assert.strictEqual(result.code, 0); - const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(newCount, 1, 'Should reset to 1 on negative value'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; - if (await asyncTest('handles COMPACT_THRESHOLD of zero (falls back to 50)', async () => { - const sessionId = `test-zero-thresh-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { + assert.strictEqual(result.code, 0, 'Should work with default session ID'); + + // Cleanup the default counter file + const counterFile = path.join(os.tmpdir(), 'claude-tool-count-default'); + if (fs.existsSync(counterFile)) fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('validates threshold bounds', async () => { + const sessionId = 'test-bounds-' + Date.now(); + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + + // Invalid threshold should fall back to 50 fs.writeFileSync(counterFile, '49'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { CLAUDE_SESSION_ID: sessionId, 
- COMPACT_THRESHOLD: '0' + COMPACT_THRESHOLD: '-5' // Invalid: negative }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('50 tool calls reached'), 'Zero threshold should fall back to 50'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; - if (await asyncTest('handles invalid COMPACT_THRESHOLD (falls back to 50)', async () => { - const sessionId = `test-invalid-thresh-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // Pre-seed at 49 so next call = 50 (the fallback default) - fs.writeFileSync(counterFile, '49'); - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: 'not-a-number' + assert.ok(result.stderr.includes('50 tool calls'), 'Should use default threshold (50) for invalid value'); + + fs.unlinkSync(counterFile); + }) + ) + passed++; + else failed++; + + // evaluate-session.js tests + console.log('\nevaluate-session.js:'); + + if ( + await asyncTest('runs without error when no transcript', async () => { + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js')); + assert.strictEqual(result.code, 0, `Exit code should be 0, got ${result.code}`); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('skips short sessions', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Create a short transcript (less than 10 user messages) + const transcript = Array(5).fill('{"type":"user","content":"test"}\n').join(''); + fs.writeFileSync(transcriptPath, transcript); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); + + assert.ok(result.stderr.includes('Session too short'), 'Should indicate session is too short'); + + 
cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('processes sessions with enough messages', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Create a longer transcript (more than 10 user messages) + const transcript = Array(15).fill('{"type":"user","content":"test"}\n').join(''); + fs.writeFileSync(transcriptPath, transcript); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); + + assert.ok(result.stderr.includes('15 messages'), 'Should report message count'); + + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + // evaluate-session.js: whitespace tolerance regression test + if ( + await asyncTest('counts user messages with whitespace in JSON (regression)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Create transcript with whitespace around colons (pretty-printed style) + const lines = []; + for (let i = 0; i < 15; i++) { + lines.push('{ "type" : "user", "content": "message ' + i + '" }'); + } + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); + + assert.ok(result.stderr.includes('15 messages'), 'Should count user messages with whitespace in JSON, got: ' + result.stderr.trim()); + + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + // session-end.js: content array with null elements regression test + if ( + await asyncTest('handles transcript with null content array elements (regression)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Create transcript with null elements in content array + 
const lines = [ + '{"type":"user","content":[null,{"text":"hello"},null,{"text":"world"}]}', + '{"type":"user","content":"simple string message"}', + '{"type":"user","content":[{"text":"normal"},{"text":"array"}]}', + '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/test.js"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + + // Should not crash (exit 0) + assert.strictEqual(result.code, 0, 'Should handle null content elements without crash'); + }) + ) + passed++; + else failed++; + + // post-edit-console-warn.js tests + console.log('\npost-edit-console-warn.js:'); + + if ( + await asyncTest('warns about console.log in JS files', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'test.js'); + fs.writeFileSync(testFile, 'const x = 1;\nconsole.log(x);\nreturn x;'); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.ok(result.stderr.includes('console.log'), 'Should warn about console.log'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('does not warn for non-JS files', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'test.md'); + fs.writeFileSync(testFile, 'Use console.log for debugging'); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.ok(!result.stderr.includes('console.log'), 'Should not warn for non-JS files'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('does not warn for clean JS files', async () => { + const testDir = createTestDir(); 
+ const testFile = path.join(testDir, 'clean.ts'); + fs.writeFileSync(testFile, 'const x = 1;\nreturn x;'); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.ok(!result.stderr.includes('WARNING'), 'Should not warn for clean files'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles missing file gracefully', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.ts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.strictEqual(result.code, 0, 'Should not crash on missing file'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('limits console.log output to 5 matches', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'many-logs.js'); + // Create a file with 8 console.log statements + const lines = []; + for (let i = 1; i <= 8; i++) { + lines.push(`console.log('debug ${i}');`); + } + fs.writeFileSync(testFile, lines.join('\n')); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.ok(result.stderr.includes('console.log'), 'Should warn about console.log'); + // Count how many "debug N" lines appear in stderr (the line-number output) + const debugLines = result.stderr.split('\n').filter(l => /^\d+:/.test(l.trim())); + assert.ok(debugLines.length <= 5, `Should show at most 5 matches, got ${debugLines.length}`); + // Should include debug 1 but not debug 8 (sliced) + assert.ok(result.stderr.includes('debug 1'), 'Should include first match'); + assert.ok(!result.stderr.includes('debug 8'), 'Should not include 8th match'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await 
asyncTest('ignores console.warn and console.error (only flags console.log)', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'other-console.ts'); + fs.writeFileSync(testFile, ['console.warn("this is a warning");', 'console.error("this is an error");', 'console.debug("this is debug");', 'console.info("this is info");'].join('\n')); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.ok(!result.stderr.includes('WARNING'), 'Should NOT warn about console.warn/error/debug/info'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('passes through original data on stdout', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/test.py' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + + assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); + }) + ) + passed++; + else failed++; + + // post-edit-format.js tests + console.log('\npost-edit-format.js:'); + + if ( + await asyncTest('runs without error on empty stdin', async () => { + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js')); + assert.strictEqual(result.code, 0, 'Should exit 0 on empty stdin'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('skips non-JS/TS files', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/test.py' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for non-JS files'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('passes through data for invalid JSON', async () => { + const result = await 
runScript(path.join(scriptsDir, 'post-edit-format.js'), 'not json'); + assert.strictEqual(result.code, 0, 'Should exit 0 for invalid JSON'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles null tool_input gracefully', async () => { + const stdinJson = JSON.stringify({ tool_input: null }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for null tool_input'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles missing file_path in tool_input', async () => { + const stdinJson = JSON.stringify({ tool_input: {} }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for missing file_path'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('exits 0 and passes data when prettier is unavailable', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/path/file.ts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 even when prettier fails'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through original data'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('finds formatter config in parent dirs without package.json', async () => { + const testDir = createTestDir(); + const rootDir = path.join(testDir, 'config-only-repo'); + const nestedDir = path.join(rootDir, 'src', 'nested'); + const filePath = path.join(nestedDir, 'component.ts'); + const binDir = path.join(testDir, 'bin'); + const logFile = path.join(testDir, 'formatter.log'); + + fs.mkdirSync(nestedDir, { recursive: true }); + 
fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); + fs.writeFileSync(filePath, 'export const value = 1;\n'); + createCommandShim(binDir, 'npx', logFile); + + const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, withPrependedPath(binDir)); + + assert.strictEqual(result.code, 0, 'Should exit 0 for config-only repo'); + const logEntries = readCommandLog(logFile); + assert.strictEqual(logEntries.length, 1, 'Should invoke formatter once'); + assert.strictEqual(fs.realpathSync(logEntries[0].cwd), fs.realpathSync(rootDir), 'Should run formatter from config root'); + assert.deepStrictEqual(logEntries[0].args, ['prettier', '--write', filePath], 'Should use the formatter on the nested file'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('respects CLAUDE_PACKAGE_MANAGER for formatter fallback runner', async () => { + const testDir = createTestDir(); + const rootDir = path.join(testDir, 'pnpm-repo'); + const filePath = path.join(rootDir, 'index.ts'); + const binDir = path.join(testDir, 'bin'); + const logFile = path.join(testDir, 'pnpm.log'); + + fs.mkdirSync(rootDir, { recursive: true }); + fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); + fs.writeFileSync(filePath, 'export const value = 1;\n'); + createCommandShim(binDir, 'pnpm', logFile); + + const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, withPrependedPath(binDir, { CLAUDE_PACKAGE_MANAGER: 'pnpm' })); + + assert.strictEqual(result.code, 0, 'Should exit 0 when pnpm fallback is used'); + const logEntries = readCommandLog(logFile); + assert.strictEqual(logEntries.length, 1, 'Should invoke pnpm fallback runner once'); + assert.strictEqual(logEntries[0].bin, 'pnpm', 'Should use pnpm runner'); + assert.deepStrictEqual(logEntries[0].args, 
['dlx', 'prettier', '--write', filePath], 'Should use pnpm dlx for fallback formatter execution'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('respects project package-manager config for formatter fallback runner', async () => { + const testDir = createTestDir(); + const rootDir = path.join(testDir, 'bun-repo'); + const filePath = path.join(rootDir, 'index.ts'); + const binDir = path.join(testDir, 'bin'); + const logFile = path.join(testDir, 'bun.log'); + + fs.mkdirSync(path.join(rootDir, '.claude'), { recursive: true }); + fs.writeFileSync(path.join(rootDir, '.claude', 'package-manager.json'), JSON.stringify({ packageManager: 'bun' })); + fs.writeFileSync(path.join(rootDir, '.prettierrc'), '{}'); + fs.writeFileSync(filePath, 'export const value = 1;\n'); + createCommandShim(binDir, 'bunx', logFile); + + const stdinJson = JSON.stringify({ tool_input: { file_path: filePath } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson, withPrependedPath(binDir)); + + assert.strictEqual(result.code, 0, 'Should exit 0 when project config selects bun'); + const logEntries = readCommandLog(logFile); + assert.strictEqual(logEntries.length, 1, 'Should invoke bunx fallback runner once'); + assert.strictEqual(logEntries[0].bin, 'bunx', 'Should use bunx runner'); + assert.deepStrictEqual(logEntries[0].args, ['prettier', '--write', filePath], 'Should use bunx for fallback formatter execution'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + console.log('\npre-bash-dev-server-block.js:'); + + if ( + await asyncTest('allows non-dev commands whose heredoc text mentions npm run dev', async () => { + const command = ['gh pr create --title "fix: docs" --body "$(cat <<\'EOF\'', '## Test plan', '- run npm run dev to verify the site starts', 'EOF', ')"'].join('\n'); + const stdinJson = JSON.stringify({ tool_input: { command } }); + const result = await runScript(path.join(scriptsDir, 
'pre-bash-dev-server-block.js'), stdinJson); + + assert.strictEqual(result.code, 0, 'Non-dev commands should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Should preserve original input'); + assert.ok(!result.stderr.includes('BLOCKED'), 'Should not emit a block message'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('blocks bare npm run dev outside tmux on non-Windows platforms', async () => { + const stdinJson = JSON.stringify({ tool_input: { command: 'npm run dev' } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + if (process.platform === 'win32') { + assert.strictEqual(result.code, 0, 'Windows path should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); + } else { + assert.strictEqual(result.code, 2, 'Unix path should block bare dev servers'); + assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('blocks env-wrapped npm run dev outside tmux on non-Windows platforms', async () => { + const stdinJson = JSON.stringify({ tool_input: { command: '/usr/bin/env npm run dev' } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + if (process.platform === 'win32') { + assert.strictEqual(result.code, 0, 'Windows path should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); + } else { + assert.strictEqual(result.code, 2, 'Unix path should block wrapped dev servers'); + assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('blocks nohup-wrapped npm run dev outside tmux on non-Windows platforms', async () => { + const stdinJson = JSON.stringify({ tool_input: { command: 'nohup npm run dev 
>/tmp/dev.log 2>&1 &' } }); + const result = await runScript(path.join(scriptsDir, 'pre-bash-dev-server-block.js'), stdinJson); + + if (process.platform === 'win32') { + assert.strictEqual(result.code, 0, 'Windows path should pass through'); + assert.strictEqual(result.stdout, stdinJson, 'Windows path should preserve original input'); + } else { + assert.strictEqual(result.code, 2, 'Unix path should block wrapped dev servers'); + assert.ok(result.stderr.includes('BLOCKED'), 'Should explain why the command was blocked'); + } + }) + ) + passed++; + else failed++; + + // post-edit-typecheck.js tests + console.log('\npost-edit-typecheck.js:'); + + if ( + await asyncTest('runs without error on empty stdin', async () => { + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js')); + assert.strictEqual(result.code, 0, 'Should exit 0 on empty stdin'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('skips non-TypeScript files', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/test.js' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for non-TS files'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles nonexistent TS file gracefully', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.ts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for missing file'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles TS file with no tsconfig gracefully', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'test.ts'); + fs.writeFileSync(testFile, 'const x: number = 1;'); + + const stdinJson = JSON.stringify({ 
tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 when no tsconfig found'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('stops tsconfig walk at max depth (20)', async () => { + // Create a deeply nested directory (>20 levels) with no tsconfig anywhere + const testDir = createTestDir(); + let deepDir = testDir; + for (let i = 0; i < 25; i++) { + deepDir = path.join(deepDir, `d${i}`); + } + fs.mkdirSync(deepDir, { recursive: true }); + const testFile = path.join(deepDir, 'deep.ts'); + fs.writeFileSync(testFile, 'const x: number = 1;'); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const startTime = Date.now(); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + const elapsed = Date.now() - startTime; + + assert.strictEqual(result.code, 0, 'Should not hang at depth limit'); + assert.ok(elapsed < 5000, `Should complete quickly at depth limit, took ${elapsed}ms`); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('passes through stdin data on stdout (post-edit-typecheck)', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'test.ts'); + fs.writeFileSync(testFile, 'const x: number = 1;'); + + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data on stdout'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + // session-end.js extractSessionSummary tests + console.log('\nsession-end.js (extractSessionSummary):'); + + if ( + await asyncTest('extracts user messages from transcript', async () 
=> { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const lines = ['{"type":"user","content":"Fix the login bug"}', '{"type":"assistant","content":"I will fix it"}', '{"type":"user","content":"Also add tests"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles transcript with array content fields', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const lines = ['{"type":"user","content":[{"text":"Part 1"},{"text":"Part 2"}]}', '{"type":"user","content":"Simple message"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle array content without crash'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('extracts tool names and file paths from transcript', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const lines = [ + '{"type":"user","content":"Edit the file"}', + '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/main.ts"}}', + '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/utils.ts"}}', + '{"type":"tool_use","tool_name":"Write","tool_input":{"file_path":"/src/new.ts"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 
'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0); + // Session file should contain summary with tools used + assert.ok(result.stderr.includes('Created session file') || result.stderr.includes('Updated session file'), 'Should create/update session file'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles transcript with malformed JSON lines', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const lines = ['{"type":"user","content":"Valid message"}', 'NOT VALID JSON', '{"broken json', '{"type":"user","content":"Another valid"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should skip malformed lines gracefully'); + assert.ok(result.stderr.includes('unparseable') || result.stderr.includes('Skipped'), `Should report parse errors, got: ${result.stderr.substring(0, 200)}`); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles empty transcript (no user messages)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Only tool_use entries, no user messages + const lines = ['{"type":"tool_use","tool_name":"Read","tool_input":{}}', '{"type":"assistant","content":"done"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle transcript with no user messages'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('truncates long user messages to 200 chars', async () => { 
+ const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const longMsg = 'x'.repeat(500); + const lines = [`{"type":"user","content":"${longMsg}"}`]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle and truncate long messages'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('uses CLAUDE_TRANSCRIPT_PATH env var as fallback', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const lines = ['{"type":"user","content":"Fallback test message"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + // Send invalid JSON to stdin so it falls back to env var + const result = await runScript(path.join(scriptsDir, 'session-end.js'), 'not json', { + CLAUDE_TRANSCRIPT_PATH: transcriptPath + }); + assert.strictEqual(result.code, 0, 'Should use env var fallback'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('escapes backticks in user messages in session file', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // User messages with backticks that could break markdown + const lines = ['{"type":"user","content":"Fix the `handleAuth` function in `auth.ts`"}', '{"type":"user","content":"Run `npm test` to verify"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0, 'Should handle backticks without crash'); + + // Find the session file in the temp HOME + const claudeDir = 
path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Backticks should be escaped in the output + assert.ok(content.includes('\\`'), 'Should escape backticks in session file'); + assert.ok(!content.includes('`handleAuth`'), 'Raw backticks should be escaped'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('session file contains tools used and files modified', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + const lines = [ + '{"type":"user","content":"Edit the config"}', + '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/config.ts"}}', + '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/utils.ts"}}', + '{"type":"tool_use","tool_name":"Write","tool_input":{"file_path":"/src/new-file.ts"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir }); assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('50 tool calls reached'), 'Should use default threshold of 50'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Should contain files modified (Edit and Write, not Read) + assert.ok(content.includes('/src/config.ts'), 'Should list edited file'); + 
assert.ok(content.includes('/src/new-file.ts'), 'Should list written file'); + // Should contain tools used + assert.ok(content.includes('Edit'), 'Should list Edit tool'); + assert.ok(content.includes('Read'), 'Should list Read tool'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('omits Tools Used and Files Modified sections when empty', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Only user messages, no tool_use entries + const lines = ['{"type":"user","content":"Just chatting"}', '{"type":"user","content":"No tools used at all"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + assert.ok(content.includes('### Tasks'), 'Should have Tasks section'); + assert.ok(!content.includes('### Files Modified'), 'Should NOT have Files Modified when empty'); + assert.ok(!content.includes('### Tools Used'), 'Should NOT have Tools Used when empty'); + assert.ok(content.includes('Total user messages: 2'), 'Should show correct message count'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('slices user messages to last 10', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // 15 user messages — should keep only last 10 + const lines = []; + for (let i = 1; i <= 15; i++) { + lines.push(`{"type":"user","content":"UserMsg_${i}"}`); + } + 
fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Should NOT contain first 5 messages (sliced to last 10) + assert.ok(!content.includes('UserMsg_1\n'), 'Should not include first message (sliced)'); + assert.ok(!content.includes('UserMsg_5\n'), 'Should not include 5th message (sliced)'); + // Should contain messages 6-15 + assert.ok(content.includes('UserMsg_6'), 'Should include 6th message'); + assert.ok(content.includes('UserMsg_15'), 'Should include last message'); + assert.ok(content.includes('Total user messages: 15'), 'Should show total of 15'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('slices tools to first 20', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // 25 unique tools — should keep only first 20 + const lines = ['{"type":"user","content":"Do stuff"}']; + for (let i = 1; i <= 25; i++) { + lines.push(`{"type":"tool_use","tool_name":"Tool${i}","tool_input":{}}`); + } + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) 
{ + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Should contain Tool1 through Tool20 + assert.ok(content.includes('Tool1'), 'Should include Tool1'); + assert.ok(content.includes('Tool20'), 'Should include Tool20'); + // Should NOT contain Tool21-25 (sliced) + assert.ok(!content.includes('Tool21'), 'Should not include Tool21 (sliced to 20)'); + assert.ok(!content.includes('Tool25'), 'Should not include Tool25 (sliced to 20)'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('slices files modified to first 30', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // 35 unique files via Edit — should keep only first 30 + const lines = ['{"type":"user","content":"Edit all the things"}']; + for (let i = 1; i <= 35; i++) { + lines.push(`{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/file${i}.ts"}}`); + } + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Should contain file1 through file30 + assert.ok(content.includes('/src/file1.ts'), 'Should include file1'); + assert.ok(content.includes('/src/file30.ts'), 'Should include file30'); + // Should NOT contain file31-35 (sliced) + assert.ok(!content.includes('/src/file31.ts'), 'Should not include file31 (sliced to 30)'); + assert.ok(!content.includes('/src/file35.ts'), 'Should not include file35 (sliced to 30)'); + } + } + cleanupTestDir(testDir); + }) + ) + 
passed++; + else failed++; + + if ( + await asyncTest('parses Claude Code JSONL format (entry.message.content)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Claude Code v2.1.41+ JSONL format: user messages nested in entry.message + const lines = ['{"type":"user","message":{"role":"user","content":"Fix the build error"}}', '{"type":"user","message":{"role":"user","content":[{"type":"text","text":"Also update tests"}]}}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + assert.ok(content.includes('Fix the build error'), 'Should extract string content from message'); + assert.ok(content.includes('Also update tests'), 'Should extract array content from message'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('extracts tool_use from assistant message content blocks', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + + // Claude Code JSONL: tool uses nested in assistant message content array + const lines = [ + '{"type":"user","content":"Edit the config"}', + JSON.stringify({ + type: 'assistant', + message: { + role: 'assistant', + content: [ + { type: 'text', text: 'I will edit the file.' 
}, + { type: 'tool_use', name: 'Edit', input: { file_path: '/src/app.ts' } }, + { type: 'tool_use', name: 'Write', input: { file_path: '/src/new.ts' } } + ] + } + }) + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + assert.ok(content.includes('Edit'), 'Should extract Edit tool from content blocks'); + assert.ok(content.includes('/src/app.ts'), 'Should extract file path from Edit block'); + assert.ok(content.includes('/src/new.ts'), 'Should extract file path from Write block'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + // hooks.json validation + console.log('\nhooks.json Validation:'); + + if ( + test('hooks.json is valid JSON', () => { + const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); + const content = fs.readFileSync(hooksPath, 'utf8'); + JSON.parse(content); // Will throw if invalid + }) + ) + passed++; + else failed++; + + if ( + test('hooks.json has required event types', () => { + const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); + const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8')); + + assert.ok(hooks.hooks.PreToolUse, 'Should have PreToolUse hooks'); + assert.ok(hooks.hooks.PostToolUse, 'Should have PostToolUse hooks'); + assert.ok(hooks.hooks.SessionStart, 'Should have SessionStart hooks'); + assert.ok(hooks.hooks.SessionEnd, 'Should have SessionEnd hooks'); + assert.ok(hooks.hooks.Stop, 'Should have Stop hooks'); + assert.ok(hooks.hooks.PreCompact, 'Should have 
PreCompact hooks'); + }) + ) + passed++; + else failed++; + + if ( + test('all hook commands use node or approved shell wrappers', () => { + const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); + const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8')); + + const checkHooks = hookArray => { + for (const entry of hookArray) { + for (const hook of entry.hooks) { + if (hook.type === 'command') { + const isNode = hook.command.startsWith('node'); + const isSkillScript = hook.command.includes('/skills/') && (/^(bash|sh)\s/.test(hook.command) || hook.command.startsWith('${CLAUDE_PLUGIN_ROOT}/skills/')); + const isHookShellWrapper = /^(bash|sh)\s+["']?\$\{CLAUDE_PLUGIN_ROOT\}\/scripts\/hooks\/run-with-flags-shell\.sh/.test(hook.command); + const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js'); + assert.ok(isNode || isSkillScript || isHookShellWrapper || isSessionStartFallback, `Hook command should use node or approved shell wrapper: ${hook.command.substring(0, 100)}...`); + } + } + } + }; + + for (const [, hookArray] of Object.entries(hooks.hooks)) { + checkHooks(hookArray); + } + }) + ) + passed++; + else failed++; + + if ( + test('script references use CLAUDE_PLUGIN_ROOT variable (except SessionStart fallback)', () => { + const hooksPath = path.join(__dirname, '..', '..', 'hooks', 'hooks.json'); + const hooks = JSON.parse(fs.readFileSync(hooksPath, 'utf8')); + + const checkHooks = hookArray => { + for (const entry of hookArray) { + for (const hook of entry.hooks) { + if (hook.type === 'command' && hook.command.includes('scripts/hooks/')) { + // Check for the literal string "${CLAUDE_PLUGIN_ROOT}" in the command + const isSessionStartFallback = hook.command.startsWith('bash -lc') && hook.command.includes('run-with-flags.js'); + const hasPluginRoot = hook.command.includes('${CLAUDE_PLUGIN_ROOT}') || isSessionStartFallback; + assert.ok(hasPluginRoot, `Script paths should use 
CLAUDE_PLUGIN_ROOT: ${hook.command.substring(0, 80)}...`); + } + } + } + }; + + for (const [, hookArray] of Object.entries(hooks.hooks)) { + checkHooks(hookArray); + } + }) + ) + passed++; + else failed++; + + // plugin.json validation + console.log('\nplugin.json Validation:'); + + if ( + test('plugin.json does NOT have explicit hooks declaration', () => { + // Claude Code automatically loads hooks/hooks.json by convention. + // Explicitly declaring it in plugin.json causes a duplicate detection error. + // See: https://github.com/affaan-m/everything-claude-code/issues/103 + const pluginPath = path.join(__dirname, '..', '..', '.claude-plugin', 'plugin.json'); + const plugin = JSON.parse(fs.readFileSync(pluginPath, 'utf8')); + + assert.ok(!plugin.hooks, 'plugin.json should NOT have "hooks" field - Claude Code auto-loads hooks/hooks.json'); + }) + ) + passed++; + else failed++; + + // ─── evaluate-session.js tests ─── + console.log('\nevaluate-session.js:'); + + if ( + await asyncTest('skips when no transcript_path in stdin', async () => { + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), '{}'); + assert.strictEqual(result.code, 0, 'Should exit 0 (non-blocking)'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('skips when transcript file does not exist', async () => { + const stdinJson = JSON.stringify({ transcript_path: '/tmp/nonexistent-transcript-12345.jsonl' }); + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 when file missing'); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('skips short sessions (< 10 user messages)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'short.jsonl'); + // Only 3 user messages — below the default threshold of 10 + const lines = ['{"type":"user","content":"msg1"}', '{"type":"user","content":"msg2"}', 
'{"type":"user","content":"msg3"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('too short'), 'Should log "too short" message'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('evaluates long sessions (>= 10 user messages)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'long.jsonl'); + // 12 user messages — above the default threshold + const lines = []; + for (let i = 0; i < 12; i++) { + lines.push(`{"type":"user","content":"message ${i}"}`); + } + fs.writeFileSync(transcriptPath, lines.join('\n')); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('12 messages'), 'Should report message count'); + assert.ok(result.stderr.includes('evaluate'), 'Should signal evaluation'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles malformed stdin JSON (falls back to env var)', async () => { + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), 'not json at all', { CLAUDE_TRANSCRIPT_PATH: '' }); + // No valid transcript path from either source → exit 0 + assert.strictEqual(result.code, 0); + }) + ) + passed++; + else failed++; + + // ─── suggest-compact.js tests ─── + console.log('\nsuggest-compact.js:'); + + if ( + await asyncTest('increments tool counter on each invocation', async () => { + const sessionId = `test-counter-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // First invocation → count = 1 + await 
runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId + }); + let val = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(val, 1, 'First call should write count 1'); + + // Second invocation → count = 2 + await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId + }); + val = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(val, 2, 'Second call should write count 2'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('suggests compact at exact threshold', async () => { + const sessionId = `test-threshold-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // Pre-seed counter at threshold - 1 so next call hits threshold + fs.writeFileSync(counterFile, '4'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '5' + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('5 tool calls reached'), 'Should suggest compact at threshold'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('suggests at periodic intervals after threshold', async () => { + const sessionId = `test-periodic-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // Pre-seed at 29 so next call = 30 (threshold 5 + 25 = 30) + // (30 - 5) % 25 === 0 → should trigger periodic suggestion + fs.writeFileSync(counterFile, '29'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '5' + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('30 tool 
calls'), 'Should suggest at threshold + 25n intervals'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('does not suggest below threshold', async () => { + const sessionId = `test-below-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + fs.writeFileSync(counterFile, '2'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '50' + }); + assert.strictEqual(result.code, 0); + assert.ok(!result.stderr.includes('tool calls reached'), 'Should not suggest below threshold'); + assert.ok(!result.stderr.includes('checkpoint'), 'Should not suggest checkpoint'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('resets counter when file contains huge overflow number', async () => { + const sessionId = `test-overflow-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // Write a value that passes Number.isFinite() but exceeds 1000000 clamp + fs.writeFileSync(counterFile, '999999999999'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId + }); + assert.strictEqual(result.code, 0); + // Should reset to 1 because 999999999999 > 1000000 + const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(newCount, 1, 'Should reset to 1 on overflow value'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('resets counter when file contains negative number', async () => { + const sessionId = `test-negative-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), 
`claude-tool-count-${sessionId}`); + try { + fs.writeFileSync(counterFile, '-42'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId + }); + assert.strictEqual(result.code, 0); + const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(newCount, 1, 'Should reset to 1 on negative value'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles COMPACT_THRESHOLD of zero (falls back to 50)', async () => { + const sessionId = `test-zero-thresh-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + fs.writeFileSync(counterFile, '49'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '0' + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('50 tool calls reached'), 'Zero threshold should fall back to 50'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('handles invalid COMPACT_THRESHOLD (falls back to 50)', async () => { + const sessionId = `test-invalid-thresh-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // Pre-seed at 49 so next call = 50 (the fallback default) + fs.writeFileSync(counterFile, '49'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: 'not-a-number' + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('50 tool calls reached'), 'Should use default threshold of 50'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; // ─── Round 20 bug fix 
tests ─── console.log('\ncheck-console-log.js (exact pass-through):'); - if (await asyncTest('stdout is exact byte match of stdin (no trailing newline)', async () => { - // Before the fix, console.log(data) added a trailing \n. - // process.stdout.write(data) should preserve exact bytes. - const stdinData = '{"tool":"test","value":42}'; - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); - assert.strictEqual(result.code, 0); - // stdout should be exactly the input — no extra newline appended - assert.strictEqual(result.stdout, stdinData, 'Should not append extra newline to output'); - })) passed++; else failed++; + if ( + await asyncTest('stdout is exact byte match of stdin (no trailing newline)', async () => { + // Before the fix, console.log(data) added a trailing \n. + // process.stdout.write(data) should preserve exact bytes. + const stdinData = '{"tool":"test","value":42}'; + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); + assert.strictEqual(result.code, 0); + // stdout should be exactly the input — no extra newline appended + assert.strictEqual(result.stdout, stdinData, 'Should not append extra newline to output'); + }) + ) + passed++; + else failed++; - if (await asyncTest('preserves empty string stdin without adding newline', async () => { - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), ''); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, '', 'Empty input should produce empty output'); - })) passed++; else failed++; + if ( + await asyncTest('preserves empty string stdin without adding newline', async () => { + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), ''); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, '', 'Empty input should produce empty output'); + }) + ) + passed++; + else failed++; - if (await asyncTest('preserves data with embedded newlines exactly', 
async () => { - const stdinData = 'line1\nline2\nline3'; - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinData, 'Should preserve embedded newlines without adding extra'); - })) passed++; else failed++; + if ( + await asyncTest('preserves data with embedded newlines exactly', async () => { + const stdinData = 'line1\nline2\nline3'; + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinData, 'Should preserve embedded newlines without adding extra'); + }) + ) + passed++; + else failed++; console.log('\npost-edit-format.js (security & extension tests):'); - if (await asyncTest('source code does not pass shell option to execFileSync (security)', async () => { - const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); - // Strip comments to avoid matching "shell: true" in comment text - const codeOnly = formatSource.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); - assert.ok( - !/execFileSync\([^)]*shell\s*:/.test(codeOnly), - 'post-edit-format.js should not pass shell option to execFileSync' - ); - assert.ok( - codeOnly.includes("process.platform === 'win32' && cmd.bin.endsWith('.cmd')"), - 'Windows shell execution must stay gated to .cmd shims' - ); - assert.ok(formatSource.includes('npx.cmd'), 'Should use npx.cmd for Windows cross-platform safety'); - })) passed++; else failed++; + if ( + await asyncTest('source code does not pass shell option to execFileSync (security)', async () => { + const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); + // Strip comments to avoid matching "shell: true" in comment text + const codeOnly = formatSource.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); + assert.ok(!codeOnly.includes('shell:'), 
'post-edit-format.js should not pass shell option in code'); + // npx.cmd handling moved to shared resolve-formatter.js — verify it uses the shared module + const resolverSource = fs.readFileSync(path.join(scriptsDir, '..', 'lib', 'resolve-formatter.js'), 'utf8'); + assert.ok(resolverSource.includes('npx.cmd'), 'resolve-formatter.js should use npx.cmd for Windows cross-platform safety'); + assert.ok(formatSource.includes('resolveFormatterBin'), 'post-edit-format.js should use shared resolveFormatterBin'); + }) + ) + passed++; + else failed++; - if (await asyncTest('matches .tsx extension for formatting', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/component.tsx' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0); - // Should attempt to format (will fail silently since file doesn't exist, but should pass through) - assert.ok(result.stdout.includes('component.tsx'), 'Should pass through data for .tsx files'); - })) passed++; else failed++; + if ( + await asyncTest('matches .tsx extension for formatting', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/component.tsx' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0); + // Should attempt to format (will fail silently since file doesn't exist, but should pass through) + assert.ok(result.stdout.includes('component.tsx'), 'Should pass through data for .tsx files'); + }) + ) + passed++; + else failed++; - if (await asyncTest('matches .jsx extension for formatting', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/component.jsx' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('component.jsx'), 'Should pass through 
data for .jsx files'); - })) passed++; else failed++; + if ( + await asyncTest('matches .jsx extension for formatting', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/component.jsx' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('component.jsx'), 'Should pass through data for .jsx files'); + }) + ) + passed++; + else failed++; console.log('\npost-edit-typecheck.js (security & extension tests):'); - if (await asyncTest('source code does not pass shell option to execFileSync (security)', async () => { - const typecheckSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-typecheck.js'), 'utf8'); - // Strip comments to avoid matching "shell: true" in comment text - const codeOnly = typecheckSource.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); - assert.ok(!codeOnly.includes('shell:'), 'post-edit-typecheck.js should not pass shell option in code'); - assert.ok(typecheckSource.includes('npx.cmd'), 'Should use npx.cmd for Windows cross-platform safety'); - })) passed++; else failed++; + if ( + await asyncTest('source code does not pass shell option to execFileSync (security)', async () => { + const typecheckSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-typecheck.js'), 'utf8'); + // Strip comments to avoid matching "shell: true" in comment text + const codeOnly = typecheckSource.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); + assert.ok(!codeOnly.includes('shell:'), 'post-edit-typecheck.js should not pass shell option in code'); + assert.ok(typecheckSource.includes('npx.cmd'), 'Should use npx.cmd for Windows cross-platform safety'); + }) + ) + passed++; + else failed++; console.log('\nShell wrapper portability:'); - if (test('run-with-flags-shell resolves plugin root when CLAUDE_PLUGIN_ROOT is unset', () => { - const wrapperSource = fs.readFileSync(path.join(scriptsDir, 
'run-with-flags-shell.sh'), 'utf8'); - assert.ok( - wrapperSource.includes('PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-'), - 'Shell wrapper should derive PLUGIN_ROOT from its own script path' - ); - })) passed++; else failed++; + if ( + test('run-with-flags-shell resolves plugin root when CLAUDE_PLUGIN_ROOT is unset', () => { + const wrapperSource = fs.readFileSync(path.join(scriptsDir, 'run-with-flags-shell.sh'), 'utf8'); + assert.ok(wrapperSource.includes('PLUGIN_ROOT="${CLAUDE_PLUGIN_ROOT:-'), 'Shell wrapper should derive PLUGIN_ROOT from its own script path'); + }) + ) + passed++; + else failed++; - if (test('continuous-learning shell scripts use resolved Python command instead of hardcoded python3 invocations', () => { - const observeSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh'), 'utf8'); - const startObserverSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'agents', 'start-observer.sh'), 'utf8'); - const detectProjectSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'scripts', 'detect-project.sh'), 'utf8'); + if ( + test('continuous-learning shell scripts use resolved Python command instead of hardcoded python3 invocations', () => { + const observeSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh'), 'utf8'); + const startObserverSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'agents', 'start-observer.sh'), 'utf8'); + const detectProjectSource = fs.readFileSync(path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'scripts', 'detect-project.sh'), 'utf8'); - assert.ok(!/python3\s+-c/.test(observeSource), 'observe.sh should not invoke python3 directly'); - assert.ok(!/python3\s+-c/.test(startObserverSource), 'start-observer.sh should not invoke python3 directly'); - 
assert.ok(observeSource.includes('PYTHON_CMD'), 'observe.sh should resolve Python dynamically'); - assert.ok(startObserverSource.includes('CLV2_PYTHON_CMD'), 'start-observer.sh should reuse detected Python command'); - assert.ok(detectProjectSource.includes('_clv2_resolve_python_cmd'), 'detect-project.sh should provide shared Python resolution'); - })) passed++; else failed++; + assert.ok(!/python3\s+-c/.test(observeSource), 'observe.sh should not invoke python3 directly'); + assert.ok(!/python3\s+-c/.test(startObserverSource), 'start-observer.sh should not invoke python3 directly'); + assert.ok(observeSource.includes('PYTHON_CMD'), 'observe.sh should resolve Python dynamically'); + assert.ok(startObserverSource.includes('CLV2_PYTHON_CMD'), 'start-observer.sh should reuse detected Python command'); + assert.ok(detectProjectSource.includes('_clv2_resolve_python_cmd'), 'detect-project.sh should provide shared Python resolution'); + }) + ) + passed++; + else failed++; - if (await asyncTest('detect-project exports the resolved Python command for downstream scripts', async () => { - const detectProjectPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'scripts', 'detect-project.sh'); - const shellCommand = [ - `source "${detectProjectPath}" >/dev/null 2>&1`, - 'printf "%s\\n" "${CLV2_PYTHON_CMD:-}"' - ].join('; '); + if ( + await asyncTest('detect-project exports the resolved Python command for downstream scripts', async () => { + const detectProjectPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'scripts', 'detect-project.sh'); + const shellCommand = [`source "${detectProjectPath}" >/dev/null 2>&1`, 'printf "%s\\n" "${CLV2_PYTHON_CMD:-}"'].join('; '); - const shell = process.platform === 'win32' ? 'bash' : 'bash'; - const proc = spawn(shell, ['-lc', shellCommand], { - env: process.env, - stdio: ['ignore', 'pipe', 'pipe'] - }); + const shell = process.platform === 'win32' ? 
'bash' : 'bash'; + const proc = spawn(shell, ['-lc', shellCommand], { + env: process.env, + stdio: ['ignore', 'pipe', 'pipe'] + }); - let stdout = ''; - let stderr = ''; - proc.stdout.on('data', data => stdout += data); - proc.stderr.on('data', data => stderr += data); + let stdout = ''; + let stderr = ''; + proc.stdout.on('data', data => (stdout += data)); + proc.stderr.on('data', data => (stderr += data)); - const code = await new Promise((resolve, reject) => { - proc.on('close', resolve); - proc.on('error', reject); - }); + const code = await new Promise((resolve, reject) => { + proc.on('close', resolve); + proc.on('error', reject); + }); - assert.strictEqual(code, 0, `detect-project.sh should source cleanly, stderr: ${stderr}`); - assert.ok(stdout.trim().length > 0, 'CLV2_PYTHON_CMD should export a resolved interpreter path'); - })) passed++; else failed++; + assert.strictEqual(code, 0, `detect-project.sh should source cleanly, stderr: ${stderr}`); + assert.ok(stdout.trim().length > 0, 'CLV2_PYTHON_CMD should export a resolved interpreter path'); + }) + ) + passed++; + else failed++; - if (await asyncTest('matches .tsx extension for type checking', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'component.tsx'); - fs.writeFileSync(testFile, 'const x: number = 1;'); + if ( + await asyncTest('matches .tsx extension for type checking', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'component.tsx'); + fs.writeFileSync(testFile, 'const x: number = 1;'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through data for .tsx files'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + 
const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data for .tsx files'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // ─── Round 23: Bug fixes & high-priority gap coverage ─── @@ -1798,1879 +2093,2242 @@ async function runTests() { const wrapperScript = path.join(testDir, 'eval-wrapper.js'); let src = fs.readFileSync(path.join(scriptsDir, 'evaluate-session.js'), 'utf8'); // Patch require to use absolute path (the temp dir doesn't have ../lib/utils) - src = src.replace( - /require\('\.\.\/lib\/utils'\)/, - `require(${JSON.stringify(realUtilsPath)})` - ); + src = src.replace(/require\('\.\.\/lib\/utils'\)/, `require(${JSON.stringify(realUtilsPath)})`); // Patch config file path to point to our test config - src = src.replace( - /const configFile = path\.join\(scriptDir.*?config\.json'\);/, - `const configFile = ${JSON.stringify(configPath)};` - ); + src = src.replace(/const configFile = path\.join\(scriptDir.*?config\.json'\);/, `const configFile = ${JSON.stringify(configPath)};`); fs.writeFileSync(wrapperScript, src); return wrapperScript; } console.log('\nRound 23: evaluate-session.js (config & nullish coalescing):'); - if (await asyncTest('respects min_session_length=0 from config (nullish coalescing)', async () => { - // This tests the ?? fix: min_session_length=0 should mean "evaluate ALL sessions" - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'short.jsonl'); - // Only 2 user messages — normally below the default threshold of 10 - const lines = [ - '{"type":"user","content":"msg1"}', - '{"type":"user","content":"msg2"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + if ( + await asyncTest('respects min_session_length=0 from config (nullish coalescing)', async () => { + // This tests the ?? 
fix: min_session_length=0 should mean "evaluate ALL sessions" + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'short.jsonl'); + // Only 2 user messages — normally below the default threshold of 10 + const lines = ['{"type":"user","content":"msg1"}', '{"type":"user","content":"msg2"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); - // Create a config file with min_session_length=0 - const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); - fs.mkdirSync(skillsDir, { recursive: true }); - const configPath = path.join(skillsDir, 'config.json'); - fs.writeFileSync(configPath, JSON.stringify({ - min_session_length: 0, - learned_skills_path: path.join(testDir, 'learned') - })); + // Create a config file with min_session_length=0 + const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); + fs.mkdirSync(skillsDir, { recursive: true }); + const configPath = path.join(skillsDir, 'config.json'); + fs.writeFileSync( + configPath, + JSON.stringify({ + min_session_length: 0, + learned_skills_path: path.join(testDir, 'learned') + }) + ); - const wrapperScript = createEvalWrapper(testDir, configPath); + const wrapperScript = createEvalWrapper(testDir, configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: testDir - }); - assert.strictEqual(result.code, 0); - // With min_session_length=0, even 2 messages should trigger evaluation - assert.ok( - result.stderr.includes('2 messages') && result.stderr.includes('evaluate'), - 'Should evaluate session with min_session_length=0 (not skip as too short)' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(wrapperScript, stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.strictEqual(result.code, 0); + // With 
min_session_length=0, even 2 messages should trigger evaluation + assert.ok(result.stderr.includes('2 messages') && result.stderr.includes('evaluate'), 'Should evaluate session with min_session_length=0 (not skip as too short)'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('config with min_session_length=null falls back to default 10', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'short.jsonl'); - // 5 messages — below default 10 - const lines = []; - for (let i = 0; i < 5; i++) lines.push(`{"type":"user","content":"msg${i}"}`); - fs.writeFileSync(transcriptPath, lines.join('\n')); + if ( + await asyncTest('config with min_session_length=null falls back to default 10', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'short.jsonl'); + // 5 messages — below default 10 + const lines = []; + for (let i = 0; i < 5; i++) lines.push(`{"type":"user","content":"msg${i}"}`); + fs.writeFileSync(transcriptPath, lines.join('\n')); - const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); - fs.mkdirSync(skillsDir, { recursive: true }); - const configPath = path.join(skillsDir, 'config.json'); - fs.writeFileSync(configPath, JSON.stringify({ - min_session_length: null, - learned_skills_path: path.join(testDir, 'learned') - })); + const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); + fs.mkdirSync(skillsDir, { recursive: true }); + const configPath = path.join(skillsDir, 'config.json'); + fs.writeFileSync( + configPath, + JSON.stringify({ + min_session_length: null, + learned_skills_path: path.join(testDir, 'learned') + }) + ); - const wrapperScript = createEvalWrapper(testDir, configPath); + const wrapperScript = createEvalWrapper(testDir, configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: 
testDir - }); - assert.strictEqual(result.code, 0); - // null ?? 10 === 10, so 5 messages should be "too short" - assert.ok(result.stderr.includes('too short'), 'Should fall back to default 10 when null'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(wrapperScript, stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.strictEqual(result.code, 0); + // null ?? 10 === 10, so 5 messages should be "too short" + assert.ok(result.stderr.includes('too short'), 'Should fall back to default 10 when null'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('config with custom learned_skills_path creates directory', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - fs.writeFileSync(transcriptPath, '{"type":"user","content":"msg"}'); + if ( + await asyncTest('config with custom learned_skills_path creates directory', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + fs.writeFileSync(transcriptPath, '{"type":"user","content":"msg"}'); - const customLearnedDir = path.join(testDir, 'custom-learned-skills'); - const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); - fs.mkdirSync(skillsDir, { recursive: true }); - const configPath = path.join(skillsDir, 'config.json'); - fs.writeFileSync(configPath, JSON.stringify({ - learned_skills_path: customLearnedDir - })); + const customLearnedDir = path.join(testDir, 'custom-learned-skills'); + const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); + fs.mkdirSync(skillsDir, { recursive: true }); + const configPath = path.join(skillsDir, 'config.json'); + fs.writeFileSync( + configPath, + JSON.stringify({ + learned_skills_path: customLearnedDir + }) + ); - const wrapperScript = createEvalWrapper(testDir, 
configPath); + const wrapperScript = createEvalWrapper(testDir, configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: testDir - }); - assert.ok(fs.existsSync(customLearnedDir), 'Should create custom learned skills directory'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + await runScript(wrapperScript, stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.ok(fs.existsSync(customLearnedDir), 'Should create custom learned skills directory'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles invalid config JSON gracefully (uses defaults)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - const lines = []; - for (let i = 0; i < 5; i++) lines.push(`{"type":"user","content":"msg${i}"}`); - fs.writeFileSync(transcriptPath, lines.join('\n')); + if ( + await asyncTest('handles invalid config JSON gracefully (uses defaults)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + const lines = []; + for (let i = 0; i < 5; i++) lines.push(`{"type":"user","content":"msg${i}"}`); + fs.writeFileSync(transcriptPath, lines.join('\n')); - const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); - fs.mkdirSync(skillsDir, { recursive: true }); - const configPath = path.join(skillsDir, 'config.json'); - fs.writeFileSync(configPath, 'not valid json!!!'); + const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); + fs.mkdirSync(skillsDir, { recursive: true }); + const configPath = path.join(skillsDir, 'config.json'); + fs.writeFileSync(configPath, 'not valid json!!!'); - const wrapperScript = createEvalWrapper(testDir, configPath); + const wrapperScript = createEvalWrapper(testDir, 
configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: testDir - }); - assert.strictEqual(result.code, 0); - // Should log parse failure and fall back to default 10 → 5 msgs too short - assert.ok(result.stderr.includes('too short'), 'Should use defaults when config is invalid JSON'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(wrapperScript, stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.strictEqual(result.code, 0); + // Should log parse failure and fall back to default 10 → 5 msgs too short + assert.ok(result.stderr.includes('too short'), 'Should use defaults when config is invalid JSON'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; console.log('\nRound 23: session-end.js (update existing file path):'); - if (await asyncTest('updates Last Updated timestamp in existing session file', async () => { - const testDir = createTestDir(); - const sessionsDir = path.join(testDir, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('updates Last Updated timestamp in existing session file', async () => { + const testDir = createTestDir(); + const sessionsDir = path.join(testDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - // Get the expected filename - const utils = require('../../scripts/lib/utils'); - const today = utils.getDateString(); + // Get the expected filename + const utils = require('../../scripts/lib/utils'); + const today = utils.getDateString(); - // Create a pre-existing session file with known timestamp - const shortId = 'update01'; - const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); - const originalContent = `# Session: ${today}\n**Date:** ${today}\n**Started:** 
09:00\n**Last Updated:** 09:00\n\n---\n\n## Current State\n\n[Session context goes here]\n\n### Completed\n- [ ]\n\n### In Progress\n- [ ]\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n`; - fs.writeFileSync(sessionFile, originalContent); + // Create a pre-existing session file with known timestamp + const shortId = 'update01'; + const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); + const originalContent = `# Session: ${today}\n**Date:** ${today}\n**Started:** 09:00\n**Last Updated:** 09:00\n\n---\n\n## Current State\n\n[Session context goes here]\n\n### Completed\n- [ ]\n\n### In Progress\n- [ ]\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n`; + fs.writeFileSync(sessionFile, originalContent); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), '', { - HOME: testDir, USERPROFILE: testDir, - CLAUDE_SESSION_ID: `session-${shortId}` - }); - assert.strictEqual(result.code, 0); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), '', { + HOME: testDir, + USERPROFILE: testDir, + CLAUDE_SESSION_ID: `session-${shortId}` + }); + assert.strictEqual(result.code, 0); - const updated = fs.readFileSync(sessionFile, 'utf8'); - // The timestamp should have been updated (no longer 09:00) - assert.ok(updated.includes('**Last Updated:**'), 'Should still have Last Updated field'); - assert.ok(result.stderr.includes('Updated session file'), 'Should log update'); - })) passed++; else failed++; + const updated = fs.readFileSync(sessionFile, 'utf8'); + // The timestamp should have been updated (no longer 09:00) + assert.ok(updated.includes('**Last Updated:**'), 'Should still have Last Updated field'); + assert.ok(result.stderr.includes('Updated session file'), 'Should log update'); + }) + ) + passed++; + else failed++; - if (await asyncTest('replaces blank template with summary when updating existing file', async () => { - const 
testDir = createTestDir(); - const sessionsDir = path.join(testDir, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('replaces blank template with summary when updating existing file', async () => { + const testDir = createTestDir(); + const sessionsDir = path.join(testDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const utils = require('../../scripts/lib/utils'); - const today = utils.getDateString(); + const utils = require('../../scripts/lib/utils'); + const today = utils.getDateString(); - const shortId = 'update02'; - const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); - // Pre-existing file with blank template - const originalContent = `# Session: ${today}\n**Date:** ${today}\n**Started:** 09:00\n**Last Updated:** 09:00\n\n---\n\n## Current State\n\n[Session context goes here]\n\n### Completed\n- [ ]\n\n### In Progress\n- [ ]\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n`; - fs.writeFileSync(sessionFile, originalContent); + const shortId = 'update02'; + const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); + // Pre-existing file with blank template + const originalContent = `# Session: ${today}\n**Date:** ${today}\n**Started:** 09:00\n**Last Updated:** 09:00\n\n---\n\n## Current State\n\n[Session context goes here]\n\n### Completed\n- [ ]\n\n### In Progress\n- [ ]\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n`; + fs.writeFileSync(sessionFile, originalContent); - // Create a transcript with user messages - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - const lines = [ - '{"type":"user","content":"Fix auth bug"}', - '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/auth.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + // Create a transcript with user messages + const 
transcriptPath = path.join(testDir, 'transcript.jsonl'); + const lines = ['{"type":"user","content":"Fix auth bug"}', '{"type":"tool_use","tool_name":"Edit","tool_input":{"file_path":"/src/auth.ts"}}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir, USERPROFILE: testDir, - CLAUDE_SESSION_ID: `session-${shortId}` - }); - assert.strictEqual(result.code, 0); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir, + USERPROFILE: testDir, + CLAUDE_SESSION_ID: `session-${shortId}` + }); + assert.strictEqual(result.code, 0); - const updated = fs.readFileSync(sessionFile, 'utf8'); - // Should have replaced blank template with actual summary - assert.ok(!updated.includes('[Session context goes here]'), 'Should replace blank template'); - assert.ok(updated.includes('Fix auth bug'), 'Should include user message in summary'); - assert.ok(updated.includes('/src/auth.ts'), 'Should include modified file'); - })) passed++; else failed++; + const updated = fs.readFileSync(sessionFile, 'utf8'); + // Should have replaced blank template with actual summary + assert.ok(!updated.includes('[Session context goes here]'), 'Should replace blank template'); + assert.ok(updated.includes('Fix auth bug'), 'Should include user message in summary'); + assert.ok(updated.includes('/src/auth.ts'), 'Should include modified file'); + }) + ) + passed++; + else failed++; - if (await asyncTest('always updates session summary content on session end', async () => { - const testDir = createTestDir(); - const sessionsDir = path.join(testDir, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('always updates session summary content on session end', async () => { 
+ const testDir = createTestDir(); + const sessionsDir = path.join(testDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const utils = require('../../scripts/lib/utils'); - const today = utils.getDateString(); + const utils = require('../../scripts/lib/utils'); + const today = utils.getDateString(); - const shortId = 'update03'; - const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); - // Pre-existing file with already-filled summary - const existingContent = `# Session: ${today}\n**Date:** ${today}\n**Started:** 08:00\n**Last Updated:** 08:30\n\n---\n\n## Session Summary\n\n### Tasks\n- Previous task from earlier\n`; - fs.writeFileSync(sessionFile, existingContent); + const shortId = 'update03'; + const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); + // Pre-existing file with already-filled summary + const existingContent = `# Session: ${today}\n**Date:** ${today}\n**Started:** 08:00\n**Last Updated:** 08:30\n\n---\n\n## Session Summary\n\n### Tasks\n- Previous task from earlier\n`; + fs.writeFileSync(sessionFile, existingContent); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - fs.writeFileSync(transcriptPath, '{"type":"user","content":"New task"}'); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + fs.writeFileSync(transcriptPath, '{"type":"user","content":"New task"}'); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir, USERPROFILE: testDir, - CLAUDE_SESSION_ID: `session-${shortId}` - }); - assert.strictEqual(result.code, 0); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir, + USERPROFILE: testDir, + CLAUDE_SESSION_ID: `session-${shortId}` + }); + assert.strictEqual(result.code, 0); - const 
updated = fs.readFileSync(sessionFile, 'utf8'); - // Session summary should always be refreshed with current content (#317) - assert.ok(updated.includes('## Session Summary'), 'Should have Session Summary section'); - assert.ok(updated.includes('# Session:'), 'Should preserve session header'); - })) passed++; else failed++; + const updated = fs.readFileSync(sessionFile, 'utf8'); + // Session summary should always be refreshed with current content (#317) + assert.ok(updated.includes('## Session Summary'), 'Should have Session Summary section'); + assert.ok(updated.includes('# Session:'), 'Should preserve session header'); + }) + ) + passed++; + else failed++; console.log('\nRound 23: pre-compact.js (glob specificity):'); - if (await asyncTest('only annotates *-session.tmp files, not other .tmp files', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-compact-glob-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('only annotates *-session.tmp files, not other .tmp files', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-compact-glob-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - // Create a session .tmp file and a non-session .tmp file - const sessionFile = path.join(sessionsDir, '2026-02-11-abc-session.tmp'); - const otherTmpFile = path.join(sessionsDir, 'other-data.tmp'); - fs.writeFileSync(sessionFile, '# Session\n'); - fs.writeFileSync(otherTmpFile, 'some other data\n'); + // Create a session .tmp file and a non-session .tmp file + const sessionFile = path.join(sessionsDir, '2026-02-11-abc-session.tmp'); + const otherTmpFile = path.join(sessionsDir, 'other-data.tmp'); + fs.writeFileSync(sessionFile, '# Session\n'); + fs.writeFileSync(otherTmpFile, 'some other data\n'); - try { - await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: 
isoHome, USERPROFILE: isoHome - }); + try { + await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); - const sessionContent = fs.readFileSync(sessionFile, 'utf8'); - const otherContent = fs.readFileSync(otherTmpFile, 'utf8'); + const sessionContent = fs.readFileSync(sessionFile, 'utf8'); + const otherContent = fs.readFileSync(otherTmpFile, 'utf8'); - assert.ok(sessionContent.includes('Compaction occurred'), 'Should annotate session file'); - assert.strictEqual(otherContent, 'some other data\n', 'Should NOT annotate non-session .tmp file'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + assert.ok(sessionContent.includes('Compaction occurred'), 'Should annotate session file'); + assert.strictEqual(otherContent, 'some other data\n', 'Should NOT annotate non-session .tmp file'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; - if (await asyncTest('handles no active session files gracefully', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-compact-nosession-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('handles no active session files gracefully', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-compact-nosession-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - try { - const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0 with no session files'); - assert.ok(result.stderr.includes('[PreCompact]'), 'Should still log success'); + try { + const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + 
assert.strictEqual(result.code, 0, 'Should exit 0 with no session files'); + assert.ok(result.stderr.includes('[PreCompact]'), 'Should still log success'); - // Compaction log should still be created - const logFile = path.join(sessionsDir, 'compaction-log.txt'); - assert.ok(fs.existsSync(logFile), 'Should create compaction log even with no sessions'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + // Compaction log should still be created + const logFile = path.join(sessionsDir, 'compaction-log.txt'); + assert.ok(fs.existsSync(logFile), 'Should create compaction log even with no sessions'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; console.log('\nRound 23: session-end.js (extractSessionSummary edge cases):'); - if (await asyncTest('handles transcript with only assistant messages (no user messages)', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // Only assistant messages — no user messages - const lines = [ - '{"type":"assistant","message":{"content":[{"type":"text","text":"response"}]}}', - '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/app.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + if ( + await asyncTest('handles transcript with only assistant messages (no user messages)', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // Only assistant messages — no user messages + const lines = ['{"type":"assistant","message":{"content":[{"type":"text","text":"response"}]}}', '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/app.ts"}}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), 
stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); - // With no user messages, extractSessionSummary returns null → blank template - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - assert.ok(content.includes('[Session context goes here]'), 'Should use blank template when no user messages'); - } - } - cleanupTestDir(testDir); - })) passed++; else failed++; - - if (await asyncTest('extracts tool_use from assistant message content blocks', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // Claude Code JSONL format: tool_use blocks inside assistant message content array - const lines = [ - '{"type":"user","content":"Edit config"}', - JSON.stringify({ - type: 'assistant', - message: { - content: [ - { type: 'text', text: 'I will edit the config.' 
}, - { type: 'tool_use', name: 'Edit', input: { file_path: '/src/config.ts' } }, - { type: 'tool_use', name: 'Write', input: { file_path: '/src/new.ts' } }, - ] + // With no user messages, extractSessionSummary returns null → blank template + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + assert.ok(content.includes('[Session context goes here]'), 'Should use blank template when no user messages'); } - }), - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); - - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - assert.ok(content.includes('/src/config.ts'), 'Should extract file from nested tool_use block'); - assert.ok(content.includes('/src/new.ts'), 'Should extract Write file from nested block'); - assert.ok(content.includes('Edit'), 'Should list Edit in tools used'); } - } - cleanupTestDir(testDir); - })) passed++; else failed++; + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + + if ( + await asyncTest('extracts tool_use from assistant message content blocks', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // Claude Code JSONL format: tool_use blocks inside assistant message content array + const lines = [ + '{"type":"user","content":"Edit config"}', + JSON.stringify({ + type: 'assistant', + message: { + content: [ 
+ { type: 'text', text: 'I will edit the config.' }, + { type: 'tool_use', name: 'Edit', input: { file_path: '/src/config.ts' } }, + { type: 'tool_use', name: 'Write', input: { file_path: '/src/new.ts' } } + ] + } + }) + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); + + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + assert.ok(content.includes('/src/config.ts'), 'Should extract file from nested tool_use block'); + assert.ok(content.includes('/src/new.ts'), 'Should extract Write file from nested block'); + assert.ok(content.includes('Edit'), 'Should list Edit in tools used'); + } + } + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // ─── Round 24: suggest-compact interval fix, fd fallback, session-start maxAge ─── console.log('\nRound 24: suggest-compact.js (interval fix & fd fallback):'); - if (await asyncTest('periodic intervals are consistent with non-25-divisible threshold', async () => { - // Regression test: with threshold=13, periodic suggestions should fire at 38, 63, 88... - // (count - 13) % 25 === 0 → 38-13=25, 63-13=50, etc. 
- const sessionId = `test-interval-fix-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // Pre-seed at 37 so next call = 38 (13 + 25 = 38) - fs.writeFileSync(counterFile, '37'); - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '13' - }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('38 tool calls'), 'Should suggest at threshold(13) + 25 = 38'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; + if ( + await asyncTest('periodic intervals are consistent with non-25-divisible threshold', async () => { + // Regression test: with threshold=13, periodic suggestions should fire at 38, 63, 88... + // (count - 13) % 25 === 0 → 38-13=25, 63-13=50, etc. + const sessionId = `test-interval-fix-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // Pre-seed at 37 so next call = 38 (13 + 25 = 38) + fs.writeFileSync(counterFile, '37'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '13' + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('38 tool calls'), 'Should suggest at threshold(13) + 25 = 38'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; - if (await asyncTest('does not suggest at old-style multiples that skip threshold offset', async () => { - // With threshold=13, count=50 should NOT trigger (old behavior would: 50%25===0) - // New behavior: (50-13)%25 = 37%25 = 12 → no suggestion - const sessionId = `test-no-false-suggest-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - fs.writeFileSync(counterFile, '49'); - const result = await 
runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId, - COMPACT_THRESHOLD: '13' - }); - assert.strictEqual(result.code, 0); - assert.ok(!result.stderr.includes('checkpoint'), 'Should NOT suggest at count=50 with threshold=13'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; + if ( + await asyncTest('does not suggest at old-style multiples that skip threshold offset', async () => { + // With threshold=13, count=50 should NOT trigger (old behavior would: 50%25===0) + // New behavior: (50-13)%25 = 37%25 = 12 → no suggestion + const sessionId = `test-no-false-suggest-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + fs.writeFileSync(counterFile, '49'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId, + COMPACT_THRESHOLD: '13' + }); + assert.strictEqual(result.code, 0); + assert.ok(!result.stderr.includes('checkpoint'), 'Should NOT suggest at count=50 with threshold=13'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; - if (await asyncTest('fd fallback: handles corrupted counter file gracefully', async () => { - const sessionId = `test-corrupt-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // Write non-numeric data to trigger parseInt → NaN → reset to 1 - fs.writeFileSync(counterFile, 'corrupted data here!!!'); - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - assert.strictEqual(result.code, 0); - const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(newCount, 1, 'Should reset to 1 on corrupted file content'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) 
passed++; else failed++; + if ( + await asyncTest('fd fallback: handles corrupted counter file gracefully', async () => { + const sessionId = `test-corrupt-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // Write non-numeric data to trigger parseInt → NaN → reset to 1 + fs.writeFileSync(counterFile, 'corrupted data here!!!'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: sessionId + }); + assert.strictEqual(result.code, 0); + const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(newCount, 1, 'Should reset to 1 on corrupted file content'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; - if (await asyncTest('handles counter at exact 1000000 boundary', async () => { - const sessionId = `test-boundary-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - try { - // 1000000 is the upper clamp boundary — should still increment - fs.writeFileSync(counterFile, '1000000'); - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - assert.strictEqual(result.code, 0); - const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); - assert.strictEqual(newCount, 1000001, 'Should increment from exactly 1000000'); - } finally { - try { fs.unlinkSync(counterFile); } catch { /* ignore */ } - } - })) passed++; else failed++; + if ( + await asyncTest('handles counter at exact 1000000 boundary', async () => { + const sessionId = `test-boundary-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + try { + // 1000000 is the upper clamp boundary — should still increment + fs.writeFileSync(counterFile, '1000000'); + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), 
'', { + CLAUDE_SESSION_ID: sessionId + }); + assert.strictEqual(result.code, 0); + const newCount = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10); + assert.strictEqual(newCount, 1000001, 'Should increment from exactly 1000000'); + } finally { + try { + fs.unlinkSync(counterFile); + } catch { + /* ignore */ + } + } + }) + ) + passed++; + else failed++; console.log('\nRound 24: post-edit-format.js (edge cases):'); - if (await asyncTest('passes through malformed JSON unchanged', async () => { - const malformedJson = '{"tool_input": {"file_path": "/test.ts"'; - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), malformedJson); - assert.strictEqual(result.code, 0); - // Should pass through the malformed data unchanged - assert.ok(result.stdout.includes(malformedJson), 'Should pass through malformed JSON'); - })) passed++; else failed++; + if ( + await asyncTest('passes through malformed JSON unchanged', async () => { + const malformedJson = '{"tool_input": {"file_path": "/test.ts"'; + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), malformedJson); + assert.strictEqual(result.code, 0); + // Should pass through the malformed data unchanged + assert.ok(result.stdout.includes(malformedJson), 'Should pass through malformed JSON'); + }) + ) + passed++; + else failed++; - if (await asyncTest('passes through data for non-JS/TS file extensions', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/path/to/file.py' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('file.py'), 'Should pass through for .py files'); - })) passed++; else failed++; + if ( + await asyncTest('passes through data for non-JS/TS file extensions', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/path/to/file.py' } }); + const result = await 
runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('file.py'), 'Should pass through for .py files'); + }) + ) + passed++; + else failed++; console.log('\nRound 24: post-edit-typecheck.js (edge cases):'); - if (await asyncTest('skips typecheck for non-existent file and still passes through', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/deep/file.ts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('file.ts'), 'Should pass through for non-existent .ts file'); - })) passed++; else failed++; + if ( + await asyncTest('skips typecheck for non-existent file and still passes through', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/deep/file.ts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('file.ts'), 'Should pass through for non-existent .ts file'); + }) + ) + passed++; + else failed++; - if (await asyncTest('passes through for non-TS extensions without running tsc', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/path/to/file.js' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes('file.js'), 'Should pass through for .js file without running tsc'); - })) passed++; else failed++; + if ( + await asyncTest('passes through for non-TS extensions without running tsc', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/path/to/file.js' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0); + 
assert.ok(result.stdout.includes('file.js'), 'Should pass through for .js file without running tsc'); + }) + ) + passed++; + else failed++; console.log('\nRound 24: session-start.js (edge cases):'); - if (await asyncTest('exits 0 with empty sessions directory (no recent sessions)', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-start-empty-${Date.now()}`); - fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0 with no sessions'); - // Should NOT inject any previous session data (stdout should be empty or minimal) - assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject when no sessions'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + if ( + await asyncTest('exits 0 with empty sessions directory (no recent sessions)', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-empty-${Date.now()}`); + fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0 with no sessions'); + // Should NOT inject any previous session data (stdout should be empty or minimal) + assert.ok(!result.stdout.includes('Previous session summary'), 'Should not inject when no sessions'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; - if (await asyncTest('does not inject blank template session into context', async () => { - const 
isoHome = path.join(os.tmpdir(), `ecc-start-blank-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('does not inject blank template session into context', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-blank-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - // Create a session file with the blank template marker - const today = new Date().toISOString().slice(0, 10); - const sessionFile = path.join(sessionsDir, `${today}-blank-session.tmp`); - fs.writeFileSync(sessionFile, '# Session\n[Session context goes here]\n'); + // Create a session file with the blank template marker + const today = new Date().toISOString().slice(0, 10); + const sessionFile = path.join(sessionsDir, `${today}-blank-session.tmp`); + fs.writeFileSync(sessionFile, '# Session\n[Session context goes here]\n'); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - // Should NOT inject blank template - assert.ok(!result.stdout.includes('Previous session summary'), 'Should skip blank template sessions'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + // Should NOT inject blank template + assert.ok(!result.stdout.includes('Previous session summary'), 'Should skip blank template sessions'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + 
passed++; + else failed++; // ─── Round 25: post-edit-console-warn pass-through fix, check-console-log edge cases ─── console.log('\nRound 25: post-edit-console-warn.js (pass-through fix):'); - if (await asyncTest('stdout is exact byte match of stdin (no trailing newline)', async () => { - // Regression test: console.log(data) was replaced with process.stdout.write(data) - const stdinData = '{"tool_input":{"file_path":"/nonexistent/file.py"}}'; - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinData); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinData, 'stdout should exactly match stdin (no extra newline)'); - })) passed++; else failed++; + if ( + await asyncTest('stdout is exact byte match of stdin (no trailing newline)', async () => { + // Regression test: console.log(data) was replaced with process.stdout.write(data) + const stdinData = '{"tool_input":{"file_path":"/nonexistent/file.py"}}'; + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinData); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinData, 'stdout should exactly match stdin (no extra newline)'); + }) + ) + passed++; + else failed++; - if (await asyncTest('passes through malformed JSON unchanged without crash', async () => { - const malformed = '{"tool_input": {"file_path": "/test.ts"'; - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), malformed); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, malformed, 'Should pass through malformed JSON exactly'); - })) passed++; else failed++; + if ( + await asyncTest('passes through malformed JSON unchanged without crash', async () => { + const malformed = '{"tool_input": {"file_path": "/test.ts"'; + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), malformed); + assert.strictEqual(result.code, 0); + 
assert.strictEqual(result.stdout, malformed, 'Should pass through malformed JSON exactly'); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles missing file_path in tool_input gracefully', async () => { - const stdinJson = JSON.stringify({ tool_input: {} }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinJson, 'Should pass through with missing file_path'); - })) passed++; else failed++; + if ( + await asyncTest('handles missing file_path in tool_input gracefully', async () => { + const stdinJson = JSON.stringify({ tool_input: {} }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinJson, 'Should pass through with missing file_path'); + }) + ) + passed++; + else failed++; - if (await asyncTest('passes through when file does not exist (readFile returns null)', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/deep/file.ts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinJson, 'Should pass through exactly when file not found'); - })) passed++; else failed++; + if ( + await asyncTest('passes through when file does not exist (readFile returns null)', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/deep/file.ts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinJson, 'Should pass through exactly when file not found'); + }) + ) + passed++; + else failed++; console.log('\nRound 25: check-console-log.js (edge cases):'); - if (await asyncTest('source has expected 
exclusion patterns', async () => { - // The EXCLUDED_PATTERNS array includes .test.ts, .spec.ts, etc. - const source = fs.readFileSync(path.join(scriptsDir, 'check-console-log.js'), 'utf8'); - // Verify the exclusion patterns exist (regex escapes use \. so check for the pattern names) - assert.ok(source.includes('EXCLUDED_PATTERNS'), 'Should have exclusion patterns array'); - assert.ok(/\.test\\\./.test(source), 'Should have test file exclusion pattern'); - assert.ok(/\.spec\\\./.test(source), 'Should have spec file exclusion pattern'); - assert.ok(source.includes('scripts'), 'Should exclude scripts/ directory'); - assert.ok(source.includes('__tests__'), 'Should exclude __tests__/ directory'); - assert.ok(source.includes('__mocks__'), 'Should exclude __mocks__/ directory'); - })) passed++; else failed++; + if ( + await asyncTest('source has expected exclusion patterns', async () => { + // The EXCLUDED_PATTERNS array includes .test.ts, .spec.ts, etc. + const source = fs.readFileSync(path.join(scriptsDir, 'check-console-log.js'), 'utf8'); + // Verify the exclusion patterns exist (regex escapes use \. 
so check for the pattern names) + assert.ok(source.includes('EXCLUDED_PATTERNS'), 'Should have exclusion patterns array'); + assert.ok(/\.test\\\./.test(source), 'Should have test file exclusion pattern'); + assert.ok(/\.spec\\\./.test(source), 'Should have spec file exclusion pattern'); + assert.ok(source.includes('scripts'), 'Should exclude scripts/ directory'); + assert.ok(source.includes('__tests__'), 'Should exclude __tests__/ directory'); + assert.ok(source.includes('__mocks__'), 'Should exclude __mocks__/ directory'); + }) + ) + passed++; + else failed++; - if (await asyncTest('passes through data unchanged on non-git repo', async () => { - // In a temp dir with no git repo, the hook should pass through data unchanged - const testDir = createTestDir(); - const stdinData = '{"tool_input":"test"}'; - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData, { - // Use a non-git directory as CWD - HOME: testDir, USERPROFILE: testDir - }); - // Note: We're still running from a git repo, so isGitRepo() may still return true. - // This test verifies the script doesn't crash and passes through data. - assert.strictEqual(result.code, 0); - assert.ok(result.stdout.includes(stdinData), 'Should pass through data'); - cleanupTestDir(testDir); - })) passed++; else failed++; + if ( + await asyncTest('passes through data unchanged on non-git repo', async () => { + // In a temp dir with no git repo, the hook should pass through data unchanged + const testDir = createTestDir(); + const stdinData = '{"tool_input":"test"}'; + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData, { + // Use a non-git directory as CWD + HOME: testDir, + USERPROFILE: testDir + }); + // Note: We're still running from a git repo, so isGitRepo() may still return true. + // This test verifies the script doesn't crash and passes through data. 
+ assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes(stdinData), 'Should pass through data'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('exits 0 even when no stdin is provided', async () => { - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), ''); - assert.strictEqual(result.code, 0, 'Should exit 0 with empty stdin'); - })) passed++; else failed++; + if ( + await asyncTest('exits 0 even when no stdin is provided', async () => { + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), ''); + assert.strictEqual(result.code, 0, 'Should exit 0 with empty stdin'); + }) + ) + passed++; + else failed++; // ── Round 29: post-edit-format.js cwd fix and process.exit(0) consistency ── console.log('\nRound 29: post-edit-format.js (cwd and exit):'); - if (await asyncTest('source uses cwd based on file directory for npx', async () => { - const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); - assert.ok(formatSource.includes('cwd:'), 'Should set cwd option for execFileSync'); - assert.ok(formatSource.includes('path.dirname'), 'cwd should use path.dirname of the file'); - assert.ok(formatSource.includes('path.resolve'), 'cwd should resolve the file path first'); - })) passed++; else failed++; + if ( + await asyncTest('source uses cwd based on file directory for npx', async () => { + const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); + assert.ok(formatSource.includes('cwd:'), 'Should set cwd option for execFileSync'); + assert.ok(formatSource.includes('path.dirname'), 'cwd should use path.dirname of the file'); + assert.ok(formatSource.includes('path.resolve'), 'cwd should resolve the file path first'); + }) + ) + passed++; + else failed++; - if (await asyncTest('source calls process.exit(0) after writing output', async () => { - const formatSource = fs.readFileSync(path.join(scriptsDir, 
'post-edit-format.js'), 'utf8'); - assert.ok(formatSource.includes('process.exit(0)'), 'Should call process.exit(0) for clean termination'); - })) passed++; else failed++; + if ( + await asyncTest('source calls process.exit(0) after writing output', async () => { + const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); + assert.ok(formatSource.includes('process.exit(0)'), 'Should call process.exit(0) for clean termination'); + }) + ) + passed++; + else failed++; - if (await asyncTest('uses process.stdout.write instead of console.log for pass-through', async () => { - const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); - assert.ok(formatSource.includes('process.stdout.write(data)'), 'Should use process.stdout.write to avoid trailing newline'); - // Verify no console.log(data) for pass-through (console.error for warnings is OK) - const lines = formatSource.split('\n'); - const passThrough = lines.filter(l => /console\.log\(data\)/.test(l)); - assert.strictEqual(passThrough.length, 0, 'Should not use console.log(data) for pass-through'); - })) passed++; else failed++; + if ( + await asyncTest('uses process.stdout.write instead of console.log for pass-through', async () => { + const formatSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); + assert.ok(formatSource.includes('process.stdout.write(data)'), 'Should use process.stdout.write to avoid trailing newline'); + // Verify no console.log(data) for pass-through (console.error for warnings is OK) + const lines = formatSource.split('\n'); + const passThrough = lines.filter(l => /console\.log\(data\)/.test(l)); + assert.strictEqual(passThrough.length, 0, 'Should not use console.log(data) for pass-through'); + }) + ) + passed++; + else failed++; console.log('\nRound 29: post-edit-typecheck.js (exit and pass-through):'); - if (await asyncTest('source calls process.exit(0) after writing output', async () => { - const 
tcSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-typecheck.js'), 'utf8'); - assert.ok(tcSource.includes('process.exit(0)'), 'Should call process.exit(0) for clean termination'); - })) passed++; else failed++; + if ( + await asyncTest('source calls process.exit(0) after writing output', async () => { + const tcSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-typecheck.js'), 'utf8'); + assert.ok(tcSource.includes('process.exit(0)'), 'Should call process.exit(0) for clean termination'); + }) + ) + passed++; + else failed++; - if (await asyncTest('uses process.stdout.write instead of console.log for pass-through', async () => { - const tcSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-typecheck.js'), 'utf8'); - assert.ok(tcSource.includes('process.stdout.write(data)'), 'Should use process.stdout.write'); - const lines = tcSource.split('\n'); - const passThrough = lines.filter(l => /console\.log\(data\)/.test(l)); - assert.strictEqual(passThrough.length, 0, 'Should not use console.log(data) for pass-through'); - })) passed++; else failed++; + if ( + await asyncTest('uses process.stdout.write instead of console.log for pass-through', async () => { + const tcSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-typecheck.js'), 'utf8'); + assert.ok(tcSource.includes('process.stdout.write(data)'), 'Should use process.stdout.write'); + const lines = tcSource.split('\n'); + const passThrough = lines.filter(l => /console\.log\(data\)/.test(l)); + assert.strictEqual(passThrough.length, 0, 'Should not use console.log(data) for pass-through'); + }) + ) + passed++; + else failed++; - if (await asyncTest('exact stdout pass-through without trailing newline (typecheck)', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.py' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinJson, 
'stdout should exactly match stdin (no trailing newline)'); - })) passed++; else failed++; + if ( + await asyncTest('exact stdout pass-through without trailing newline (typecheck)', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.py' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinJson, 'stdout should exactly match stdin (no trailing newline)'); + }) + ) + passed++; + else failed++; - if (await asyncTest('exact stdout pass-through without trailing newline (format)', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.py' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinJson, 'stdout should exactly match stdin (no trailing newline)'); - })) passed++; else failed++; + if ( + await asyncTest('exact stdout pass-through without trailing newline (format)', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/nonexistent/file.py' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinJson, 'stdout should exactly match stdin (no trailing newline)'); + }) + ) + passed++; + else failed++; console.log('\nRound 29: post-edit-console-warn.js (extension and exit):'); - if (await asyncTest('source calls process.exit(0) after writing output', async () => { - const cwSource = fs.readFileSync(path.join(scriptsDir, 'post-edit-console-warn.js'), 'utf8'); - assert.ok(cwSource.includes('process.exit(0)'), 'Should call process.exit(0)'); - })) passed++; else failed++; + if ( + await asyncTest('source calls process.exit(0) after writing output', async () => { + const cwSource = 
fs.readFileSync(path.join(scriptsDir, 'post-edit-console-warn.js'), 'utf8'); + assert.ok(cwSource.includes('process.exit(0)'), 'Should call process.exit(0)'); + }) + ) + passed++; + else failed++; - if (await asyncTest('does NOT match .mts or .mjs extensions', async () => { - const stdinMts = JSON.stringify({ tool_input: { file_path: '/some/file.mts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinMts); - assert.strictEqual(result.code, 0); - // .mts is not in the regex /\.(ts|tsx|js|jsx)$/, so no console.log scan - assert.strictEqual(result.stdout, stdinMts, 'Should pass through .mts without scanning'); - assert.ok(!result.stderr.includes('console.log'), 'Should NOT scan .mts files for console.log'); - })) passed++; else failed++; + if ( + await asyncTest('does NOT match .mts or .mjs extensions', async () => { + const stdinMts = JSON.stringify({ tool_input: { file_path: '/some/file.mts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinMts); + assert.strictEqual(result.code, 0); + // .mts is not in the regex /\.(ts|tsx|js|jsx)$/, so no console.log scan + assert.strictEqual(result.stdout, stdinMts, 'Should pass through .mts without scanning'); + assert.ok(!result.stderr.includes('console.log'), 'Should NOT scan .mts files for console.log'); + }) + ) + passed++; + else failed++; - if (await asyncTest('does NOT match uppercase .TS extension', async () => { - const stdinTS = JSON.stringify({ tool_input: { file_path: '/some/file.TS' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinTS); - assert.strictEqual(result.code, 0); - assert.strictEqual(result.stdout, stdinTS, 'Should pass through .TS without scanning'); - assert.ok(!result.stderr.includes('console.log'), 'Should NOT scan .TS (uppercase) files'); - })) passed++; else failed++; + if ( + await asyncTest('does NOT match uppercase .TS extension', async () => { + const stdinTS = 
JSON.stringify({ tool_input: { file_path: '/some/file.TS' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinTS); + assert.strictEqual(result.code, 0); + assert.strictEqual(result.stdout, stdinTS, 'Should pass through .TS without scanning'); + assert.ok(!result.stderr.includes('console.log'), 'Should NOT scan .TS (uppercase) files'); + }) + ) + passed++; + else failed++; - if (await asyncTest('detects console.log in commented-out code', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'commented.js'); - fs.writeFileSync(testFile, '// console.log("debug")\nconst x = 1;\n'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - assert.strictEqual(result.code, 0); - // The regex /console\.log/ matches even in comments — this is intentional - assert.ok(result.stderr.includes('console.log'), 'Should detect console.log even in comments'); - cleanupTestDir(testDir); - })) passed++; else failed++; + if ( + await asyncTest('detects console.log in commented-out code', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'commented.js'); + fs.writeFileSync(testFile, '// console.log("debug")\nconst x = 1;\n'); + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + assert.strictEqual(result.code, 0); + // The regex /console\.log/ matches even in comments — this is intentional + assert.ok(result.stderr.includes('console.log'), 'Should detect console.log even in comments'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; console.log('\nRound 29: check-console-log.js (exclusion patterns and exit):'); - if (await asyncTest('source calls process.exit(0) after writing output', async () => { - const clSource = 
fs.readFileSync(path.join(scriptsDir, 'check-console-log.js'), 'utf8'); - // Should have at least 2 process.exit(0) calls (early return + end) - const exitCalls = clSource.match(/process\.exit\(0\)/g) || []; - assert.ok(exitCalls.length >= 2, `Should have at least 2 process.exit(0) calls, found ${exitCalls.length}`); - })) passed++; else failed++; + if ( + await asyncTest('source calls process.exit(0) after writing output', async () => { + const clSource = fs.readFileSync(path.join(scriptsDir, 'check-console-log.js'), 'utf8'); + // Should have at least 2 process.exit(0) calls (early return + end) + const exitCalls = clSource.match(/process\.exit\(0\)/g) || []; + assert.ok(exitCalls.length >= 2, `Should have at least 2 process.exit(0) calls, found ${exitCalls.length}`); + }) + ) + passed++; + else failed++; - if (await asyncTest('EXCLUDED_PATTERNS correctly excludes test files', async () => { - // Test the patterns directly by reading the source and evaluating the regex - const source = fs.readFileSync(path.join(scriptsDir, 'check-console-log.js'), 'utf8'); - // Verify the 6 exclusion patterns exist in the source (as regex literals with escapes) - const expectedSubstrings = ['test', 'spec', 'config', 'scripts', '__tests__', '__mocks__']; - for (const substr of expectedSubstrings) { - assert.ok(source.includes(substr), `Should include pattern containing "${substr}"`); - } - // Verify the array name exists - assert.ok(source.includes('EXCLUDED_PATTERNS'), 'Should have EXCLUDED_PATTERNS array'); - })) passed++; else failed++; + if ( + await asyncTest('EXCLUDED_PATTERNS correctly excludes test files', async () => { + // Test the patterns directly by reading the source and evaluating the regex + const source = fs.readFileSync(path.join(scriptsDir, 'check-console-log.js'), 'utf8'); + // Verify the 6 exclusion patterns exist in the source (as regex literals with escapes) + const expectedSubstrings = ['test', 'spec', 'config', 'scripts', '__tests__', '__mocks__']; + for 
(const substr of expectedSubstrings) { + assert.ok(source.includes(substr), `Should include pattern containing "${substr}"`); + } + // Verify the array name exists + assert.ok(source.includes('EXCLUDED_PATTERNS'), 'Should have EXCLUDED_PATTERNS array'); + }) + ) + passed++; + else failed++; - if (await asyncTest('exclusion patterns match expected file paths', async () => { - // Recreate the EXCLUDED_PATTERNS from the source and test them - const EXCLUDED_PATTERNS = [ - /\.test\.[jt]sx?$/, - /\.spec\.[jt]sx?$/, - /\.config\.[jt]s$/, - /scripts\//, - /__tests__\//, - /__mocks__\//, - ]; - // These SHOULD be excluded - const excluded = [ - 'src/utils.test.ts', 'src/utils.test.js', 'src/utils.test.tsx', 'src/utils.test.jsx', - 'src/utils.spec.ts', 'src/utils.spec.js', - 'src/utils.config.ts', 'src/utils.config.js', - 'scripts/hooks/session-end.js', - '__tests__/utils.ts', - '__mocks__/api.ts', - ]; - for (const f of excluded) { - const matches = EXCLUDED_PATTERNS.some(p => p.test(f)); - assert.ok(matches, `Expected "${f}" to be excluded but it was not`); - } - // These should NOT be excluded - const notExcluded = [ - 'src/utils.ts', 'src/main.tsx', 'src/app.js', - 'src/test.component.ts', // "test" in name but not .test. pattern - 'src/config.ts', // "config" in name but not .config. 
pattern - ]; - for (const f of notExcluded) { - const matches = EXCLUDED_PATTERNS.some(p => p.test(f)); - assert.ok(!matches, `Expected "${f}" to NOT be excluded but it was`); - } - })) passed++; else failed++; + if ( + await asyncTest('exclusion patterns match expected file paths', async () => { + // Recreate the EXCLUDED_PATTERNS from the source and test them + const EXCLUDED_PATTERNS = [/\.test\.[jt]sx?$/, /\.spec\.[jt]sx?$/, /\.config\.[jt]s$/, /scripts\//, /__tests__\//, /__mocks__\//]; + // These SHOULD be excluded + const excluded = [ + 'src/utils.test.ts', + 'src/utils.test.js', + 'src/utils.test.tsx', + 'src/utils.test.jsx', + 'src/utils.spec.ts', + 'src/utils.spec.js', + 'src/utils.config.ts', + 'src/utils.config.js', + 'scripts/hooks/session-end.js', + '__tests__/utils.ts', + '__mocks__/api.ts' + ]; + for (const f of excluded) { + const matches = EXCLUDED_PATTERNS.some(p => p.test(f)); + assert.ok(matches, `Expected "${f}" to be excluded but it was not`); + } + // These should NOT be excluded + const notExcluded = [ + 'src/utils.ts', + 'src/main.tsx', + 'src/app.js', + 'src/test.component.ts', // "test" in name but not .test. pattern + 'src/config.ts' // "config" in name but not .config. 
pattern + ]; + for (const f of notExcluded) { + const matches = EXCLUDED_PATTERNS.some(p => p.test(f)); + assert.ok(!matches, `Expected "${f}" to NOT be excluded but it was`); + } + }) + ) + passed++; + else failed++; console.log('\nRound 29: run-all.js test runner improvements:'); - if (await asyncTest('test runner uses spawnSync to capture stderr on success', async () => { - const runAllSource = fs.readFileSync(path.join(__dirname, '..', 'run-all.js'), 'utf8'); - assert.ok(runAllSource.includes('spawnSync'), 'Should use spawnSync instead of execSync'); - assert.ok(!runAllSource.includes('execSync'), 'Should not use execSync'); - // Verify it shows stderr - assert.ok(runAllSource.includes('stderr'), 'Should handle stderr output'); - })) passed++; else failed++; + if ( + await asyncTest('test runner uses spawnSync to capture stderr on success', async () => { + const runAllSource = fs.readFileSync(path.join(__dirname, '..', 'run-all.js'), 'utf8'); + assert.ok(runAllSource.includes('spawnSync'), 'Should use spawnSync instead of execSync'); + assert.ok(!runAllSource.includes('execSync'), 'Should not use execSync'); + // Verify it shows stderr + assert.ok(runAllSource.includes('stderr'), 'Should handle stderr output'); + }) + ) + passed++; + else failed++; // ── Round 32: post-edit-typecheck special characters & check-console-log ── console.log('\nRound 32: post-edit-typecheck (special character paths):'); - if (await asyncTest('handles file path with spaces gracefully', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'my file.ts'); - fs.writeFileSync(testFile, 'const x: number = 1;'); + if ( + await asyncTest('handles file path with spaces gracefully', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'my file.ts'); + fs.writeFileSync(testFile, 'const x: number = 1;'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await 
runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle spaces in path'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle spaces in path'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles file path with shell metacharacters safely', async () => { - const testDir = createTestDir(); - // File name with characters that could be dangerous in shell contexts - const testFile = path.join(testDir, 'test$(echo).ts'); - fs.writeFileSync(testFile, 'const x: number = 1;'); + if ( + await asyncTest('handles file path with shell metacharacters safely', async () => { + const testDir = createTestDir(); + // File name with characters that could be dangerous in shell contexts + const testFile = path.join(testDir, 'test$(echo).ts'); + fs.writeFileSync(testFile, 'const x: number = 1;'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should not crash on shell metacharacters'); - // execFileSync prevents shell injection — just verify no crash - assert.ok(result.stdout.includes('tool_input'), 'Should pass through data safely'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should not crash on shell metacharacters'); 
+ // execFileSync prevents shell injection — just verify no crash + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data safely'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles .tsx file extension', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'component.tsx'); - fs.writeFileSync(testFile, 'const App = () =>
<div>Hello</div>
;'); + if ( + await asyncTest('handles .tsx file extension', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'component.tsx'); + fs.writeFileSync(testFile, 'const App = () =>
<div>Hello</div>
;'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle .tsx files'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle .tsx files'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; console.log('\nRound 32: check-console-log (edge cases):'); - if (await asyncTest('passes through data when git commands fail', async () => { - // Run from a non-git directory - const testDir = createTestDir(); - const stdinData = JSON.stringify({ tool_name: 'Write', tool_input: {} }); - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); - assert.strictEqual(result.code, 0, 'Should exit 0'); - assert.ok(result.stdout.includes('tool_name'), 'Should pass through stdin'); - cleanupTestDir(testDir); - })) passed++; else failed++; + if ( + await asyncTest('passes through data when git commands fail', async () => { + // Run from a non-git directory + const testDir = createTestDir(); + const stdinData = JSON.stringify({ tool_name: 'Write', tool_input: {} }); + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), stdinData); + assert.strictEqual(result.code, 0, 'Should exit 0'); + assert.ok(result.stdout.includes('tool_name'), 'Should pass through stdin'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles very large stdin within limit', async () => { - // Send just under the 1MB limit - const largePayload = 
JSON.stringify({ tool_name: 'x'.repeat(500000) }); - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), largePayload); - assert.strictEqual(result.code, 0, 'Should handle large stdin'); - })) passed++; else failed++; + if ( + await asyncTest('handles very large stdin within limit', async () => { + // Send just under the 1MB limit + const largePayload = JSON.stringify({ tool_name: 'x'.repeat(500000) }); + const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), largePayload); + assert.strictEqual(result.code, 0, 'Should handle large stdin'); + }) + ) + passed++; + else failed++; console.log('\nRound 32: post-edit-console-warn (additional edge cases):'); - if (await asyncTest('handles file with only console.error (no warning)', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'errors-only.ts'); - fs.writeFileSync(testFile, 'console.error("this is fine");\nconsole.warn("also fine");'); + if ( + await asyncTest('handles file with only console.error (no warning)', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'errors-only.ts'); + fs.writeFileSync(testFile, 'console.error("this is fine");\nconsole.warn("also fine");'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - assert.ok(!result.stderr.includes('WARNING'), 'Should NOT warn for console.error/warn only'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + assert.ok(!result.stderr.includes('WARNING'), 'Should NOT warn for console.error/warn only'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles null tool_input gracefully', async () => { - 
const stdinJson = JSON.stringify({ tool_input: null }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle null tool_input'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); - })) passed++; else failed++; + if ( + await asyncTest('handles null tool_input gracefully', async () => { + const stdinJson = JSON.stringify({ tool_input: null }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle null tool_input'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through data'); + }) + ) + passed++; + else failed++; console.log('\nRound 32: session-end.js (empty transcript):'); - if (await asyncTest('handles completely empty transcript file', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'empty.jsonl'); - fs.writeFileSync(transcriptPath, ''); + if ( + await asyncTest('handles completely empty transcript file', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'empty.jsonl'); + fs.writeFileSync(transcriptPath, ''); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle empty transcript'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle empty transcript'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('handles transcript with only whitespace lines', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 
'whitespace.jsonl'); - fs.writeFileSync(transcriptPath, ' \n\n \n'); + if ( + await asyncTest('handles transcript with only whitespace lines', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'whitespace.jsonl'); + fs.writeFileSync(transcriptPath, ' \n\n \n'); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should handle whitespace-only transcript'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should handle whitespace-only transcript'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // ── Round 38: evaluate-session.js tilde expansion & missing config ── console.log('\nRound 38: evaluate-session.js (tilde expansion & missing config):'); - if (await asyncTest('expands ~ in learned_skills_path to home directory', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // 1 user message — below threshold, but we only need to verify directory creation - fs.writeFileSync(transcriptPath, '{"type":"user","content":"msg"}'); + if ( + await asyncTest('expands ~ in learned_skills_path to home directory', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // 1 user message — below threshold, but we only need to verify directory creation + fs.writeFileSync(transcriptPath, '{"type":"user","content":"msg"}'); - const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); - fs.mkdirSync(skillsDir, { recursive: true }); - const configPath = path.join(skillsDir, 'config.json'); - // Use ~ prefix — should expand to the HOME dir we set - 
fs.writeFileSync(configPath, JSON.stringify({ - learned_skills_path: '~/test-tilde-skills' - })); + const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); + fs.mkdirSync(skillsDir, { recursive: true }); + const configPath = path.join(skillsDir, 'config.json'); + // Use ~ prefix — should expand to the HOME dir we set + fs.writeFileSync( + configPath, + JSON.stringify({ + learned_skills_path: '~/test-tilde-skills' + }) + ); - const wrapperScript = createEvalWrapper(testDir, configPath); + const wrapperScript = createEvalWrapper(testDir, configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: testDir - }); - assert.strictEqual(result.code, 0); - // ~ should expand to os.homedir() which during the script run is the real home - // The script creates the directory via ensureDir — check that it attempted to - // create a directory starting with the home dir, not a literal ~/ - // Verify the literal ~/test-tilde-skills was NOT created - assert.ok( - !fs.existsSync(path.join(testDir, '~', 'test-tilde-skills')), - 'Should NOT create literal ~/test-tilde-skills directory' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(wrapperScript, stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.strictEqual(result.code, 0); + // ~ should expand to os.homedir() which during the script run is the real home + // The script creates the directory via ensureDir — check that it attempted to + // create a directory starting with the home dir, not a literal ~/ + // Verify the literal ~/test-tilde-skills was NOT created + assert.ok(!fs.existsSync(path.join(testDir, '~', 'test-tilde-skills')), 'Should NOT create literal ~/test-tilde-skills directory'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await 
asyncTest('does NOT expand ~ in middle of learned_skills_path', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - fs.writeFileSync(transcriptPath, '{"type":"user","content":"msg"}'); + if ( + await asyncTest('does NOT expand ~ in middle of learned_skills_path', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + fs.writeFileSync(transcriptPath, '{"type":"user","content":"msg"}'); - const midTildeDir = path.join(testDir, 'some~path', 'skills'); - const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); - fs.mkdirSync(skillsDir, { recursive: true }); - const configPath = path.join(skillsDir, 'config.json'); - // Path with ~ in the middle — should NOT be expanded - fs.writeFileSync(configPath, JSON.stringify({ - learned_skills_path: midTildeDir - })); + const midTildeDir = path.join(testDir, 'some~path', 'skills'); + const skillsDir = path.join(testDir, 'skills', 'continuous-learning'); + fs.mkdirSync(skillsDir, { recursive: true }); + const configPath = path.join(skillsDir, 'config.json'); + // Path with ~ in the middle — should NOT be expanded + fs.writeFileSync( + configPath, + JSON.stringify({ + learned_skills_path: midTildeDir + }) + ); - const wrapperScript = createEvalWrapper(testDir, configPath); + const wrapperScript = createEvalWrapper(testDir, configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: testDir - }); - assert.strictEqual(result.code, 0); - // The directory with ~ in the middle should be created as-is - assert.ok( - fs.existsSync(midTildeDir), - 'Should create directory with ~ in middle of path unchanged' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(wrapperScript, 
stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.strictEqual(result.code, 0); + // The directory with ~ in the middle should be created as-is + assert.ok(fs.existsSync(midTildeDir), 'Should create directory with ~ in middle of path unchanged'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('uses defaults when config file does not exist', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // 5 user messages — below default threshold of 10 - const lines = []; - for (let i = 0; i < 5; i++) lines.push(`{"type":"user","content":"msg${i}"}`); - fs.writeFileSync(transcriptPath, lines.join('\n')); + if ( + await asyncTest('uses defaults when config file does not exist', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // 5 user messages — below default threshold of 10 + const lines = []; + for (let i = 0; i < 5; i++) lines.push(`{"type":"user","content":"msg${i}"}`); + fs.writeFileSync(transcriptPath, lines.join('\n')); - // Point config to a non-existent file - const configPath = path.join(testDir, 'nonexistent', 'config.json'); - const wrapperScript = createEvalWrapper(testDir, configPath); + // Point config to a non-existent file + const configPath = path.join(testDir, 'nonexistent', 'config.json'); + const wrapperScript = createEvalWrapper(testDir, configPath); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(wrapperScript, stdinJson, { - HOME: testDir, USERPROFILE: testDir - }); - assert.strictEqual(result.code, 0); - // With no config file, default min_session_length=10 applies - // 5 messages should be "too short" - assert.ok( - result.stderr.includes('too short'), - 'Should use default threshold (10) when config file missing' - ); - // No error messages about missing config - assert.ok( - !result.stderr.includes('Failed 
to parse config'), - 'Should NOT log config parse error for missing file' - ); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(wrapperScript, stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); + assert.strictEqual(result.code, 0); + // With no config file, default min_session_length=10 applies + // 5 messages should be "too short" + assert.ok(result.stderr.includes('too short'), 'Should use default threshold (10) when config file missing'); + // No error messages about missing config + assert.ok(!result.stderr.includes('Failed to parse config'), 'Should NOT log config parse error for missing file'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // Round 41: pre-compact.js (multiple session files) console.log('\nRound 41: pre-compact.js (multiple session files):'); - if (await asyncTest('annotates only the newest session file when multiple exist', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-compact-multi-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('annotates only the newest session file when multiple exist', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-compact-multi-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - // Create two session files with different mtimes - const olderSession = path.join(sessionsDir, '2026-01-01-older-session.tmp'); - const newerSession = path.join(sessionsDir, '2026-02-11-newer-session.tmp'); - fs.writeFileSync(olderSession, '# Older Session\n'); - // Small delay to ensure different mtime - const now = Date.now(); - fs.utimesSync(olderSession, new Date(now - 60000), new Date(now - 60000)); - fs.writeFileSync(newerSession, '# Newer Session\n'); + // Create two session files 
with different mtimes + const olderSession = path.join(sessionsDir, '2026-01-01-older-session.tmp'); + const newerSession = path.join(sessionsDir, '2026-02-11-newer-session.tmp'); + fs.writeFileSync(olderSession, '# Older Session\n'); + // Small delay to ensure different mtime + const now = Date.now(); + fs.utimesSync(olderSession, new Date(now - 60000), new Date(now - 60000)); + fs.writeFileSync(newerSession, '# Newer Session\n'); - try { - const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); + try { + const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); - const newerContent = fs.readFileSync(newerSession, 'utf8'); - const olderContent = fs.readFileSync(olderSession, 'utf8'); + const newerContent = fs.readFileSync(newerSession, 'utf8'); + const olderContent = fs.readFileSync(olderSession, 'utf8'); - // findFiles sorts by mtime newest first, so sessions[0] is the newest - assert.ok( - newerContent.includes('Compaction occurred'), - 'Should annotate the newest session file' - ); - assert.strictEqual( - olderContent, - '# Older Session\n', - 'Should NOT annotate older session files' - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + // findFiles sorts by mtime newest first, so sessions[0] is the newest + assert.ok(newerContent.includes('Compaction occurred'), 'Should annotate the newest session file'); + assert.strictEqual(olderContent, '# Older Session\n', 'Should NOT annotate older session files'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // Round 40: session-end.js (newline collapse in markdown list items) console.log('\nRound 40: session-end.js (newline collapse):'); - if (await asyncTest('collapses newlines in user 
messages to single-line markdown items', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); + if ( + await asyncTest('collapses newlines in user messages to single-line markdown items', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // User message containing newlines that would break markdown list - const lines = [ - JSON.stringify({ type: 'user', content: 'Please help me with:\n1. Task one\n2. Task two\n3. Task three' }), - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + // User message containing newlines that would break markdown list + const lines = [JSON.stringify({ type: 'user', content: 'Please help me with:\n1. Task one\n2. Task two\n3. Task three' })]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0); - // Find the session file and verify newlines were collapsed - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Each task should be a single-line markdown list item - const taskLines = content.split('\n').filter(l => l.startsWith('- ')); - for (const line of taskLines) { - assert.ok( - !line.includes('\n'), - 'Task list items should be single-line' - ); + // Find the session file and verify newlines were collapsed + const claudeDir = path.join(testDir, 
'.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Each task should be a single-line markdown list item + const taskLines = content.split('\n').filter(l => l.startsWith('- ')); + for (const line of taskLines) { + assert.ok(!line.includes('\n'), 'Task list items should be single-line'); + } + // Newlines should be replaced with spaces + assert.ok(content.includes('Please help me with: 1. Task one 2. Task two'), `Newlines should be collapsed to spaces, got: ${content.substring(0, 500)}`); } - // Newlines should be replaced with spaces - assert.ok( - content.includes('Please help me with: 1. Task one 2. Task two'), - `Newlines should be collapsed to spaces, got: ${content.substring(0, 500)}` - ); } - } - cleanupTestDir(testDir); - })) passed++; else failed++; + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // ── Round 44: session-start.js empty session file ── console.log('\nRound 44: session-start.js (empty session file):'); - if (await asyncTest('does not inject empty session file content into context', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-start-empty-file-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('does not inject empty session file content into context', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-empty-file-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - // Create a 0-byte session file (simulates truncated/corrupted write) - const today = new 
Date().toISOString().slice(0, 10); - const sessionFile = path.join(sessionsDir, `${today}-empty0000-session.tmp`); - fs.writeFileSync(sessionFile, ''); + // Create a 0-byte session file (simulates truncated/corrupted write) + const today = new Date().toISOString().slice(0, 10); + const sessionFile = path.join(sessionsDir, `${today}-empty0000-session.tmp`); + fs.writeFileSync(sessionFile, ''); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0 with empty session file'); - // readFile returns '' (falsy) → the if (content && ...) guard skips injection - assert.ok( - !result.stdout.includes('Previous session summary'), - 'Should NOT inject empty string into context' - ); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0 with empty session file'); + // readFile returns '' (falsy) → the if (content && ...) 
guard skips injection + assert.ok(!result.stdout.includes('Previous session summary'), 'Should NOT inject empty string into context'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 49: typecheck extension matching and session-end conditional sections ── console.log('\nRound 49: post-edit-typecheck.js (extension edge cases):'); - if (await asyncTest('.d.ts files match the TS regex and trigger typecheck path', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'types.d.ts'); - fs.writeFileSync(testFile, 'declare const x: number;'); + if ( + await asyncTest('.d.ts files match the TS regex and trigger typecheck path', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'types.d.ts'); + fs.writeFileSync(testFile, 'declare const x: number;'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for .d.ts file'); - assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); - cleanupTestDir(testDir); - })) passed++; else failed++; + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for .d.ts file'); + assert.ok(result.stdout.includes('tool_input'), 'Should pass through stdin data'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; - if (await asyncTest('.mts extension does not trigger typecheck', async () => { - const stdinJson = JSON.stringify({ tool_input: { file_path: '/project/utils.mts' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for .mts 
file'); - assert.strictEqual(result.stdout, stdinJson, 'Should pass through .mts unchanged'); - })) passed++; else failed++; + if ( + await asyncTest('.mts extension does not trigger typecheck', async () => { + const stdinJson = JSON.stringify({ tool_input: { file_path: '/project/utils.mts' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + assert.strictEqual(result.code, 0, 'Should exit 0 for .mts file'); + assert.strictEqual(result.stdout, stdinJson, 'Should pass through .mts unchanged'); + }) + ) + passed++; + else failed++; console.log('\nRound 49: session-end.js (conditional summary sections):'); - if (await asyncTest('summary omits Files Modified and Tools Used when none found', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-notools-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('summary omits Files Modified and Tools Used when none found', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-notools-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // Only user messages — no tool_use entries at all - const lines = [ - '{"type":"user","content":"How does authentication work?"}', - '{"type":"assistant","message":{"content":[{"type":"text","text":"It uses JWT"}]}}' - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // Only user messages — no tool_use entries at all + const lines = ['{"type":"user","content":"How does authentication work?"}', '{"type":"assistant","message":{"content":[{"type":"text","text":"It uses 
JWT"}]}}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); - const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); - assert.ok(files.length > 0, 'Should create session file'); - const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); - assert.ok(content.includes('authentication'), 'Should include user message'); - assert.ok(!content.includes('### Files Modified'), 'Should omit Files Modified when empty'); - assert.ok(!content.includes('### Tools Used'), 'Should omit Tools Used when empty'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - cleanupTestDir(testDir); - } - })) passed++; else failed++; + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); + assert.ok(files.length > 0, 'Should create session file'); + const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); + assert.ok(content.includes('authentication'), 'Should include user message'); + assert.ok(!content.includes('### Files Modified'), 'Should omit Files Modified when empty'); + assert.ok(!content.includes('### Tools Used'), 'Should omit Tools Used when empty'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + cleanupTestDir(testDir); + } + }) + ) + passed++; + else failed++; // ── Round 50: alias reporting, parallel compaction, graceful degradation ── console.log('\nRound 50: session-start.js (alias reporting):'); - if (await asyncTest('reports available session aliases on startup', async () => { - const isoHome = 
path.join(os.tmpdir(), `ecc-start-alias-${Date.now()}`); - fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('reports available session aliases on startup', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-alias-${Date.now()}`); + fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - // Pre-populate the aliases file - fs.writeFileSync(path.join(isoHome, '.claude', 'session-aliases.json'), JSON.stringify({ - version: '1.0', - aliases: { - 'my-feature': { sessionPath: '/sessions/feat', createdAt: new Date().toISOString(), updatedAt: new Date().toISOString(), title: null }, - 'bug-fix': { sessionPath: '/sessions/fix', createdAt: new Date().toISOString(), updatedAt: new Date().toISOString(), title: null } - }, - metadata: { totalCount: 2, lastUpdated: new Date().toISOString() } - })); - - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('alias'), 'Should mention aliases in stderr'); - assert.ok( - result.stderr.includes('my-feature') || result.stderr.includes('bug-fix'), - 'Should list at least one alias name' + // Pre-populate the aliases file + fs.writeFileSync( + path.join(isoHome, '.claude', 'session-aliases.json'), + JSON.stringify({ + version: '1.0', + aliases: { + 'my-feature': { sessionPath: '/sessions/feat', createdAt: new Date().toISOString(), updatedAt: new Date().toISOString(), title: null }, + 'bug-fix': { sessionPath: '/sessions/fix', createdAt: new Date().toISOString(), updatedAt: new Date().toISOString(), title: null } + }, + metadata: { totalCount: 2, lastUpdated: new Date().toISOString() } + }) ); - } finally { - 
fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('alias'), 'Should mention aliases in stderr'); + assert.ok(result.stderr.includes('my-feature') || result.stderr.includes('bug-fix'), 'Should list at least one alias name'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; console.log('\nRound 50: pre-compact.js (parallel execution):'); - if (await asyncTest('parallel compaction runs all append to log without loss', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-compact-par-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('parallel compaction runs all append to log without loss', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-compact-par-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - try { - const promises = Array(3).fill(null).map(() => - runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }) - ); - const results = await Promise.all(promises); - results.forEach((r, i) => assert.strictEqual(r.code, 0, `Run ${i} should exit 0`)); + try { + const promises = Array(3) + .fill(null) + .map(() => + runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }) + ); + const results = await Promise.all(promises); + results.forEach((r, i) => assert.strictEqual(r.code, 0, `Run ${i} should exit 0`)); - const logFile = path.join(sessionsDir, 'compaction-log.txt'); - assert.ok(fs.existsSync(logFile), 'Compaction log should exist'); - const content = 
fs.readFileSync(logFile, 'utf8'); - const entries = (content.match(/Context compaction triggered/g) || []).length; - assert.strictEqual(entries, 3, `Should have 3 log entries, got ${entries}`); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + const logFile = path.join(sessionsDir, 'compaction-log.txt'); + assert.ok(fs.existsSync(logFile), 'Compaction log should exist'); + const content = fs.readFileSync(logFile, 'utf8'); + const entries = (content.match(/Context compaction triggered/g) || []).length; + assert.strictEqual(entries, 3, `Should have 3 log entries, got ${entries}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; console.log('\nRound 50: session-start.js (graceful degradation):'); - if (await asyncTest('exits 0 when sessions path is a file (not a directory)', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-start-blocked-${Date.now()}`); - fs.mkdirSync(path.join(isoHome, '.claude'), { recursive: true }); - // Block sessions dir creation by placing a file at that path - fs.writeFileSync(path.join(isoHome, '.claude', 'sessions'), 'blocked'); + if ( + await asyncTest('exits 0 when sessions path is a file (not a directory)', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-blocked-${Date.now()}`); + fs.mkdirSync(path.join(isoHome, '.claude'), { recursive: true }); + // Block sessions dir creation by placing a file at that path + fs.writeFileSync(path.join(isoHome, '.claude', 'sessions'), 'blocked'); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0 even when sessions dir is blocked'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { 
+ HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0 even when sessions dir is blocked'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 53: console-warn max matches and format non-existent file ── console.log('\nRound 53: post-edit-console-warn.js (max matches truncation):'); - if (await asyncTest('reports maximum 5 console.log matches per file', async () => { - const testDir = createTestDir(); - const testFile = path.join(testDir, 'many-logs.js'); - const lines = Array(7).fill(null).map((_, i) => - `console.log("debug line ${i + 1}");` - ); - fs.writeFileSync(testFile, lines.join('\n')); + if ( + await asyncTest('reports maximum 5 console.log matches per file', async () => { + const testDir = createTestDir(); + const testFile = path.join(testDir, 'many-logs.js'); + const lines = Array(7) + .fill(null) + .map((_, i) => `console.log("debug line ${i + 1}");`); + fs.writeFileSync(testFile, lines.join('\n')); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0'); - // Count line number reports in stderr (format: "N: console.log(...)") - const lineReports = (result.stderr.match(/^\d+:/gm) || []).length; - assert.strictEqual(lineReports, 5, `Should report max 5 matches, got ${lineReports}`); - cleanupTestDir(testDir); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0'); + // Count line number reports in stderr (format: "N: console.log(...)") + const lineReports = (result.stderr.match(/^\d+:/gm) || []).length; + assert.strictEqual(lineReports, 5, `Should report max 5 matches, 
got ${lineReports}`); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; console.log('\nRound 53: post-edit-format.js (non-existent file):'); - if (await asyncTest('passes through data for non-existent .tsx file path', async () => { - const stdinJson = JSON.stringify({ - tool_input: { file_path: '/nonexistent/path/file.tsx' } - }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + if ( + await asyncTest('passes through data for non-existent .tsx file path', async () => { + const stdinJson = JSON.stringify({ + tool_input: { file_path: '/nonexistent/path/file.tsx' } + }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for non-existent file'); - assert.strictEqual(result.stdout, stdinJson, 'Should pass through stdin data unchanged'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 for non-existent file'); + assert.strictEqual(result.stdout, stdinJson, 'Should pass through stdin data unchanged'); + }) + ) + passed++; + else failed++; // ── Round 55: maxAge boundary, multi-session injection, stdin overflow ── console.log('\nRound 55: session-start.js (maxAge 7-day boundary):'); - if (await asyncTest('excludes session files older than 7 days', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-start-7day-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('excludes session files older than 7 days', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-7day-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - 
// Create session file 6.9 days old (should be INCLUDED by maxAge:7) - const recentFile = path.join(sessionsDir, '2026-02-06-recent69-session.tmp'); - fs.writeFileSync(recentFile, '# Recent Session\n\nRECENT CONTENT HERE'); - const sixPointNineDaysAgo = new Date(Date.now() - 6.9 * 24 * 60 * 60 * 1000); - fs.utimesSync(recentFile, sixPointNineDaysAgo, sixPointNineDaysAgo); + // Create session file 6.9 days old (should be INCLUDED by maxAge:7) + const recentFile = path.join(sessionsDir, '2026-02-06-recent69-session.tmp'); + fs.writeFileSync(recentFile, '# Recent Session\n\nRECENT CONTENT HERE'); + const sixPointNineDaysAgo = new Date(Date.now() - 6.9 * 24 * 60 * 60 * 1000); + fs.utimesSync(recentFile, sixPointNineDaysAgo, sixPointNineDaysAgo); - // Create session file 8 days old (should be EXCLUDED by maxAge:7) - const oldFile = path.join(sessionsDir, '2026-02-05-old8day-session.tmp'); - fs.writeFileSync(oldFile, '# Old Session\n\nOLD CONTENT SHOULD NOT APPEAR'); - const eightDaysAgo = new Date(Date.now() - 8 * 24 * 60 * 60 * 1000); - fs.utimesSync(oldFile, eightDaysAgo, eightDaysAgo); + // Create session file 8 days old (should be EXCLUDED by maxAge:7) + const oldFile = path.join(sessionsDir, '2026-02-05-old8day-session.tmp'); + fs.writeFileSync(oldFile, '# Old Session\n\nOLD CONTENT SHOULD NOT APPEAR'); + const eightDaysAgo = new Date(Date.now() - 8 * 24 * 60 * 60 * 1000); + fs.utimesSync(oldFile, eightDaysAgo, eightDaysAgo); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('1 recent session'), - `Should find 1 recent session (6.9-day included, 8-day excluded), stderr: ${result.stderr}`); - assert.ok(result.stdout.includes('RECENT CONTENT HERE'), - 'Should inject the 6.9-day-old session content'); - assert.ok(!result.stdout.includes('OLD CONTENT SHOULD NOT APPEAR'), - 'Should NOT inject the 8-day-old session 
content'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('1 recent session'), `Should find 1 recent session (6.9-day included, 8-day excluded), stderr: ${result.stderr}`); + assert.ok(result.stdout.includes('RECENT CONTENT HERE'), 'Should inject the 6.9-day-old session content'); + assert.ok(!result.stdout.includes('OLD CONTENT SHOULD NOT APPEAR'), 'Should NOT inject the 8-day-old session content'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; console.log('\nRound 55: session-start.js (newest session selection):'); - if (await asyncTest('injects newest session when multiple recent sessions exist', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-start-multi-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + if ( + await asyncTest('injects newest session when multiple recent sessions exist', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-start-multi-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - const now = Date.now(); + const now = Date.now(); - // Create older session (2 days ago) - const olderSession = path.join(sessionsDir, '2026-02-11-olderabc-session.tmp'); - fs.writeFileSync(olderSession, '# Older Session\n\nOLDER_CONTEXT_MARKER'); - fs.utimesSync(olderSession, new Date(now - 2 * 86400000), new Date(now - 2 * 86400000)); + // Create older session (2 days ago) + 
const olderSession = path.join(sessionsDir, '2026-02-11-olderabc-session.tmp'); + fs.writeFileSync(olderSession, '# Older Session\n\nOLDER_CONTEXT_MARKER'); + fs.utimesSync(olderSession, new Date(now - 2 * 86400000), new Date(now - 2 * 86400000)); - // Create newer session (1 day ago) - const newerSession = path.join(sessionsDir, '2026-02-12-newerdef-session.tmp'); - fs.writeFileSync(newerSession, '# Newer Session\n\nNEWER_CONTEXT_MARKER'); - fs.utimesSync(newerSession, new Date(now - 1 * 86400000), new Date(now - 1 * 86400000)); + // Create newer session (1 day ago) + const newerSession = path.join(sessionsDir, '2026-02-12-newerdef-session.tmp'); + fs.writeFileSync(newerSession, '# Newer Session\n\nNEWER_CONTEXT_MARKER'); + fs.utimesSync(newerSession, new Date(now - 1 * 86400000), new Date(now - 1 * 86400000)); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); - assert.ok(result.stderr.includes('2 recent session'), - `Should find 2 recent sessions, stderr: ${result.stderr}`); - // Should inject the NEWER session, not the older one - assert.ok(result.stdout.includes('NEWER_CONTEXT_MARKER'), - 'Should inject the newest session content'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); + assert.ok(result.stderr.includes('2 recent session'), `Should find 2 recent sessions, stderr: ${result.stderr}`); + // Should inject the NEWER session, not the older one + assert.ok(result.stdout.includes('NEWER_CONTEXT_MARKER'), 'Should inject the newest session content'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; console.log('\nRound 55: session-end.js (stdin 
overflow):'); - if (await asyncTest('handles stdin exceeding MAX_STDIN (1MB) gracefully', async () => { - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // Create a minimal valid transcript so env var fallback works - fs.writeFileSync(transcriptPath, JSON.stringify({ type: 'user', content: 'Overflow test' }) + '\n'); + if ( + await asyncTest('handles stdin exceeding MAX_STDIN (1MB) gracefully', async () => { + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // Create a minimal valid transcript so env var fallback works + fs.writeFileSync(transcriptPath, JSON.stringify({ type: 'user', content: 'Overflow test' }) + '\n'); - // Create stdin > 1MB: truncated JSON will be invalid → falls back to env var - const oversizedPayload = '{"transcript_path":"' + 'x'.repeat(1048600) + '"}'; + // Create stdin > 1MB: truncated JSON will be invalid → falls back to env var + const oversizedPayload = '{"transcript_path":"' + 'x'.repeat(1048600) + '"}'; - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), oversizedPayload, { - CLAUDE_TRANSCRIPT_PATH: transcriptPath - }); - assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); - // Truncated JSON → JSON.parse throws → falls back to env var → creates session file - assert.ok( - result.stderr.includes('Created session file') || result.stderr.includes('Updated session file'), - `Should create/update session file via env var fallback, stderr: ${result.stderr}` - ); - } finally { - cleanupTestDir(testDir); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), oversizedPayload, { + CLAUDE_TRANSCRIPT_PATH: transcriptPath + }); + assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); + // Truncated JSON → JSON.parse throws → falls back to env var → creates session file + 
assert.ok(result.stderr.includes('Created session file') || result.stderr.includes('Updated session file'), `Should create/update session file via env var fallback, stderr: ${result.stderr}`); + } finally { + cleanupTestDir(testDir); + } + }) + ) + passed++; + else failed++; // ── Round 56: typecheck tsconfig walk-up, suggest-compact fallback path ── console.log('\nRound 56: post-edit-typecheck.js (tsconfig in parent directory):'); - if (await asyncTest('walks up directory tree to find tsconfig.json in grandparent', async () => { - const testDir = createTestDir(); - // Place tsconfig at the TOP level, file is nested 2 levels deep - fs.writeFileSync(path.join(testDir, 'tsconfig.json'), JSON.stringify({ - compilerOptions: { strict: false, noEmit: true } - })); - const deepDir = path.join(testDir, 'src', 'components'); - fs.mkdirSync(deepDir, { recursive: true }); - const testFile = path.join(deepDir, 'widget.ts'); - fs.writeFileSync(testFile, 'export const value: number = 42;\n'); + if ( + await asyncTest('walks up directory tree to find tsconfig.json in grandparent', async () => { + const testDir = createTestDir(); + // Place tsconfig at the TOP level, file is nested 2 levels deep + fs.writeFileSync( + path.join(testDir, 'tsconfig.json'), + JSON.stringify({ + compilerOptions: { strict: false, noEmit: true } + }) + ); + const deepDir = path.join(testDir, 'src', 'components'); + fs.mkdirSync(deepDir, { recursive: true }); + const testFile = path.join(deepDir, 'widget.ts'); + fs.writeFileSync(testFile, 'export const value: number = 42;\n'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 after walking up to find tsconfig'); - // 
Core assertion: stdin must pass through regardless of whether tsc ran - const parsed = JSON.parse(result.stdout); - assert.strictEqual(parsed.tool_input.file_path, testFile, - 'Should pass through original stdin data with file_path intact'); - cleanupTestDir(testDir); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 after walking up to find tsconfig'); + // Core assertion: stdin must pass through regardless of whether tsc ran + const parsed = JSON.parse(result.stdout); + assert.strictEqual(parsed.tool_input.file_path, testFile, 'Should pass through original stdin data with file_path intact'); + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; console.log('\nRound 56: suggest-compact.js (counter file as directory — fallback path):'); - if (await asyncTest('exits 0 when counter file path is occupied by a directory', async () => { - const sessionId = `dirblock-${Date.now()}`; - const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); - // Create a DIRECTORY at the counter file path — openSync('a+') will fail with EISDIR - fs.mkdirSync(counterFile); + if ( + await asyncTest('exits 0 when counter file path is occupied by a directory', async () => { + const sessionId = `dirblock-${Date.now()}`; + const counterFile = path.join(os.tmpdir(), `claude-tool-count-${sessionId}`); + // Create a DIRECTORY at the counter file path — openSync('a+') will fail with EISDIR + fs.mkdirSync(counterFile); - try { - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - CLAUDE_SESSION_ID: sessionId - }); - assert.strictEqual(result.code, 0, - 'Should exit 0 even when counter file path is a directory (graceful fallback)'); - } finally { - // Cleanup: remove the blocking directory - try { fs.rmdirSync(counterFile); } catch { /* best-effort */ } - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + CLAUDE_SESSION_ID: 
sessionId + }); + assert.strictEqual(result.code, 0, 'Should exit 0 even when counter file path is a directory (graceful fallback)'); + } finally { + // Cleanup: remove the blocking directory + try { + fs.rmdirSync(counterFile); + } catch { + /* best-effort */ + } + } + }) + ) + passed++; + else failed++; // ── Round 59: session-start unreadable file, console-log stdin overflow, pre-compact write error ── console.log('\nRound 59: session-start.js (unreadable session file — readFile returns null):'); - if (await asyncTest('does not inject content when session file is unreadable', async () => { - // Skip on Windows or when running as root (permissions won't work) - if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) { - console.log(' (skipped — not supported on this platform)'); - return; - } - const isoHome = path.join(os.tmpdir(), `ecc-start-unreadable-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('does not inject content when session file is unreadable', async () => { + // Skip on Windows or when running as root (permissions won't work) + if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) { + console.log(' (skipped — not supported on this platform)'); + return; + } + const isoHome = path.join(os.tmpdir(), `ecc-start-unreadable-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - // Create a session file with real content, then make it unreadable - const sessionFile = path.join(sessionsDir, `${Date.now()}-session.tmp`); - fs.writeFileSync(sessionFile, '# Sensitive session content that should NOT appear'); - fs.chmodSync(sessionFile, 0o000); + // Create a session file with real content, then make it unreadable + const sessionFile = path.join(sessionsDir, `${Date.now()}-session.tmp`); + fs.writeFileSync(sessionFile, '# 
Sensitive session content that should NOT appear'); + fs.chmodSync(sessionFile, 0o000); - try { - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0 even with unreadable session file'); - // readFile returns null for unreadable files → content is null → no injection - assert.ok(!result.stdout.includes('Sensitive session content'), - 'Should NOT inject content from unreadable file'); - } finally { - try { fs.chmodSync(sessionFile, 0o644); } catch { /* best-effort */ } - try { fs.rmSync(isoHome, { recursive: true, force: true }); } catch { /* best-effort */ } - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0 even with unreadable session file'); + // readFile returns null for unreadable files → content is null → no injection + assert.ok(!result.stdout.includes('Sensitive session content'), 'Should NOT inject content from unreadable file'); + } finally { + try { + fs.chmodSync(sessionFile, 0o644); + } catch { + /* best-effort */ + } + try { + fs.rmSync(isoHome, { recursive: true, force: true }); + } catch { + /* best-effort */ + } + } + }) + ) + passed++; + else failed++; console.log('\nRound 59: check-console-log.js (stdin exceeding 1MB — truncation):'); - if (await asyncTest('truncates stdin at 1MB limit and still passes through data', async () => { - // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit - const payload = 'x'.repeat(1024 * 1024 + 200000); - const result = await runScript(path.join(scriptsDir, 'check-console-log.js'), payload); + if ( + await asyncTest('truncates stdin at 1MB limit and still passes through data', async () => { + // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit + const payload = 'x'.repeat(1024 * 1024 + 200000); + const result = await 
runScript(path.join(scriptsDir, 'check-console-log.js'), payload); - assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); - // Output should be truncated — significantly less than input - assert.ok(result.stdout.length < payload.length, - `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); - // Output should be approximately 1MB (last accepted chunk may push slightly over) - assert.ok(result.stdout.length <= 1024 * 1024 + 65536, - `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); - assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); + // Output should be truncated — significantly less than input + assert.ok(result.stdout.length < payload.length, `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); + // Output should be approximately 1MB (last accepted chunk may push slightly over) + assert.ok(result.stdout.length <= 1024 * 1024 + 65536, `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); + assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); + }) + ) + passed++; + else failed++; console.log('\nRound 59: pre-compact.js (read-only session file — appendFile error):'); - if (await asyncTest('exits 0 when session file is read-only (appendFile fails)', async () => { - if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) { - console.log(' (skipped — not supported on this platform)'); - return; - } - const isoHome = path.join(os.tmpdir(), `ecc-compact-ro-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('exits 0 when session file is read-only (appendFile fails)', async () => { + if (process.platform === 'win32' || (process.getuid && 
process.getuid() === 0)) { + console.log(' (skipped — not supported on this platform)'); + return; + } + const isoHome = path.join(os.tmpdir(), `ecc-compact-ro-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - // Create a session file then make it read-only - const sessionFile = path.join(sessionsDir, `${Date.now()}-session.tmp`); - fs.writeFileSync(sessionFile, '# Active session\n'); - fs.chmodSync(sessionFile, 0o444); + // Create a session file then make it read-only + const sessionFile = path.join(sessionsDir, `${Date.now()}-session.tmp`); + fs.writeFileSync(sessionFile, '# Active session\n'); + fs.chmodSync(sessionFile, 0o444); - try { - const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: isoHome, USERPROFILE: isoHome - }); - // Should exit 0 — hooks must not block the user (catch at lines 45-47) - assert.strictEqual(result.code, 0, 'Should exit 0 even when append fails'); - // Session file should remain unchanged (write was blocked) - const content = fs.readFileSync(sessionFile, 'utf8'); - assert.strictEqual(content, '# Active session\n', - 'Read-only session file should remain unchanged'); - } finally { - try { fs.chmodSync(sessionFile, 0o644); } catch { /* best-effort */ } - try { fs.rmSync(isoHome, { recursive: true, force: true }); } catch { /* best-effort */ } - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); + // Should exit 0 — hooks must not block the user (catch at lines 45-47) + assert.strictEqual(result.code, 0, 'Should exit 0 even when append fails'); + // Session file should remain unchanged (write was blocked) + const content = fs.readFileSync(sessionFile, 'utf8'); + assert.strictEqual(content, '# Active session\n', 'Read-only session file should remain unchanged'); + } finally { + try { + 
fs.chmodSync(sessionFile, 0o644); + } catch { + /* best-effort */ + } + try { + fs.rmSync(isoHome, { recursive: true, force: true }); + } catch { + /* best-effort */ + } + } + }) + ) + passed++; + else failed++; // ── Round 60: replaceInFile failure, console-warn stdin overflow, format missing tool_input ── console.log('\nRound 60: session-end.js (replaceInFile returns false — timestamp update warning):'); - if (await asyncTest('logs warning when existing session file lacks Last Updated field', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-end-nots-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('logs warning when existing session file lacks Last Updated field', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-end-nots-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - // Create transcript with a user message so a summary is produced - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - fs.writeFileSync(transcriptPath, '{"type":"user","content":"test message"}\n'); + // Create transcript with a user message so a summary is produced + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + fs.writeFileSync(transcriptPath, '{"type":"user","content":"test message"}\n'); - // Pre-create session file WITHOUT the **Last Updated:** line - // Use today's date and a short ID matching getSessionIdShort() pattern - const today = new Date().toISOString().split('T')[0]; - const sessionFile = path.join(sessionsDir, `${today}-session-session.tmp`); - fs.writeFileSync(sessionFile, '# Session file without timestamp marker\nSome existing content\n'); + // Pre-create session file WITHOUT the **Last Updated:** line + // Use today's date and a short ID matching getSessionIdShort() 
pattern + const today = new Date().toISOString().split('T')[0]; + const sessionFile = path.join(sessionsDir, `${today}-session-session.tmp`); + fs.writeFileSync(sessionFile, '# Session file without timestamp marker\nSome existing content\n'); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); - assert.strictEqual(result.code, 0, 'Should exit 0 even when replaceInFile fails'); - // replaceInFile returns false → line 166 logs warning about failed timestamp update - assert.ok(result.stderr.includes('Failed to update') || result.stderr.includes('[SessionEnd]'), - 'Should log warning when timestamp pattern not found in session file'); + assert.strictEqual(result.code, 0, 'Should exit 0 even when replaceInFile fails'); + // replaceInFile returns false → line 166 logs warning about failed timestamp update + assert.ok(result.stderr.includes('Failed to update') || result.stderr.includes('[SessionEnd]'), 'Should log warning when timestamp pattern not found in session file'); - cleanupTestDir(testDir); - try { fs.rmSync(isoHome, { recursive: true, force: true }); } catch { /* best-effort */ } - })) passed++; else failed++; + cleanupTestDir(testDir); + try { + fs.rmSync(isoHome, { recursive: true, force: true }); + } catch { + /* best-effort */ + } + }) + ) + passed++; + else failed++; console.log('\nRound 60: post-edit-console-warn.js (stdin exceeding 1MB — truncation):'); - if (await asyncTest('truncates stdin at 1MB limit and still passes through data', async () => { - // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit - const payload = 'x'.repeat(1024 * 1024 + 200000); - const result = await 
runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), payload); + if ( + await asyncTest('truncates stdin at 1MB limit and still passes through data', async () => { + // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit + const payload = 'x'.repeat(1024 * 1024 + 200000); + const result = await runScript(path.join(scriptsDir, 'post-edit-console-warn.js'), payload); - assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); - // Data should be truncated — stdout significantly less than input - assert.ok(result.stdout.length < payload.length, - `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); - // Should be approximately 1MB (last accepted chunk may push slightly over) - assert.ok(result.stdout.length <= 1024 * 1024 + 65536, - `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); - assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); + // Data should be truncated — stdout significantly less than input + assert.ok(result.stdout.length < payload.length, `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); + // Should be approximately 1MB (last accepted chunk may push slightly over) + assert.ok(result.stdout.length <= 1024 * 1024 + 65536, `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); + assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); + }) + ) + passed++; + else failed++; console.log('\nRound 60: post-edit-format.js (valid JSON without tool_input key):'); - if (await asyncTest('skips formatting when JSON has no tool_input field', async () => { - const stdinJson = JSON.stringify({ result: 'ok', output: 'some data' }); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); + if ( + await asyncTest('skips formatting when JSON 
has no tool_input field', async () => { + const stdinJson = JSON.stringify({ result: 'ok', output: 'some data' }); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for JSON without tool_input'); - // input.tool_input?.file_path is undefined → skips formatting → passes through - assert.strictEqual(result.stdout, stdinJson, - 'Should pass through data unchanged when tool_input is absent'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 for JSON without tool_input'); + // input.tool_input?.file_path is undefined → skips formatting → passes through + assert.strictEqual(result.stdout, stdinJson, 'Should pass through data unchanged when tool_input is absent'); + }) + ) + passed++; + else failed++; // ── Round 64: post-edit-typecheck.js valid JSON without tool_input ── console.log('\nRound 64: post-edit-typecheck.js (valid JSON without tool_input):'); - if (await asyncTest('skips typecheck when JSON has no tool_input field', async () => { - const stdinJson = JSON.stringify({ result: 'ok', metadata: { action: 'test' } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + if ( + await asyncTest('skips typecheck when JSON has no tool_input field', async () => { + const stdinJson = JSON.stringify({ result: 'ok', metadata: { action: 'test' } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - assert.strictEqual(result.code, 0, 'Should exit 0 for JSON without tool_input'); - // input.tool_input?.file_path is undefined → skips TS check → passes through - assert.strictEqual(result.stdout, stdinJson, - 'Should pass through data unchanged when tool_input is absent'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 for JSON without tool_input'); + // input.tool_input?.file_path is undefined → skips TS check → passes through + 
assert.strictEqual(result.stdout, stdinJson, 'Should pass through data unchanged when tool_input is absent'); + }) + ) + passed++; + else failed++; // ── Round 66: session-end.js entry.role === 'user' fallback and nonexistent transcript ── console.log('\nRound 66: session-end.js (entry.role user fallback):'); - if (await asyncTest('extracts user messages from role-only format (no type field)', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-role-only-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('extracts user messages from role-only format (no type field)', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-role-only-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // Use entries with ONLY role field (no type:"user") to exercise the fallback - const lines = [ - '{"role":"user","content":"Deploy the production build"}', - '{"role":"assistant","content":"I will deploy now"}', - '{"role":"user","content":"Check the logs after deploy"}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // Use entries with ONLY role field (no type:"user") to exercise the fallback + const lines = ['{"role":"user","content":"Deploy the production build"}', '{"role":"assistant","content":"I will deploy now"}', '{"role":"user","content":"Check the logs after deploy"}']; + fs.writeFileSync(transcriptPath, lines.join('\n')); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: 
isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); - const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); - assert.ok(files.length > 0, 'Should create session file'); - const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); - // The role-only user messages should be extracted - assert.ok(content.includes('Deploy the production build') || content.includes('deploy'), - `Session file should include role-only user messages. Got: ${content.substring(0, 300)}`); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - cleanupTestDir(testDir); - } - })) passed++; else failed++; + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); + assert.ok(files.length > 0, 'Should create session file'); + const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); + // The role-only user messages should be extracted + assert.ok(content.includes('Deploy the production build') || content.includes('deploy'), `Session file should include role-only user messages. 
Got: ${content.substring(0, 300)}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + cleanupTestDir(testDir); + } + }) + ) + passed++; + else failed++; console.log('\nRound 66: session-end.js (nonexistent transcript path):'); - if (await asyncTest('logs "Transcript not found" for nonexistent transcript_path', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-notfound-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('logs "Transcript not found" for nonexistent transcript_path', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-notfound-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const stdinJson = JSON.stringify({ transcript_path: '/tmp/nonexistent-transcript-99999.jsonl' }); + const stdinJson = JSON.stringify({ transcript_path: '/tmp/nonexistent-transcript-99999.jsonl' }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0 for missing transcript'); - assert.ok( - result.stderr.includes('Transcript not found') || result.stderr.includes('not found'), - `Should log transcript not found. 
Got stderr: ${result.stderr.substring(0, 300)}` - ); - // Should still create a session file (with blank template, since summary is null) - const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); - assert.ok(files.length > 0, 'Should still create session file even without transcript'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0 for missing transcript'); + assert.ok(result.stderr.includes('Transcript not found') || result.stderr.includes('not found'), `Should log transcript not found. Got stderr: ${result.stderr.substring(0, 300)}`); + // Should still create a session file (with blank template, since summary is null) + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); + assert.ok(files.length > 0, 'Should still create session file even without transcript'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 70: session-end.js entry.name / entry.input fallback in direct tool_use entries ── console.log('\nRound 70: session-end.js (entry.name/entry.input fallback):'); - if (await asyncTest('extracts tool name and file path from entry.name/entry.input (not tool_name/tool_input)', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-r70-entryname-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - const transcriptPath = path.join(isoHome, 'transcript.jsonl'); + if ( + await asyncTest('extracts tool name and file path from entry.name/entry.input (not tool_name/tool_input)', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-r70-entryname-${Date.now()}`); + const sessionsDir = path.join(isoHome, 
'.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + const transcriptPath = path.join(isoHome, 'transcript.jsonl'); - // Use "name" and "input" fields instead of "tool_name" and "tool_input" - // This exercises the fallback at session-end.js lines 63 and 66: - // const toolName = entry.tool_name || entry.name || ''; - // const filePath = entry.tool_input?.file_path || entry.input?.file_path || ''; - const lines = [ - '{"type":"user","content":"Use the alt format fields"}', - '{"type":"tool_use","name":"Edit","input":{"file_path":"/src/alt-format.ts"}}', - '{"type":"tool_use","name":"Read","input":{"file_path":"/src/other.ts"}}', - '{"type":"tool_use","name":"Write","input":{"file_path":"/src/written.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + // Use "name" and "input" fields instead of "tool_name" and "tool_input" + // This exercises the fallback at session-end.js lines 63 and 66: + // const toolName = entry.tool_name || entry.name || ''; + // const filePath = entry.tool_input?.file_path || entry.input?.file_path || ''; + const lines = [ + '{"type":"user","content":"Use the alt format fields"}', + '{"type":"tool_use","name":"Edit","input":{"file_path":"/src/alt-format.ts"}}', + '{"type":"tool_use","name":"Read","input":{"file_path":"/src/other.ts"}}', + '{"type":"tool_use","name":"Write","input":{"file_path":"/src/written.ts"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0'); - 
const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp')); - assert.ok(files.length > 0, 'Should create session file'); - const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); - // Tools extracted via entry.name fallback - assert.ok(content.includes('Edit'), 'Should list Edit via entry.name fallback'); - assert.ok(content.includes('Read'), 'Should list Read via entry.name fallback'); - // Files modified via entry.input fallback (Edit and Write, not Read) - assert.ok(content.includes('/src/alt-format.ts'), 'Should list edited file via entry.input fallback'); - assert.ok(content.includes('/src/written.ts'), 'Should list written file via entry.input fallback'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp')); + assert.ok(files.length > 0, 'Should create session file'); + const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); + // Tools extracted via entry.name fallback + assert.ok(content.includes('Edit'), 'Should list Edit via entry.name fallback'); + assert.ok(content.includes('Read'), 'Should list Read via entry.name fallback'); + // Files modified via entry.input fallback (Edit and Write, not Read) + assert.ok(content.includes('/src/alt-format.ts'), 'Should list edited file via entry.input fallback'); + assert.ok(content.includes('/src/written.ts'), 'Should list written file via entry.input fallback'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 71: session-start.js default source shows getSelectionPrompt ── console.log('\nRound 71: session-start.js (default source — selection prompt):'); - if (await asyncTest('shows selection prompt when no package manager preference found (default source)', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-r71-ss-default-${Date.now()}`); - const 
isoProject = path.join(isoHome, 'project'); - fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); - fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); - fs.mkdirSync(isoProject, { recursive: true }); - // No package.json, no lock files, no package-manager.json — forces default source + if ( + await asyncTest('shows selection prompt when no package manager preference found (default source)', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-r71-ss-default-${Date.now()}`); + const isoProject = path.join(isoHome, 'project'); + fs.mkdirSync(path.join(isoHome, '.claude', 'sessions'), { recursive: true }); + fs.mkdirSync(path.join(isoHome, '.claude', 'skills', 'learned'), { recursive: true }); + fs.mkdirSync(isoProject, { recursive: true }); + // No package.json, no lock files, no package-manager.json — forces default source - try { - const result = await new Promise((resolve, reject) => { - const env = { ...process.env, HOME: isoHome, USERPROFILE: isoHome }; - delete env.CLAUDE_PACKAGE_MANAGER; // Remove any env-level PM override - const proc = spawn('node', [path.join(scriptsDir, 'session-start.js')], { - env, - cwd: isoProject, // CWD with no package.json or lock files - stdio: ['pipe', 'pipe', 'pipe'] + try { + const result = await new Promise((resolve, reject) => { + const env = { ...process.env, HOME: isoHome, USERPROFILE: isoHome }; + delete env.CLAUDE_PACKAGE_MANAGER; // Remove any env-level PM override + const proc = spawn('node', [path.join(scriptsDir, 'session-start.js')], { + env, + cwd: isoProject, // CWD with no package.json or lock files + stdio: ['pipe', 'pipe', 'pipe'] + }); + let stdout = ''; + let stderr = ''; + proc.stdout.on('data', data => (stdout += data)); + proc.stderr.on('data', data => (stderr += data)); + proc.stdin.end(); + proc.on('close', code => resolve({ code, stdout, stderr })); + proc.on('error', reject); }); - let stdout = ''; - let stderr = ''; - 
proc.stdout.on('data', data => stdout += data); - proc.stderr.on('data', data => stderr += data); - proc.stdin.end(); - proc.on('close', code => resolve({ code, stdout, stderr })); - proc.on('error', reject); - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); - assert.ok(result.stderr.includes('No package manager preference'), - `Should show selection prompt when source is default. Got stderr: ${result.stderr.slice(0, 500)}`); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0'); + assert.ok(result.stderr.includes('No package manager preference'), `Should show selection prompt when source is default. Got stderr: ${result.stderr.slice(0, 500)}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 74: session-start.js main().catch handler ── console.log('\nRound 74: session-start.js (main catch — unrecoverable error):'); - if (await asyncTest('session-start exits 0 with error message when HOME is non-directory', async () => { - if (process.platform === 'win32') { - console.log(' (skipped — /dev/null not available on Windows)'); - return; - } - // HOME=/dev/null makes ensureDir(sessionsDir) throw ENOTDIR, - // which propagates to main().catch — the top-level error boundary - const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { - HOME: '/dev/null', - USERPROFILE: '/dev/null' - }); - assert.strictEqual(result.code, 0, - `Should exit 0 (don't block on errors), got ${result.code}`); - assert.ok(result.stderr.includes('[SessionStart] Error:'), - `stderr should contain [SessionStart] Error:, got: ${result.stderr}`); - })) passed++; else failed++; + if ( + await asyncTest('session-start exits 0 with error message when HOME is non-directory', async () => { + if (process.platform === 'win32') { + console.log(' (skipped — /dev/null not available on Windows)'); + 
return; + } + // HOME=/dev/null makes ensureDir(sessionsDir) throw ENOTDIR, + // which propagates to main().catch — the top-level error boundary + const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', { + HOME: '/dev/null', + USERPROFILE: '/dev/null' + }); + assert.strictEqual(result.code, 0, `Should exit 0 (don't block on errors), got ${result.code}`); + assert.ok(result.stderr.includes('[SessionStart] Error:'), `stderr should contain [SessionStart] Error:, got: ${result.stderr}`); + }) + ) + passed++; + else failed++; // ── Round 75: pre-compact.js main().catch handler ── console.log('\nRound 75: pre-compact.js (main catch — unrecoverable error):'); - if (await asyncTest('pre-compact exits 0 with error message when HOME is non-directory', async () => { - if (process.platform === 'win32') { - console.log(' (skipped — /dev/null not available on Windows)'); - return; - } - // HOME=/dev/null makes ensureDir(sessionsDir) throw ENOTDIR, - // which propagates to main().catch — the top-level error boundary - const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { - HOME: '/dev/null', - USERPROFILE: '/dev/null' - }); - assert.strictEqual(result.code, 0, - `Should exit 0 (don't block on errors), got ${result.code}`); - assert.ok(result.stderr.includes('[PreCompact] Error:'), - `stderr should contain [PreCompact] Error:, got: ${result.stderr}`); - })) passed++; else failed++; + if ( + await asyncTest('pre-compact exits 0 with error message when HOME is non-directory', async () => { + if (process.platform === 'win32') { + console.log(' (skipped — /dev/null not available on Windows)'); + return; + } + // HOME=/dev/null makes ensureDir(sessionsDir) throw ENOTDIR, + // which propagates to main().catch — the top-level error boundary + const result = await runScript(path.join(scriptsDir, 'pre-compact.js'), '', { + HOME: '/dev/null', + USERPROFILE: '/dev/null' + }); + assert.strictEqual(result.code, 0, `Should exit 0 (don't block on 
errors), got ${result.code}`); + assert.ok(result.stderr.includes('[PreCompact] Error:'), `stderr should contain [PreCompact] Error:, got: ${result.stderr}`); + }) + ) + passed++; + else failed++; // ── Round 75: session-end.js main().catch handler ── console.log('\nRound 75: session-end.js (main catch — unrecoverable error):'); - if (await asyncTest('session-end exits 0 with error message when HOME is non-directory', async () => { - if (process.platform === 'win32') { - console.log(' (skipped — /dev/null not available on Windows)'); - return; - } - // HOME=/dev/null makes ensureDir(sessionsDir) throw ENOTDIR inside main(), - // which propagates to runMain().catch — the top-level error boundary - const result = await runScript(path.join(scriptsDir, 'session-end.js'), '{}', { - HOME: '/dev/null', - USERPROFILE: '/dev/null' - }); - assert.strictEqual(result.code, 0, - `Should exit 0 (don't block on errors), got ${result.code}`); - assert.ok(result.stderr.includes('[SessionEnd] Error:'), - `stderr should contain [SessionEnd] Error:, got: ${result.stderr}`); - })) passed++; else failed++; + if ( + await asyncTest('session-end exits 0 with error message when HOME is non-directory', async () => { + if (process.platform === 'win32') { + console.log(' (skipped — /dev/null not available on Windows)'); + return; + } + // HOME=/dev/null makes ensureDir(sessionsDir) throw ENOTDIR inside main(), + // which propagates to runMain().catch — the top-level error boundary + const result = await runScript(path.join(scriptsDir, 'session-end.js'), '{}', { + HOME: '/dev/null', + USERPROFILE: '/dev/null' + }); + assert.strictEqual(result.code, 0, `Should exit 0 (don't block on errors), got ${result.code}`); + assert.ok(result.stderr.includes('[SessionEnd] Error:'), `stderr should contain [SessionEnd] Error:, got: ${result.stderr}`); + }) + ) + passed++; + else failed++; // ── Round 76: evaluate-session.js main().catch handler ── console.log('\nRound 76: evaluate-session.js (main catch — 
unrecoverable error):'); - if (await asyncTest('evaluate-session exits 0 with error message when HOME is non-directory', async () => { - if (process.platform === 'win32') { - console.log(' (skipped — /dev/null not available on Windows)'); - return; - } - // HOME=/dev/null makes ensureDir(learnedSkillsPath) throw ENOTDIR, - // which propagates to main().catch — the top-level error boundary - const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), '{}', { - HOME: '/dev/null', - USERPROFILE: '/dev/null' - }); - assert.strictEqual(result.code, 0, - `Should exit 0 (don't block on errors), got ${result.code}`); - assert.ok(result.stderr.includes('[ContinuousLearning] Error:'), - `stderr should contain [ContinuousLearning] Error:, got: ${result.stderr}`); - })) passed++; else failed++; + if ( + await asyncTest('evaluate-session exits 0 with error message when HOME is non-directory', async () => { + if (process.platform === 'win32') { + console.log(' (skipped — /dev/null not available on Windows)'); + return; + } + // HOME=/dev/null makes ensureDir(learnedSkillsPath) throw ENOTDIR, + // which propagates to main().catch — the top-level error boundary + const result = await runScript(path.join(scriptsDir, 'evaluate-session.js'), '{}', { + HOME: '/dev/null', + USERPROFILE: '/dev/null' + }); + assert.strictEqual(result.code, 0, `Should exit 0 (don't block on errors), got ${result.code}`); + assert.ok(result.stderr.includes('[ContinuousLearning] Error:'), `stderr should contain [ContinuousLearning] Error:, got: ${result.stderr}`); + }) + ) + passed++; + else failed++; // ── Round 76: suggest-compact.js main().catch handler ── console.log('\nRound 76: suggest-compact.js (main catch — double-failure):'); - if (await asyncTest('suggest-compact exits 0 with error when TMPDIR is non-directory', async () => { - if (process.platform === 'win32') { - console.log(' (skipped — /dev/null not available on Windows)'); - return; - } - // TMPDIR=/dev/null causes openSync 
to fail (ENOTDIR), then the catch - // fallback writeFile also fails, propagating to main().catch - const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - TMPDIR: '/dev/null' - }); - assert.strictEqual(result.code, 0, - `Should exit 0 (don't block on errors), got ${result.code}`); - assert.ok(result.stderr.includes('[StrategicCompact] Error:'), - `stderr should contain [StrategicCompact] Error:, got: ${result.stderr}`); - })) passed++; else failed++; + if ( + await asyncTest('suggest-compact exits 0 with error when TMPDIR is non-directory', async () => { + if (process.platform === 'win32') { + console.log(' (skipped — /dev/null not available on Windows)'); + return; + } + // TMPDIR=/dev/null causes openSync to fail (ENOTDIR), then the catch + // fallback writeFile also fails, propagating to main().catch + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + TMPDIR: '/dev/null' + }); + assert.strictEqual(result.code, 0, `Should exit 0 (don't block on errors), got ${result.code}`); + assert.ok(result.stderr.includes('[StrategicCompact] Error:'), `stderr should contain [StrategicCompact] Error:, got: ${result.stderr}`); + }) + ) + passed++; + else failed++; // ── Round 80: session-end.js entry.message?.role === 'user' third OR condition ── console.log('\nRound 80: session-end.js (entry.message.role user — third OR condition):'); - if (await asyncTest('extracts user messages from entries where only message.role is user (not type or role)', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-msgrole-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('extracts user messages from entries where only message.role is user (not type or role)', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-msgrole-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + 
fs.mkdirSync(sessionsDir, { recursive: true }); - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); - // Entries where type is NOT 'user' and there is no direct role field, - // but message.role IS 'user'. This exercises the third OR condition at - // session-end.js line 48: entry.message?.role === 'user' - const lines = [ - '{"type":"human","message":{"role":"user","content":"Refactor the auth module"}}', - '{"type":"human","message":{"role":"assistant","content":"I will refactor it"}}', - '{"type":"human","message":{"role":"user","content":"Add integration tests too"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); + // Entries where type is NOT 'user' and there is no direct role field, + // but message.role IS 'user'. This exercises the third OR condition at + // session-end.js line 48: entry.message?.role === 'user' + const lines = [ + '{"type":"human","message":{"role":"user","content":"Refactor the auth module"}}', + '{"type":"human","message":{"role":"assistant","content":"I will refactor it"}}', + '{"type":"human","message":{"role":"user","content":"Add integration tests too"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0); - const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); - assert.ok(files.length > 0, 'Should create session file'); - const content = 
fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); - // The third OR condition should fire for type:"human" + message.role:"user" - assert.ok(content.includes('Refactor the auth module') || content.includes('auth'), - `Session should include message extracted via message.role path. Got: ${content.substring(0, 300)}`); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - cleanupTestDir(testDir); - } - })) passed++; else failed++; + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('-session.tmp')); + assert.ok(files.length > 0, 'Should create session file'); + const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); + // The third OR condition should fire for type:"human" + message.role:"user" + assert.ok(content.includes('Refactor the auth module') || content.includes('auth'), `Session should include message extracted via message.role path. Got: ${content.substring(0, 300)}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + cleanupTestDir(testDir); + } + }) + ) + passed++; + else failed++; // ── Round 81: suggest-compact threshold upper bound, session-end non-string content ── console.log('\nRound 81: suggest-compact.js (COMPACT_THRESHOLD > 10000):'); - if (await asyncTest('COMPACT_THRESHOLD exceeding 10000 falls back to default 50', async () => { - // suggest-compact.js line 31: rawThreshold <= 10000 ? rawThreshold : 50 - // Values > 10000 are positive and finite but fail the upper-bound check. - // Existing tests cover 0, negative, NaN — this covers the > 10000 boundary. 
- const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { - COMPACT_THRESHOLD: '20000' - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); - // The script logs the threshold it chose — should fall back to 50 - // Look for the fallback value in stderr (log output) - const compactSource = fs.readFileSync(path.join(scriptsDir, 'suggest-compact.js'), 'utf8'); - // The condition at line 31: rawThreshold <= 10000 ? rawThreshold : 50 - assert.ok(compactSource.includes('<= 10000'), - 'Source should have <= 10000 upper bound check'); - assert.ok(compactSource.includes(': 50'), - 'Source should fall back to 50 when threshold exceeds 10000'); - })) passed++; else failed++; + if ( + await asyncTest('COMPACT_THRESHOLD exceeding 10000 falls back to default 50', async () => { + // suggest-compact.js line 31: rawThreshold <= 10000 ? rawThreshold : 50 + // Values > 10000 are positive and finite but fail the upper-bound check. + // Existing tests cover 0, negative, NaN — this covers the > 10000 boundary. + const result = await runScript(path.join(scriptsDir, 'suggest-compact.js'), '', { + COMPACT_THRESHOLD: '20000' + }); + assert.strictEqual(result.code, 0, 'Should exit 0'); + // The script logs the threshold it chose — should fall back to 50 + // Look for the fallback value in stderr (log output) + const compactSource = fs.readFileSync(path.join(scriptsDir, 'suggest-compact.js'), 'utf8'); + // The condition at line 31: rawThreshold <= 10000 ? 
rawThreshold : 50 + assert.ok(compactSource.includes('<= 10000'), 'Source should have <= 10000 upper bound check'); + assert.ok(compactSource.includes(': 50'), 'Source should fall back to 50 when threshold exceeds 10000'); + }) + ) + passed++; + else failed++; console.log('\nRound 81: session-end.js (user entry with non-string non-array content):'); - if (await asyncTest('skips user messages with numeric content (non-string non-array branch)', async () => { - // session-end.js line 50-55: rawContent is checked for string, then array, else '' - // When content is a number (42), neither branch matches, text = '', message is skipped. - const isoHome = path.join(os.tmpdir(), `ecc-r81-numcontent-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); - const transcriptPath = path.join(isoHome, 'transcript.jsonl'); + if ( + await asyncTest('skips user messages with numeric content (non-string non-array branch)', async () => { + // session-end.js line 50-55: rawContent is checked for string, then array, else '' + // When content is a number (42), neither branch matches, text = '', message is skipped. 
+ const isoHome = path.join(os.tmpdir(), `ecc-r81-numcontent-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + const transcriptPath = path.join(isoHome, 'transcript.jsonl'); - const lines = [ - // Normal user message (string content) — should be included - '{"type":"user","content":"Real user message"}', - // User message with numeric content — exercises the else: '' branch - '{"type":"user","content":42}', - // User message with boolean content — also hits the else branch - '{"type":"user","content":true}', - // User message with object content (no .text) — also hits the else branch - '{"type":"user","content":{"type":"image","source":"data:..."}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + const lines = [ + // Normal user message (string content) — should be included + '{"type":"user","content":"Real user message"}', + // User message with numeric content — exercises the else: '' branch + '{"type":"user","content":42}', + // User message with boolean content — also hits the else branch + '{"type":"user","content":true}', + // User message with object content (no .text) — also hits the else branch + '{"type":"user","content":{"type":"image","source":"data:..."}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0'); - const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp')); - assert.ok(files.length > 0, 'Should 
create session file'); - const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); - // The real string message should appear - assert.ok(content.includes('Real user message'), - 'Should include the string content user message'); - // Numeric/boolean/object content should NOT appear as text - assert.ok(!content.includes('42'), - 'Numeric content should be skipped (else branch → empty string → filtered)'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp')); + assert.ok(files.length > 0, 'Should create session file'); + const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); + // The real string message should appear + assert.ok(content.includes('Real user message'), 'Should include the string content user message'); + // Numeric/boolean/object content should NOT appear as text + assert.ok(!content.includes('42'), 'Numeric content should be skipped (else branch → empty string → filtered)'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 82: tool_name OR fallback, template marker regex no-match ── console.log('\nRound 82: session-end.js (entry.tool_name without type=tool_use):'); - if (await asyncTest('collects tool name from entry with tool_name but non-tool_use type', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-r82-toolname-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('collects tool name from entry with tool_name but non-tool_use type', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-r82-toolname-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const transcriptPath = path.join(isoHome, 'transcript.jsonl'); 
- const lines = [ - '{"type":"user","content":"Fix the bug"}', - '{"type":"result","tool_name":"Edit","tool_input":{"file_path":"/tmp/app.js"}}', - '{"type":"assistant","message":{"content":[{"type":"text","text":"Done fixing"}]}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + const transcriptPath = path.join(isoHome, 'transcript.jsonl'); + const lines = [ + '{"type":"user","content":"Fix the bug"}', + '{"type":"result","tool_name":"Edit","tool_input":{"file_path":"/tmp/app.js"}}', + '{"type":"assistant","message":{"content":[{"type":"text","text":"Done fixing"}]}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); - const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp')); - assert.ok(files.length > 0, 'Should create session file'); - const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); - // The tool name "Edit" should appear even though type is "result", not "tool_use" - assert.ok(content.includes('Edit'), 'Should collect Edit tool via tool_name OR fallback'); - // The file modified should also be collected since tool_name is Edit - assert.ok(content.includes('app.js'), 'Should collect modified file path from tool_input'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + assert.strictEqual(result.code, 0, 'Should exit 0'); + const files = fs.readdirSync(sessionsDir).filter(f => f.endsWith('.tmp')); + assert.ok(files.length > 0, 'Should create session file'); + const 
content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); + // The tool name "Edit" should appear even though type is "result", not "tool_use" + assert.ok(content.includes('Edit'), 'Should collect Edit tool via tool_name OR fallback'); + // The file modified should also be collected since tool_name is Edit + assert.ok(content.includes('app.js'), 'Should collect modified file path from tool_input'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; console.log('\nRound 82: session-end.js (template marker present but regex no-match):'); - if (await asyncTest('preserves file when marker present but regex does not match corrupted template', async () => { - const isoHome = path.join(os.tmpdir(), `ecc-r82-tmpl-${Date.now()}`); - const sessionsDir = path.join(isoHome, '.claude', 'sessions'); - fs.mkdirSync(sessionsDir, { recursive: true }); + if ( + await asyncTest('preserves file when marker present but regex does not match corrupted template', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-r82-tmpl-${Date.now()}`); + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); - const today = new Date().toISOString().split('T')[0]; - const sessionFile = path.join(sessionsDir, `session-${today}.tmp`); + const today = new Date().toISOString().split('T')[0]; + const sessionFile = path.join(sessionsDir, `session-${today}.tmp`); - // Write a corrupted template: has the marker but NOT the full regex structure - const corruptedTemplate = `# Session: ${today} + // Write a corrupted template: has the marker but NOT the full regex structure + const corruptedTemplate = `# Session: ${today} **Date:** ${today} **Started:** 10:00 **Last Updated:** 10:00 @@ -3683,255 +4341,283 @@ async function runTests() { Some random content without the expected ### Context to Load section `; - fs.writeFileSync(sessionFile, corruptedTemplate); + 
fs.writeFileSync(sessionFile, corruptedTemplate); - // Provide a transcript with enough content to generate a summary - const transcriptPath = path.join(isoHome, 'transcript.jsonl'); - const lines = [ - '{"type":"user","content":"Implement authentication feature"}', - '{"type":"assistant","message":{"content":[{"type":"text","text":"I will implement the auth feature using JWT tokens and bcrypt for password hashing."}]}}', - '{"type":"tool_use","tool_name":"Write","name":"Write","tool_input":{"file_path":"/tmp/auth.js"}}', - '{"type":"user","content":"Now add the login endpoint"}', - '{"type":"assistant","message":{"content":[{"type":"text","text":"Adding the login endpoint with proper validation."}]}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + // Provide a transcript with enough content to generate a summary + const transcriptPath = path.join(isoHome, 'transcript.jsonl'); + const lines = [ + '{"type":"user","content":"Implement authentication feature"}', + '{"type":"assistant","message":{"content":[{"type":"text","text":"I will implement the auth feature using JWT tokens and bcrypt for password hashing."}]}}', + '{"type":"tool_use","tool_name":"Write","name":"Write","tool_input":{"file_path":"/tmp/auth.js"}}', + '{"type":"user","content":"Now add the login endpoint"}', + '{"type":"assistant","message":{"content":[{"type":"text","text":"Adding the login endpoint with proper validation."}]}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - try { - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: isoHome, USERPROFILE: isoHome - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: isoHome, + USERPROFILE: isoHome + }); + 
assert.strictEqual(result.code, 0, 'Should exit 0'); - const content = fs.readFileSync(sessionFile, 'utf8'); - // The marker text should still be present since regex didn't match - assert.ok(content.includes('[Session context goes here]'), - 'Marker should remain when regex fails to match corrupted template'); - // The corrupted content should still be there - assert.ok(content.includes('Some random content'), - 'Original corrupted content should be preserved'); - } finally { - fs.rmSync(isoHome, { recursive: true, force: true }); - } - })) passed++; else failed++; + const content = fs.readFileSync(sessionFile, 'utf8'); + // The marker text should still be present since regex didn't match + assert.ok(content.includes('[Session context goes here]'), 'Marker should remain when regex fails to match corrupted template'); + // The corrupted content should still be there + assert.ok(content.includes('Some random content'), 'Original corrupted content should be preserved'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; // ── Round 87: post-edit-format.js and post-edit-typecheck.js stdin overflow (1MB) ── console.log('\nRound 87: post-edit-format.js (stdin exceeding 1MB — truncation):'); - if (await asyncTest('truncates stdin at 1MB limit and still passes through data (post-edit-format)', async () => { - // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit (lines 14-22) - const payload = 'x'.repeat(1024 * 1024 + 200000); - const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), payload); + if ( + await asyncTest('truncates stdin at 1MB limit and still passes through data (post-edit-format)', async () => { + // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit (lines 14-22) + const payload = 'x'.repeat(1024 * 1024 + 200000); + const result = await runScript(path.join(scriptsDir, 'post-edit-format.js'), payload); - assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized 
stdin'); - // Output should be truncated — significantly less than input - assert.ok(result.stdout.length < payload.length, - `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); - // Output should be approximately 1MB (last accepted chunk may push slightly over) - assert.ok(result.stdout.length <= 1024 * 1024 + 65536, - `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); - assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); + // Output should be truncated — significantly less than input + assert.ok(result.stdout.length < payload.length, `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); + // Output should be approximately 1MB (last accepted chunk may push slightly over) + assert.ok(result.stdout.length <= 1024 * 1024 + 65536, `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); + assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); + }) + ) + passed++; + else failed++; console.log('\nRound 87: post-edit-typecheck.js (stdin exceeding 1MB — truncation):'); - if (await asyncTest('truncates stdin at 1MB limit and still passes through data (post-edit-typecheck)', async () => { - // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit (lines 16-24) - const payload = 'x'.repeat(1024 * 1024 + 200000); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), payload); + if ( + await asyncTest('truncates stdin at 1MB limit and still passes through data (post-edit-typecheck)', async () => { + // Send 1.2MB of data — exceeds the 1MB MAX_STDIN limit (lines 16-24) + const payload = 'x'.repeat(1024 * 1024 + 200000); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), payload); - assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); 
- // Output should be truncated — significantly less than input - assert.ok(result.stdout.length < payload.length, - `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); - // Output should be approximately 1MB (last accepted chunk may push slightly over) - assert.ok(result.stdout.length <= 1024 * 1024 + 65536, - `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); - assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); - })) passed++; else failed++; + assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); + // Output should be truncated — significantly less than input + assert.ok(result.stdout.length < payload.length, `stdout (${result.stdout.length}) should be shorter than input (${payload.length})`); + // Output should be approximately 1MB (last accepted chunk may push slightly over) + assert.ok(result.stdout.length <= 1024 * 1024 + 65536, `stdout (${result.stdout.length}) should be near 1MB, not unbounded`); + assert.ok(result.stdout.length > 0, 'Should still pass through truncated data'); + }) + ) + passed++; + else failed++; // ── Round 89: post-edit-typecheck.js error detection path (relevantLines) ── console.log('\nRound 89: post-edit-typecheck.js (TypeScript error detection path):'); - if (await asyncTest('filters TypeScript errors to edited file when tsc reports errors', async () => { - // post-edit-typecheck.js lines 60-85: when execFileSync('npx', ['tsc', ...]) throws, - // the catch block filters error output by file path candidates and logs relevant lines. - // All existing tests either have no tsconfig (tsc never runs) or valid TS (tsc succeeds). - // This test creates a .ts file with a type error and a tsconfig.json. 
- const testDir = createTestDir(); - fs.writeFileSync(path.join(testDir, 'tsconfig.json'), JSON.stringify({ - compilerOptions: { strict: true, noEmit: true } - })); - const testFile = path.join(testDir, 'broken.ts'); - // Intentional type error: assigning string to number - fs.writeFileSync(testFile, 'const x: number = "not a number";\n'); + if ( + await asyncTest('filters TypeScript errors to edited file when tsc reports errors', async () => { + // post-edit-typecheck.js lines 60-85: when execFileSync('npx', ['tsc', ...]) throws, + // the catch block filters error output by file path candidates and logs relevant lines. + // All existing tests either have no tsconfig (tsc never runs) or valid TS (tsc succeeds). + // This test creates a .ts file with a type error and a tsconfig.json. + const testDir = createTestDir(); + fs.writeFileSync( + path.join(testDir, 'tsconfig.json'), + JSON.stringify({ + compilerOptions: { strict: true, noEmit: true } + }) + ); + const testFile = path.join(testDir, 'broken.ts'); + // Intentional type error: assigning string to number + fs.writeFileSync(testFile, 'const x: number = "not a number";\n'); - const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); - const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); + const stdinJson = JSON.stringify({ tool_input: { file_path: testFile } }); + const result = await runScript(path.join(scriptsDir, 'post-edit-typecheck.js'), stdinJson); - // Core: script must exit 0 and pass through stdin data regardless - assert.strictEqual(result.code, 0, 'Should exit 0 even when tsc finds errors'); - const parsed = JSON.parse(result.stdout); - assert.strictEqual(parsed.tool_input.file_path, testFile, - 'Should pass through original stdin data with file_path intact'); + // Core: script must exit 0 and pass through stdin data regardless + assert.strictEqual(result.code, 0, 'Should exit 0 even when tsc finds errors'); + const parsed = 
JSON.parse(result.stdout); + assert.strictEqual(parsed.tool_input.file_path, testFile, 'Should pass through original stdin data with file_path intact'); - // If tsc is available and ran, check that error output is filtered to this file - if (result.stderr.includes('TypeScript errors in')) { - assert.ok(result.stderr.includes('broken.ts'), - `Should reference the edited file basename. Got: ${result.stderr}`); - } - // Either way, no crash and data passes through (verified above) - cleanupTestDir(testDir); - })) passed++; else failed++; + // If tsc is available and ran, check that error output is filtered to this file + if (result.stderr.includes('TypeScript errors in')) { + assert.ok(result.stderr.includes('broken.ts'), `Should reference the edited file basename. Got: ${result.stderr}`); + } + // Either way, no crash and data passes through (verified above) + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // ── Round 89: extractSessionSummary entry.name + entry.input fallback paths ── console.log('\nRound 89: session-end.js (entry.name + entry.input fallback in extractSessionSummary):'); - if (await asyncTest('extracts tool name from entry.name and file path from entry.input (fallback format)', async () => { - // session-end.js line 63: const toolName = entry.tool_name || entry.name || ''; - // session-end.js line 66: const filePath = entry.tool_input?.file_path || entry.input?.file_path || ''; - // All existing tests use tool_name + tool_input format. This tests the name + input fallback. 
- const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); + if ( + await asyncTest('extracts tool name from entry.name and file path from entry.input (fallback format)', async () => { + // session-end.js line 63: const toolName = entry.tool_name || entry.name || ''; + // session-end.js line 66: const filePath = entry.tool_input?.file_path || entry.input?.file_path || ''; + // All existing tests use tool_name + tool_input format. This tests the name + input fallback. + const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); - const lines = [ - '{"type":"user","content":"Fix the auth module"}', - // Tool entries using "name" + "input" instead of "tool_name" + "tool_input" - '{"type":"tool_use","name":"Edit","input":{"file_path":"/src/auth.ts"}}', - '{"type":"tool_use","name":"Write","input":{"file_path":"/src/new-helper.ts"}}', - // Also include a tool with tool_name but entry.input (mixed format) - '{"tool_name":"Read","input":{"file_path":"/src/config.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + const lines = [ + '{"type":"user","content":"Fix the auth module"}', + // Tool entries using "name" + "input" instead of "tool_name" + "tool_input" + '{"type":"tool_use","name":"Edit","input":{"file_path":"/src/auth.ts"}}', + '{"type":"tool_use","name":"Write","input":{"file_path":"/src/new-helper.ts"}}', + // Also include a tool with tool_name but entry.input (mixed format) + '{"tool_name":"Read","input":{"file_path":"/src/config.ts"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 
'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0, 'Should exit 0'); - // Read the session file to verify tool names and file paths were extracted - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - // Tools from entry.name fallback - assert.ok(content.includes('Edit'), - `Should extract Edit tool from entry.name fallback. Got: ${content}`); - assert.ok(content.includes('Write'), - `Should extract Write tool from entry.name fallback. Got: ${content}`); - // File paths from entry.input fallback - assert.ok(content.includes('/src/auth.ts'), - `Should extract file path from entry.input.file_path fallback. Got: ${content}`); - assert.ok(content.includes('/src/new-helper.ts'), - `Should extract Write file from entry.input.file_path fallback. Got: ${content}`); + // Read the session file to verify tool names and file paths were extracted + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + // Tools from entry.name fallback + assert.ok(content.includes('Edit'), `Should extract Edit tool from entry.name fallback. Got: ${content}`); + assert.ok(content.includes('Write'), `Should extract Write tool from entry.name fallback. Got: ${content}`); + // File paths from entry.input fallback + assert.ok(content.includes('/src/auth.ts'), `Should extract file path from entry.input.file_path fallback. Got: ${content}`); + assert.ok(content.includes('/src/new-helper.ts'), `Should extract Write file from entry.input.file_path fallback. 
Got: ${content}`); + } } - } - cleanupTestDir(testDir); - })) passed++; else failed++; + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // ── Round 90: readStdinJson timeout path (utils.js lines 215-229) ── console.log('\nRound 90: readStdinJson (timeout fires when stdin stays open):'); - if (await asyncTest('readStdinJson resolves with {} when stdin never closes (timeout fires, no data)', async () => { - // utils.js line 215: setTimeout fires because stdin 'end' never arrives. - // Line 225: data.trim() is empty → resolves with {}. - // Exercises: removeAllListeners, process.stdin.unref(), and the empty-data timeout resolution. - const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:100}).then(d=>{process.stdout.write(JSON.stringify(d));process.exit(0)})'; - return new Promise((resolve, reject) => { - const child = spawn('node', ['-e', script], { - cwd: path.resolve(__dirname, '..', '..'), - stdio: ['pipe', 'pipe', 'pipe'] + if ( + await asyncTest('readStdinJson resolves with {} when stdin never closes (timeout fires, no data)', async () => { + // utils.js line 215: setTimeout fires because stdin 'end' never arrives. + // Line 225: data.trim() is empty → resolves with {}. + // Exercises: removeAllListeners, process.stdin.unref(), and the empty-data timeout resolution. 
+ const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:100}).then(d=>{process.stdout.write(JSON.stringify(d));process.exit(0)})'; + return new Promise((resolve, reject) => { + const child = spawn('node', ['-e', script], { + cwd: path.resolve(__dirname, '..', '..'), + stdio: ['pipe', 'pipe', 'pipe'] + }); + // Don't write anything or close stdin — force the timeout to fire + let stdout = ''; + child.stdout.on('data', d => (stdout += d)); + const timer = setTimeout(() => { + child.kill(); + reject(new Error('Test timed out')); + }, 5000); + child.on('close', code => { + clearTimeout(timer); + try { + assert.strictEqual(code, 0, 'Should exit 0 via timeout resolution'); + const parsed = JSON.parse(stdout); + assert.deepStrictEqual(parsed, {}, 'Should resolve with {} when no data received before timeout'); + resolve(); + } catch (err) { + reject(err); + } + }); }); - // Don't write anything or close stdin — force the timeout to fire - let stdout = ''; - child.stdout.on('data', d => stdout += d); - const timer = setTimeout(() => { child.kill(); reject(new Error('Test timed out')); }, 5000); - child.on('close', (code) => { - clearTimeout(timer); - try { - assert.strictEqual(code, 0, 'Should exit 0 via timeout resolution'); - const parsed = JSON.parse(stdout); - assert.deepStrictEqual(parsed, {}, 'Should resolve with {} when no data received before timeout'); - resolve(); - } catch (err) { - reject(err); - } - }); - }); - })) passed++; else failed++; + }) + ) + passed++; + else failed++; - if (await asyncTest('readStdinJson resolves with {} when timeout fires with invalid partial JSON', async () => { - // utils.js lines 224-228: setTimeout fires, data.trim() is non-empty, - // JSON.parse(data) throws → catch at line 226 resolves with {}. 
- const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:100}).then(d=>{process.stdout.write(JSON.stringify(d));process.exit(0)})'; - return new Promise((resolve, reject) => { - const child = spawn('node', ['-e', script], { - cwd: path.resolve(__dirname, '..', '..'), - stdio: ['pipe', 'pipe', 'pipe'] + if ( + await asyncTest('readStdinJson resolves with {} when timeout fires with invalid partial JSON', async () => { + // utils.js lines 224-228: setTimeout fires, data.trim() is non-empty, + // JSON.parse(data) throws → catch at line 226 resolves with {}. + const script = 'const u=require("./scripts/lib/utils");u.readStdinJson({timeoutMs:100}).then(d=>{process.stdout.write(JSON.stringify(d));process.exit(0)})'; + return new Promise((resolve, reject) => { + const child = spawn('node', ['-e', script], { + cwd: path.resolve(__dirname, '..', '..'), + stdio: ['pipe', 'pipe', 'pipe'] + }); + // Write partial invalid JSON but don't close stdin — timeout fires with unparseable data + child.stdin.write('{"incomplete":'); + let stdout = ''; + child.stdout.on('data', d => (stdout += d)); + const timer = setTimeout(() => { + child.kill(); + reject(new Error('Test timed out')); + }, 5000); + child.on('close', code => { + clearTimeout(timer); + try { + assert.strictEqual(code, 0, 'Should exit 0 via timeout resolution'); + const parsed = JSON.parse(stdout); + assert.deepStrictEqual(parsed, {}, 'Should resolve with {} when partial JSON cannot be parsed'); + resolve(); + } catch (err) { + reject(err); + } + }); }); - // Write partial invalid JSON but don't close stdin — timeout fires with unparseable data - child.stdin.write('{"incomplete":'); - let stdout = ''; - child.stdout.on('data', d => stdout += d); - const timer = setTimeout(() => { child.kill(); reject(new Error('Test timed out')); }, 5000); - child.on('close', (code) => { - clearTimeout(timer); - try { - assert.strictEqual(code, 0, 'Should exit 0 via timeout resolution'); - const parsed = 
JSON.parse(stdout); - assert.deepStrictEqual(parsed, {}, 'Should resolve with {} when partial JSON cannot be parsed'); - resolve(); - } catch (err) { - reject(err); - } - }); - }); - })) passed++; else failed++; + }) + ) + passed++; + else failed++; // ── Round 94: session-end.js tools used but no files modified ── console.log('\nRound 94: session-end.js (tools used without files modified):'); - if (await asyncTest('session file includes Tools Used but omits Files Modified when only Read/Grep used', async () => { - // session-end.js buildSummarySection (lines 217-228): - // filesModified.length > 0 → include "### Files Modified" section - // toolsUsed.length > 0 → include "### Tools Used" section - // Previously tested: BOTH present (Round ~10) and NEITHER present (Round ~10). - // Untested combination: toolsUsed present, filesModified empty. - // Transcript with Read/Grep tools (don't add to filesModified) and user messages. - const testDir = createTestDir(); - const transcriptPath = path.join(testDir, 'transcript.jsonl'); + if ( + await asyncTest('session file includes Tools Used but omits Files Modified when only Read/Grep used', async () => { + // session-end.js buildSummarySection (lines 217-228): + // filesModified.length > 0 → include "### Files Modified" section + // toolsUsed.length > 0 → include "### Tools Used" section + // Previously tested: BOTH present (Round ~10) and NEITHER present (Round ~10). + // Untested combination: toolsUsed present, filesModified empty. + // Transcript with Read/Grep tools (don't add to filesModified) and user messages. 
+ const testDir = createTestDir(); + const transcriptPath = path.join(testDir, 'transcript.jsonl'); - const lines = [ - '{"type":"user","content":"Search the codebase for auth handlers"}', - '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/auth.ts"}}', - '{"type":"tool_use","tool_name":"Grep","tool_input":{"pattern":"handler"}}', - '{"type":"user","content":"Check the test file too"}', - '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/tests/auth.test.ts"}}', - ]; - fs.writeFileSync(transcriptPath, lines.join('\n')); + const lines = [ + '{"type":"user","content":"Search the codebase for auth handlers"}', + '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/src/auth.ts"}}', + '{"type":"tool_use","tool_name":"Grep","tool_input":{"pattern":"handler"}}', + '{"type":"user","content":"Check the test file too"}', + '{"type":"tool_use","tool_name":"Read","tool_input":{"file_path":"/tests/auth.test.ts"}}' + ]; + fs.writeFileSync(transcriptPath, lines.join('\n')); - const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { - HOME: testDir - }); - assert.strictEqual(result.code, 0, 'Should exit 0'); + const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir + }); + assert.strictEqual(result.code, 0, 'Should exit 0'); - const claudeDir = path.join(testDir, '.claude', 'sessions'); - if (fs.existsSync(claudeDir)) { - const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); - if (files.length > 0) { - const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); - assert.ok(content.includes('### Tools Used'), 'Should include Tools Used section'); - assert.ok(content.includes('Read'), 'Should list Read tool'); - assert.ok(content.includes('Grep'), 'Should list Grep tool'); - 
assert.ok(!content.includes('### Files Modified'), - 'Should NOT include Files Modified section (Read/Grep do not modify files)'); + const claudeDir = path.join(testDir, '.claude', 'sessions'); + if (fs.existsSync(claudeDir)) { + const files = fs.readdirSync(claudeDir).filter(f => f.endsWith('.tmp')); + if (files.length > 0) { + const content = fs.readFileSync(path.join(claudeDir, files[0]), 'utf8'); + assert.ok(content.includes('### Tools Used'), 'Should include Tools Used section'); + assert.ok(content.includes('Read'), 'Should list Read tool'); + assert.ok(content.includes('Grep'), 'Should list Grep tool'); + assert.ok(!content.includes('### Files Modified'), 'Should NOT include Files Modified section (Read/Grep do not modify files)'); + } } - } - cleanupTestDir(testDir); - })) passed++; else failed++; + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; // Summary console.log('\n=== Test Results ==='); From 66498ae9acec6430c5b41411cce6ed4b0aadb7f1 Mon Sep 17 00:00:00 2001 From: Jonghyeok Park Date: Sun, 8 Mar 2026 16:53:20 +0900 Subject: [PATCH 07/42] perf(hooks): use direct require() instead of spawning child process Invoke hook scripts directly via require() when they export a run(rawInput) function, eliminating one Node.js process spawn per hook invocation (~50-100ms). Includes path traversal guard, timeouts, error logging, PR review feedback, legacy hooks guard, normalized filePath, and restored findProjectRoot config detection with package manager support. 
--- scripts/hooks/post-edit-format.js | 23 +++--- scripts/hooks/quality-gate.js | 49 ++++++++++--- scripts/hooks/run-with-flags.js | 42 ++++++++++- scripts/lib/resolve-formatter.js | 106 ++++++++++++++++++++-------- tests/lib/resolve-formatter.test.js | 68 +++++++++++++++--- tests/run-all.js | 3 +- 6 files changed, 228 insertions(+), 63 deletions(-) diff --git a/scripts/hooks/post-edit-format.js b/scripts/hooks/post-edit-format.js index 89643830..7e098132 100644 --- a/scripts/hooks/post-edit-format.js +++ b/scripts/hooks/post-edit-format.js @@ -20,11 +20,7 @@ const { execFileSync } = require('child_process'); const path = require('path'); -const { - findProjectRoot, - detectFormatter, - resolveFormatterBin, -} = require('../lib/resolve-formatter'); +const { findProjectRoot, detectFormatter, resolveFormatterBin } = require('../lib/resolve-formatter'); const MAX_STDIN = 1024 * 1024; // 1MB limit @@ -42,23 +38,22 @@ function run(rawInput) { if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) { try { - const projectRoot = findProjectRoot(path.dirname(path.resolve(filePath))); + const resolvedFilePath = path.resolve(filePath); + const projectRoot = findProjectRoot(path.dirname(resolvedFilePath)); const formatter = detectFormatter(projectRoot); if (!formatter) return rawInput; const resolved = resolveFormatterBin(projectRoot, formatter); + if (!resolved) return rawInput; // Biome: `check --write` = format + lint in one pass // Prettier: `--write` = format only - const args = - formatter === 'biome' - ? [...resolved.prefix, 'check', '--write', filePath] - : [...resolved.prefix, '--write', filePath]; + const args = formatter === 'biome' ? 
[...resolved.prefix, 'check', '--write', resolvedFilePath] : [...resolved.prefix, '--write', resolvedFilePath]; execFileSync(resolved.bin, args, { cwd: projectRoot, stdio: ['pipe', 'pipe', 'pipe'], - timeout: 15000, + timeout: 15000 }); } catch { // Formatter not installed, file missing, or failed — non-blocking @@ -76,7 +71,7 @@ if (require.main === module) { let data = ''; process.stdin.setEncoding('utf8'); - process.stdin.on('data', (chunk) => { + process.stdin.on('data', chunk => { if (data.length < MAX_STDIN) { const remaining = MAX_STDIN - data.length; data += chunk.substring(0, remaining); @@ -84,8 +79,8 @@ if (require.main === module) { }); process.stdin.on('end', () => { - const result = run(data); - process.stdout.write(result); + data = run(data); + process.stdout.write(data); process.exit(0); }); } diff --git a/scripts/hooks/quality-gate.js b/scripts/hooks/quality-gate.js index aee94b27..19f35195 100755 --- a/scripts/hooks/quality-gate.js +++ b/scripts/hooks/quality-gate.js @@ -18,31 +18,50 @@ const fs = require('fs'); const path = require('path'); const { spawnSync } = require('child_process'); -const { - findProjectRoot, - detectFormatter, - resolveFormatterBin, -} = require('../lib/resolve-formatter'); +const { findProjectRoot, detectFormatter, resolveFormatterBin } = require('../lib/resolve-formatter'); const MAX_STDIN = 1024 * 1024; +/** + * Execute a command synchronously, returning the spawnSync result. + * + * @param {string} command - Executable path or name + * @param {string[]} args - Arguments to pass + * @param {string} [cwd] - Working directory (defaults to process.cwd()) + * @returns {import('child_process').SpawnSyncReturns} + */ function exec(command, args, cwd = process.cwd()) { return spawnSync(command, args, { cwd, encoding: 'utf8', env: process.env, + timeout: 15000 }); } +/** + * Write a message to stderr for logging. 
+ * + * @param {string} msg - Message to log + */ function log(msg) { process.stderr.write(`${msg}\n`); } +/** + * Run quality-gate checks for a single file based on its extension. + * Skips JS/TS files when Biome is configured (handled by post-edit-format). + * + * @param {string} filePath - Path to the edited file + */ function maybeRunQualityGate(filePath) { if (!filePath || !fs.existsSync(filePath)) { return; } + // Resolve to absolute path so projectRoot-relative comparisons work + filePath = path.resolve(filePath); + const ext = path.extname(filePath).toLowerCase(); const fix = String(process.env.ECC_QUALITY_GATE_FIX || '').toLowerCase() === 'true'; const strict = String(process.env.ECC_QUALITY_GATE_STRICT || '').toLowerCase() === 'true'; @@ -59,6 +78,7 @@ function maybeRunQualityGate(filePath) { // .json / .md — still need quality gate const resolved = resolveFormatterBin(projectRoot, 'biome'); + if (!resolved) return; const args = [...resolved.prefix, 'check', filePath]; if (fix) args.push('--write'); const result = exec(resolved.bin, args, projectRoot); @@ -70,6 +90,7 @@ function maybeRunQualityGate(filePath) { if (formatter === 'prettier') { const resolved = resolveFormatterBin(projectRoot, 'prettier'); + if (!resolved) return; const args = [...resolved.prefix, fix ? 
'--write' : '--check', filePath]; const result = exec(resolved.bin, args, projectRoot); if (result.status !== 0 && strict) { @@ -82,8 +103,20 @@ function maybeRunQualityGate(filePath) { return; } - if (ext === '.go' && fix) { - exec('gofmt', ['-w', filePath]); + if (ext === '.go') { + if (fix) { + const r = exec('gofmt', ['-w', filePath]); + if (r.status !== 0 && strict) { + log(`[QualityGate] gofmt failed for ${filePath}`); + } + } else if (strict) { + const r = exec('gofmt', ['-l', filePath]); + if (r.status !== 0) { + log(`[QualityGate] gofmt failed for ${filePath}`); + } else if (r.stdout && r.stdout.trim()) { + log(`[QualityGate] gofmt check failed for ${filePath}`); + } + } return; } @@ -119,7 +152,7 @@ function run(rawInput) { if (require.main === module) { let raw = ''; process.stdin.setEncoding('utf8'); - process.stdin.on('data', (chunk) => { + process.stdin.on('data', chunk => { if (raw.length < MAX_STDIN) { const remaining = MAX_STDIN - raw.length; raw += chunk.substring(0, remaining); diff --git a/scripts/hooks/run-with-flags.js b/scripts/hooks/run-with-flags.js index eb41564f..d69e8131 100755 --- a/scripts/hooks/run-with-flags.js +++ b/scripts/hooks/run-with-flags.js @@ -52,7 +52,15 @@ async function main() { } const pluginRoot = getPluginRoot(); - const scriptPath = path.join(pluginRoot, relScriptPath); + const resolvedRoot = path.resolve(pluginRoot); + const scriptPath = path.resolve(pluginRoot, relScriptPath); + + // Prevent path traversal outside the plugin root + if (!scriptPath.startsWith(resolvedRoot + path.sep)) { + process.stderr.write(`[Hook] Path traversal rejected for ${hookId}: ${scriptPath}\n`); + process.stdout.write(raw); + process.exit(0); + } if (!fs.existsSync(scriptPath)) { process.stderr.write(`[Hook] Script not found for ${hookId}: ${scriptPath}\n`); @@ -60,11 +68,43 @@ async function main() { process.exit(0); } + // Prefer direct require() when the hook exports a run(rawInput) function. 
+ // This eliminates one Node.js process spawn (~50-100ms savings per hook). + // + // SAFETY: Only require() hooks that export run(). Legacy hooks execute + // side effects at module scope (stdin listeners, process.exit, main() calls) + // which would interfere with the parent process or cause double execution. + let hookModule; + const src = fs.readFileSync(scriptPath, 'utf8'); + const hasRunExport = /\bmodule\.exports\b/.test(src) && /\brun\b/.test(src); + + if (hasRunExport) { + try { + hookModule = require(scriptPath); + } catch (requireErr) { + process.stderr.write(`[Hook] require() failed for ${hookId}: ${requireErr.message}\n`); + // Fall through to legacy spawnSync path + } + } + + if (hookModule && typeof hookModule.run === 'function') { + try { + const output = hookModule.run(raw); + if (output != null) process.stdout.write(output); + } catch (runErr) { + process.stderr.write(`[Hook] run() error for ${hookId}: ${runErr.message}\n`); + process.stdout.write(raw); + } + process.exit(0); + } + + // Legacy path: spawn a child Node process for hooks without run() export const result = spawnSync('node', [scriptPath], { input: raw, encoding: 'utf8', env: process.env, cwd: process.cwd(), + timeout: 30000 }); if (result.stdout) process.stdout.write(result.stdout); diff --git a/scripts/lib/resolve-formatter.js b/scripts/lib/resolve-formatter.js index 727a0734..f940a1fa 100644 --- a/scripts/lib/resolve-formatter.js +++ b/scripts/lib/resolve-formatter.js @@ -18,9 +18,29 @@ const binCache = new Map(); // ── Public helpers ────────────────────────────────────────────────── +// Markers that indicate a project root (formatter configs included so +// repos without package.json are still detected correctly). 
+const PROJECT_ROOT_MARKERS = [ + 'package.json', + 'biome.json', + 'biome.jsonc', + '.prettierrc', + '.prettierrc.json', + '.prettierrc.js', + '.prettierrc.cjs', + '.prettierrc.mjs', + '.prettierrc.yml', + '.prettierrc.yaml', + '.prettierrc.toml', + 'prettier.config.js', + 'prettier.config.cjs', + 'prettier.config.mjs' +]; + /** - * Walk up from `startDir` until a directory containing package.json is found. - * Returns `startDir` as fallback when no package.json exists above it. + * Walk up from `startDir` until a directory containing a known project + * root marker (package.json or formatter config) is found. + * Returns `startDir` as fallback when no marker exists above it. * * @param {string} startDir - Absolute directory path to start from * @returns {string} Absolute path to the project root @@ -30,9 +50,11 @@ function findProjectRoot(startDir) { let dir = startDir; while (dir !== path.dirname(dir)) { - if (fs.existsSync(path.join(dir, 'package.json'))) { - projectRootCache.set(startDir, dir); - return dir; + for (const marker of PROJECT_ROOT_MARKERS) { + if (fs.existsSync(path.join(dir, marker))) { + projectRootCache.set(startDir, dir); + return dir; + } } dir = path.dirname(dir); } @@ -59,6 +81,20 @@ function detectFormatter(projectRoot) { } } + // Check package.json "prettier" key before config files + try { + const pkgPath = path.join(projectRoot, 'package.json'); + if (fs.existsSync(pkgPath)) { + const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8')); + if (pkg.prettier != null) { + formatterCache.set(projectRoot, 'prettier'); + return 'prettier'; + } + } + } catch { + // Malformed package.json — continue to file-based detection + } + const prettierConfigs = [ '.prettierrc', '.prettierrc.json', @@ -70,7 +106,7 @@ function detectFormatter(projectRoot) { '.prettierrc.toml', 'prettier.config.js', 'prettier.config.cjs', - 'prettier.config.mjs', + 'prettier.config.mjs' ]; for (const cfg of prettierConfigs) { if (fs.existsSync(path.join(projectRoot, cfg))) { 
@@ -83,14 +119,35 @@ function detectFormatter(projectRoot) { return null; } +/** + * Resolve the runner binary and prefix args for the configured package + * manager (respects CLAUDE_PACKAGE_MANAGER env and project config). + * + * @param {string} projectRoot - Absolute path to the project root + * @returns {{ bin: string, prefix: string[] }} + */ +// Windows .cmd shim mapping for cross-platform safety +const WIN_CMD_SHIMS = { npx: 'npx.cmd', pnpm: 'pnpm.cmd', yarn: 'yarn.cmd', bunx: 'bunx.cmd' }; + +function getRunnerFromPackageManager(projectRoot) { + const isWin = process.platform === 'win32'; + const { getPackageManager } = require('./package-manager'); + const pm = getPackageManager({ projectDir: projectRoot }); + const execCmd = pm?.config?.execCmd || 'npx'; + const [rawBin = 'npx', ...prefix] = execCmd.split(/\s+/).filter(Boolean); + const bin = isWin ? WIN_CMD_SHIMS[rawBin] || rawBin : rawBin; + return { bin, prefix }; +} + /** * Resolve the formatter binary, preferring the local node_modules/.bin - * installation over npx to avoid package-resolution overhead. + * installation over the package manager exec command to avoid + * package-resolution overhead. * * @param {string} projectRoot - Absolute path to the project root * @param {'biome' | 'prettier'} formatter - Detected formatter name - * @returns {{ bin: string, prefix: string[] }} - * `bin` – executable path (absolute local path or npx/npx.cmd) + * @returns {{ bin: string, prefix: string[] } | null} + * `bin` – executable path (absolute local path or runner binary) * `prefix` – extra args to prepend (e.g. ['@biomejs/biome'] when using npx) */ function resolveFormatterBin(projectRoot, formatter) { @@ -98,45 +155,36 @@ function resolveFormatterBin(projectRoot, formatter) { if (binCache.has(cacheKey)) return binCache.get(cacheKey); const isWin = process.platform === 'win32'; - const npxBin = isWin ? 
'npx.cmd' : 'npx'; if (formatter === 'biome') { - const localBin = path.join( - projectRoot, - 'node_modules', - '.bin', - isWin ? 'biome.cmd' : 'biome', - ); + const localBin = path.join(projectRoot, 'node_modules', '.bin', isWin ? 'biome.cmd' : 'biome'); if (fs.existsSync(localBin)) { const result = { bin: localBin, prefix: [] }; binCache.set(cacheKey, result); return result; } - const result = { bin: npxBin, prefix: ['@biomejs/biome'] }; + const runner = getRunnerFromPackageManager(projectRoot); + const result = { bin: runner.bin, prefix: [...runner.prefix, '@biomejs/biome'] }; binCache.set(cacheKey, result); return result; } if (formatter === 'prettier') { - const localBin = path.join( - projectRoot, - 'node_modules', - '.bin', - isWin ? 'prettier.cmd' : 'prettier', - ); + const localBin = path.join(projectRoot, 'node_modules', '.bin', isWin ? 'prettier.cmd' : 'prettier'); if (fs.existsSync(localBin)) { const result = { bin: localBin, prefix: [] }; binCache.set(cacheKey, result); return result; } - const result = { bin: npxBin, prefix: ['prettier'] }; + const runner = getRunnerFromPackageManager(projectRoot); + const result = { bin: runner.bin, prefix: [...runner.prefix, 'prettier'] }; binCache.set(cacheKey, result); return result; } - const result = { bin: npxBin, prefix: [] }; - binCache.set(cacheKey, result); - return result; + // Unknown formatter — return null so callers can handle gracefully + binCache.set(cacheKey, null); + return null; } /** @@ -152,5 +200,5 @@ module.exports = { findProjectRoot, detectFormatter, resolveFormatterBin, - clearCaches, -}; \ No newline at end of file + clearCaches +}; diff --git a/tests/lib/resolve-formatter.test.js b/tests/lib/resolve-formatter.test.js index 92741f84..c02bb60d 100644 --- a/tests/lib/resolve-formatter.test.js +++ b/tests/lib/resolve-formatter.test.js @@ -9,14 +9,15 @@ const path = require('path'); const fs = require('fs'); const os = require('os'); -const { - findProjectRoot, - detectFormatter, - 
resolveFormatterBin, - clearCaches, -} = require('../../scripts/lib/resolve-formatter'); +const { findProjectRoot, detectFormatter, resolveFormatterBin, clearCaches } = require('../../scripts/lib/resolve-formatter'); -// Test helper +/** + * Run a single test case, printing pass/fail. + * + * @param {string} name - Test description + * @param {() => void} fn - Test body (throws on failure) + * @returns {boolean} Whether the test passed + */ function test(name, fn) { try { fn(); @@ -29,8 +30,32 @@ function test(name, fn) { } } +/** Track all created tmp dirs for cleanup */ +const tmpDirs = []; + +/** + * Create a temporary directory and track it for cleanup. + * + * @returns {string} Absolute path to the new temp directory + */ function makeTmpDir() { - return fs.mkdtempSync(path.join(os.tmpdir(), 'resolve-fmt-')); + const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'resolve-fmt-')); + tmpDirs.push(dir); + return dir; +} + +/** + * Remove all tracked temporary directories. + */ +function cleanupTmpDirs() { + for (const dir of tmpDirs) { + try { + fs.rmSync(dir, { recursive: true, force: true }); + } catch { + // Best-effort cleanup + } + } + tmpDirs.length = 0; } function runTests() { @@ -103,6 +128,18 @@ function runTests() { assert.strictEqual(detectFormatter(root), 'prettier'); }); + run('detectFormatter: detects prettier key in package.json', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'package.json'), JSON.stringify({ name: 'test', prettier: { singleQuote: true } })); + assert.strictEqual(detectFormatter(root), 'prettier'); + }); + + run('detectFormatter: ignores package.json without prettier key', () => { + const root = makeTmpDir(); + fs.writeFileSync(path.join(root, 'package.json'), JSON.stringify({ name: 'test' })); + assert.strictEqual(detectFormatter(root), null); + }); + run('detectFormatter: biome takes priority over prettier', () => { const root = makeTmpDir(); fs.writeFileSync(path.join(root, 'biome.json'), '{}'); @@ -157,6 
+194,12 @@ function runTests() { assert.deepStrictEqual(result.prefix, ['prettier']); }); + run('resolveFormatterBin: returns null for unknown formatter', () => { + const root = makeTmpDir(); + const result = resolveFormatterBin(root, 'unknown'); + assert.strictEqual(result, null); + }); + run('resolveFormatterBin: caches resolved binary', () => { const root = makeTmpDir(); const binDir = path.join(root, 'node_modules', '.bin'); @@ -189,9 +232,14 @@ function runTests() { assert.strictEqual(detectFormatter(root), null); }); - // ── Summary ─────────────────────────────────────────────────── + // ── Summary & Cleanup ───────────────────────────────────────── - console.log(`\n ${passed} passed, ${failed} failed\n`); + cleanupTmpDirs(); + + console.log('\n=== Test Results ==='); + console.log(`Passed: ${passed}`); + console.log(`Failed: ${failed}`); + console.log(`Total: ${passed + failed}`); process.exit(failed > 0 ? 1 : 0); } diff --git a/tests/run-all.js b/tests/run-all.js index 54d8c212..41a6c7f7 100644 --- a/tests/run-all.js +++ b/tests/run-all.js @@ -16,6 +16,7 @@ const testFiles = [ 'lib/session-manager.test.js', 'lib/session-aliases.test.js', 'lib/project-detect.test.js', + 'lib/resolve-formatter.test.js', 'hooks/hooks.test.js', 'hooks/evaluate-session.test.js', 'hooks/suggest-compact.test.js', @@ -27,7 +28,7 @@ const testFiles = [ ]; const BOX_W = 58; // inner width between ║ delimiters -const boxLine = (s) => `║${s.padEnd(BOX_W)}║`; +const boxLine = s => `║${s.padEnd(BOX_W)}║`; console.log('╔' + '═'.repeat(BOX_W) + '╗'); console.log(boxLine(' Everything Claude Code - Test Suite')); From 0a3afbe38fd67801ef56e16b31e1a7b4815352c7 Mon Sep 17 00:00:00 2001 From: Jonghyeok Park Date: Tue, 10 Mar 2026 22:37:57 +0900 Subject: [PATCH 08/42] fix(hooks): add Windows .cmd support with shell injection guard Handle Windows .cmd shim resolution via spawnSync with strict path validation. 
Removes shell:true injection risk, uses strict equality, and restores .cmd support with path injection guard. --- scripts/hooks/post-edit-format.js | 33 +++++++++++++++++++++++++------ scripts/hooks/run-with-flags.js | 2 +- scripts/lib/resolve-formatter.js | 2 +- tests/hooks/hooks.test.js | 6 ++++-- 4 files changed, 33 insertions(+), 10 deletions(-) diff --git a/scripts/hooks/post-edit-format.js b/scripts/hooks/post-edit-format.js index 7e098132..d6486866 100644 --- a/scripts/hooks/post-edit-format.js +++ b/scripts/hooks/post-edit-format.js @@ -17,9 +17,12 @@ * Fails silently if no formatter is found or installed. */ -const { execFileSync } = require('child_process'); +const { execFileSync, spawnSync } = require('child_process'); const path = require('path'); +// Shell metacharacters that cmd.exe interprets as command separators/operators +const UNSAFE_PATH_CHARS = /[&|<>^%!]/; + const { findProjectRoot, detectFormatter, resolveFormatterBin } = require('../lib/resolve-formatter'); const MAX_STDIN = 1024 * 1024; // 1MB limit @@ -50,11 +53,29 @@ function run(rawInput) { // Prettier: `--write` = format only const args = formatter === 'biome' ? [...resolved.prefix, 'check', '--write', resolvedFilePath] : [...resolved.prefix, '--write', resolvedFilePath]; - execFileSync(resolved.bin, args, { - cwd: projectRoot, - stdio: ['pipe', 'pipe', 'pipe'], - timeout: 15000 - }); + if (process.platform === 'win32' && resolved.bin.endsWith('.cmd')) { + // Windows: .cmd files require shell to execute. Guard against + // command injection by rejecting paths with shell metacharacters. 
+ if (UNSAFE_PATH_CHARS.test(resolvedFilePath)) { + throw new Error('File path contains unsafe shell characters'); + } + const result = spawnSync(resolved.bin, args, { + cwd: projectRoot, + shell: true, + stdio: 'pipe', + timeout: 15000 + }); + if (result.error) throw result.error; + if (typeof result.status === 'number' && result.status !== 0) { + throw new Error(result.stderr?.toString() || `Formatter exited with status ${result.status}`); + } + } else { + execFileSync(resolved.bin, args, { + cwd: projectRoot, + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 15000 + }); + } } catch { // Formatter not installed, file missing, or failed — non-blocking } diff --git a/scripts/hooks/run-with-flags.js b/scripts/hooks/run-with-flags.js index d69e8131..b665fe28 100755 --- a/scripts/hooks/run-with-flags.js +++ b/scripts/hooks/run-with-flags.js @@ -90,7 +90,7 @@ async function main() { if (hookModule && typeof hookModule.run === 'function') { try { const output = hookModule.run(raw); - if (output != null) process.stdout.write(output); + if (output !== null && output !== undefined) process.stdout.write(output); } catch (runErr) { process.stderr.write(`[Hook] run() error for ${hookId}: ${runErr.message}\n`); process.stdout.write(raw); diff --git a/scripts/lib/resolve-formatter.js b/scripts/lib/resolve-formatter.js index f940a1fa..22d04819 100644 --- a/scripts/lib/resolve-formatter.js +++ b/scripts/lib/resolve-formatter.js @@ -86,7 +86,7 @@ function detectFormatter(projectRoot) { const pkgPath = path.join(projectRoot, 'package.json'); if (fs.existsSync(pkgPath)) { const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8')); - if (pkg.prettier != null) { + if ('prettier' in pkg) { formatterCache.set(projectRoot, 'prettier'); return 'prettier'; } diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index 81766ea2..311ae21f 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -1966,8 +1966,10 @@ async function runTests() { const formatSource = 
fs.readFileSync(path.join(scriptsDir, 'post-edit-format.js'), 'utf8'); // Strip comments to avoid matching "shell: true" in comment text const codeOnly = formatSource.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, ''); - assert.ok(!codeOnly.includes('shell:'), 'post-edit-format.js should not pass shell option in code'); - // npx.cmd handling moved to shared resolve-formatter.js — verify it uses the shared module + assert.ok(!/execFileSync\([^)]*shell\s*:/.test(codeOnly), 'post-edit-format.js should not pass shell option to execFileSync'); + assert.ok(codeOnly.includes("process.platform === 'win32' && resolved.bin.endsWith('.cmd')"), 'Windows shell execution must stay gated to .cmd shims'); + assert.ok(codeOnly.includes('UNSAFE_PATH_CHARS'), 'Must guard against shell metacharacters before using shell: true'); + // npx.cmd handling in shared resolve-formatter.js const resolverSource = fs.readFileSync(path.join(scriptsDir, '..', 'lib', 'resolve-formatter.js'), 'utf8'); assert.ok(resolverSource.includes('npx.cmd'), 'resolve-formatter.js should use npx.cmd for Windows cross-platform safety'); assert.ok(formatSource.includes('resolveFormatterBin'), 'post-edit-format.js should use shared resolveFormatterBin'); From 67841042d67706bf1e1700b3f165c44b991c60e9 Mon Sep 17 00:00:00 2001 From: Jonghyeok Park Date: Wed, 11 Mar 2026 09:55:56 +0900 Subject: [PATCH 09/42] refactor: deduplicate config lists and unify resolveFormatterBin branches Extract BIOME_CONFIGS and PRETTIER_CONFIGS as shared constants to eliminate duplication between PROJECT_ROOT_MARKERS and detectFormatter(). Unify the biome/prettier branches in resolveFormatterBin() via a FORMATTER_PACKAGES map. Remove redundant path.resolve() in quality-gate.js. 
--- scripts/hooks/quality-gate.js | 2 +- scripts/lib/resolve-formatter.js | 83 ++++++++++++-------------------- 2 files changed, 33 insertions(+), 52 deletions(-) diff --git a/scripts/hooks/quality-gate.js b/scripts/hooks/quality-gate.js index 19f35195..37373b87 100755 --- a/scripts/hooks/quality-gate.js +++ b/scripts/hooks/quality-gate.js @@ -67,7 +67,7 @@ function maybeRunQualityGate(filePath) { const strict = String(process.env.ECC_QUALITY_GATE_STRICT || '').toLowerCase() === 'true'; if (['.ts', '.tsx', '.js', '.jsx', '.json', '.md'].includes(ext)) { - const projectRoot = findProjectRoot(path.dirname(path.resolve(filePath))); + const projectRoot = findProjectRoot(path.dirname(filePath)); const formatter = detectFormatter(projectRoot); if (formatter === 'biome') { diff --git a/scripts/lib/resolve-formatter.js b/scripts/lib/resolve-formatter.js index 22d04819..a118752f 100644 --- a/scripts/lib/resolve-formatter.js +++ b/scripts/lib/resolve-formatter.js @@ -16,14 +16,11 @@ const projectRootCache = new Map(); const formatterCache = new Map(); const binCache = new Map(); -// ── Public helpers ────────────────────────────────────────────────── +// ── Config file lists (single source of truth) ───────────────────── -// Markers that indicate a project root (formatter configs included so -// repos without package.json are still detected correctly). 
-const PROJECT_ROOT_MARKERS = [ - 'package.json', - 'biome.json', - 'biome.jsonc', +const BIOME_CONFIGS = ['biome.json', 'biome.jsonc']; + +const PRETTIER_CONFIGS = [ '.prettierrc', '.prettierrc.json', '.prettierrc.js', @@ -37,6 +34,19 @@ const PROJECT_ROOT_MARKERS = [ 'prettier.config.mjs' ]; +const PROJECT_ROOT_MARKERS = ['package.json', ...BIOME_CONFIGS, ...PRETTIER_CONFIGS]; + +// ── Windows .cmd shim mapping ─────────────────────────────────────── +const WIN_CMD_SHIMS = { npx: 'npx.cmd', pnpm: 'pnpm.cmd', yarn: 'yarn.cmd', bunx: 'bunx.cmd' }; + +// ── Formatter → package name mapping ──────────────────────────────── +const FORMATTER_PACKAGES = { + biome: { binName: 'biome', pkgName: '@biomejs/biome' }, + prettier: { binName: 'prettier', pkgName: 'prettier' } +}; + +// ── Public helpers ────────────────────────────────────────────────── + /** * Walk up from `startDir` until a directory containing a known project * root marker (package.json or formatter config) is found. @@ -73,8 +83,7 @@ function findProjectRoot(startDir) { function detectFormatter(projectRoot) { if (formatterCache.has(projectRoot)) return formatterCache.get(projectRoot); - const biomeConfigs = ['biome.json', 'biome.jsonc']; - for (const cfg of biomeConfigs) { + for (const cfg of BIOME_CONFIGS) { if (fs.existsSync(path.join(projectRoot, cfg))) { formatterCache.set(projectRoot, 'biome'); return 'biome'; @@ -95,20 +104,7 @@ function detectFormatter(projectRoot) { // Malformed package.json — continue to file-based detection } - const prettierConfigs = [ - '.prettierrc', - '.prettierrc.json', - '.prettierrc.js', - '.prettierrc.cjs', - '.prettierrc.mjs', - '.prettierrc.yml', - '.prettierrc.yaml', - '.prettierrc.toml', - 'prettier.config.js', - 'prettier.config.cjs', - 'prettier.config.mjs' - ]; - for (const cfg of prettierConfigs) { + for (const cfg of PRETTIER_CONFIGS) { if (fs.existsSync(path.join(projectRoot, cfg))) { formatterCache.set(projectRoot, 'prettier'); return 'prettier'; @@ -126,9 
+122,6 @@ function detectFormatter(projectRoot) { * @param {string} projectRoot - Absolute path to the project root * @returns {{ bin: string, prefix: string[] }} */ -// Windows .cmd shim mapping for cross-platform safety -const WIN_CMD_SHIMS = { npx: 'npx.cmd', pnpm: 'pnpm.cmd', yarn: 'yarn.cmd', bunx: 'bunx.cmd' }; - function getRunnerFromPackageManager(projectRoot) { const isWin = process.platform === 'win32'; const { getPackageManager } = require('./package-manager'); @@ -154,37 +147,25 @@ function resolveFormatterBin(projectRoot, formatter) { const cacheKey = `${projectRoot}:${formatter}`; if (binCache.has(cacheKey)) return binCache.get(cacheKey); + const pkg = FORMATTER_PACKAGES[formatter]; + if (!pkg) { + binCache.set(cacheKey, null); + return null; + } + const isWin = process.platform === 'win32'; + const localBin = path.join(projectRoot, 'node_modules', '.bin', isWin ? `${pkg.binName}.cmd` : pkg.binName); - if (formatter === 'biome') { - const localBin = path.join(projectRoot, 'node_modules', '.bin', isWin ? 'biome.cmd' : 'biome'); - if (fs.existsSync(localBin)) { - const result = { bin: localBin, prefix: [] }; - binCache.set(cacheKey, result); - return result; - } - const runner = getRunnerFromPackageManager(projectRoot); - const result = { bin: runner.bin, prefix: [...runner.prefix, '@biomejs/biome'] }; + if (fs.existsSync(localBin)) { + const result = { bin: localBin, prefix: [] }; binCache.set(cacheKey, result); return result; } - if (formatter === 'prettier') { - const localBin = path.join(projectRoot, 'node_modules', '.bin', isWin ? 
'prettier.cmd' : 'prettier'); - if (fs.existsSync(localBin)) { - const result = { bin: localBin, prefix: [] }; - binCache.set(cacheKey, result); - return result; - } - const runner = getRunnerFromPackageManager(projectRoot); - const result = { bin: runner.bin, prefix: [...runner.prefix, 'prettier'] }; - binCache.set(cacheKey, result); - return result; - } - - // Unknown formatter — return null so callers can handle gracefully - binCache.set(cacheKey, null); - return null; + const runner = getRunnerFromPackageManager(projectRoot); + const result = { bin: runner.bin, prefix: [...runner.prefix, pkg.pkgName] }; + binCache.set(cacheKey, result); + return result; } /** From 708c265b4f8ad8b7f2c9afe85d00073644deec17 Mon Sep 17 00:00:00 2001 From: ispaydeu <31388982+ispaydeu@users.noreply.github.com> Date: Tue, 10 Mar 2026 20:02:53 -0400 Subject: [PATCH 10/42] fix: read tool_response field in observe.sh (#377) Claude Code sends tool output as `tool_response` in PostToolUse hook payloads, but observe.sh only checked for `tool_output` and `output`. This caused all observations to have empty output fields, making the observer pipeline blind to tool results. Adds `tool_response` as the primary field to check, with backward- compatible fallback to the existing `tool_output` and `output` fields. 
--- skills/continuous-learning-v2/hooks/observe.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/skills/continuous-learning-v2/hooks/observe.sh b/skills/continuous-learning-v2/hooks/observe.sh index ea44386b..653d56a3 100755 --- a/skills/continuous-learning-v2/hooks/observe.sh +++ b/skills/continuous-learning-v2/hooks/observe.sh @@ -124,7 +124,7 @@ try: # Extract fields - Claude Code hook format tool_name = data.get("tool_name", data.get("tool", "unknown")) tool_input = data.get("tool_input", data.get("input", {})) - tool_output = data.get("tool_output", data.get("output", "")) + tool_output = data.get("tool_response", data.get("tool_output", data.get("output", ""))) session_id = data.get("session_id", "unknown") tool_use_id = data.get("tool_use_id", "") cwd = data.get("cwd", "") From 4de776341ee553144884fcac7945d2acb194568c Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:14:07 -0700 Subject: [PATCH 11/42] fix: handle null tool_response fallback --- .../continuous-learning-v2/hooks/observe.sh | 4 +- tests/hooks/hooks.test.js | 60 +++++++++++++++++++ 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/skills/continuous-learning-v2/hooks/observe.sh b/skills/continuous-learning-v2/hooks/observe.sh index 653d56a3..33ec6f04 100755 --- a/skills/continuous-learning-v2/hooks/observe.sh +++ b/skills/continuous-learning-v2/hooks/observe.sh @@ -124,7 +124,9 @@ try: # Extract fields - Claude Code hook format tool_name = data.get("tool_name", data.get("tool", "unknown")) tool_input = data.get("tool_input", data.get("input", {})) - tool_output = data.get("tool_response", data.get("tool_output", data.get("output", ""))) + tool_output = data.get("tool_response") + if tool_output is None: + tool_output = data.get("tool_output", data.get("output", "")) session_id = data.get("session_id", "unknown") tool_use_id = data.get("tool_use_id", "") cwd = data.get("cwd", "") diff --git a/tests/hooks/hooks.test.js 
b/tests/hooks/hooks.test.js index a5ecaaa0..e13e8193 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -63,6 +63,29 @@ function runScript(scriptPath, input = '', env = {}) { }); } +function runShellScript(scriptPath, args = [], input = '', env = {}, cwd = process.cwd()) { + return new Promise((resolve, reject) => { + const proc = spawn('bash', [scriptPath, ...args], { + cwd, + env: { ...process.env, ...env }, + stdio: ['pipe', 'pipe', 'pipe'] + }); + + let stdout = ''; + let stderr = ''; + + if (input) { + proc.stdin.write(input); + } + proc.stdin.end(); + + proc.stdout.on('data', data => stdout += data); + proc.stderr.on('data', data => stderr += data); + proc.on('close', code => resolve({ code, stdout, stderr })); + proc.on('error', reject); + }); +} + // Create a temporary test directory function createTestDir() { const testDir = path.join(os.tmpdir(), `hooks-test-${Date.now()}`); @@ -1777,6 +1800,43 @@ async function runTests() { assert.ok(stdout.trim().length > 0, 'CLV2_PYTHON_CMD should export a resolved interpreter path'); })) passed++; else failed++; + if (await asyncTest('observe.sh falls back to legacy output fields when tool_response is null', async () => { + const homeDir = createTestDir(); + const projectDir = createTestDir(); + const observePath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh'); + const payload = JSON.stringify({ + tool_name: 'Bash', + tool_input: { command: 'echo hello' }, + tool_response: null, + tool_output: 'legacy output', + session_id: 'session-123', + cwd: projectDir + }); + + try { + const result = await runShellScript(observePath, ['post'], payload, { + HOME: homeDir, + CLAUDE_PROJECT_DIR: projectDir + }, projectDir); + + assert.strictEqual(result.code, 0, `observe.sh should exit successfully, stderr: ${result.stderr}`); + + const projectsDir = path.join(homeDir, '.claude', 'homunculus', 'projects'); + const projectIds = fs.readdirSync(projectsDir); + 
assert.strictEqual(projectIds.length, 1, 'observe.sh should create one project-scoped observation directory'); + + const observationsPath = path.join(projectsDir, projectIds[0], 'observations.jsonl'); + const observations = fs.readFileSync(observationsPath, 'utf8').trim().split('\n').filter(Boolean); + assert.ok(observations.length > 0, 'observe.sh should append at least one observation'); + + const observation = JSON.parse(observations[0]); + assert.strictEqual(observation.output, 'legacy output', 'observe.sh should fall back to legacy tool_output when tool_response is null'); + } finally { + cleanupTestDir(homeDir); + cleanupTestDir(projectDir); + } + })) passed++; else failed++; + if (await asyncTest('matches .tsx extension for type checking', async () => { const testDir = createTestDir(); const testFile = path.join(testDir, 'component.tsx'); From b7bafb40cbfd7353cfff791bed68440e9855a303 Mon Sep 17 00:00:00 2001 From: Pangerkumzuk Longkumer <73515951+pangerlkr@users.noreply.github.com> Date: Tue, 10 Mar 2026 16:56:43 +0530 Subject: [PATCH 12/42] docs: add comprehensive troubleshooting guide (fixes #326) Added a comprehensive troubleshooting guide for the Everything Claude Code (ECC) plugin, covering common issues, symptoms, causes, and solutions. --- TROUBLESHOOTING.md | 391 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 391 insertions(+) create mode 100644 TROUBLESHOOTING.md diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md new file mode 100644 index 00000000..d80e00c6 --- /dev/null +++ b/TROUBLESHOOTING.md @@ -0,0 +1,391 @@ +# Troubleshooting Guide + +Common issues and solutions for Everything Claude Code (ECC) plugin. 
+ +## Table of Contents + +- [Memory & Context Issues](#memory--context-issues) +- [Agent Harness Failures](#agent-harness-failures) +- [Hook & Workflow Errors](#hook--workflow-errors) +- [Installation & Setup](#installation--setup) +- [Performance Issues](#performance-issues) + +--- + +## Memory & Context Issues + +### Context Window Overflow + +**Symptom:** "Context too long" errors or incomplete responses + +**Causes:** +- Large file uploads exceeding token limits +- Accumulated conversation history +- Multiple large tool outputs in single session + +**Solutions:** +```bash +# 1. Clear conversation history and start fresh +# Use Claude Code: "New Chat" or Cmd/Ctrl+Shift+N + +# 2. Reduce file size before analysis +head -n 100 large-file.log > sample.log + +# 3. Use streaming for large outputs +cat large-file.txt | head -50 + +# 4. Split tasks into smaller chunks +# Instead of: "Analyze all 50 files" +# Use: "Analyze files in src/components/ directory" +``` + +### Memory Persistence Failures + +**Symptom:** Agent doesn't remember previous context or observations + +**Causes:** +- Disabled continuous-learning hooks +- Corrupted observation files +- Project detection failures + +**Solutions:** +```bash +# Check if observations are being recorded +ls ~/.claude/homunculus/*/observations.jsonl + +# View recent observations +tail -20 ~/.claude/homunculus/$(basename $PWD)/observations.jsonl + +# Reset observations if corrupted +rm ~/.claude/homunculus/$(basename $PWD)/observations.jsonl + +# Verify hooks are enabled +grep -r "observe" ~/.claude/settings.json +``` + +--- + +## Agent Harness Failures + +### Agent Not Found + +**Symptom:** "Agent not loaded" or "Unknown agent" errors + +**Causes:** +- Plugin not installed correctly +- Agent path misconfiguration +- Marketplace vs manual install mismatch + +**Solutions:** +```bash +# Check plugin installation +ls ~/.claude/plugins/cache/ + +# Verify agent exists (marketplace install) +ls ~/.claude/plugins/cache/*/agents/ + 
+# For manual install, agents should be in: +ls ~/.claude/agents/ # Custom agents only + +# Reload plugin +# Claude Code → Settings → Extensions → Reload +``` + +### Workflow Execution Hangs + +**Symptom:** Agent starts but never completes + +**Causes:** +- Infinite loops in agent logic +- Blocked on user input +- Network timeout waiting for API + +**Solutions:** +```bash +# 1. Check for stuck processes +ps aux | grep claude + +# 2. Enable debug mode +export CLAUDE_DEBUG=1 + +# 3. Set shorter timeouts +export CLAUDE_TIMEOUT=30 + +# 4. Check network connectivity +curl -I https://api.anthropic.com +``` + +### Tool Use Errors + +**Symptom:** "Tool execution failed" or permission denied + +**Causes:** +- Missing dependencies (npm, python, etc.) +- Insufficient file permissions +- Path not found + +**Solutions:** +```bash +# Verify required tools are installed +which node python3 npm git + +# Fix permissions on hook scripts +chmod +x ~/.claude/plugins/cache/*/hooks/*.sh +chmod +x ~/.claude/plugins/cache/*/skills/*/hooks/*.sh + +# Check PATH includes necessary binaries +echo $PATH +``` + +--- + +## Hook & Workflow Errors + +### Hooks Not Firing + +**Symptom:** Pre/post hooks don't execute + +**Causes:** +- Hooks not registered in settings.json +- Invalid hook syntax +- Hook script not executable + +**Solutions:** +```bash +# Check hooks are registered +cat ~/.claude/settings.json | grep -A 10 '"hooks"' + +# Verify hook files exist and are executable +ls -la ~/.claude/plugins/cache/*/hooks/ + +# Test hook manually +bash ~/.claude/plugins/cache/*/hooks/pre-bash.sh <<< '{"command":"echo test"}' + +# Re-register hooks (if using plugin) +# Disable and re-enable plugin in Claude Code settings +``` + +### Python/Node Version Mismatches + +**Symptom:** "python3 not found" or "node: command not found" + +**Causes:** +- Missing Python/Node installation +- PATH not configured +- Wrong Python version (Windows) + +**Solutions:** +```bash +# Install Python 3 (if missing) +# macOS: 
brew install python3 +# Ubuntu: sudo apt install python3 +# Windows: Download from python.org + +# Install Node.js (if missing) +# macOS: brew install node +# Ubuntu: sudo apt install nodejs npm +# Windows: Download from nodejs.org + +# Verify installations +python3 --version +node --version +npm --version + +# Windows: Ensure python (not python3) works +python --version +``` + +### Dev Server Blocker False Positives + +**Symptom:** Hook blocks legitimate commands mentioning "dev" + +**Causes:** +- Heredoc content triggering pattern match +- Non-dev commands with "dev" in arguments + +**Solutions:** +```bash +# This is fixed in v1.8.0+ (PR #371) +# Upgrade plugin to latest version + +# Workaround: Wrap dev servers in tmux +tmux new-session -d -s dev "npm run dev" +tmux attach -t dev + +# Disable hook temporarily if needed +# Edit ~/.claude/settings.json and remove pre-bash hook +``` + +--- + +## Installation & Setup + +### Plugin Not Loading + +**Symptom:** Plugin features unavailable after install + +**Causes:** +- Marketplace cache not updated +- Claude Code version incompatibility +- Corrupted plugin files + +**Solutions:** +```bash +# Clear plugin cache +rm -rf ~/.claude/plugins/cache/* + +# Reinstall from marketplace +# Claude Code → Extensions → Everything Claude Code → Uninstall +# Then reinstall from marketplace + +# Check Claude Code version +claude --version +# Requires Claude Code 2.0+ + +# Manual install (if marketplace fails) +git clone https://github.com/affaan-m/everything-claude-code.git +cp -r everything-claude-code ~/.claude/plugins/ecc +``` + +### Package Manager Detection Fails + +**Symptom:** Wrong package manager used (npm instead of pnpm) + +**Causes:** +- No lock file present +- CLAUDE_PACKAGE_MANAGER not set +- Multiple lock files confusing detection + +**Solutions:** +```bash +# Set preferred package manager globally +export CLAUDE_PACKAGE_MANAGER=pnpm +# Add to ~/.bashrc or ~/.zshrc + +# Or set per-project +echo '{"packageManager": 
"pnpm"}' > .claude/package-manager.json + +# Or use package.json field +npm pkg set packageManager="pnpm@8.15.0" + +# Remove conflicting lock files +rm package-lock.json # If using pnpm/yarn/bun +``` + +--- + +## Performance Issues + +### Slow Response Times + +**Symptom:** Agent takes 30+ seconds to respond + +**Causes:** +- Large observation files +- Too many active hooks +- Network latency to API + +**Solutions:** +```bash +# Archive old observations +find ~/.claude/homunculus -name "observations.jsonl" -size +10M -delete + +# Disable unused hooks temporarily +# Edit ~/.claude/settings.json + +# Use local caching +# Enable Redis for semantic search caching +``` + +### High CPU Usage + +**Symptom:** Claude Code consuming 100% CPU + +**Causes:** +- Infinite observation loops +- File watching on large directories +- Memory leaks in hooks + +**Solutions:** +```bash +# Check for runaway processes +top -o cpu | grep claude + +# Disable continuous learning temporarily +touch ~/.claude/homunculus/disabled + +# Restart Claude Code +# Cmd/Ctrl+Q then reopen + +# Check observation file size +du -sh ~/.claude/homunculus/*/ +``` + +--- + +## Common Error Messages + +### "EACCES: permission denied" + +```bash +# Fix hook permissions +find ~/.claude/plugins -name "*.sh" -exec chmod +x {} \; + +# Fix observation directory permissions +chmod -R 755 ~/.claude/homunculus +``` + +### "MODULE_NOT_FOUND" + +```bash +# Install plugin dependencies +cd ~/.claude/plugins/cache/everything-claude-code +npm install + +# Or for manual install +cd ~/.claude/plugins/ecc +npm install +``` + +### "spawn UNKNOWN" + +```bash +# Windows-specific: Ensure scripts use correct line endings +# Convert CRLF to LF +find ~/.claude/plugins -name "*.sh" -exec dos2unix {} \; + +# Or install dos2unix +# macOS: brew install dos2unix +# Ubuntu: sudo apt install dos2unix +``` + +--- + +## Getting Help + + If you're still experiencing issues: + +1. 
**Check GitHub Issues**: [github.com/affaan-m/everything-claude-code/issues](https://github.com/affaan-m/everything-claude-code/issues) +2. **Enable Debug Logging**: + ```bash + export CLAUDE_DEBUG=1 + export CLAUDE_LOG_LEVEL=debug + ``` +3. **Collect Diagnostic Info**: + ```bash + claude --version + node --version + python3 --version + echo $CLAUDE_PACKAGE_MANAGER + ls -la ~/.claude/plugins/cache/ + ``` +4. **Open an Issue**: Include debug logs, error messages, and diagnostic info + +--- + +## Related Documentation + +- [README.md](./README.md) - Installation and features +- [CONTRIBUTING.md](./CONTRIBUTING.md) - Development guidelines +- [docs/](./docs/) - Detailed documentation +- [examples/](./examples/) - Usage examples From 5644415767ee692809dd3ae2f9bc929b70f16865 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:14:07 -0700 Subject: [PATCH 13/42] docs: tighten troubleshooting safety guidance --- TROUBLESHOOTING.md | 61 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 15 deletions(-) diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md index d80e00c6..57ffa2e4 100644 --- a/TROUBLESHOOTING.md +++ b/TROUBLESHOOTING.md @@ -9,6 +9,8 @@ Common issues and solutions for Everything Claude Code (ECC) plugin. - [Hook & Workflow Errors](#hook--workflow-errors) - [Installation & Setup](#installation--setup) - [Performance Issues](#performance-issues) +- [Common Error Messages](#common-error-messages) +- [Getting Help](#getting-help) --- @@ -32,7 +34,7 @@ Common issues and solutions for Everything Claude Code (ECC) plugin. head -n 100 large-file.log > sample.log # 3. Use streaming for large outputs -cat large-file.txt | head -50 +head -n 50 large-file.txt # 4. 
Split tasks into smaller chunks # Instead of: "Analyze all 50 files" @@ -51,13 +53,28 @@ cat large-file.txt | head -50 **Solutions:** ```bash # Check if observations are being recorded -ls ~/.claude/homunculus/*/observations.jsonl +ls ~/.claude/homunculus/projects/*/observations.jsonl -# View recent observations -tail -20 ~/.claude/homunculus/$(basename $PWD)/observations.jsonl +# Find the current project's hash id +python3 - <<'PY' +import json, os +registry_path = os.path.expanduser("~/.claude/homunculus/projects.json") +with open(registry_path) as f: + registry = json.load(f) +for project_id, meta in registry.items(): + if meta.get("root") == os.getcwd(): + print(project_id) + break +else: + raise SystemExit("Project hash not found in ~/.claude/homunculus/projects.json") +PY -# Reset observations if corrupted -rm ~/.claude/homunculus/$(basename $PWD)/observations.jsonl +# View recent observations for that project +tail -20 ~/.claude/homunculus/projects//observations.jsonl + +# Back up a corrupted observations file before recreating it +mv ~/.claude/homunculus/projects//observations.jsonl \ + ~/.claude/homunculus/projects//observations.jsonl.bak.$(date +%Y%m%d-%H%M%S) # Verify hooks are enabled grep -r "observe" ~/.claude/settings.json @@ -153,7 +170,7 @@ echo $PATH **Solutions:** ```bash # Check hooks are registered -cat ~/.claude/settings.json | grep -A 10 '"hooks"' +grep -A 10 '"hooks"' ~/.claude/settings.json # Verify hook files exist and are executable ls -la ~/.claude/plugins/cache/*/hooks/ @@ -231,8 +248,12 @@ tmux attach -t dev **Solutions:** ```bash -# Clear plugin cache -rm -rf ~/.claude/plugins/cache/* +# Inspect the plugin cache before changing it +ls -la ~/.claude/plugins/cache/ + +# Back up the plugin cache instead of deleting it in place +mv ~/.claude/plugins/cache ~/.claude/plugins/cache.backup.$(date +%Y%m%d-%H%M%S) +mkdir -p ~/.claude/plugins/cache # Reinstall from marketplace # Claude Code → Extensions → Everything Claude Code → Uninstall @@ 
-268,7 +289,9 @@ echo '{"packageManager": "pnpm"}' > .claude/package-manager.json # Or use package.json field npm pkg set packageManager="pnpm@8.15.0" -# Remove conflicting lock files +# Warning: removing lock files can change installed dependency versions. +# Commit or back up the lock file first, then run a fresh install and re-run CI. +# Only do this when intentionally switching package managers. rm package-lock.json # If using pnpm/yarn/bun ``` @@ -287,14 +310,22 @@ rm package-lock.json # If using pnpm/yarn/bun **Solutions:** ```bash -# Archive old observations -find ~/.claude/homunculus -name "observations.jsonl" -size +10M -delete +# Archive large observations instead of deleting them +archive_dir="$HOME/.claude/homunculus/archive/$(date +%Y%m%d)" +mkdir -p "$archive_dir" +find ~/.claude/homunculus/projects -name "observations.jsonl" -size +10M -exec sh -c ' + for file do + base=$(basename "$(dirname "$file")") + gzip -c "$file" > "'"$archive_dir"'/${base}-observations.jsonl.gz" + : > "$file" + done +' sh {} + # Disable unused hooks temporarily # Edit ~/.claude/settings.json -# Use local caching -# Enable Redis for semantic search caching +# Keep active observation files small +# Large archives should live under ~/.claude/homunculus/archive/ ``` ### High CPU Usage @@ -332,7 +363,7 @@ du -sh ~/.claude/homunculus/*/ find ~/.claude/plugins -name "*.sh" -exec chmod +x {} \; # Fix observation directory permissions -chmod -R 755 ~/.claude/homunculus +chmod -R u+rwX,go+rX ~/.claude/homunculus ``` ### "MODULE_NOT_FOUND" From 678ee7dc32f458adbd4fbcd9c25aa85eed46d5e9 Mon Sep 17 00:00:00 2001 From: ant Date: Tue, 10 Mar 2026 23:19:09 +0800 Subject: [PATCH 14/42] feat(skills): add blueprint skill for multi-session construction planning --- skills/blueprint/SKILL.md | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100644 skills/blueprint/SKILL.md diff --git a/skills/blueprint/SKILL.md b/skills/blueprint/SKILL.md new file mode 
100644 index 00000000..c8326f71 --- /dev/null +++ b/skills/blueprint/SKILL.md @@ -0,0 +1,69 @@ +--- +name: blueprint +description: >- + Turn a one-line objective into a step-by-step construction plan for + multi-session, multi-agent engineering projects. Each step has a + self-contained context brief so a fresh agent can execute it cold. + Includes adversarial review gate, dependency graph, parallel step + detection, anti-pattern catalog, and plan mutation protocol. + TRIGGER when: user requests a plan, blueprint, or roadmap for a + complex multi-PR task, or describes work that needs multiple sessions. + DO NOT TRIGGER when: task is completable in a single PR or fewer + than 3 tool calls, or user says "just do it". +origin: community +--- + +# Blueprint — Construction Plan Generator + +Turn a one-line objective into a step-by-step construction plan that any coding agent can execute cold. + +``` +/blueprint myapp "migrate database to PostgreSQL" +``` + +## What You Get + +A Markdown plan file in `plans/` where every step is independently executable — a fresh agent in a new session can pick up any step without reading prior steps or conversation history: + +- **Steps** — each one-PR sized, with task list, rollback strategy, verification commands, and exit criteria +- **Dependency graph** — which steps can run in parallel, which must be serial +- **Design decisions** — rationale for key choices, locked against re-litigation +- **Invariants** — properties verified after every step +- **Progress log** — single source of truth for execution state across sessions +- **Review log** — findings from an adversarial review gate + +## Installation + +```bash +mkdir -p ~/.claude/skills +git clone https://github.com/antbotlab/blueprint.git ~/.claude/skills/blueprint +``` + +## Usage + +``` +/blueprint +``` + +## Key Features + +**Cold-start execution** — Every step includes a self-contained context brief. No prior context needed. 
+ +**Adversarial review gate** — Every plan is reviewed by a strongest-model sub-agent (e.g., Opus) against a checklist covering completeness, dependency correctness, and anti-pattern detection. + +**Branch/PR/CI workflow** — Built into every step. Detects git/gh availability and degrades gracefully to direct mode when absent. + +**Parallel step detection** — Dependency graph identifies steps with no shared files or output dependencies. + +**Zero runtime risk** — Pure markdown skill. No hooks, no shell scripts, no executable code. No attack surface. + +**Plan mutation protocol** — Steps can be split, inserted, skipped, reordered, or abandoned with formal protocols and audit trail. + +## Requirements + +- Claude Code (for `/blueprint` slash command) +- Git + GitHub CLI (optional — enables full branch/PR/CI workflow; Blueprint detects absence and auto-switches to direct mode) + +## Source + +[github.com/antbotlab/blueprint](https://github.com/antbotlab/blueprint) — MIT License From f809bdd049339eb03a726842c9f1ada0ce0b1dbc Mon Sep 17 00:00:00 2001 From: ant Date: Tue, 10 Mar 2026 23:55:23 +0800 Subject: [PATCH 15/42] fix(skills): address review feedback on blueprint skill - Pin installation to specific commit hash (full SHA) to mitigate supply-chain risk (cubic-dev-ai feedback) - Add "When to Use", "How It Works", "Examples" sections to match repo skill format conventions (coderabbitai feedback) - Add review-before-update instructions for safe version upgrades - Emphasize zero-runtime-risk: pure Markdown, no executable code --- skills/blueprint/SKILL.md | 78 ++++++++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 25 deletions(-) diff --git a/skills/blueprint/SKILL.md b/skills/blueprint/SKILL.md index c8326f71..4a927d5a 100644 --- a/skills/blueprint/SKILL.md +++ b/skills/blueprint/SKILL.md @@ -17,47 +17,75 @@ origin: community Turn a one-line objective into a step-by-step construction plan that any coding agent can execute cold. 
+## When to Use + +- Breaking a large feature into multiple PRs with clear dependency order +- Planning a refactor or migration that spans multiple sessions +- Coordinating parallel workstreams across sub-agents +- Any task where context loss between sessions would cause rework + +**Do not use** for tasks completable in a single PR, fewer than 3 tool calls, or when the user says "just do it." + +## How It Works + +Blueprint runs a 5-phase pipeline: + +1. **Research** — Pre-flight checks (git, gh auth, remote, default branch), then reads project structure, existing plans, and memory files to gather context. +2. **Design** — Breaks the objective into one-PR-sized steps (3–12 typical). Assigns dependency edges, parallel/serial ordering, model tier (strongest vs default), and rollback strategy per step. +3. **Draft** — Writes a self-contained Markdown plan file to `plans/`. Every step includes a context brief, task list, verification commands, and exit criteria — so a fresh agent can execute any step without reading prior steps. +4. **Review** — Delegates adversarial review to a strongest-model sub-agent (e.g., Opus) against a checklist and anti-pattern catalog. Fixes all critical findings before finalizing. +5. **Register** — Saves the plan, updates memory index, and presents the step count and parallelism summary to the user. + +Blueprint detects git/gh availability automatically. With git + GitHub CLI, it generates full branch/PR/CI workflow plans. Without them, it switches to direct mode (edit-in-place, no branches). 
+ +## Examples + +### Basic usage + ``` /blueprint myapp "migrate database to PostgreSQL" ``` -## What You Get +Produces `plans/myapp-migrate-database-to-postgresql.md` with steps like: +- Step 1: Add PostgreSQL driver and connection config +- Step 2: Create migration scripts for each table +- Step 3: Update repository layer to use new driver +- Step 4: Add integration tests against PostgreSQL +- Step 5: Remove old database code and config -A Markdown plan file in `plans/` where every step is independently executable — a fresh agent in a new session can pick up any step without reading prior steps or conversation history: +### Multi-agent project -- **Steps** — each one-PR sized, with task list, rollback strategy, verification commands, and exit criteria -- **Dependency graph** — which steps can run in parallel, which must be serial -- **Design decisions** — rationale for key choices, locked against re-litigation -- **Invariants** — properties verified after every step -- **Progress log** — single source of truth for execution state across sessions -- **Review log** — findings from an adversarial review gate +``` +/blueprint chatbot "extract LLM providers into a plugin system" +``` + +Produces a plan with parallel steps where possible (e.g., "implement Anthropic plugin" and "implement OpenAI plugin" run in parallel after the plugin interface step is done), model tier assignments (strongest for the interface design step, default for implementation), and invariants verified after every step (e.g., "all existing tests pass", "no provider imports in core"). + +## Key Features + +- **Cold-start execution** — Every step includes a self-contained context brief. No prior context needed. +- **Adversarial review gate** — Every plan is reviewed by a strongest-model sub-agent against a checklist covering completeness, dependency correctness, and anti-pattern detection. +- **Branch/PR/CI workflow** — Built into every step. 
Degrades gracefully to direct mode when git/gh is absent. +- **Parallel step detection** — Dependency graph identifies steps with no shared files or output dependencies. +- **Plan mutation protocol** — Steps can be split, inserted, skipped, reordered, or abandoned with formal protocols and audit trail. +- **Zero runtime risk** — Pure Markdown skill. The entire repository contains only `.md` files — no hooks, no shell scripts, no executable code, no `package.json`, no build step. Nothing runs on install or invocation beyond Claude Code's native Markdown skill loader. ## Installation ```bash mkdir -p ~/.claude/skills git clone https://github.com/antbotlab/blueprint.git ~/.claude/skills/blueprint +cd ~/.claude/skills/blueprint +git checkout e6508e9258c763b67a9486af34de84d1e3b5cc74 # pin to reviewed version ``` -## Usage +To update later, review the [changelog](https://github.com/antbotlab/blueprint/blob/main/CHANGELOG.md) before pulling: +```bash +cd ~/.claude/skills/blueprint +git log --oneline HEAD..origin/main # review new commits +git pull ``` -/blueprint -``` - -## Key Features - -**Cold-start execution** — Every step includes a self-contained context brief. No prior context needed. - -**Adversarial review gate** — Every plan is reviewed by a strongest-model sub-agent (e.g., Opus) against a checklist covering completeness, dependency correctness, and anti-pattern detection. - -**Branch/PR/CI workflow** — Built into every step. Detects git/gh availability and degrades gracefully to direct mode when absent. - -**Parallel step detection** — Dependency graph identifies steps with no shared files or output dependencies. - -**Zero runtime risk** — Pure markdown skill. No hooks, no shell scripts, no executable code. No attack surface. - -**Plan mutation protocol** — Steps can be split, inserted, skipped, reordered, or abandoned with formal protocols and audit trail. 
## Requirements From 13fe21c5b7bd12625fbab94eba8f4d5d715490f0 Mon Sep 17 00:00:00 2001 From: ant Date: Wed, 11 Mar 2026 06:00:01 +0800 Subject: [PATCH 16/42] fix: add git fetch and use pinned checkout for update flow Address review feedback: - Add missing `git fetch origin` before comparing commits - Replace `git pull` with `git checkout ` for deterministic updates --- skills/blueprint/SKILL.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/skills/blueprint/SKILL.md b/skills/blueprint/SKILL.md index 4a927d5a..27882a20 100644 --- a/skills/blueprint/SKILL.md +++ b/skills/blueprint/SKILL.md @@ -83,8 +83,9 @@ To update later, review the [changelog](https://github.com/antbotlab/blueprint/b ```bash cd ~/.claude/skills/blueprint +git fetch origin git log --oneline HEAD..origin/main # review new commits -git pull +git checkout # pin to a specific reviewed commit ``` ## Requirements From 205fa728094c4268f073f83fbc3392a89aa35c1a Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:14:33 -0700 Subject: [PATCH 17/42] docs: align blueprint skill with ECC install flow --- skills/blueprint/SKILL.md | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/skills/blueprint/SKILL.md b/skills/blueprint/SKILL.md index 27882a20..69135cc7 100644 --- a/skills/blueprint/SKILL.md +++ b/skills/blueprint/SKILL.md @@ -72,20 +72,22 @@ Produces a plan with parallel steps where possible (e.g., "implement Anthropic p ## Installation +This skill ships with Everything Claude Code. No separate installation is needed when ECC is installed. 
+ ```bash -mkdir -p ~/.claude/skills -git clone https://github.com/antbotlab/blueprint.git ~/.claude/skills/blueprint -cd ~/.claude/skills/blueprint -git checkout e6508e9258c763b67a9486af34de84d1e3b5cc74 # pin to reviewed version +# Verify the skill is present in your ECC checkout +ls ~/.claude/plugins/ecc/skills/blueprint/SKILL.md ``` -To update later, review the [changelog](https://github.com/antbotlab/blueprint/blob/main/CHANGELOG.md) before pulling: +If you are vendoring only this skill outside the full ECC install, copy the reviewed file from this repository into `~/.claude/skills/blueprint/SKILL.md` and keep it pinned to a reviewed ECC commit. + +To update later, review the ECC diff before updating: ```bash -cd ~/.claude/skills/blueprint -git fetch origin -git log --oneline HEAD..origin/main # review new commits -git checkout # pin to a specific reviewed commit +cd /path/to/everything-claude-code +git fetch origin main +git log --oneline HEAD..origin/main # review new commits before updating +git checkout # pin to a specific reviewed commit ``` ## Requirements @@ -95,4 +97,4 @@ git checkout # pin to a specific reviewed commit ## Source -[github.com/antbotlab/blueprint](https://github.com/antbotlab/blueprint) — MIT License +Inspired by [github.com/antbotlab/blueprint](https://github.com/antbotlab/blueprint) — upstream project and reference design. From 7c82aebc76d66240c41be1449c682966924e0f3e Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:21:13 -0700 Subject: [PATCH 18/42] docs: tighten blueprint install guidance --- skills/blueprint/SKILL.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/skills/blueprint/SKILL.md b/skills/blueprint/SKILL.md index 69135cc7..1851c9a8 100644 --- a/skills/blueprint/SKILL.md +++ b/skills/blueprint/SKILL.md @@ -74,12 +74,13 @@ Produces a plan with parallel steps where possible (e.g., "implement Anthropic p This skill ships with Everything Claude Code. 
No separate installation is needed when ECC is installed. -```bash -# Verify the skill is present in your ECC checkout -ls ~/.claude/plugins/ecc/skills/blueprint/SKILL.md -``` +### Full ECC install -If you are vendoring only this skill outside the full ECC install, copy the reviewed file from this repository into `~/.claude/skills/blueprint/SKILL.md` and keep it pinned to a reviewed ECC commit. +If you are working from the ECC repository checkout, verify the skill is present with: + +```bash +test -f skills/blueprint/SKILL.md +``` To update later, review the ECC diff before updating: @@ -90,6 +91,10 @@ git log --oneline HEAD..origin/main # review new commits before updating git checkout # pin to a specific reviewed commit ``` +### Vendored standalone install + +If you are vendoring only this skill outside the full ECC install, copy the reviewed file from the ECC repository into `~/.claude/skills/blueprint/SKILL.md`. Vendored copies do not have a git remote, so update them by re-copying the file from a reviewed ECC commit rather than running `git pull`. + ## Requirements - Claude Code (for `/blueprint` slash command) @@ -97,4 +102,4 @@ git checkout # pin to a specific reviewed commit ## Source -Inspired by [github.com/antbotlab/blueprint](https://github.com/antbotlab/blueprint) — upstream project and reference design. +Inspired by antbotlab/blueprint — upstream project and reference design. 
From 77f38955b3bf01cdab61ee2a6347fd4b976b8f48 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:22:37 -0700 Subject: [PATCH 19/42] fix: refresh codex config and docs --- .codex/AGENTS.md | 16 +++++- .codex/agents/docs-researcher.toml | 9 ++++ .codex/agents/explorer.toml | 9 ++++ .codex/agents/reviewer.toml | 9 ++++ .codex/config.toml | 83 ++++++++++++++++++------------ README.md | 35 ++++++++++--- 6 files changed, 119 insertions(+), 42 deletions(-) create mode 100644 .codex/agents/docs-researcher.toml create mode 100644 .codex/agents/explorer.toml create mode 100644 .codex/agents/reviewer.toml diff --git a/.codex/AGENTS.md b/.codex/AGENTS.md index 03730594..6de01b1e 100644 --- a/.codex/AGENTS.md +++ b/.codex/AGENTS.md @@ -39,6 +39,20 @@ Available skills: Configure in `~/.codex/config.toml` under `[mcp_servers]`. See `.codex/config.toml` for reference configuration with GitHub, Context7, Memory, and Sequential Thinking servers. +## Multi-Agent Support + +Codex now supports multi-agent workflows behind the experimental `features.multi_agent` flag. + +- Enable it in `.codex/config.toml` with `[features] multi_agent = true` +- Define project-local roles under `[agents.]` +- Point each role at a TOML layer under `.codex/agents/` +- Use `/agent` inside Codex CLI to inspect and steer child agents + +Sample role configs in this repo: +- `.codex/agents/explorer.toml` — read-only evidence gathering +- `.codex/agents/reviewer.toml` — correctness/security review +- `.codex/agents/docs-researcher.toml` — API and release-note verification + ## Key Differences from Claude Code | Feature | Claude Code | Codex CLI | @@ -47,7 +61,7 @@ Configure in `~/.codex/config.toml` under `[mcp_servers]`. 
See `.codex/config.to | Context file | CLAUDE.md + AGENTS.md | AGENTS.md only | | Skills | Skills loaded via plugin | `.agents/skills/` directory | | Commands | `/slash` commands | Instruction-based | -| Agents | Subagent Task tool | Single agent model | +| Agents | Subagent Task tool | Multi-agent via `/agent` and `[agents.]` roles | | Security | Hook-based enforcement | Instruction + sandbox | | MCP | Full support | Command-based only | diff --git a/.codex/agents/docs-researcher.toml b/.codex/agents/docs-researcher.toml new file mode 100644 index 00000000..f8bd48dd --- /dev/null +++ b/.codex/agents/docs-researcher.toml @@ -0,0 +1,9 @@ +model = "gpt-5.4" +model_reasoning_effort = "medium" +sandbox_mode = "read-only" + +developer_instructions = """ +Verify APIs, framework behavior, and release-note claims against primary documentation before changes land. +Cite the exact docs or file paths that support each claim. +Do not invent undocumented behavior. +""" diff --git a/.codex/agents/explorer.toml b/.codex/agents/explorer.toml new file mode 100644 index 00000000..af7c1965 --- /dev/null +++ b/.codex/agents/explorer.toml @@ -0,0 +1,9 @@ +model = "o4-mini" +model_reasoning_effort = "medium" +sandbox_mode = "read-only" + +developer_instructions = """ +Stay in exploration mode. +Trace the real execution path, cite files and symbols, and avoid proposing fixes unless the parent agent asks for them. +Prefer targeted search and file reads over broad scans. +""" diff --git a/.codex/agents/reviewer.toml b/.codex/agents/reviewer.toml new file mode 100644 index 00000000..269c5e49 --- /dev/null +++ b/.codex/agents/reviewer.toml @@ -0,0 +1,9 @@ +model = "gpt-5.4" +model_reasoning_effort = "high" +sandbox_mode = "read-only" + +developer_instructions = """ +Review like an owner. +Prioritize correctness, security, behavioral regressions, and missing tests. +Lead with concrete findings and avoid style-only feedback unless it hides a real bug. 
+""" diff --git a/.codex/config.toml b/.codex/config.toml index 87d9747a..c60224ac 100644 --- a/.codex/config.toml +++ b/.codex/config.toml @@ -1,42 +1,37 @@ -# Everything Claude Code (ECC) — Codex CLI Reference Configuration +#:schema https://developers.openai.com/codex/config-schema.json + +# Everything Claude Code (ECC) — Codex Reference Configuration # -# Copy this file to ~/.codex/config.toml to apply globally. -# Or keep it in your project root for project-level config. +# Copy this file to ~/.codex/config.toml for global defaults, or keep it in +# the project root as .codex/config.toml for project-local settings. # -# Docs: https://github.com/openai/codex +# Official docs: +# - https://developers.openai.com/codex/config-reference +# - https://developers.openai.com/codex/multi-agent # Model selection model = "o4-mini" model_provider = "openai" -# Permissions -[permissions] -# "untrusted" = no writes, "on-request" = ask per action, "never" = full auto +# Top-level runtime settings (current Codex schema) approval_policy = "on-request" -# "off", "workspace-read", "workspace-write", "danger-full-access" sandbox_mode = "workspace-write" +web_search = "live" -# Notifications (macOS) -[notify] -command = "terminal-notifier -title 'Codex ECC' -message 'Task completed!' -sound default" -on_complete = true +# External notifications receive a JSON payload on stdin. +notify = [ + "terminal-notifier", + "-title", "Codex ECC", + "-message", "Task completed!", + "-sound", "default", +] -# History - persistent instructions applied to every session -[history] -# These are prepended to every conversation -persistent_instructions = """ -Follow ECC principles: -1. Test-Driven Development (TDD) - write tests first, 80%+ coverage required -2. Immutability - always create new objects, never mutate -3. Security-First - validate all inputs, no hardcoded secrets -4. Conventional commits: feat|fix|refactor|docs|test|chore|perf|ci: description -5. 
File organization: many small files (200-400 lines, 800 max) -6. Error handling: handle at every level, never swallow errors -7. Input validation: schema-based validation at system boundaries -""" +# Prefer AGENTS.md and project-local .codex/AGENTS.md for instructions. +# model_instructions_file replaces built-in instructions instead of AGENTS.md, +# so leave it unset unless you intentionally want a single override file. +# model_instructions_file = "/absolute/path/to/instructions.md" -# MCP Servers -# Codex supports command-based MCP servers +# MCP servers [mcp_servers.github] command = "npx" args = ["-y", "@modelcontextprotocol/server-github"] @@ -62,19 +57,41 @@ args = ["-y", "@modelcontextprotocol/server-sequential-thinking"] # command = "npx" # args = ["-y", "firecrawl-mcp"] # -# [mcp_servers.railway] +# [mcp_servers.cloudflare] # command = "npx" -# args = ["-y", "@anthropic/railway-mcp"] +# args = ["-y", "@cloudflare/mcp-server-cloudflare"] -# Features [features] -web_search_request = true +# Codex multi-agent support is experimental as of March 2026. +multi_agent = true -# Profiles — switch with CODEX_PROFILE= +# Profiles — switch with `codex -p ` [profiles.strict] approval_policy = "on-request" -sandbox_mode = "workspace-read" +sandbox_mode = "read-only" +web_search = "cached" [profiles.yolo] approval_policy = "never" sandbox_mode = "workspace-write" +web_search = "live" + +# Optional project-local multi-agent roles. +# Keep these commented in global config, because config_file paths are resolved +# relative to the config.toml file and must exist at load time. +# +# [agents] +# max_threads = 6 +# max_depth = 1 +# +# [agents.explorer] +# description = "Read-only codebase explorer for gathering evidence before changes are proposed." +# config_file = "agents/explorer.toml" +# +# [agents.reviewer] +# description = "PR reviewer focused on correctness, security, and missing tests." 
+# config_file = "agents/reviewer.toml" +# +# [agents.docs_researcher] +# description = "Documentation specialist that verifies APIs, framework behavior, and release notes." +# config_file = "agents/docs-researcher.toml" diff --git a/README.md b/README.md index 09267ca5..25d6c091 100644 --- a/README.md +++ b/README.md @@ -893,27 +893,29 @@ ECC provides **first-class Codex support** for both the macOS app and CLI, with ### Quick Start (Codex App + CLI) ```bash -# Copy the reference config to your home directory -cp .codex/config.toml ~/.codex/config.toml - -# Run Codex CLI in the repo — AGENTS.md is auto-detected +# Run Codex CLI in the repo — AGENTS.md and .codex/ are auto-detected codex + +# Optional: copy the global-safe defaults to your home directory +cp .codex/config.toml ~/.codex/config.toml ``` Codex macOS app: - Open this repository as your workspace. - The root `AGENTS.md` is auto-detected. -- Optional: copy `.codex/config.toml` to `~/.codex/config.toml` for CLI/app behavior consistency. +- `.codex/config.toml` and `.codex/agents/*.toml` work best when kept project-local. +- Optional: copy `.codex/config.toml` to `~/.codex/config.toml` for global defaults; keep the multi-agent role files project-local unless you also copy `.codex/agents/`. 
### What's Included | Component | Count | Details | |-----------|-------|---------| -| Config | 1 | `.codex/config.toml` — model, permissions, MCP servers, persistent instructions | +| Config | 1 | `.codex/config.toml` — top-level approvals/sandbox/web_search, MCP servers, notifications, profiles | | AGENTS.md | 2 | Root (universal) + `.codex/AGENTS.md` (Codex-specific supplement) | | Skills | 16 | `.agents/skills/` — SKILL.md + agents/openai.yaml per skill | | MCP Servers | 4 | GitHub, Context7, Memory, Sequential Thinking (command-based) | | Profiles | 2 | `strict` (read-only sandbox) and `yolo` (full auto-approve) | +| Agent Roles | 3 | `.codex/agents/` — explorer, reviewer, docs-researcher | ### Skills @@ -940,7 +942,24 @@ Skills at `.agents/skills/` are auto-loaded by Codex: ### Key Limitation -Codex does **not yet provide Claude-style hook execution parity**. ECC enforcement there is instruction-based via `AGENTS.md` and `persistent_instructions`, plus sandbox permissions. +Codex does **not yet provide Claude-style hook execution parity**. ECC enforcement there is instruction-based via `AGENTS.md`, optional `model_instructions_file` overrides, and sandbox/approval settings. + +### Multi-Agent Support + +Current Codex builds support experimental multi-agent workflows. + +- Enable `features.multi_agent = true` in `.codex/config.toml` +- Define roles under `[agents.]` +- Point each role at a file under `.codex/agents/` +- Use `/agent` in the CLI to inspect or steer child agents + +ECC ships three sample role configs: + +| Role | Purpose | +|------|---------| +| `explorer` | Read-only codebase evidence gathering before edits | +| `reviewer` | Correctness, security, and missing-test review | +| `docs_researcher` | Documentation and API verification before release/docs changes | --- @@ -1090,7 +1109,7 @@ ECC is the **first plugin to maximize every major AI coding tool**. 
Here's how e - **AGENTS.md** at root is the universal cross-tool file (read by all 4 tools) - **DRY adapter pattern** lets Cursor reuse Claude Code's hook scripts without duplication - **Skills format** (SKILL.md with YAML frontmatter) works across Claude Code, Codex, and OpenCode -- Codex's lack of hooks is compensated by `persistent_instructions` and sandbox permissions +- Codex's lack of hooks is compensated by `AGENTS.md`, optional `model_instructions_file` overrides, and sandbox permissions --- From 65cb240e881661ce5acbede2d846227134e3ba3f Mon Sep 17 00:00:00 2001 From: "ecc-tools[bot]" <257055122+ecc-tools[bot]@users.noreply.github.com> Date: Sat, 7 Mar 2026 14:48:04 -0800 Subject: [PATCH 20/42] feat: add everything-claude-code skill (#335) * feat: add everything-claude-code skill generated by ECC Tools * feat: add everything-claude-code instincts for continuous learning --------- Co-authored-by: ecc-tools[bot] <257055122+ecc-tools[bot]@users.noreply.github.com> --- .../everything-claude-code-instincts.yaml | 649 ++++++++++++++++++ .../skills/everything-claude-code/SKILL.md | 215 ++++++ 2 files changed, 864 insertions(+) create mode 100644 .claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml create mode 100644 .claude/skills/everything-claude-code/SKILL.md diff --git a/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml b/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml new file mode 100644 index 00000000..5639c86e --- /dev/null +++ b/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml @@ -0,0 +1,649 @@ +# Instincts generated from https://github.com/affaan-m/everything-claude-code +# Generated: 2026-03-04T22:56:16.069Z +# Version: 2.0 + +--- +id: everything-claude-code-commit-length +trigger: "when writing a commit message" +confidence: 0.6 +domain: git +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# 
Everything Claude Code Commit Length + +## Action + +Write moderate-length commit messages (~50 characters) + +## Evidence + +- Average commit message length: 70 chars +- Based on 200 commits + +--- +id: everything-claude-code-naming-files +trigger: "when creating a new file" +confidence: 0.8 +domain: code-style +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Naming Files + +## Action + +Use camelCase naming convention + +## Evidence + +- Analyzed file naming patterns in repository +- Dominant pattern: camelCase + +--- +id: everything-claude-code-export-style +trigger: "when exporting from a module" +confidence: 0.7 +domain: code-style +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Export Style + +## Action + +Prefer mixed exports + +## Evidence + +- Export pattern analysis +- Dominant style: mixed + +--- +id: everything-claude-code-test-separate +trigger: "when writing tests" +confidence: 0.8 +domain: testing +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Test Separate + +## Action + +Place tests in the tests/ or __tests__/ directory, mirroring src structure + +## Evidence + +- Separate test directory pattern detected +- Tests live in dedicated test folders + +--- +id: everything-claude-code-test-framework +trigger: "when writing tests" +confidence: 0.9 +domain: testing +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Test Framework + +## Action + +Use unknown as the test framework + +## Evidence + +- Test framework detected: unknown +- File pattern: *.test.js + +--- +id: everything-claude-code-test-naming +trigger: "when creating a test file" +confidence: 0.85 +domain: testing +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# 
Everything Claude Code Test Naming + +## Action + +Name test files using the pattern: *.test.js + +## Evidence + +- File pattern: *.test.js +- Consistent across test files + +--- +id: everything-claude-code-test-types +trigger: "when planning tests for a feature" +confidence: 0.7 +domain: testing +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Test Types + +## Action + +Write unit, integration tests to match project standards + +## Evidence + +- Test types detected: unit, integration +- Coverage config: yes + +--- +id: everything-claude-code-workflow-database-migration +trigger: "when modifying the database schema or adding tables" +confidence: 0.65 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Database Migration + +## Action + +Follow the database-migration workflow: +1. Create migration file +2. Update schema definitions +3. Generate/update types + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~3x per month +- Files: migrations/* + +--- +id: everything-claude-code-workflow-feature-development +trigger: "when implementing a new feature" +confidence: 0.9 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Feature Development + +## Action + +Follow the feature-development workflow: +1. Add feature implementation +2. Add tests for feature +3. 
Update documentation + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~14x per month +- Files: .opencode/*, .opencode/plugins/*, .opencode/tools/* + +--- +id: everything-claude-code-workflow-multi-language-documentation-sync +trigger: "when doing multi language documentation sync" +confidence: 0.9 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Multi Language Documentation Sync + +## Action + +Follow the multi-language-documentation-sync workflow: +1. Update English documentation +2. Translate changes to zh-CN +3. Translate changes to zh-TW +4. Translate changes to ja-JP +5. Update cross-references + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~10x per month +- Files: README.md, docs/zh-CN/*.md, docs/zh-TW/*.md + +--- +id: everything-claude-code-workflow-cross-platform-harness-sync +trigger: "when doing cross platform harness sync" +confidence: 0.9 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Cross Platform Harness Sync + +## Action + +Follow the cross-platform-harness-sync workflow: +1. Update main platform files +2. Port to .cursor/ directory +3. Port to .codex/ directory +4. Port to .opencode/ directory +5. Update platform-specific configs + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~8x per month +- Files: .cursor/*, .codex/*, .opencode/* + +--- +id: everything-claude-code-workflow-skill-addition-workflow +trigger: "when doing skill addition workflow" +confidence: 0.8 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Skill Addition Workflow + +## Action + +Follow the skill-addition-workflow workflow: +1. Create main skill SKILL.md +2. Add to README.md +3. Port to .cursor/skills/ +4. 
Port to .agents/skills/ with openai.yaml +5. Update package.json +6. Add translations if needed + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~6x per month +- Files: skills/*/SKILL.md, README.md, .cursor/skills/*/SKILL.md + +--- +id: everything-claude-code-workflow-version-release-workflow +trigger: "when doing version release workflow" +confidence: 0.65 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Version Release Workflow + +## Action + +Follow the version-release-workflow workflow: +1. Update package.json version +2. Update .claude-plugin/ manifests +3. Update .opencode/package.json +4. Update README version references +5. Update CHANGELOG.md + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~3x per month +- Files: package.json, .claude-plugin/plugin.json, .claude-plugin/marketplace.json + +--- +id: everything-claude-code-workflow-hook-system-updates +trigger: "when doing hook system updates" +confidence: 0.7 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Hook System Updates + +## Action + +Follow the hook-system-updates workflow: +1. Update hooks/hooks.json +2. Create/update hook scripts +3. Port to .cursor/hooks/ +4. Update tests/hooks/ or tests/integration/ +5. 
Update hook documentation + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~4x per month +- Files: hooks/hooks.json, scripts/hooks/*.js, .cursor/hooks/*.js + +--- +id: everything-claude-code-workflow-continuous-learning-updates +trigger: "when doing continuous learning updates" +confidence: 0.75 +domain: workflow +source: repo-analysis +source_repo: https://github.com/affaan-m/everything-claude-code +--- + +# Everything Claude Code Workflow Continuous Learning Updates + +## Action + +Follow the continuous-learning-updates workflow: +1. Update main continuous-learning-v2 skill +2. Update observer scripts +3. Port to platform-specific directories +4. Update translations +5. Update related commands + +## Evidence + +- Workflow detected from commit patterns +- Frequency: ~5x per month +- Files: skills/continuous-learning-v2/*, .cursor/skills/continuous-learning-v2/*, commands/evolve.md + +--- +id: everything-claude-code-camel-case-files +trigger: "When creating new JavaScript files" +confidence: 0.85 +domain: code-style +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Camel Case Files + +## Action + +Use camelCase naming convention + +## Evidence + +- Consistent camelCase pattern in codeStyle analysis +- Files follow camelCase in folderStructure + +--- +id: everything-claude-code-pascal-case-classes +trigger: "When defining JavaScript classes" +confidence: 0.85 +domain: code-style +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Pascal Case Classes + +## Action + +Use PascalCase naming convention + +## Evidence + +- PascalCase classes specified in namingConventions +- Consistent pattern across codebase + +--- +id: everything-claude-code-screaming-snake-constants +trigger: "When defining constants" +confidence: 0.85 +domain: code-style +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code 
Screaming Snake Constants + +## Action + +Use SCREAMING_SNAKE_CASE naming convention + +## Evidence + +- SCREAMING_SNAKE_CASE constants in namingConventions +- Standard JavaScript practice + +--- +id: everything-claude-code-try-catch-errors +trigger: "When handling potential errors" +confidence: 0.8 +domain: code-style +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Try Catch Errors + +## Action + +Use try-catch blocks for error handling + +## Evidence + +- try-catch style specified in errorHandling +- No custom error classes or global handlers detected + +--- +id: everything-claude-code-unit-integration-tests +trigger: "When writing tests" +confidence: 0.8 +domain: testing +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Unit Integration Tests + +## Action + +Create both unit and integration tests using *.test.js pattern + +## Evidence + +- testTypes include unit and integration +- filePattern shows *.test.js convention + +--- +id: everything-claude-code-conventional-commits +trigger: "When making commits" +confidence: 0.9 +domain: git +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Conventional Commits + +## Action + +Use conventional commit format with prefixes: feat, fix, docs, chore, test, refactor + +## Evidence + +- All commit examples follow conventional format +- Consistent prefixes across commit history + +--- +id: everything-claude-code-70-char-commit-length +trigger: "When writing commit messages" +confidence: 0.75 +domain: git +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code 70 Char Commit Length + +## Action + +Keep commit messages around 70 characters average length + +## Evidence + +- averageLength shows 70 characters +- Commit examples align with this pattern + +--- +id: everything-claude-code-multilang-docs-sync +trigger: "When updating 
core documentation or features" +confidence: 0.95 +domain: workflow +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Multilang Docs Sync + +## Action + +Update English first, then translate to zh-CN, zh-TW, ja-JP and update cross-references + +## Evidence + +- multi-language-documentation-sync workflow frequency ~10x/month +- Clear pattern in filesInvolved across language directories + +--- +id: everything-claude-code-cross-platform-sync +trigger: "When adding new features or updating platform configurations" +confidence: 0.9 +domain: workflow +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Cross Platform Sync + +## Action + +Port changes to .cursor/, .codex/, .opencode/, and .agents/ directories + +## Evidence + +- cross-platform-harness-sync workflow frequency ~8x/month +- Multiple platform directories in architecture + +--- +id: everything-claude-code-skill-structure +trigger: "When adding a new skill or capability" +confidence: 0.88 +domain: workflow +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Skill Structure + +## Action + +Create SKILL.md in main skills/, port to platform-specific directories, update README.md and package.json + +## Evidence + +- skill-addition-workflow frequency ~6x/month +- Consistent skill structure across platforms + +--- +id: everything-claude-code-version-release-sync +trigger: "When preparing a new release" +confidence: 0.85 +domain: workflow +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Version Release Sync + +## Action + +Update all package.json files, plugin manifests, README version references, and CHANGELOG.md + +## Evidence + +- version-release-workflow frequency ~3x/month +- Multiple package.json files in different directories + +--- +id: everything-claude-code-hook-system-testing +trigger: "When modifying hook 
behavior or adding new hooks" +confidence: 0.82 +domain: workflow +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Hook System Testing + +## Action + +Update hooks.json, create/update scripts, port to .cursor/, and update corresponding tests + +## Evidence + +- hook-system-updates workflow frequency ~4x/month +- hooks/ and tests/ directories in architecture + +--- +id: everything-claude-code-learning-system-updates +trigger: "When enhancing continuous learning capabilities" +confidence: 0.87 +domain: workflow +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Learning System Updates + +## Action + +Update main continuous-learning-v2 skill, port to platforms, update translations and related commands + +## Evidence + +- continuous-learning-updates workflow frequency ~5x/month +- commands/evolve.md and instinct-*.md files referenced + +--- +id: everything-claude-code-hybrid-module-organization +trigger: "When organizing code modules" +confidence: 0.75 +domain: architecture +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Hybrid Module Organization + +## Action + +Use hybrid module organization with separate test location + +## Evidence + +- moduleOrganization: hybrid +- testLocation: separate in architecture analysis + +--- +id: everything-claude-code-mixed-import-export +trigger: "When writing import/export statements" +confidence: 0.7 +domain: code-style +source: repo-analysis +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Mixed Import Export + +## Action + +Use mixed import and export styles as appropriate for the context + +## Evidence + +- importStyle: mixed and exportStyle: mixed in codeStyle +- Flexible approach across codebase + diff --git a/.claude/skills/everything-claude-code/SKILL.md b/.claude/skills/everything-claude-code/SKILL.md new file mode 100644 index 
00000000..92111818 --- /dev/null +++ b/.claude/skills/everything-claude-code/SKILL.md @@ -0,0 +1,215 @@ +# everything-claude-code Development Patterns + +> Auto-generated skill from repository analysis + +## Overview + +This skill teaches the development patterns for the everything-claude-code repository - a comprehensive multi-platform AI coding assistant system. The codebase maintains compatibility across multiple AI platforms (Claude, Cursor, Codex, OpenCode) while supporting internationalization across four languages (English, Chinese Simplified/Traditional, Japanese). The system focuses on skill-based architecture, continuous learning capabilities, and cross-platform synchronization. + +## Coding Conventions + +### File Naming +- Use **camelCase** for JavaScript files +- Use **kebab-case** for directories and skill names +- Test files follow pattern: `*.test.js` + +```javascript +// File structure examples +skills/continuousLearningV2/SKILL.md +tests/hooks/hookManager.test.js +scripts/platformSync.js +``` + +### Import/Export Style +- Mixed import styles (both CommonJS and ES modules supported) +- Mixed export styles based on platform compatibility + +```javascript +// CommonJS style +const hookManager = require('./hookManager'); +module.exports = { syncPlatforms }; + +// ES module style +import { translateDocs } from './translationUtils.js'; +export { processSkills }; +``` + +### Commit Conventions +- Use conventional commit prefixes: `feat:`, `fix:`, `test:`, `chore:`, `docs:` +- Keep commit messages under 70 characters +- Be descriptive about cross-platform impacts + +```bash +feat: add multi-language skill sync workflow +fix: resolve cursor platform hook integration +docs: update zh-CN translation for continuous learning +``` + +## Workflows + +### Multi-Language Documentation Sync +**Trigger:** When core documentation or features are updated +**Command:** `/sync-docs` + +1. Update English documentation in root README.md or skill files +2. 
Identify sections that need translation updates +3. Update Chinese Simplified (`docs/zh-CN/*.md`) +4. Update Traditional Chinese (`docs/zh-TW/*.md`) +5. Update Japanese (`docs/ja-JP/*.md`) +6. Verify cross-references and links work across all languages +7. Test that skill descriptions match across platforms + +```markdown + +# English: skills/example/SKILL.md +# Chinese: docs/zh-CN/skills/example/SKILL.md +# Japanese: docs/ja-JP/skills/example/SKILL.md +``` + +### Cross-Platform Harness Sync +**Trigger:** When adding new features or updating platform configurations +**Command:** `/sync-platforms` + +1. Update main platform files in root directory +2. Port changes to `.cursor/` directory for Cursor IDE +3. Port changes to `.codex/` directory for Codex integration +4. Port changes to `.opencode/` directory for OpenCode platform +5. Update platform-specific configuration files +6. Test that each platform can load and execute the changes +7. Verify skill compatibility across all platforms + +```javascript +// Platform-specific config example +// .cursor/package.json +{ + "name": "everything-claude-code-cursor", + "platform": "cursor" +} + +// .opencode/package.json +{ + "name": "everything-claude-code-opencode", + "platform": "opencode" +} +``` + +### Skill Addition Workflow +**Trigger:** When adding a new skill or capability +**Command:** `/add-skill` + +1. Create main skill directory: `skills/{skillName}/` +2. Write comprehensive `SKILL.md` with examples and usage +3. Add skill reference to root `README.md` +4. Port skill to `.cursor/skills/{skillName}/SKILL.md` +5. Port to `.agents/skills/{skillName}/` with `openai.yaml` +6. Update `package.json` with new skill metadata +7. Add translations for skill name and description +8. 
Test skill loading across all platforms + +```yaml +# .agents/skills/example/openai.yaml +name: example-skill +description: Example skill for demonstration +version: 1.0.0 +compatibility: ["openai", "claude", "cursor"] +``` + +### Version Release Workflow +**Trigger:** When preparing a new release +**Command:** `/release` + +1. Update version in root `package.json` +2. Update `.claude-plugin/plugin.json` manifest version +3. Update `.claude-plugin/marketplace.json` with release notes +4. Update `.opencode/package.json` version to match +5. Update version references in `README.md` +6. Update `CHANGELOG.md` with new features and fixes +7. Create git tag and push release +8. Verify plugin compatibility across platforms + +```json +// package.json version sync +{ + "version": "2.1.0", + "name": "everything-claude-code" +} +``` + +### Hook System Updates +**Trigger:** When modifying hook behavior or adding new hooks +**Command:** `/update-hooks` + +1. Update `hooks/hooks.json` with new hook definitions +2. Create or modify hook scripts in `scripts/hooks/` +3. Port hook implementations to `.cursor/hooks/` +4. Update tests in `tests/hooks/` or `tests/integration/` +5. Update hook documentation in `hooks/README.md` +6. Test hook execution across different trigger scenarios +7. Verify platform-specific hook behaviors + +```json +// hooks/hooks.json example +{ + "pre-commit": { + "script": "scripts/hooks/preCommit.js", + "platforms": ["cursor", "opencode"], + "enabled": true + } +} +``` + +### Continuous Learning Updates +**Trigger:** When enhancing the continuous learning capabilities +**Command:** `/update-learning` + +1. Update main `skills/continuous-learning-v2/` skill files +2. Modify observer scripts for better learning detection +3. Port updates to platform-specific skill directories +4. Update translation files for learning prompts +5. Update related commands in `commands/evolve.md` +6. Update instinct commands (`commands/instinct-*.md`) +7. 
Test learning feedback loops across platforms + +```javascript +// Learning observer example +class LearningObserver { + observePatterns() { + // Detect coding patterns + // Update skill knowledge + // Sync across platforms + } +} +``` + +## Testing Patterns + +Tests follow the `*.test.js` pattern and focus on: +- Cross-platform compatibility verification +- Multi-language content validation +- Hook execution testing +- Skill loading and execution + +```javascript +// Example test structure +describe('Platform Sync', () => { + test('should sync skills across all platforms', () => { + const platforms = ['cursor', 'opencode', 'codex']; + platforms.forEach(platform => { + expect(skillExists(platform, 'continuous-learning-v2')).toBe(true); + }); + }); +}); +``` + +## Commands + +| Command | Purpose | +|---------|---------| +| `/sync-docs` | Synchronize documentation across all language versions | +| `/sync-platforms` | Port changes across AI coding platforms | +| `/add-skill` | Add new skill with full cross-platform support | +| `/release` | Prepare and publish new version release | +| `/update-hooks` | Modify hook system and test integration | +| `/update-learning` | Enhance continuous learning capabilities | +| `/test-platforms` | Run compatibility tests across all platforms | +| `/translate` | Generate translations for new content | \ No newline at end of file From c883289abba987981f52989b000d093f4d5f415d Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:36:12 -0700 Subject: [PATCH 21/42] fix: curate everything-claude-code skill output --- .../everything-claude-code-instincts.yaml | 655 +++--------------- .../skills/everything-claude-code/SKILL.md | 280 +++----- 2 files changed, 165 insertions(+), 770 deletions(-) diff --git a/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml b/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml index 5639c86e..7818dff1 100644 --- 
a/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml +++ b/.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml @@ -1,458 +1,12 @@ -# Instincts generated from https://github.com/affaan-m/everything-claude-code -# Generated: 2026-03-04T22:56:16.069Z -# Version: 2.0 - ---- -id: everything-claude-code-commit-length -trigger: "when writing a commit message" -confidence: 0.6 -domain: git -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Commit Length - -## Action - -Write moderate-length commit messages (~50 characters) - -## Evidence - -- Average commit message length: 70 chars -- Based on 200 commits - ---- -id: everything-claude-code-naming-files -trigger: "when creating a new file" -confidence: 0.8 -domain: code-style -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Naming Files - -## Action - -Use camelCase naming convention - -## Evidence - -- Analyzed file naming patterns in repository -- Dominant pattern: camelCase - ---- -id: everything-claude-code-export-style -trigger: "when exporting from a module" -confidence: 0.7 -domain: code-style -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Export Style - -## Action - -Prefer mixed exports - -## Evidence - -- Export pattern analysis -- Dominant style: mixed - ---- -id: everything-claude-code-test-separate -trigger: "when writing tests" -confidence: 0.8 -domain: testing -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Test Separate - -## Action - -Place tests in the tests/ or __tests__/ directory, mirroring src structure - -## Evidence - -- Separate test directory pattern detected -- Tests live in dedicated test folders - ---- -id: everything-claude-code-test-framework -trigger: "when 
writing tests" -confidence: 0.9 -domain: testing -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Test Framework - -## Action - -Use unknown as the test framework - -## Evidence - -- Test framework detected: unknown -- File pattern: *.test.js - ---- -id: everything-claude-code-test-naming -trigger: "when creating a test file" -confidence: 0.85 -domain: testing -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Test Naming - -## Action - -Name test files using the pattern: *.test.js - -## Evidence - -- File pattern: *.test.js -- Consistent across test files - ---- -id: everything-claude-code-test-types -trigger: "when planning tests for a feature" -confidence: 0.7 -domain: testing -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Test Types - -## Action - -Write unit, integration tests to match project standards - -## Evidence - -- Test types detected: unit, integration -- Coverage config: yes - ---- -id: everything-claude-code-workflow-database-migration -trigger: "when modifying the database schema or adding tables" -confidence: 0.65 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Database Migration - -## Action - -Follow the database-migration workflow: -1. Create migration file -2. Update schema definitions -3. 
Generate/update types - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~3x per month -- Files: migrations/* - ---- -id: everything-claude-code-workflow-feature-development -trigger: "when implementing a new feature" -confidence: 0.9 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Feature Development - -## Action - -Follow the feature-development workflow: -1. Add feature implementation -2. Add tests for feature -3. Update documentation - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~14x per month -- Files: .opencode/*, .opencode/plugins/*, .opencode/tools/* - ---- -id: everything-claude-code-workflow-multi-language-documentation-sync -trigger: "when doing multi language documentation sync" -confidence: 0.9 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Multi Language Documentation Sync - -## Action - -Follow the multi-language-documentation-sync workflow: -1. Update English documentation -2. Translate changes to zh-CN -3. Translate changes to zh-TW -4. Translate changes to ja-JP -5. Update cross-references - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~10x per month -- Files: README.md, docs/zh-CN/*.md, docs/zh-TW/*.md - ---- -id: everything-claude-code-workflow-cross-platform-harness-sync -trigger: "when doing cross platform harness sync" -confidence: 0.9 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Cross Platform Harness Sync - -## Action - -Follow the cross-platform-harness-sync workflow: -1. Update main platform files -2. Port to .cursor/ directory -3. Port to .codex/ directory -4. Port to .opencode/ directory -5. 
Update platform-specific configs - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~8x per month -- Files: .cursor/*, .codex/*, .opencode/* - ---- -id: everything-claude-code-workflow-skill-addition-workflow -trigger: "when doing skill addition workflow" -confidence: 0.8 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Skill Addition Workflow - -## Action - -Follow the skill-addition-workflow workflow: -1. Create main skill SKILL.md -2. Add to README.md -3. Port to .cursor/skills/ -4. Port to .agents/skills/ with openai.yaml -5. Update package.json -6. Add translations if needed - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~6x per month -- Files: skills/*/SKILL.md, README.md, .cursor/skills/*/SKILL.md - ---- -id: everything-claude-code-workflow-version-release-workflow -trigger: "when doing version release workflow" -confidence: 0.65 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Version Release Workflow - -## Action - -Follow the version-release-workflow workflow: -1. Update package.json version -2. Update .claude-plugin/ manifests -3. Update .opencode/package.json -4. Update README version references -5. Update CHANGELOG.md - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~3x per month -- Files: package.json, .claude-plugin/plugin.json, .claude-plugin/marketplace.json - ---- -id: everything-claude-code-workflow-hook-system-updates -trigger: "when doing hook system updates" -confidence: 0.7 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Hook System Updates - -## Action - -Follow the hook-system-updates workflow: -1. Update hooks/hooks.json -2. Create/update hook scripts -3. 
Port to .cursor/hooks/ -4. Update tests/hooks/ or tests/integration/ -5. Update hook documentation - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~4x per month -- Files: hooks/hooks.json, scripts/hooks/*.js, .cursor/hooks/*.js - ---- -id: everything-claude-code-workflow-continuous-learning-updates -trigger: "when doing continuous learning updates" -confidence: 0.75 -domain: workflow -source: repo-analysis -source_repo: https://github.com/affaan-m/everything-claude-code ---- - -# Everything Claude Code Workflow Continuous Learning Updates - -## Action - -Follow the continuous-learning-updates workflow: -1. Update main continuous-learning-v2 skill -2. Update observer scripts -3. Port to platform-specific directories -4. Update translations -5. Update related commands - -## Evidence - -- Workflow detected from commit patterns -- Frequency: ~5x per month -- Files: skills/continuous-learning-v2/*, .cursor/skills/continuous-learning-v2/*, commands/evolve.md - ---- -id: everything-claude-code-camel-case-files -trigger: "When creating new JavaScript files" -confidence: 0.85 -domain: code-style -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Camel Case Files - -## Action - -Use camelCase naming convention - -## Evidence - -- Consistent camelCase pattern in codeStyle analysis -- Files follow camelCase in folderStructure - ---- -id: everything-claude-code-pascal-case-classes -trigger: "When defining JavaScript classes" -confidence: 0.85 -domain: code-style -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Pascal Case Classes - -## Action - -Use PascalCase naming convention - -## Evidence - -- PascalCase classes specified in namingConventions -- Consistent pattern across codebase - ---- -id: everything-claude-code-screaming-snake-constants -trigger: "When defining constants" -confidence: 0.85 -domain: code-style -source: repo-analysis 
-source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Screaming Snake Constants - -## Action - -Use SCREAMING_SNAKE_CASE naming convention - -## Evidence - -- SCREAMING_SNAKE_CASE constants in namingConventions -- Standard JavaScript practice - ---- -id: everything-claude-code-try-catch-errors -trigger: "When handling potential errors" -confidence: 0.8 -domain: code-style -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Try Catch Errors - -## Action - -Use try-catch blocks for error handling - -## Evidence - -- try-catch style specified in errorHandling -- No custom error classes or global handlers detected - ---- -id: everything-claude-code-unit-integration-tests -trigger: "When writing tests" -confidence: 0.8 -domain: testing -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Unit Integration Tests - -## Action - -Create both unit and integration tests using *.test.js pattern - -## Evidence - -- testTypes include unit and integration -- filePattern shows *.test.js convention +# Curated instincts for affaan-m/everything-claude-code +# Import with: /instinct-import .claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml --- id: everything-claude-code-conventional-commits -trigger: "When making commits" +trigger: "when making a commit in everything-claude-code" confidence: 0.9 domain: git -source: repo-analysis +source: repo-curation source_repo: affaan-m/everything-claude-code --- @@ -460,59 +14,99 @@ source_repo: affaan-m/everything-claude-code ## Action -Use conventional commit format with prefixes: feat, fix, docs, chore, test, refactor +Use conventional commit prefixes such as `feat:`, `fix:`, `docs:`, `test:`, `chore:`, and `refactor:`. ## Evidence -- All commit examples follow conventional format -- Consistent prefixes across commit history +- Mainline history consistently uses conventional commit subjects. 
+- Release and changelog automation expect readable commit categorization. --- -id: everything-claude-code-70-char-commit-length -trigger: "When writing commit messages" -confidence: 0.75 +id: everything-claude-code-commit-length +trigger: "when writing a commit subject in everything-claude-code" +confidence: 0.8 domain: git -source: repo-analysis +source: repo-curation source_repo: affaan-m/everything-claude-code --- -# Everything Claude Code 70 Char Commit Length +# Everything Claude Code Commit Length ## Action -Keep commit messages around 70 characters average length +Keep commit subjects concise and close to the repository norm of about 70 characters. ## Evidence -- averageLength shows 70 characters -- Commit examples align with this pattern +- Recent history clusters around ~70 characters, not ~50. +- Short, descriptive subjects read well in release notes and PR summaries. --- -id: everything-claude-code-multilang-docs-sync -trigger: "When updating core documentation or features" -confidence: 0.95 +id: everything-claude-code-js-file-naming +trigger: "when creating a new JavaScript or TypeScript module in everything-claude-code" +confidence: 0.85 +domain: code-style +source: repo-curation +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code JS File Naming + +## Action + +Prefer camelCase for JavaScript and TypeScript module filenames, and keep skill or command directories in kebab-case. + +## Evidence + +- `scripts/` and test helpers mostly use camelCase module names. +- `skills/` and `commands/` directories use kebab-case consistently. 
+ +--- +id: everything-claude-code-test-runner +trigger: "when adding or updating tests in everything-claude-code" +confidence: 0.9 +domain: testing +source: repo-curation +source_repo: affaan-m/everything-claude-code +--- + +# Everything Claude Code Test Runner + +## Action + +Use the repository's existing Node-based test flow: targeted `*.test.js` files first, then `node tests/run-all.js` or `npm test` for broader verification. + +## Evidence + +- The repo uses `tests/run-all.js` as the central test orchestrator. +- Test files follow the `*.test.js` naming pattern across hook, CI, and integration coverage. + +--- +id: everything-claude-code-hooks-change-set +trigger: "when modifying hooks or hook-adjacent behavior in everything-claude-code" +confidence: 0.88 domain: workflow -source: repo-analysis +source: repo-curation source_repo: affaan-m/everything-claude-code --- -# Everything Claude Code Multilang Docs Sync +# Everything Claude Code Hooks Change Set ## Action -Update English first, then translate to zh-CN, zh-TW, ja-JP and update cross-references +Update the hook script, its configuration, its tests, and its user-facing documentation together. ## Evidence -- multi-language-documentation-sync workflow frequency ~10x/month -- Clear pattern in filesInvolved across language directories +- Hook fixes routinely span `hooks/hooks.json`, `scripts/hooks/`, `tests/hooks/`, `tests/integration/`, and `hooks/README.md`. +- Partial hook changes are a common source of regressions and stale docs. 
--- id: everything-claude-code-cross-platform-sync -trigger: "When adding new features or updating platform configurations" +trigger: "when shipping a user-visible feature across ECC surfaces" confidence: 0.9 domain: workflow -source: repo-analysis +source: repo-curation source_repo: affaan-m/everything-claude-code --- @@ -520,130 +114,49 @@ source_repo: affaan-m/everything-claude-code ## Action -Port changes to .cursor/, .codex/, .opencode/, and .agents/ directories +Treat the root repo as the source of truth, then mirror shipped changes to `.cursor/`, `.codex/`, `.opencode/`, and `.agents/` only where the feature actually exists. ## Evidence -- cross-platform-harness-sync workflow frequency ~8x/month -- Multiple platform directories in architecture +- ECC maintains multiple harness-specific surfaces with overlapping but not identical files. +- The safest workflow is root-first followed by explicit parity updates. --- -id: everything-claude-code-skill-structure -trigger: "When adding a new skill or capability" -confidence: 0.88 +id: everything-claude-code-release-sync +trigger: "when preparing a release for everything-claude-code" +confidence: 0.86 domain: workflow -source: repo-analysis +source: repo-curation source_repo: affaan-m/everything-claude-code --- -# Everything Claude Code Skill Structure +# Everything Claude Code Release Sync ## Action -Create SKILL.md in main skills/, port to platform-specific directories, update README.md and package.json +Keep package versions, plugin manifests, and release-facing docs synchronized before publishing. ## Evidence -- skill-addition-workflow frequency ~6x/month -- Consistent skill structure across platforms +- Release work spans `package.json`, `.claude-plugin/*`, `.opencode/package.json`, and release-note content. +- Version drift causes broken update paths and confusing install surfaces. 
--- -id: everything-claude-code-version-release-sync -trigger: "When preparing a new release" -confidence: 0.85 +id: everything-claude-code-learning-curation +trigger: "when importing or evolving instincts for everything-claude-code" +confidence: 0.84 domain: workflow -source: repo-analysis +source: repo-curation source_repo: affaan-m/everything-claude-code --- -# Everything Claude Code Version Release Sync +# Everything Claude Code Learning Curation ## Action -Update all package.json files, plugin manifests, README version references, and CHANGELOG.md +Prefer a small set of accurate instincts over bulk-generated, duplicated, or contradictory instincts. ## Evidence -- version-release-workflow frequency ~3x/month -- Multiple package.json files in different directories - ---- -id: everything-claude-code-hook-system-testing -trigger: "When modifying hook behavior or adding new hooks" -confidence: 0.82 -domain: workflow -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Hook System Testing - -## Action - -Update hooks.json, create/update scripts, port to .cursor/, and update corresponding tests - -## Evidence - -- hook-system-updates workflow frequency ~4x/month -- hooks/ and tests/ directories in architecture - ---- -id: everything-claude-code-learning-system-updates -trigger: "When enhancing continuous learning capabilities" -confidence: 0.87 -domain: workflow -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Learning System Updates - -## Action - -Update main continuous-learning-v2 skill, port to platforms, update translations and related commands - -## Evidence - -- continuous-learning-updates workflow frequency ~5x/month -- commands/evolve.md and instinct-*.md files referenced - ---- -id: everything-claude-code-hybrid-module-organization -trigger: "When organizing code modules" -confidence: 0.75 -domain: architecture -source: repo-analysis -source_repo: 
affaan-m/everything-claude-code ---- - -# Everything Claude Code Hybrid Module Organization - -## Action - -Use hybrid module organization with separate test location - -## Evidence - -- moduleOrganization: hybrid -- testLocation: separate in architecture analysis - ---- -id: everything-claude-code-mixed-import-export -trigger: "When writing import/export statements" -confidence: 0.7 -domain: code-style -source: repo-analysis -source_repo: affaan-m/everything-claude-code ---- - -# Everything Claude Code Mixed Import Export - -## Action - -Use mixed import and export styles as appropriate for the context - -## Evidence - -- importStyle: mixed and exportStyle: mixed in codeStyle -- Flexible approach across codebase - +- Auto-generated instinct dumps can duplicate rules, widen triggers too far, or preserve placeholder detector output. +- Curated instincts are easier to import, audit, and trust during continuous-learning workflows. diff --git a/.claude/skills/everything-claude-code/SKILL.md b/.claude/skills/everything-claude-code/SKILL.md index 92111818..83da0161 100644 --- a/.claude/skills/everything-claude-code/SKILL.md +++ b/.claude/skills/everything-claude-code/SKILL.md @@ -1,215 +1,97 @@ -# everything-claude-code Development Patterns +# Everything Claude Code -> Auto-generated skill from repository analysis +Use this skill when working inside the `everything-claude-code` repository and you need repo-specific guidance instead of generic coding advice. -## Overview +Optional companion instincts live at `.claude/homunculus/instincts/inherited/everything-claude-code-instincts.yaml` for teams using `continuous-learning-v2`. -This skill teaches the development patterns for the everything-claude-code repository - a comprehensive multi-platform AI coding assistant system. 
The codebase maintains compatibility across multiple AI platforms (Claude, Cursor, Codex, OpenCode) while supporting internationalization across four languages (English, Chinese Simplified/Traditional, Japanese). The system focuses on skill-based architecture, continuous learning capabilities, and cross-platform synchronization. +## When to Use -## Coding Conventions +Activate this skill when the task touches one or more of these areas: +- cross-platform parity across Claude Code, Cursor, Codex, and OpenCode +- hook scripts, hook docs, or hook tests +- skills, commands, agents, or rules that must stay synchronized across surfaces +- release work such as version bumps, changelog updates, or plugin metadata updates +- continuous-learning or instinct workflows inside this repository -### File Naming -- Use **camelCase** for JavaScript files -- Use **kebab-case** for directories and skill names -- Test files follow pattern: `*.test.js` +## How It Works -```javascript -// File structure examples -skills/continuousLearningV2/SKILL.md -tests/hooks/hookManager.test.js -scripts/platformSync.js +### 1. Follow the repo's development contract + +- Use conventional commits such as `feat:`, `fix:`, `docs:`, `test:`, `chore:`. +- Keep commit subjects concise and close to the repo norm of about 70 characters. +- Prefer camelCase for JavaScript and TypeScript module filenames. +- Use kebab-case for skill directories and command filenames. +- Keep test files on the existing `*.test.js` pattern. + +### 2. Treat the root repo as the source of truth + +Start from the root implementation, then mirror changes where they are intentionally shipped. + +Typical mirror targets: +- `.cursor/` +- `.codex/` +- `.opencode/` +- `.agents/` + +Do not assume every `.claude/` artifact needs a cross-platform copy. Only mirror files that are part of the shipped multi-platform surface. + +### 3. Update hooks with tests and docs together + +When changing hook behavior: +1. 
update `hooks/hooks.json` or the relevant script in `scripts/hooks/` +2. update matching tests in `tests/hooks/` or `tests/integration/` +3. update `hooks/README.md` if behavior or configuration changed +4. verify parity for `.cursor/hooks/` and `.opencode/plugins/` when applicable + +### 4. Keep release metadata in sync + +When preparing a release, verify the same version is reflected anywhere it is surfaced: +- `package.json` +- `.claude-plugin/plugin.json` +- `.claude-plugin/marketplace.json` +- `.opencode/package.json` +- release notes or changelog entries when the release process expects them + +### 5. Be explicit about continuous-learning changes + +If the task touches `skills/continuous-learning-v2/` or imported instincts: +- prefer accurate, low-noise instincts over auto-generated bulk output +- keep instinct files importable by `instinct-cli.py` +- remove duplicated or contradictory instincts instead of layering more guidance on top + +## Examples + +### Naming examples + +```text +skills/continuous-learning-v2/SKILL.md +commands/update-docs.md +scripts/hooks/session-start.js +tests/hooks/hooks.test.js ``` -### Import/Export Style -- Mixed import styles (both CommonJS and ES modules supported) -- Mixed export styles based on platform compatibility +### Commit examples -```javascript -// CommonJS style -const hookManager = require('./hookManager'); -module.exports = { syncPlatforms }; - -// ES module style -import { translateDocs } from './translationUtils.js'; -export { processSkills }; +```text +fix: harden session summary extraction on Stop hook +docs: align Codex config examples with current schema +test: cover Windows formatter fallback behavior ``` -### Commit Conventions -- Use conventional commit prefixes: `feat:`, `fix:`, `test:`, `chore:`, `docs:` -- Keep commit messages under 70 characters -- Be descriptive about cross-platform impacts +### Skill update checklist -```bash -feat: add multi-language skill sync workflow -fix: resolve cursor platform 
hook integration -docs: update zh-CN translation for continuous learning +```text +1. Update the root skill or command. +2. Mirror it only where that surface is shipped. +3. Run targeted tests first, then the broader suite if behavior changed. +4. Review docs and release notes for user-visible changes. ``` -## Workflows +### Release checklist -### Multi-Language Documentation Sync -**Trigger:** When core documentation or features are updated -**Command:** `/sync-docs` - -1. Update English documentation in root README.md or skill files -2. Identify sections that need translation updates -3. Update Chinese Simplified (`docs/zh-CN/*.md`) -4. Update Traditional Chinese (`docs/zh-TW/*.md`) -5. Update Japanese (`docs/ja-JP/*.md`) -6. Verify cross-references and links work across all languages -7. Test that skill descriptions match across platforms - -```markdown - -# English: skills/example/SKILL.md -# Chinese: docs/zh-CN/skills/example/SKILL.md -# Japanese: docs/ja-JP/skills/example/SKILL.md +```text +1. Bump package and plugin versions. +2. Run npm test. +3. Verify platform-specific manifests. +4. Publish the release notes with a human-readable summary. ``` - -### Cross-Platform Harness Sync -**Trigger:** When adding new features or updating platform configurations -**Command:** `/sync-platforms` - -1. Update main platform files in root directory -2. Port changes to `.cursor/` directory for Cursor IDE -3. Port changes to `.codex/` directory for Codex integration -4. Port changes to `.opencode/` directory for OpenCode platform -5. Update platform-specific configuration files -6. Test that each platform can load and execute the changes -7. 
Verify skill compatibility across all platforms - -```javascript -// Platform-specific config example -// .cursor/package.json -{ - "name": "everything-claude-code-cursor", - "platform": "cursor" -} - -// .opencode/package.json -{ - "name": "everything-claude-code-opencode", - "platform": "opencode" -} -``` - -### Skill Addition Workflow -**Trigger:** When adding a new skill or capability -**Command:** `/add-skill` - -1. Create main skill directory: `skills/{skillName}/` -2. Write comprehensive `SKILL.md` with examples and usage -3. Add skill reference to root `README.md` -4. Port skill to `.cursor/skills/{skillName}/SKILL.md` -5. Port to `.agents/skills/{skillName}/` with `openai.yaml` -6. Update `package.json` with new skill metadata -7. Add translations for skill name and description -8. Test skill loading across all platforms - -```yaml -# .agents/skills/example/openai.yaml -name: example-skill -description: Example skill for demonstration -version: 1.0.0 -compatibility: ["openai", "claude", "cursor"] -``` - -### Version Release Workflow -**Trigger:** When preparing a new release -**Command:** `/release` - -1. Update version in root `package.json` -2. Update `.claude-plugin/plugin.json` manifest version -3. Update `.claude-plugin/marketplace.json` with release notes -4. Update `.opencode/package.json` version to match -5. Update version references in `README.md` -6. Update `CHANGELOG.md` with new features and fixes -7. Create git tag and push release -8. Verify plugin compatibility across platforms - -```json -// package.json version sync -{ - "version": "2.1.0", - "name": "everything-claude-code" -} -``` - -### Hook System Updates -**Trigger:** When modifying hook behavior or adding new hooks -**Command:** `/update-hooks` - -1. Update `hooks/hooks.json` with new hook definitions -2. Create or modify hook scripts in `scripts/hooks/` -3. Port hook implementations to `.cursor/hooks/` -4. Update tests in `tests/hooks/` or `tests/integration/` -5. 
Update hook documentation in `hooks/README.md` -6. Test hook execution across different trigger scenarios -7. Verify platform-specific hook behaviors - -```json -// hooks/hooks.json example -{ - "pre-commit": { - "script": "scripts/hooks/preCommit.js", - "platforms": ["cursor", "opencode"], - "enabled": true - } -} -``` - -### Continuous Learning Updates -**Trigger:** When enhancing the continuous learning capabilities -**Command:** `/update-learning` - -1. Update main `skills/continuous-learning-v2/` skill files -2. Modify observer scripts for better learning detection -3. Port updates to platform-specific skill directories -4. Update translation files for learning prompts -5. Update related commands in `commands/evolve.md` -6. Update instinct commands (`commands/instinct-*.md`) -7. Test learning feedback loops across platforms - -```javascript -// Learning observer example -class LearningObserver { - observePatterns() { - // Detect coding patterns - // Update skill knowledge - // Sync across platforms - } -} -``` - -## Testing Patterns - -Tests follow the `*.test.js` pattern and focus on: -- Cross-platform compatibility verification -- Multi-language content validation -- Hook execution testing -- Skill loading and execution - -```javascript -// Example test structure -describe('Platform Sync', () => { - test('should sync skills across all platforms', () => { - const platforms = ['cursor', 'opencode', 'codex']; - platforms.forEach(platform => { - expect(skillExists(platform, 'continuous-learning-v2')).toBe(true); - }); - }); -}); -``` - -## Commands - -| Command | Purpose | -|---------|---------| -| `/sync-docs` | Synchronize documentation across all language versions | -| `/sync-platforms` | Port changes across AI coding platforms | -| `/add-skill` | Add new skill with full cross-platform support | -| `/release` | Prepare and publish new version release | -| `/update-hooks` | Modify hook system and test integration | -| `/update-learning` | Enhance continuous 
learning capabilities | -| `/test-platforms` | Run compatibility tests across all platforms | -| `/translate` | Generate translations for new content | \ No newline at end of file From a50349181a19967beecdc617d32b26fc88053fc6 Mon Sep 17 00:00:00 2001 From: kinshukdutta Date: Mon, 9 Mar 2026 15:05:17 -0400 Subject: [PATCH 22/42] =?UTF-8?q?feat:=20architecture=20improvements=20?= =?UTF-8?q?=E2=80=94=20test=20discovery,=20hooks=20schema,=20catalog,=20co?= =?UTF-8?q?mmand=20map,=20coverage,=20cross-harness=20docs?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - AGENTS.md: sync skills count to 65+ - tests/run-all.js: glob-based test discovery for *.test.js - scripts/ci/validate-hooks.js: validate hooks.json with ajv + schemas/hooks.schema.json - schemas/hooks.schema.json: hookItem.type enum command|notification - scripts/ci/catalog.js: catalog agents, commands, skills (--json | --md) - docs/COMMAND-AGENT-MAP.md: command → agent/skill map - docs/ARCHITECTURE-IMPROVEMENTS.md: improvement recommendations - package.json: ajv, c8 devDeps; npm run coverage - CONTRIBUTING.md: Cross-Harness and Translations section - .gitignore: coverage/ Made-with: Cursor --- .gitignore | 1 + AGENTS.md | 10 +- CONTRIBUTING.md | 24 + docs/ARCHITECTURE-IMPROVEMENTS.md | 146 +++++ docs/COMMAND-AGENT-MAP.md | 61 ++ package-lock.json | 907 +++++++++++++++++++++++++++++- package.json | 5 +- schemas/hooks.schema.json | 13 +- scripts/ci/catalog.js | 71 +++ scripts/ci/validate-hooks.js | 18 +- tests/run-all.js | 34 +- 11 files changed, 1247 insertions(+), 43 deletions(-) create mode 100644 docs/ARCHITECTURE-IMPROVEMENTS.md create mode 100644 docs/COMMAND-AGENT-MAP.md create mode 100644 scripts/ci/catalog.js diff --git a/.gitignore b/.gitignore index 007765b7..7e37f0fd 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ node_modules/ # Build output dist/ +coverage/ # Python __pycache__/ diff --git a/AGENTS.md b/AGENTS.md index 52e96fab..3c0ada3b 100644 
--- a/AGENTS.md +++ b/AGENTS.md @@ -1,6 +1,6 @@ # Everything Claude Code (ECC) — Agent Instructions -This is a **production-ready AI coding plugin** providing 13 specialized agents, 50+ skills, 33 commands, and automated hook workflows for software development. +This is a **production-ready AI coding plugin** providing 16 specialized agents, 65+ skills, 40 commands, and automated hook workflows for software development. ## Core Principles @@ -27,6 +27,9 @@ This is a **production-ready AI coding plugin** providing 13 specialized agents, | go-build-resolver | Go build errors | Go build failures | | database-reviewer | PostgreSQL/Supabase specialist | Schema design, query optimization | | python-reviewer | Python code review | Python projects | +| chief-of-staff | Communication triage and drafts | Multi-channel email, Slack, LINE, Messenger | +| loop-operator | Autonomous loop execution | Run loops safely, monitor stalls, intervene | +| harness-optimizer | Harness config tuning | Reliability, cost, throughput | ## Agent Orchestration @@ -36,6 +39,9 @@ Use agents proactively without user prompt: - Bug fix or new feature → **tdd-guide** - Architectural decision → **architect** - Security-sensitive code → **security-reviewer** +- Multi-channel communication triage → **chief-of-staff** +- Autonomous loops / loop monitoring → **loop-operator** +- Harness config reliability and cost → **harness-optimizer** Use parallel execution for independent operations — launch multiple agents simultaneously. 
@@ -123,7 +129,7 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat ``` agents/ — 13 specialized subagents -skills/ — 50+ workflow skills and domain knowledge +skills/ — 65+ workflow skills and domain knowledge commands/ — 33 slash commands hooks/ — Trigger-based automations rules/ — Always-follow guidelines (common + per-language) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d7a6a664..61b0f92b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -10,6 +10,7 @@ Thanks for wanting to contribute! This repo is a community resource for Claude C - [Contributing Agents](#contributing-agents) - [Contributing Hooks](#contributing-hooks) - [Contributing Commands](#contributing-commands) +- [Cross-Harness and Translations](#cross-harness-and-translations) - [Pull Request Process](#pull-request-process) --- @@ -348,6 +349,29 @@ What the user receives. --- +## Cross-Harness and Translations + +### Skill subsets (Codex and Cursor) + +ECC ships skill subsets for other harnesses: + +- **Codex:** `.agents/skills/` — skills listed in `agents/openai.yaml` are loaded by Codex. +- **Cursor:** `.cursor/skills/` — a subset of skills is bundled for Cursor. + +When you **add a new skill** that should be available on Codex or Cursor: + +1. Add the skill under `skills/your-skill-name/` as usual. +2. If it should be available on **Codex**, add it to `.agents/skills/` (copy the skill directory or add a reference) and ensure it is referenced in `agents/openai.yaml` if required. +3. If it should be available on **Cursor**, add it under `.cursor/skills/` per Cursor's layout. + +Check existing skills in those directories for the expected structure. Keeping these subsets in sync is manual; mention in your PR if you updated them. + +### Translations + +Translations live under `docs/` (e.g. `docs/zh-CN`, `docs/zh-TW`, `docs/ja-JP`). 
If you change agents, commands, or skills that are translated, consider updating the corresponding translation files or opening an issue so maintainers or translators can update them. + +--- + ## Pull Request Process ### 1. PR Title Format diff --git a/docs/ARCHITECTURE-IMPROVEMENTS.md b/docs/ARCHITECTURE-IMPROVEMENTS.md new file mode 100644 index 00000000..f8a5e605 --- /dev/null +++ b/docs/ARCHITECTURE-IMPROVEMENTS.md @@ -0,0 +1,146 @@ +# Architecture Improvement Recommendations + +This document captures architect-level improvements for the Everything Claude Code (ECC) project. It is written from the perspective of a Claude Code coding architect aiming to improve maintainability, consistency, and long-term quality. + +--- + +## 1. Documentation and Single Source of Truth + +### 1.1 Agent / Command / Skill Count Sync + +**Issue:** AGENTS.md states "13 specialized agents, 50+ skills, 33 commands" while the repo has **16 agents**, **65+ skills**, and **40 commands**. README and other docs also vary. This causes confusion for contributors and users. + +**Recommendation:** + +- **Single source of truth:** Derive counts (and optionally tables) from the filesystem or a small manifest. Options: + - **Option A:** Add a script (e.g. `scripts/ci/catalog.js`) that scans `agents/*.md`, `commands/*.md`, and `skills/*/SKILL.md` and outputs JSON/Markdown. CI and docs can consume this. + - **Option B:** Maintain one `docs/catalog.json` (or YAML) that lists agents, commands, and skills with metadata; scripts and docs read from it. Requires discipline to update on add/remove. +- **Short-term:** Manually sync AGENTS.md, README.md, and CLAUDE.md with actual counts and list any new agents (e.g. chief-of-staff, loop-operator, harness-optimizer) in the agent table. + +**Impact:** High — affects first impression and contributor trust. 
+ +--- + +### 1.2 Command → Agent / Skill Map + +**Issue:** There is no single machine- or human-readable map of "which command uses which agent(s) or skill(s)." This lives in README tables and individual command `.md` files, which can drift. + +**Recommendation:** + +- Add a **command registry** (e.g. in `docs/` or as frontmatter in command files) that lists for each command: name, description, primary agent(s), skills referenced. Can be generated from command file content or maintained by hand. +- Expose a "map" in docs (e.g. `docs/COMMAND-AGENT-MAP.md`) or in the generated catalog for discoverability and for tooling (e.g. "which commands use tdd-guide?"). + +**Impact:** Medium — improves discoverability and refactoring safety. + +--- + +## 2. Testing and Quality + +### 2.1 Test Discovery vs Hardcoded List + +**Issue:** `tests/run-all.js` uses a **hardcoded list** of test files. New test files are not run unless someone updates `run-all.js`, so coverage can be incomplete by omission. + +**Recommendation:** + +- **Glob-based discovery:** Discover test files by pattern (e.g. `**/*.test.js` under `tests/`) and run them, with an optional allowlist/denylist for special cases. This makes new tests automatically part of the suite. +- Keep a single entry point (`tests/run-all.js`) that runs discovered tests and aggregates results. + +**Impact:** High — prevents regression where new tests exist but are never executed. + +--- + +### 2.2 Test Coverage Metrics + +**Issue:** There is no coverage tool (e.g. nyc/c8/istanbul). The project cannot assert "80%+ coverage" for its own scripts; coverage is implicit. + +**Recommendation:** + +- Introduce a coverage tool for Node scripts (e.g. `c8` or `nyc`) and run it in CI. Start with a baseline (e.g. 60%) and raise over time; or at least report coverage in CI without failing so the team can see trends. +- Focus on `scripts/` (lib + hooks + ci) as the primary target; exclude one-off scripts if needed. 
+ +**Impact:** Medium — aligns the project with its own AGENTS.md guidance (80%+ coverage) and surfaces untested paths. + +--- + +## 3. Schema and Validation + +### 3.1 Use Hooks JSON Schema in CI + +**Issue:** `schemas/hooks.schema.json` exists and defines the hook configuration shape, but `scripts/ci/validate-hooks.js` does **not** use it. Validation is duplicated (VALID_EVENTS, structure) and can drift from the schema. + +**Recommendation:** + +- Use a JSON Schema validator (e.g. `ajv`) in `validate-hooks.js` to validate `hooks/hooks.json` against `schemas/hooks.schema.json`. Keep the validator as the single source of truth for structure; retain only hook-specific checks (e.g. inline JS syntax) in the script. +- Ensures schema and validator stay in sync and allows IDE/editor validation via `$schema` in hooks.json. + +**Impact:** Medium — reduces drift and improves contributor experience when editing hooks. + +--- + +## 4. Cross-Harness and i18n + +### 4.1 Skill/Agent Subset Sync (.agents/skills, .cursor/skills) + +**Issue:** `.agents/skills/` (Codex) and `.cursor/skills/` are subsets of `skills/`. Adding or removing a skill in the main repo requires manually updating these subsets, which can be forgotten. + +**Recommendation:** + +- Document in CONTRIBUTING.md that adding a skill may require updating `.agents/skills` and `.cursor/skills` (and how to do it). +- Optionally: a CI check or script that compares `skills/` to the subsets and fails or warns if a skill is in one set but not the other when it should be (e.g. by convention or by a small manifest). + +**Impact:** Low–Medium — reduces cross-harness drift. + +--- + +### 4.2 Translation Drift (docs/ zh-CN, zh-TW, ja-JP) + +**Issue:** Translations in `docs/` duplicate agents, commands, skills. As the English source evolves, translations can become outdated without clear process or tooling. + +**Recommendation:** + +- Document a **translation process:** when to update (e.g. 
on release), who owns each locale, and how to detect stale content (e.g. diff file lists or key sections). +- Consider: translation status file (e.g. `docs/i18n-status.md`) or CI that checks translation file existence/timestamps and warns if English was updated more recently than a translation. +- Long-term: consider extraction/placeholder format (e.g. i18n keys) so translations reference the same structure as the English source. + +**Impact:** Medium — improves experience for non-English users and reduces confusion from outdated translations. + +--- + +## 5. Hooks and Scripts + +### 5.1 Hook Runtime Consistency + +**Issue:** Most hooks invoke Node scripts via `run-with-flags.js`; one path uses `run-with-flags-shell.sh` + `observe.sh`. The mixed runtime is documented but could be simplified over time. + +**Recommendation:** + +- Prefer Node for new hooks when possible (cross-platform, single runtime). If shell is required, document why and keep the surface small. +- Ensure `ECC_HOOK_PROFILE` and `ECC_DISABLED_HOOKS` are respected in all code paths (including shell) so behavior is consistent. + +**Impact:** Low — maintains current design; improves if more hooks migrate to Node. + +--- + +## 6. Summary Table + +| Area | Improvement | Priority | Effort | +|-------------------|--------------------------------------|----------|---------| +| Doc sync | Sync AGENTS.md/README counts & table | High | Low | +| Single source | Catalog script or manifest | High | Medium | +| Test discovery | Glob-based test runner | High | Low | +| Coverage | Add c8/nyc and CI coverage | Medium | Medium | +| Hook schema in CI | Validate hooks.json via schema | Medium | Low | +| Command map | Command → agent/skill registry | Medium | Medium | +| Subset sync | Document/CI for .agents/.cursor | Low–Med | Low–Med | +| Translations | Process + stale detection | Medium | Medium | +| Hook runtime | Prefer Node; document shell use | Low | Low | + +--- + +## 7. Quick Wins (Immediate) + +1. 
**Update AGENTS.md:** Set agent count to 16; add chief-of-staff, loop-operator, harness-optimizer to the agent table; align skill/command counts with repo. +2. **Test discovery:** Change `run-all.js` to discover `**/*.test.js` under `tests/` (with optional allowlist) so new tests are always run. +3. **Wire hooks schema:** In `validate-hooks.js`, validate `hooks/hooks.json` against `schemas/hooks.schema.json` using ajv (or similar) and keep only hook-specific checks in the script. + +These three can be done in one or two sessions and materially improve consistency and reliability. diff --git a/docs/COMMAND-AGENT-MAP.md b/docs/COMMAND-AGENT-MAP.md new file mode 100644 index 00000000..d5e462de --- /dev/null +++ b/docs/COMMAND-AGENT-MAP.md @@ -0,0 +1,61 @@ +# Command → Agent / Skill Map + +This document lists each slash command and the primary agent(s) or skills it invokes. Use it to discover which commands use which agents and to keep refactoring consistent. + +| Command | Primary agent(s) | Notes | +|---------|------------------|--------| +| `/plan` | planner | Implementation planning before code | +| `/tdd` | tdd-guide | Test-driven development | +| `/code-review` | code-reviewer | Quality and security review | +| `/build-fix` | build-error-resolver | Fix build/type errors | +| `/e2e` | e2e-runner | Playwright E2E tests | +| `/refactor-clean` | refactor-cleaner | Dead code removal | +| `/update-docs` | doc-updater | Documentation sync | +| `/update-codemaps` | doc-updater | Codemaps / architecture docs | +| `/go-review` | go-reviewer | Go code review | +| `/go-test` | tdd-guide | Go TDD workflow | +| `/go-build` | go-build-resolver | Fix Go build errors | +| `/python-review` | python-reviewer | Python code review | +| `/harness-audit` | — | Harness scorecard (no single agent) | +| `/loop-start` | loop-operator | Start autonomous loop | +| `/loop-status` | loop-operator | Inspect loop status | +| `/quality-gate` | — | Quality pipeline (hook-like) | +| `/model-route` 
| — | Model recommendation (no agent) | +| `/orchestrate` | planner, tdd-guide, code-reviewer, security-reviewer, architect | Multi-agent handoff | +| `/multi-plan` | architect (Codex/Gemini prompts) | Multi-model planning | +| `/multi-execute` | architect / frontend prompts | Multi-model execution | +| `/multi-backend` | architect | Backend multi-service | +| `/multi-frontend` | architect | Frontend multi-service | +| `/multi-workflow` | architect | General multi-service | +| `/learn` | — | continuous-learning skill, instincts | +| `/learn-eval` | — | continuous-learning-v2, evaluate then save | +| `/instinct-status` | — | continuous-learning-v2 | +| `/instinct-import` | — | continuous-learning-v2 | +| `/instinct-export` | — | continuous-learning-v2 | +| `/evolve` | — | continuous-learning-v2, cluster instincts | +| `/promote` | — | continuous-learning-v2 | +| `/projects` | — | continuous-learning-v2 | +| `/skill-create` | — | skill-create-output script, git history | +| `/checkpoint` | — | verification-loop skill | +| `/verify` | — | verification-loop skill | +| `/eval` | — | eval-harness skill | +| `/test-coverage` | — | Coverage analysis | +| `/sessions` | — | Session history | +| `/setup-pm` | — | Package manager setup script | +| `/claw` | — | NanoClaw CLI (scripts/claw.js) | +| `/pm2` | — | PM2 service lifecycle | +| `/security-scan` | security-reviewer (skill) | AgentShield via security-scan skill | + +## Skills referenced by commands + +- **continuous-learning**, **continuous-learning-v2**: `/learn`, `/learn-eval`, `/instinct-*`, `/evolve`, `/promote`, `/projects` +- **verification-loop**: `/checkpoint`, `/verify` +- **eval-harness**: `/eval` +- **security-scan**: `/security-scan` (runs AgentShield) +- **strategic-compact**: suggested at compaction points (hooks) + +## How to use this map + +- **Discoverability:** Find which command triggers which agent (e.g. “use `/code-review` for code-reviewer”). 
+- **Refactoring:** When renaming or removing an agent, search this doc and the command files for references. +- **CI/docs:** The catalog script (`node scripts/ci/catalog.js`) outputs agent/command/skill counts; this map complements it with command–agent relationships. diff --git a/package-lock.json b/package-lock.json index 5034650c..7658be1b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,6 +14,8 @@ }, "devDependencies": { "@eslint/js": "^9.39.2", + "ajv": "^8.17.0", + "c8": "^10.1.2", "eslint": "^9.39.2", "globals": "^17.1.0", "markdownlint-cli": "^0.47.0" @@ -22,6 +24,16 @@ "node": ">=18" } }, + "node_modules/@bcoe/v8-coverage": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-1.0.2.tgz", + "integrity": "sha512-6zABk/ECA/QYSCQ1NGiVwwbQerUCZ+TQbp64Q3AgmfNvurHH0j8TtXa1qbShXA6qqkpAj4V5W8pP6mLe1mcMqA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/@eslint-community/eslint-utils": { "version": "4.9.1", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", @@ -129,6 +141,23 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, "node_modules/@eslint/eslintrc/node_modules/globals": { "version": "14.0.0", "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", @@ -142,6 +171,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + 
"node_modules/@eslint/eslintrc/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, "node_modules/@eslint/js": { "version": "9.39.2", "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.2.tgz", @@ -254,6 +290,98 @@ "node": "20 || >=22" } }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": 
"https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@types/debug": { "version": "4.1.12", "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz", @@ -271,6 +399,13 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/istanbul-lib-coverage": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz", + "integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/json-schema": { "version": "7.0.15", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", @@ -305,7 +440,6 @@ "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", - "peer": true, "bin": { "acorn": "bin/acorn" }, @@ -324,16 +458,16 @@ } }, "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz", + "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==", "dev": true, "license": "MIT", "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" }, "funding": { "type": "github", @@ -394,6 +528,40 @@ "concat-map": "0.0.1" } }, + "node_modules/c8": { + "version": "10.1.3", + "resolved": "https://registry.npmjs.org/c8/-/c8-10.1.3.tgz", + "integrity": "sha512-LvcyrOAaOnrrlMpW22n690PUvxiq4Uf9WMhQwNJ9vgagkL/ph1+D4uvjvDA5XCbykrc0sx+ay6pVi9YZ1GnhyA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@bcoe/v8-coverage": "^1.0.1", + "@istanbuljs/schema": "^0.1.3", + "find-up": "^5.0.0", + "foreground-child": "^3.1.1", + "istanbul-lib-coverage": "^3.2.0", + "istanbul-lib-report": "^3.0.1", + "istanbul-reports": "^3.1.6", + "test-exclude": "^7.0.1", + "v8-to-istanbul": 
"^9.0.0", + "yargs": "^17.7.2", + "yargs-parser": "^21.1.1" + }, + "bin": { + "c8": "bin/c8.js" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "monocart-coverage-reports": "^2" + }, + "peerDependenciesMeta": { + "monocart-coverage-reports": { + "optional": true + } + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -454,6 +622,77 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -491,6 +730,13 @@ "dev": true, "license": "MIT" }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, "node_modules/cross-spawn": { "version": "7.0.6", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", @@ -579,6 +825,20 @@ "url": "https://github.com/sponsors/wooorm" } }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, "node_modules/entities": { "version": "4.5.0", "resolved": 
"https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", @@ -592,6 +852,16 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/escape-string-regexp": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", @@ -611,7 +881,6 @@ "integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==", "dev": true, "license": "MIT", - "peer": true, "dependencies": { "@eslint-community/eslint-utils": "^4.8.0", "@eslint-community/regexpp": "^4.12.1", @@ -696,6 +965,30 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/eslint/node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/eslint/node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, "node_modules/espree": { "version": "10.4.0", "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", @@ -781,6 +1074,23 @@ "dev": true, "license": "MIT" }, + 
"node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, "node_modules/fdir": { "version": "6.5.0", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", @@ -850,6 +1160,33 @@ "dev": true, "license": "ISC" }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "dev": true, + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, "node_modules/get-east-asian-width": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz", @@ -863,6 +1200,28 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": "Old versions of glob are not supported, and contain widely 
publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -876,6 +1235,32 @@ "node": ">=10.13.0" } }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.2" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/globals": { "version": "17.1.0", "resolved": "https://registry.npmjs.org/globals/-/globals-17.1.0.tgz", @@ -899,6 +1284,13 @@ "node": ">=8" } }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, 
"node_modules/ignore": { "version": "5.3.2", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", @@ -993,6 +1385,16 @@ "node": ">=0.10.0" } }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -1024,6 +1426,61 @@ "dev": true, "license": "ISC" }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jackspeak": { + 
"version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/js-yaml": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", @@ -1045,9 +1502,9 @@ "license": "MIT" }, "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "dev": true, "license": "MIT" }, @@ -1159,6 +1616,29 @@ "dev": true, "license": "MIT" }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/markdown-it": { "version": "14.1.0", "resolved": 
"https://registry.npmjs.org/markdown-it/-/markdown-it-14.1.0.tgz", @@ -1820,6 +2300,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/minipass": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", + "integrity": "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -1884,6 +2374,13 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -1937,13 +2434,29 @@ "node": ">=8" } }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/picomatch": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", - "peer": true, "engines": { "node": ">=12" }, @@ -1981,6 
+2494,26 @@ "node": ">=6" } }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -2007,6 +2540,19 @@ "run-con": "cli.js" } }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", @@ -2030,6 +2576,19 @@ "node": ">=8" } }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/smol-toml": { "version": "1.5.2", "resolved": "https://registry.npmjs.org/smol-toml/-/smol-toml-1.5.2.tgz", @@ -2060,6 +2619,45 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/strip-ansi": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", @@ -2076,6 +2674,30 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": 
"https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/strip-json-comments": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", @@ -2102,6 +2724,60 @@ "node": ">=8" } }, + "node_modules/test-exclude": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.2.tgz", + "integrity": "sha512-u9E6A+ZDYdp7a4WnarkXPZOx8Ilz46+kby6p1yZ8zsGTz9gYa6FIS7lj2oezzNKmtdyyJNNmmXDppga5GB7kSw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^10.4.1", + "minimatch": "^10.2.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/test-exclude/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/test-exclude/node_modules/brace-expansion": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz", + "integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/test-exclude/node_modules/minimatch": { + "version": "10.2.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.4.tgz", + "integrity": "sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==", + "dev": true, + "license": "BlueOak-1.0.0", + 
"dependencies": { + "brace-expansion": "^5.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/tinyglobby": { "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", @@ -2149,6 +2825,21 @@ "punycode": "^2.1.0" } }, + "node_modules/v8-to-istanbul": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz", + "integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.12", + "@types/istanbul-lib-coverage": "^2.0.1", + "convert-source-map": "^2.0.0" + }, + "engines": { + "node": ">=10.12.0" + } + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", @@ -2175,6 +2866,196 @@ "node": ">=0.10.0" } }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": 
"https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" 
+ }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=8" + } + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", diff --git a/package.json b/package.json index 685ec79e..65054448 100644 --- a/package.json +++ b/package.json @@ -83,10 +83,13 @@ "postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'", "lint": "eslint . 
&& markdownlint '**/*.md' --ignore node_modules", "claw": "node scripts/claw.js", - "test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js" + "test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js", + "coverage": "c8 --all --include='scripts/**/*.js' --reporter=text --reporter=lcov node tests/run-all.js" }, "devDependencies": { "@eslint/js": "^9.39.2", + "ajv": "^8.17.0", + "c8": "^10.1.2", "eslint": "^9.39.2", "globals": "^17.1.0", "markdownlint-cli": "^0.47.0" diff --git a/schemas/hooks.schema.json b/schemas/hooks.schema.json index cd58cfb1..3aa2ba1f 100644 --- a/schemas/hooks.schema.json +++ b/schemas/hooks.schema.json @@ -12,17 +12,8 @@ "properties": { "type": { "type": "string", - "enum": [ - "PreToolUse", - "PostToolUse", - "PreCompact", - "SessionStart", - "SessionEnd", - "Stop", - "Notification", - "SubagentStop" - ], - "description": "Hook event type that triggers this hook" + "enum": ["command", "notification"], + "description": "Hook action type (command or notification)" }, "command": { "oneOf": [ diff --git a/scripts/ci/catalog.js b/scripts/ci/catalog.js new file mode 100644 index 00000000..1e279445 --- /dev/null +++ b/scripts/ci/catalog.js @@ -0,0 +1,71 @@ +#!/usr/bin/env node +/** + * Catalog agents, commands, and skills from the repo. + * Outputs JSON with counts and lists for CI/docs sync. 
+ * + * Usage: node scripts/ci/catalog.js [--json|--md] + * Default: --json to stdout + */ + +const fs = require('fs'); +const path = require('path'); + +const ROOT = path.join(__dirname, '../..'); +const AGENTS_DIR = path.join(ROOT, 'agents'); +const COMMANDS_DIR = path.join(ROOT, 'commands'); +const SKILLS_DIR = path.join(ROOT, 'skills'); + +function listAgents() { + if (!fs.existsSync(AGENTS_DIR)) return []; + return fs.readdirSync(AGENTS_DIR) + .filter(f => f.endsWith('.md')) + .map(f => f.slice(0, -3)) + .sort(); +} + +function listCommands() { + if (!fs.existsSync(COMMANDS_DIR)) return []; + return fs.readdirSync(COMMANDS_DIR) + .filter(f => f.endsWith('.md')) + .map(f => f.slice(0, -3)) + .sort(); +} + +function listSkills() { + if (!fs.existsSync(SKILLS_DIR)) return []; + const entries = fs.readdirSync(SKILLS_DIR, { withFileTypes: true }); + return entries + .filter(e => e.isDirectory() && fs.existsSync(path.join(SKILLS_DIR, e.name, 'SKILL.md'))) + .map(e => e.name) + .sort(); +} + +function run() { + const agents = listAgents(); + const commands = listCommands(); + const skills = listSkills(); + + const catalog = { + agents: { count: agents.length, list: agents }, + commands: { count: commands.length, list: commands }, + skills: { count: skills.length, list: skills } + }; + + const format = process.argv[2] === '--md' ? 
'md' : 'json'; + if (format === 'md') { + console.log('# ECC Catalog (generated)\n'); + console.log(`- **Agents:** ${catalog.agents.count}`); + console.log(`- **Commands:** ${catalog.commands.count}`); + console.log(`- **Skills:** ${catalog.skills.count}\n`); + console.log('## Agents\n'); + catalog.agents.list.forEach(a => console.log(`- ${a}`)); + console.log('\n## Commands\n'); + catalog.commands.list.forEach(c => console.log(`- ${c}`)); + console.log('\n## Skills\n'); + catalog.skills.list.forEach(s => console.log(`- ${s}`)); + } else { + console.log(JSON.stringify(catalog, null, 2)); + } +} + +run(); diff --git a/scripts/ci/validate-hooks.js b/scripts/ci/validate-hooks.js index 0b335232..7dd5ebee 100644 --- a/scripts/ci/validate-hooks.js +++ b/scripts/ci/validate-hooks.js @@ -1,13 +1,15 @@ #!/usr/bin/env node /** - * Validate hooks.json schema + * Validate hooks.json schema and hook entry rules. */ const fs = require('fs'); const path = require('path'); const vm = require('vm'); +const Ajv = require('ajv'); const HOOKS_FILE = path.join(__dirname, '../../hooks/hooks.json'); +const HOOKS_SCHEMA_PATH = path.join(__dirname, '../../schemas/hooks.schema.json'); const VALID_EVENTS = ['PreToolUse', 'PostToolUse', 'PreCompact', 'SessionStart', 'SessionEnd', 'Stop', 'Notification', 'SubagentStop']; /** @@ -67,6 +69,20 @@ function validateHooks() { process.exit(1); } + // Validate against JSON schema + if (fs.existsSync(HOOKS_SCHEMA_PATH)) { + const schema = JSON.parse(fs.readFileSync(HOOKS_SCHEMA_PATH, 'utf-8')); + const ajv = new Ajv({ allErrors: true }); + const validate = ajv.compile(schema); + const valid = validate(data); + if (!valid) { + for (const err of validate.errors) { + console.error(`ERROR: hooks.json schema: ${err.instancePath || '/'} ${err.message}`); + } + process.exit(1); + } + } + // Support both object format { hooks: {...} } and array format const hooks = data.hooks || data; let hasErrors = false; diff --git a/tests/run-all.js b/tests/run-all.js 
index 54d8c212..25d44666 100644 --- a/tests/run-all.js +++ b/tests/run-all.js @@ -10,21 +10,25 @@ const path = require('path'); const fs = require('fs'); const testsDir = __dirname; -const testFiles = [ - 'lib/utils.test.js', - 'lib/package-manager.test.js', - 'lib/session-manager.test.js', - 'lib/session-aliases.test.js', - 'lib/project-detect.test.js', - 'hooks/hooks.test.js', - 'hooks/evaluate-session.test.js', - 'hooks/suggest-compact.test.js', - 'integration/hooks.test.js', - 'ci/validators.test.js', - 'scripts/claw.test.js', - 'scripts/setup-package-manager.test.js', - 'scripts/skill-create-output.test.js' -]; + +/** + * Discover all *.test.js files under testsDir (relative paths for stable output order). + */ +function discoverTestFiles(dir, baseDir = dir, acc = []) { + const entries = fs.readdirSync(dir, { withFileTypes: true }); + for (const e of entries) { + const full = path.join(dir, e.name); + const rel = path.relative(baseDir, full); + if (e.isDirectory()) { + discoverTestFiles(full, baseDir, acc); + } else if (e.isFile() && e.name.endsWith('.test.js')) { + acc.push(rel); + } + } + return acc.sort(); +} + +const testFiles = discoverTestFiles(testsDir); const BOX_W = 58; // inner width between ║ delimiters const boxLine = (s) => `║${s.padEnd(BOX_W)}║`; From 770505191003c21bab81272931e17654b4de3a15 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 19:31:02 -0700 Subject: [PATCH 23/42] fix: align architecture tooling with current hooks docs --- AGENTS.md | 2 +- package.json | 2 +- schemas/hooks.schema.json | 131 +++++++++++++++++++++++++++++++---- scripts/ci/catalog.js | 44 +++++++----- scripts/ci/validate-hooks.js | 115 ++++++++++++++++++++++++------ tests/ci/validators.test.js | 29 +++++++- tests/hooks/hooks.test.js | 1 + tests/run-all.js | 11 +++ 8 files changed, 282 insertions(+), 53 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 3c0ada3b..8e9e9fd1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -130,7 +130,7 @@ Troubleshoot 
failures: check test isolation → verify mocks → fix implementat ``` agents/ — 13 specialized subagents skills/ — 65+ workflow skills and domain knowledge -commands/ — 33 slash commands +commands/ — 40 slash commands hooks/ — Trigger-based automations rules/ — Always-follow guidelines (common + per-language) scripts/ — Cross-platform Node.js utilities diff --git a/package.json b/package.json index 65054448..1acb0444 100644 --- a/package.json +++ b/package.json @@ -84,7 +84,7 @@ "lint": "eslint . && markdownlint '**/*.md' --ignore node_modules", "claw": "node scripts/claw.js", "test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js", - "coverage": "c8 --all --include='scripts/**/*.js' --reporter=text --reporter=lcov node tests/run-all.js" + "coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js" }, "devDependencies": { "@eslint/js": "^9.39.2", diff --git a/schemas/hooks.schema.json b/schemas/hooks.schema.json index 3aa2ba1f..4d119297 100644 --- a/schemas/hooks.schema.json +++ b/schemas/hooks.schema.json @@ -1,9 +1,17 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "title": "Claude Code Hooks Configuration", - "description": "Configuration for Claude Code hooks. Event types are validated at runtime and must be one of: PreToolUse, PostToolUse, PreCompact, SessionStart, SessionEnd, Stop, Notification, SubagentStop", + "description": "Configuration for Claude Code hooks. 
Supports current Claude Code hook events and hook action types.", "$defs": { - "hookItem": { + "stringArray": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + }, + "minItems": 1 + }, + "commandHookItem": { "type": "object", "required": [ "type", @@ -12,19 +20,17 @@ "properties": { "type": { "type": "string", - "enum": ["command", "notification"], - "description": "Hook action type (command or notification)" + "const": "command", + "description": "Run a local command" }, "command": { "oneOf": [ { - "type": "string" + "type": "string", + "minLength": 1 }, { - "type": "array", - "items": { - "type": "string" - } + "$ref": "#/$defs/stringArray" } ] }, @@ -37,17 +43,94 @@ "minimum": 0, "description": "Timeout in seconds for async hooks" } - } + }, + "additionalProperties": true + }, + "httpHookItem": { + "type": "object", + "required": [ + "type", + "url" + ], + "properties": { + "type": { + "type": "string", + "const": "http" + }, + "url": { + "type": "string", + "minLength": 1 + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "allowedEnvVars": { + "$ref": "#/$defs/stringArray" + }, + "timeout": { + "type": "number", + "minimum": 0 + } + }, + "additionalProperties": true + }, + "promptHookItem": { + "type": "object", + "required": [ + "type", + "prompt" + ], + "properties": { + "type": { + "type": "string", + "enum": ["prompt", "agent"] + }, + "prompt": { + "type": "string", + "minLength": 1 + }, + "model": { + "type": "string", + "minLength": 1 + }, + "timeout": { + "type": "number", + "minimum": 0 + } + }, + "additionalProperties": true + }, + "hookItem": { + "oneOf": [ + { + "$ref": "#/$defs/commandHookItem" + }, + { + "$ref": "#/$defs/httpHookItem" + }, + { + "$ref": "#/$defs/promptHookItem" + } + ] }, "matcherEntry": { "type": "object", "required": [ - "matcher", "hooks" ], "properties": { "matcher": { - "type": "string" + "oneOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] 
}, "hooks": { "type": "array", @@ -70,6 +153,28 @@ }, "hooks": { "type": "object", + "propertyNames": { + "enum": [ + "SessionStart", + "UserPromptSubmit", + "PreToolUse", + "PermissionRequest", + "PostToolUse", + "PostToolUseFailure", + "Notification", + "SubagentStart", + "Stop", + "SubagentStop", + "PreCompact", + "InstructionsLoaded", + "TeammateIdle", + "TaskCompleted", + "ConfigChange", + "WorktreeCreate", + "WorktreeRemove", + "SessionEnd" + ] + }, "additionalProperties": { "type": "array", "items": { @@ -89,4 +194,4 @@ } } ] -} \ No newline at end of file +} diff --git a/scripts/ci/catalog.js b/scripts/ci/catalog.js index 1e279445..02a71e78 100644 --- a/scripts/ci/catalog.js +++ b/scripts/ci/catalog.js @@ -17,27 +17,39 @@ const SKILLS_DIR = path.join(ROOT, 'skills'); function listAgents() { if (!fs.existsSync(AGENTS_DIR)) return []; - return fs.readdirSync(AGENTS_DIR) - .filter(f => f.endsWith('.md')) - .map(f => f.slice(0, -3)) - .sort(); + try { + return fs.readdirSync(AGENTS_DIR) + .filter(f => f.endsWith('.md')) + .map(f => f.slice(0, -3)) + .sort(); + } catch (error) { + throw new Error(`Failed to read agents directory (${AGENTS_DIR}): ${error.message}`); + } } function listCommands() { if (!fs.existsSync(COMMANDS_DIR)) return []; - return fs.readdirSync(COMMANDS_DIR) - .filter(f => f.endsWith('.md')) - .map(f => f.slice(0, -3)) - .sort(); + try { + return fs.readdirSync(COMMANDS_DIR) + .filter(f => f.endsWith('.md')) + .map(f => f.slice(0, -3)) + .sort(); + } catch (error) { + throw new Error(`Failed to read commands directory (${COMMANDS_DIR}): ${error.message}`); + } } function listSkills() { if (!fs.existsSync(SKILLS_DIR)) return []; - const entries = fs.readdirSync(SKILLS_DIR, { withFileTypes: true }); - return entries - .filter(e => e.isDirectory() && fs.existsSync(path.join(SKILLS_DIR, e.name, 'SKILL.md'))) - .map(e => e.name) - .sort(); + try { + const entries = fs.readdirSync(SKILLS_DIR, { withFileTypes: true }); + return entries + .filter(e 
=> e.isDirectory() && fs.existsSync(path.join(SKILLS_DIR, e.name, 'SKILL.md'))) + .map(e => e.name) + .sort(); + } catch (error) { + throw new Error(`Failed to read skills directory (${SKILLS_DIR}): ${error.message}`); + } } function run() { @@ -58,11 +70,11 @@ function run() { console.log(`- **Commands:** ${catalog.commands.count}`); console.log(`- **Skills:** ${catalog.skills.count}\n`); console.log('## Agents\n'); - catalog.agents.list.forEach(a => console.log(`- ${a}`)); + catalog.agents.list.forEach(a => { console.log(`- ${a}`); }); console.log('\n## Commands\n'); - catalog.commands.list.forEach(c => console.log(`- ${c}`)); + catalog.commands.list.forEach(c => { console.log(`- ${c}`); }); console.log('\n## Skills\n'); - catalog.skills.list.forEach(s => console.log(`- ${s}`)); + catalog.skills.list.forEach(s => { console.log(`- ${s}`); }); } else { console.log(JSON.stringify(catalog, null, 2)); } diff --git a/scripts/ci/validate-hooks.js b/scripts/ci/validate-hooks.js index 7dd5ebee..b4e440d9 100644 --- a/scripts/ci/validate-hooks.js +++ b/scripts/ci/validate-hooks.js @@ -10,7 +10,36 @@ const Ajv = require('ajv'); const HOOKS_FILE = path.join(__dirname, '../../hooks/hooks.json'); const HOOKS_SCHEMA_PATH = path.join(__dirname, '../../schemas/hooks.schema.json'); -const VALID_EVENTS = ['PreToolUse', 'PostToolUse', 'PreCompact', 'SessionStart', 'SessionEnd', 'Stop', 'Notification', 'SubagentStop']; +const VALID_EVENTS = [ + 'SessionStart', + 'UserPromptSubmit', + 'PreToolUse', + 'PermissionRequest', + 'PostToolUse', + 'PostToolUseFailure', + 'Notification', + 'SubagentStart', + 'Stop', + 'SubagentStop', + 'PreCompact', + 'InstructionsLoaded', + 'TeammateIdle', + 'TaskCompleted', + 'ConfigChange', + 'WorktreeCreate', + 'WorktreeRemove', + 'SessionEnd', +]; +const VALID_HOOK_TYPES = ['command', 'http', 'prompt', 'agent']; +const EVENTS_WITHOUT_MATCHER = new Set(['UserPromptSubmit', 'Notification', 'Stop', 'SubagentStop']); + +function isNonEmptyString(value) { + 
return typeof value === 'string' && value.trim().length > 0; +} + +function isNonEmptyStringArray(value) { + return Array.isArray(value) && value.length > 0 && value.every(item => isNonEmptyString(item)); +} /** * Validate a single hook entry has required fields and valid inline JS @@ -24,32 +53,72 @@ function validateHookEntry(hook, label) { if (!hook.type || typeof hook.type !== 'string') { console.error(`ERROR: ${label} missing or invalid 'type' field`); hasErrors = true; - } - - // Validate optional async and timeout fields - if ('async' in hook && typeof hook.async !== 'boolean') { - console.error(`ERROR: ${label} 'async' must be a boolean`); + } else if (!VALID_HOOK_TYPES.includes(hook.type)) { + console.error(`ERROR: ${label} has unsupported hook type '${hook.type}'`); hasErrors = true; } + if ('timeout' in hook && (typeof hook.timeout !== 'number' || hook.timeout < 0)) { console.error(`ERROR: ${label} 'timeout' must be a non-negative number`); hasErrors = true; } - if (!hook.command || (typeof hook.command !== 'string' && !Array.isArray(hook.command)) || (typeof hook.command === 'string' && !hook.command.trim()) || (Array.isArray(hook.command) && (hook.command.length === 0 || !hook.command.every(s => typeof s === 'string' && s.length > 0)))) { - console.error(`ERROR: ${label} missing or invalid 'command' field`); - hasErrors = true; - } else if (typeof hook.command === 'string') { - // Validate inline JS syntax in node -e commands - const nodeEMatch = hook.command.match(/^node -e "(.*)"$/s); - if (nodeEMatch) { - try { - new vm.Script(nodeEMatch[1].replace(/\\\\/g, '\\').replace(/\\"/g, '"').replace(/\\n/g, '\n').replace(/\\t/g, '\t')); - } catch (syntaxErr) { - console.error(`ERROR: ${label} has invalid inline JS: ${syntaxErr.message}`); - hasErrors = true; + if (hook.type === 'command') { + if ('async' in hook && typeof hook.async !== 'boolean') { + console.error(`ERROR: ${label} 'async' must be a boolean`); + hasErrors = true; + } + + if 
(!isNonEmptyString(hook.command) && !isNonEmptyStringArray(hook.command)) { + console.error(`ERROR: ${label} missing or invalid 'command' field`); + hasErrors = true; + } else if (typeof hook.command === 'string') { + const nodeEMatch = hook.command.match(/^node -e "(.*)"$/s); + if (nodeEMatch) { + try { + new vm.Script(nodeEMatch[1].replace(/\\\\/g, '\\').replace(/\\"/g, '"').replace(/\\n/g, '\n').replace(/\\t/g, '\t')); + } catch (syntaxErr) { + console.error(`ERROR: ${label} has invalid inline JS: ${syntaxErr.message}`); + hasErrors = true; + } } } + + return hasErrors; + } + + if ('async' in hook) { + console.error(`ERROR: ${label} 'async' is only supported for command hooks`); + hasErrors = true; + } + + if (hook.type === 'http') { + if (!isNonEmptyString(hook.url)) { + console.error(`ERROR: ${label} missing or invalid 'url' field`); + hasErrors = true; + } + + if ('headers' in hook && (typeof hook.headers !== 'object' || hook.headers === null || Array.isArray(hook.headers) || !Object.values(hook.headers).every(value => typeof value === 'string'))) { + console.error(`ERROR: ${label} 'headers' must be an object with string values`); + hasErrors = true; + } + + if ('allowedEnvVars' in hook && (!Array.isArray(hook.allowedEnvVars) || !hook.allowedEnvVars.every(value => isNonEmptyString(value)))) { + console.error(`ERROR: ${label} 'allowedEnvVars' must be an array of strings`); + hasErrors = true; + } + + return hasErrors; + } + + if (!isNonEmptyString(hook.prompt)) { + console.error(`ERROR: ${label} missing or invalid 'prompt' field`); + hasErrors = true; + } + + if ('model' in hook && !isNonEmptyString(hook.model)) { + console.error(`ERROR: ${label} 'model' must be a non-empty string`); + hasErrors = true; } return hasErrors; @@ -110,9 +179,12 @@ function validateHooks() { hasErrors = true; continue; } - if (!matcher.matcher) { + if (!('matcher' in matcher) && !EVENTS_WITHOUT_MATCHER.has(eventType)) { console.error(`ERROR: ${eventType}[${i}] missing 'matcher' 
field`); hasErrors = true; + } else if ('matcher' in matcher && typeof matcher.matcher !== 'string' && (typeof matcher.matcher !== 'object' || matcher.matcher === null)) { + console.error(`ERROR: ${eventType}[${i}] has invalid 'matcher' field`); + hasErrors = true; } if (!matcher.hooks || !Array.isArray(matcher.hooks)) { console.error(`ERROR: ${eventType}[${i}] missing 'hooks' array`); @@ -132,9 +204,12 @@ function validateHooks() { // Array format (legacy) for (let i = 0; i < hooks.length; i++) { const hook = hooks[i]; - if (!hook.matcher) { + if (!('matcher' in hook)) { console.error(`ERROR: Hook ${i} missing 'matcher' field`); hasErrors = true; + } else if (typeof hook.matcher !== 'string' && (typeof hook.matcher !== 'object' || hook.matcher === null)) { + console.error(`ERROR: Hook ${i} has invalid 'matcher' field`); + hasErrors = true; } if (!hook.hooks || !Array.isArray(hook.hooks)) { console.error(`ERROR: Hook ${i} missing 'hooks' array`); diff --git a/tests/ci/validators.test.js b/tests/ci/validators.test.js index 6077a389..f016a538 100644 --- a/tests/ci/validators.test.js +++ b/tests/ci/validators.test.js @@ -1927,7 +1927,7 @@ function runTests() { PreToolUse: [{ matcher: 'Write', hooks: [{ - type: 'intercept', + type: 'command', command: 'echo test', async: 'yes' // Should be boolean, not string }] @@ -1947,7 +1947,7 @@ function runTests() { PostToolUse: [{ matcher: 'Edit', hooks: [{ - type: 'intercept', + type: 'command', command: 'echo test', timeout: -5 // Must be non-negative }] @@ -2105,6 +2105,31 @@ function runTests() { cleanupTestDir(testDir); })) passed++; else failed++; + console.log('\nRound 82b: validate-hooks (current official events and hook types):'); + + if (test('accepts UserPromptSubmit with omitted matcher and prompt/http/agent hooks', () => { + const testDir = createTestDir(); + const hooksJson = JSON.stringify({ + hooks: { + UserPromptSubmit: [ + { + hooks: [ + { type: 'prompt', prompt: 'Summarize the request.' 
}, + { type: 'agent', prompt: 'Review for security issues.', model: 'gpt-5.4' }, + { type: 'http', url: 'https://example.com/hooks', headers: { Authorization: 'Bearer token' } } + ] + } + ] + } + }); + const hooksFile = path.join(testDir, 'hooks.json'); + fs.writeFileSync(hooksFile, hooksJson); + + const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile); + assert.strictEqual(result.code, 0, 'Should accept current official hook event/type combinations'); + cleanupTestDir(testDir); + })) passed++; else failed++; + // ── Round 83: validate-agents whitespace-only field, validate-skills empty SKILL.md ── console.log('\nRound 83: validate-agents (whitespace-only frontmatter field value):'); diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index e13e8193..5610ed37 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -2561,6 +2561,7 @@ async function runTests() { assert.ok(!runAllSource.includes('execSync'), 'Should not use execSync'); // Verify it shows stderr assert.ok(runAllSource.includes('stderr'), 'Should handle stderr output'); + assert.ok(runAllSource.includes('result.status !== 0'), 'Should treat non-zero child exits as failures'); })) passed++; else failed++; // ── Round 32: post-edit-typecheck special characters & check-console-log ── diff --git a/tests/run-all.js b/tests/run-all.js index 25d44666..399de5a7 100644 --- a/tests/run-all.js +++ b/tests/run-all.js @@ -71,6 +71,17 @@ for (const testFile of testFiles) { if (passedMatch) totalPassed += parseInt(passedMatch[1], 10); if (failedMatch) totalFailed += parseInt(failedMatch[1], 10); + + if (result.error) { + console.log(`✗ ${testFile} failed to start: ${result.error.message}`); + totalFailed += failedMatch ? 0 : 1; + continue; + } + + if (result.status !== 0) { + console.log(`✗ ${testFile} exited with status ${result.status}`); + totalFailed += failedMatch ? 
0 : 1; + } } totalTests = totalPassed + totalFailed; From 327c2e97d8bd5a59fe5b9b83a150f6e20893a5fa Mon Sep 17 00:00:00 2001 From: Jason Davey Date: Mon, 9 Mar 2026 14:41:46 -0400 Subject: [PATCH 24/42] feat: enhance TypeScript coding style guidelines with detailed examples and best practices esp interfaces and types --- rules/typescript/coding-style.md | 154 ++++++++++++++++++++++++++++--- 1 file changed, 141 insertions(+), 13 deletions(-) diff --git a/rules/typescript/coding-style.md b/rules/typescript/coding-style.md index db62a9bc..582c4033 100644 --- a/rules/typescript/coding-style.md +++ b/rules/typescript/coding-style.md @@ -9,19 +9,128 @@ paths: > This file extends [common/coding-style.md](../common/coding-style.md) with TypeScript/JavaScript specific content. +## Types and Interfaces + +Use types to make public APIs, shared models, and component props explicit, readable, and reusable. + +### Public APIs + +- Add parameter and return types to exported functions, shared utilities, and public class methods +- Let TypeScript infer obvious local variable types +- Extract repeated inline object shapes into named types or interfaces + +```typescript +// WRONG: Exported function without explicit types +export function formatUser(user) { + return `${user.firstName} ${user.lastName}` +} + +// CORRECT: Explicit types on public APIs +interface User { + firstName: string + lastName: string +} + +export function formatUser(user: User): string { + return `${user.firstName} ${user.lastName}` +} +``` + +### Interfaces vs. 
Type Aliases + +- Use `interface` for object shapes that may be extended or implemented +- Use `type` for unions, intersections, tuples, mapped types, and utility types +- Prefer string literal unions over `enum` unless an `enum` is required for interoperability + +```typescript +interface User { + id: string + email: string +} + +type UserRole = 'admin' | 'member' +type UserWithRole = User & { + role: UserRole +} +``` + +### Avoid `any` + +- Avoid `any` in application code +- Use `unknown` for external or untrusted input, then narrow it safely +- Use generics when a value's type depends on the caller + +```typescript +// WRONG: any removes type safety +function getErrorMessage(error: any) { + return error.message +} + +// CORRECT: unknown forces safe narrowing +function getErrorMessage(error: unknown): string { + if (error instanceof Error) { + return error.message + } + + return 'Unknown error' +} +``` + +### React Props + +- Define component props with a named `interface` or `type` +- Type callback props explicitly +- Do not use `React.FC` unless there is a specific reason to do so + +```typescript +interface User { + id: string + email: string +} + +interface UserCardProps { + user: User + onSelect: (id: string) => void +} + +function UserCard({ user, onSelect }: UserCardProps) { + return +} +``` + +### JavaScript Files + +- In `.js` and `.jsx` files, use JSDoc when types improve clarity and a TypeScript migration is not practical +- Keep JSDoc aligned with runtime behavior + +```javascript +/** + * @param {{ firstName: string, lastName: string }} user + * @returns {string} + */ +export function formatUser(user) { + return `${user.firstName} ${user.lastName}` +} +``` + ## Immutability Use spread operator for immutable updates: ```typescript +interface User { + id: string + name: string +} + // WRONG: Mutation -function updateUser(user, name) { - user.name = name // MUTATION! +function updateUser(user: User, name: string): User { + user.name = name // MUTATION! 
return user
 }
 
 // CORRECT: Immutability
-function updateUser(user, name) {
+function updateUser(user: Readonly<User>, name: string): User {
   return {
     ...user,
     name
@@ -31,31 +140,50 @@ function updateUser(user, name) {
 
 ## Error Handling
 
-Use async/await with try-catch:
+Use async/await with try-catch and narrow unknown errors safely:
 
 ```typescript
-try {
-  const result = await riskyOperation()
-  return result
-} catch (error) {
-  console.error('Operation failed:', error)
-  throw new Error('Detailed user-friendly message')
+interface User {
+  id: string
+  email: string
+}
+
+declare function riskyOperation(userId: string): Promise<User>
+
+function getErrorMessage(error: unknown): string {
+  if (error instanceof Error) {
+    return error.message
+  }
+
+  return 'Unexpected error'
+}
+
+async function loadUser(userId: string): Promise<User> {
+  try {
+    const result = await riskyOperation(userId)
+    return result
+  } catch (error: unknown) {
+    console.error('Operation failed:', error)
+    throw new Error(getErrorMessage(error))
+  }
 }
 ```
 
 ## Input Validation
 
-Use Zod for schema-based validation:
+Use Zod for schema-based validation and infer types from the schema:
 
 ```typescript
 import { z } from 'zod'
 
-const schema = z.object({
+const userSchema = z.object({
   email: z.string().email(),
   age: z.number().int().min(0).max(150)
 })
 
-const validated = schema.parse(input)
+type UserInput = z.infer<typeof userSchema>
+
+const validated: UserInput = userSchema.parse(input)
 ```
 
 ## Console.log
From b0a6847007c280ae36db78ba4b2c1e250de8b980 Mon Sep 17 00:00:00 2001
From: Affaan Mustafa
Date: Tue, 10 Mar 2026 19:34:10 -0700
Subject: [PATCH 25/42] docs: align TypeScript error handling examples

---
 rules/typescript/coding-style.md | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/rules/typescript/coding-style.md b/rules/typescript/coding-style.md
index 582c4033..090c0a17 100644
--- a/rules/typescript/coding-style.md
+++ b/rules/typescript/coding-style.md
@@ -72,7 +72,7 @@ function getErrorMessage(error: 
unknown): string { return error.message } - return 'Unknown error' + return 'Unexpected error' } ``` @@ -158,12 +158,18 @@ function getErrorMessage(error: unknown): string { return 'Unexpected error' } +const logger = { + error: (message: string, error: unknown) => { + // Replace with your production logger (for example, pino or winston). + } +} + async function loadUser(userId: string): Promise { try { const result = await riskyOperation(userId) return result } catch (error: unknown) { - console.error('Operation failed:', error) + logger.error('Operation failed', error) throw new Error(getErrorMessage(error)) } } From 4fa817cd7d5b2562cf2a25b24abd549a4c439191 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 20:13:05 -0700 Subject: [PATCH 26/42] ci: install validation deps for hook checks --- .github/workflows/ci.yml | 3 +++ .github/workflows/reusable-validate.yml | 3 +++ package-lock.json | 2 +- package.json | 2 +- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e4d61e29..7498eee2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -156,6 +156,9 @@ jobs: with: node-version: '20.x' + - name: Install validation dependencies + run: npm ci --ignore-scripts + - name: Validate agents run: node scripts/ci/validate-agents.js continue-on-error: false diff --git a/.github/workflows/reusable-validate.yml b/.github/workflows/reusable-validate.yml index 1d0da8f1..27b483de 100644 --- a/.github/workflows/reusable-validate.yml +++ b/.github/workflows/reusable-validate.yml @@ -24,6 +24,9 @@ jobs: with: node-version: ${{ inputs.node-version }} + - name: Install validation dependencies + run: npm ci --ignore-scripts + - name: Validate agents run: node scripts/ci/validate-agents.js diff --git a/package-lock.json b/package-lock.json index 7658be1b..0ad4a637 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,7 +14,7 @@ }, "devDependencies": { "@eslint/js": 
"^9.39.2", - "ajv": "^8.17.0", + "ajv": "^8.18.0", "c8": "^10.1.2", "eslint": "^9.39.2", "globals": "^17.1.0", diff --git a/package.json b/package.json index 1acb0444..352d41fc 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,7 @@ }, "devDependencies": { "@eslint/js": "^9.39.2", - "ajv": "^8.17.0", + "ajv": "^8.18.0", "c8": "^10.1.2", "eslint": "^9.39.2", "globals": "^17.1.0", From 32e11b8701d451dba4ba4b35d5d38017a95fb143 Mon Sep 17 00:00:00 2001 From: Tatsuya Shimomoto Date: Sun, 8 Mar 2026 19:35:28 +0900 Subject: [PATCH 27/42] feat(commands): improve learn-eval with checklist-based holistic verdict Replace the 5-dimension numeric scoring rubric with a checklist + holistic verdict system (Save / Improve then Save / Absorb into [X] / Drop). Key improvements: - Explicit pre-save checklist: grep skills/ for duplicates, check MEMORY.md, consider appending to existing skills, confirm reusability - 4-way verdict instead of binary save/don't-save: adds "Absorb into [X]" to prevent skill file proliferation, and "Improve then Save" for iterative refinement - Verdict-specific confirmation flows tailored to each outcome - Design rationale explaining why holistic judgment outperforms numeric scoring with modern frontier models --- commands/learn-eval.md | 78 +++++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 27 deletions(-) diff --git a/commands/learn-eval.md b/commands/learn-eval.md index 18d8853c..6f38894a 100644 --- a/commands/learn-eval.md +++ b/commands/learn-eval.md @@ -1,10 +1,10 @@ --- -description: Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project). +description: "Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project)." --- # /learn-eval - Extract, Evaluate, then Save -Extends `/learn` with a quality gate and save-location decision before writing any skill file. 
+Extends `/learn` with a quality gate, save-location decision, and knowledge-placement awareness before writing any skill file. ## What to Extract @@ -51,36 +51,60 @@ origin: auto-extracted [Trigger conditions] ``` -5. **Self-evaluate before saving** using this rubric: +5. **Quality gate — Checklist + Holistic verdict** - | Dimension | 1 | 3 | 5 | - |-----------|---|---|---| - | Specificity | Abstract principles only, no code examples | Representative code example present | Rich examples covering all usage patterns | - | Actionability | Unclear what to do | Main steps are understandable | Immediately actionable, edge cases covered | - | Scope Fit | Too broad or too narrow | Mostly appropriate, some boundary ambiguity | Name, trigger, and content perfectly aligned | - | Non-redundancy | Nearly identical to another skill | Some overlap but unique perspective exists | Completely unique value | - | Coverage | Covers only a fraction of the target task | Main cases covered, common variants missing | Main cases, edge cases, and pitfalls covered | + #### 5a. Required checklist (verify by actually reading files) - - Score each dimension 1–5 - - If any dimension scores 1–2, improve the draft and re-score until all dimensions are ≥ 3 - - Show the user the scores table and the final draft + Execute **all** of the following before evaluating the draft: -6. Ask user to confirm: - - Show: proposed save path + scores table + final draft - - Wait for explicit confirmation before writing + - [ ] Grep `~/.claude/skills/` by keyword to check for content overlap + - [ ] Check MEMORY.md (both project and global) for overlap + - [ ] Consider whether appending to an existing skill would suffice + - [ ] Confirm this is a reusable pattern, not a one-off fix -7. Save to the determined location + #### 5b. 
Holistic verdict -## Output Format for Step 5 (scores table) + Synthesize the checklist results and draft quality, then choose **one** of the following: -| Dimension | Score | Rationale | -|-----------|-------|-----------| -| Specificity | N/5 | ... | -| Actionability | N/5 | ... | -| Scope Fit | N/5 | ... | -| Non-redundancy | N/5 | ... | -| Coverage | N/5 | ... | -| **Total** | **N/25** | | + | Verdict | Meaning | Next Action | + |---------|---------|-------------| + | **Save** | Unique, specific, well-scoped | Proceed to Step 6 | + | **Improve then Save** | Valuable but needs refinement | List improvements → revise → re-evaluate (once) | + | **Absorb into [X]** | Should be appended to an existing skill | Show target skill and additions → Step 6 | + | **Drop** | Trivial, redundant, or too abstract | Explain reasoning and stop | + + **Guideline dimensions** (informing the verdict, not scored): + + - **Specificity & Actionability**: Contains code examples or commands that are immediately usable + - **Scope Fit**: Name, trigger conditions, and content are aligned and focused on a single pattern + - **Uniqueness**: Provides value not covered by existing skills (informed by checklist results) + - **Reusability**: Realistic trigger scenarios exist in future sessions + +6. **Verdict-specific confirmation flow** + + - **Save**: Present save path + checklist results + 1-line verdict rationale + full draft → save after user confirmation + - **Absorb into [X]**: Present target path + additions (diff format) + checklist results + verdict rationale → append after user confirmation + - **Drop**: Show checklist results + reasoning only (no confirmation needed) + +7. 
Save / Absorb to the determined location + +## Output Format for Step 5 + +``` +### Checklist +- [x] skills/ grep: no overlap (or: overlap found → details) +- [x] MEMORY.md: no overlap (or: overlap found → details) +- [x] Existing skill append: new file appropriate (or: should append to [X]) +- [x] Reusability: confirmed (or: one-off → Drop) + +### Verdict: Save / Improve then Save / Absorb into [X] / Drop + +**Rationale:** (1-2 sentences explaining the verdict) +``` + +## Design Rationale + +This version replaces the previous 5-dimension numeric scoring rubric (Specificity, Actionability, Scope Fit, Non-redundancy, Coverage scored 1-5) with a checklist-based holistic verdict system. Modern frontier models (Opus 4.6+) have strong contextual judgment — forcing rich qualitative signals into numeric scores loses nuance and can produce misleading totals. The holistic approach lets the model weigh all factors naturally, producing more accurate save/drop decisions while the explicit checklist ensures no critical check is skipped. ## Notes @@ -88,4 +112,4 @@ origin: auto-extracted - Don't extract one-time issues (specific API outages, etc.) - Focus on patterns that will save time in future sessions - Keep skills focused — one pattern per skill -- If Coverage score is low, add related variants before saving +- When the verdict is Absorb, append to the existing skill rather than creating a new file From 5929db9b231ebe98109f26abd322a143a48afc30 Mon Sep 17 00:00:00 2001 From: Tatsuya Shimomoto Date: Sun, 8 Mar 2026 21:04:38 +0900 Subject: [PATCH 28/42] fix: resolve markdownlint MD001 heading level violation Change h4 (####) to h3 (###) for sub-steps 5a and 5b to comply with heading increment rule (headings must increment by one level at a time). 
--- commands/learn-eval.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/commands/learn-eval.md b/commands/learn-eval.md index 6f38894a..53b7bf58 100644 --- a/commands/learn-eval.md +++ b/commands/learn-eval.md @@ -53,7 +53,7 @@ origin: auto-extracted 5. **Quality gate — Checklist + Holistic verdict** - #### 5a. Required checklist (verify by actually reading files) + ### 5a. Required checklist (verify by actually reading files) Execute **all** of the following before evaluating the draft: @@ -62,7 +62,7 @@ origin: auto-extracted - [ ] Consider whether appending to an existing skill would suffice - [ ] Confirm this is a reusable pattern, not a one-off fix - #### 5b. Holistic verdict + ### 5b. Holistic verdict Synthesize the checklist results and draft quality, then choose **one** of the following: From 973be02aa6f620fc848cd95ef8c0715199525a1b Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 20:08:20 -0700 Subject: [PATCH 29/42] docs: clarify learn-eval verdict flow --- commands/learn-eval.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/commands/learn-eval.md b/commands/learn-eval.md index 53b7bf58..b98fcf4f 100644 --- a/commands/learn-eval.md +++ b/commands/learn-eval.md @@ -57,7 +57,7 @@ origin: auto-extracted Execute **all** of the following before evaluating the draft: - - [ ] Grep `~/.claude/skills/` by keyword to check for content overlap + - [ ] Grep `~/.claude/skills/` and relevant project `.claude/skills/` files by keyword to check for content overlap - [ ] Check MEMORY.md (both project and global) for overlap - [ ] Consider whether appending to an existing skill would suffice - [ ] Confirm this is a reusable pattern, not a one-off fix @@ -82,6 +82,7 @@ origin: auto-extracted 6. 
**Verdict-specific confirmation flow** + - **Improve then Save**: Present the required improvements + revised draft + updated checklist/verdict after one re-evaluation; if the revised verdict is **Save**, save after user confirmation, otherwise follow the new verdict - **Save**: Present save path + checklist results + 1-line verdict rationale + full draft → save after user confirmation - **Absorb into [X]**: Present target path + additions (diff format) + checklist results + verdict rationale → append after user confirmation - **Drop**: Show checklist results + reasoning only (no confirmation needed) From 02d754ba675f15e1a56d76b62c97d820e615fecc Mon Sep 17 00:00:00 2001 From: Tatsuya Shimomoto Date: Sun, 8 Mar 2026 21:02:14 +0900 Subject: [PATCH 30/42] fix: use general-purpose agent instead of Explore for skill-stocktake evaluation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Explore agent is a "Fast agent" optimized for codebase exploration, not deep reasoning. The skill-stocktake V4 design requires holistic AI judgment (actionability, scope fit, uniqueness, currency) which needs the full reasoning capability of the conversation's main model. Additionally, the Agent tool has no `model` parameter — specifying `model: opus` was silently ignored, causing the evaluation to run on the lightweight Explore model. This resulted in all skills receiving "Keep" verdicts without genuine critical analysis. Changing to `general-purpose` agent ensures evaluation runs on the conversation's main model (e.g., Opus 4.6), enabling the holistic judgment that V4 was designed for. 
--- skills/skill-stocktake/SKILL.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/skills/skill-stocktake/SKILL.md b/skills/skill-stocktake/SKILL.md index 03bc497b..9d86b4db 100644 --- a/skills/skill-stocktake/SKILL.md +++ b/skills/skill-stocktake/SKILL.md @@ -74,7 +74,7 @@ Scanning: ### Phase 2 — Quality Evaluation -Launch a Task tool subagent (**Explore agent, model: opus**) with the full inventory and checklist. +Launch an Agent tool subagent (**general-purpose agent**) with the full inventory and checklist. The subagent reads each skill, applies the checklist, and returns per-skill JSON: `{ "verdict": "Keep"|"Improve"|"Update"|"Retire"|"Merge into [X]", "reason": "..." }` From 0c2954565dce632053c068131bbdd9d20807dd0c Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 20:08:20 -0700 Subject: [PATCH 31/42] docs: add skill-stocktake agent invocation example --- skills/skill-stocktake/SKILL.md | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/skills/skill-stocktake/SKILL.md b/skills/skill-stocktake/SKILL.md index 9d86b4db..7ae77c27 100644 --- a/skills/skill-stocktake/SKILL.md +++ b/skills/skill-stocktake/SKILL.md @@ -74,7 +74,24 @@ Scanning: ### Phase 2 — Quality Evaluation -Launch an Agent tool subagent (**general-purpose agent**) with the full inventory and checklist. +Launch an Agent tool subagent (**general-purpose agent**) with the full inventory and checklist: + +```text +Agent( + subagent_type="general-purpose", + prompt=" +Evaluate the following skill inventory against the checklist. + +[INVENTORY] + +[CHECKLIST] + +Return JSON for each skill: +{ \"verdict\": \"Keep\"|\"Improve\"|\"Update\"|\"Retire\"|\"Merge into [X]\", \"reason\": \"...\" } +" +) +``` + The subagent reads each skill, applies the checklist, and returns per-skill JSON: `{ "verdict": "Keep"|"Improve"|"Update"|"Retire"|"Merge into [X]", "reason": "..." 
}` From 6937491d2a78aa751f5a58c8c6cf7924d4c7cae6 Mon Sep 17 00:00:00 2001 From: avesh-devx Date: Mon, 9 Mar 2026 12:30:40 +0530 Subject: [PATCH 32/42] feat: add resume and save session commands for session management Introduced two new commands: `/resume-session` and `/save-session`. The `/resume-session` command allows users to load the most recent session file or a specific session file, providing a structured briefing of the session's context. The `/save-session` command captures the current session state, saving it to a dated file for future reference. Both commands enhance user experience by enabling seamless session continuity and context preservation. --- commands/resume-session.md | 150 +++++++++++++++++++++ commands/save-session.md | 261 +++++++++++++++++++++++++++++++++++++ 2 files changed, 411 insertions(+) create mode 100644 commands/resume-session.md create mode 100644 commands/save-session.md diff --git a/commands/resume-session.md b/commands/resume-session.md new file mode 100644 index 00000000..b33da0cc --- /dev/null +++ b/commands/resume-session.md @@ -0,0 +1,150 @@ +--- +description: Load the most recent session file from .claude/sessions/ and resume work with full context from where the last session ended. +--- + +# Resume Session Command + +Load the last saved session state and orient fully before doing any work. +This command is the counterpart to `/save-session`. 
+ +## When to Use + +- Starting a new session to continue work from a previous day +- After starting a fresh session due to context limits +- When handing off a session file from another source (just provide the file path) +- Any time you have a session file and want Claude to fully absorb it before proceeding + +## Usage + +``` +/resume-session # loads most recent file in .claude/sessions/ +/resume-session 2024-01-15 # loads specific date +/resume-session .claude/sessions/foo.md # loads specific file path +``` + +## Process + +### Step 1: Find the session file + +If no argument provided: +1. List all files in `.claude/sessions/` +2. Pick the most recently modified file +3. If the folder doesn't exist or is empty, tell the user: + ``` + No session files found in .claude/sessions/ + Run /save-session at the end of a session to create one. + ``` + Then stop. + +If an argument is provided: +- If it looks like a date (`YYYY-MM-DD`), look for `.claude/sessions/YYYY-MM-DD.md` +- If it looks like a file path, read that file directly +- If not found, report clearly and stop + +### Step 2: Read the entire session file + +Read the complete file. Do not summarize yet. 
+ +### Step 3: Confirm understanding + +Respond with a structured briefing in this exact format: + +``` +SESSION LOADED: .claude/sessions/YYYY-MM-DD.md +════════════════════════════════════════════════ + +PROJECT: [project name / topic from file] + +WHAT WE'RE BUILDING: +[2-3 sentence summary in your own words] + +CURRENT STATE: +✅ Working: [count] items confirmed +🔄 In Progress: [list files that are in progress] +🗒️ Not Started: [list planned but untouched] + +WHAT NOT TO RETRY: +[list every failed approach with its reason — this is critical] + +OPEN QUESTIONS / BLOCKERS: +[list any blockers or unanswered questions] + +NEXT STEP: +[exact next step if defined in the file] +[if not defined: "No next step defined — recommend reviewing 'What Has NOT Been Tried Yet' together before starting"] + +════════════════════════════════════════════════ +Ready to continue. What would you like to do? +``` + +### Step 4: Wait for the user + +Do NOT start working automatically. Do NOT touch any files. Wait for the user to say what to do next. + +If the next step is clearly defined in the session file and the user says "continue" or "yes" or similar — proceed with that exact next step. + +If no next step is defined — ask the user where to start, and optionally suggest an approach from the "What Has NOT Been Tried Yet" section. + +--- + +## Edge Cases + +**Multiple sessions for the same date** (`2024-01-15.md`, `2024-01-15-2.md`): +Load the highest-numbered file (most recent). + +**Session file references files that no longer exist:** +Note this during the briefing — "⚠️ `path/to/file.ts` referenced in session but not found on disk." + +**Session file is from a very old date:** +Note the gap — "⚠️ This session is from X days ago. Things may have changed." — then proceed normally. + +**User provides a file path directly (e.g., forwarded from a teammate):** +Read it and follow the same briefing process — the format is the same regardless of source. 
+ +**Session file is empty or malformed:** +Report: "Session file found but appears empty or unreadable. You may need to create a new one with /save-session." + +--- + +## Example Output + +``` +SESSION LOADED: .claude/sessions/2024-01-15.md +════════════════════════════════════════════════ + +PROJECT: my-app — JWT Authentication + +WHAT WE'RE BUILDING: +User authentication with JWT tokens stored in httpOnly cookies. +Register and login endpoints are partially done. Route protection +via middleware hasn't been started yet. + +CURRENT STATE: +✅ Working: 3 items (register endpoint, JWT generation, password hashing) +🔄 In Progress: app/api/auth/login/route.ts (token works, cookie not set yet) +🗒️ Not Started: middleware.ts, app/login/page.tsx + +WHAT NOT TO RETRY: +❌ Next-Auth — conflicts with custom Prisma adapter, threw adapter error on every request +❌ localStorage for JWT — causes SSR hydration mismatch, incompatible with Next.js + +OPEN QUESTIONS / BLOCKERS: +- Does cookies().set() work inside a Route Handler or only Server Actions? + +NEXT STEP: +In app/api/auth/login/route.ts — set the JWT as an httpOnly cookie using +cookies().set('token', jwt, { httpOnly: true, secure: true, sameSite: 'strict' }) +then test with Postman for a Set-Cookie header in the response. + +════════════════════════════════════════════════ +Ready to continue. What would you like to do? 
+``` + +--- + +## Notes + +- Never modify the session file when loading it — it's a read-only historical record +- The briefing format is fixed — do not skip sections even if they are empty +- "What Not To Retry" must always be shown, even if it just says "None" — it's too important to miss +- After resuming, the user may want to run `/save-session` again at the end of the new session to create a new dated file diff --git a/commands/save-session.md b/commands/save-session.md new file mode 100644 index 00000000..84254e1e --- /dev/null +++ b/commands/save-session.md @@ -0,0 +1,261 @@ +--- +description: Save current session state to a dated file in .claude/sessions/ so work can be resumed in a future session with full context. +--- + +# Save Session Command + +Capture everything that happened in this session — what was built, what worked, what failed, what's left — and write it to a dated file so the next session can pick up exactly where this one left off. + +## When to Use + +- End of a work session before closing Claude Code +- Before hitting context limits (run this first, then start a fresh session) +- After solving a complex problem you want to remember +- Any time you need to hand off context to a future session + +## Process + +### Step 1: Gather context + +Before writing the file, collect: + +- Read all files modified during this session (use git diff or recall from conversation) +- Review what was discussed, attempted, and decided +- Note any errors encountered and how they were resolved (or not) +- Check current test/build status if relevant + +### Step 2: Create the sessions folder if it doesn't exist + +```bash +mkdir -p .claude/sessions +``` + +### Step 3: Write the session file + +Create `.claude/sessions/YYYY-MM-DD.md` using today's actual date. + +If a file for today already exists (multiple sessions in one day), name it `YYYY-MM-DD-2.md`, `YYYY-MM-DD-3.md`, etc. + +### Step 4: Populate the file with all sections below + +Write every section honestly. 
Do not skip sections — write "Nothing yet" or "N/A" if a section genuinely has no content. An incomplete file is worse than an honest empty section. + +### Step 5: Show the file to the user + +After writing, display the full contents and ask: + +``` +Session saved to .claude/sessions/YYYY-MM-DD.md + +Does this look accurate? Anything to correct or add before we close? +``` + +Wait for confirmation. Make edits if requested. + +--- + +## Session File Format + +```markdown +# Session: YYYY-MM-DD +**Started:** [approximate time if known] +**Last Updated:** [current time] +**Project:** [project name or path] +**Topic:** [one-line summary of what this session was about] + +--- + +## What We Are Building + +[1-3 paragraphs describing the feature, bug fix, or task. Include enough +context that someone with zero memory of this session can understand the goal. +Include: what it does, why it's needed, how it fits into the larger system.] + +--- + +## What WORKED (with evidence) + +[List only things that are confirmed working. For each item include WHY you +know it works — test passed, ran in browser, Postman returned 200, etc. +Without evidence, move it to "Not Tried Yet" instead.] + +- **[thing that works]** — confirmed by: [specific evidence] +- **[thing that works]** — confirmed by: [specific evidence] + +If nothing is confirmed working yet: "Nothing confirmed working yet — all approaches still in progress or untested." + +--- + +## What Did NOT Work (and why) + +[This is the most important section. List every approach tried that failed. +For each failure write the EXACT reason so the next session doesn't retry it. +Be specific: "threw X error because Y" is useful. "didn't work" is not.] + +- **[approach tried]** — failed because: [exact reason / error message] +- **[approach tried]** — failed because: [exact reason / error message] + +If nothing failed: "No failed approaches yet." 
+ +--- + +## What Has NOT Been Tried Yet + +[Approaches that seem promising but haven't been attempted. Ideas from the +conversation. Alternative solutions worth exploring. Be specific enough that +the next session knows exactly what to try.] + +- [approach / idea] +- [approach / idea] + +If nothing is queued: "No specific untried approaches identified." + +--- + +## Current State of Files + +[Every file touched this session. Be precise about what state each file is in.] + +| File | Status | Notes | +|------|--------|-------| +| `path/to/file.ts` | ✅ Complete | [what it does] | +| `path/to/file.ts` | 🔄 In Progress | [what's done, what's left] | +| `path/to/file.ts` | ❌ Broken | [what's wrong] | +| `path/to/file.ts` | 🗒️ Not Started | [planned but not touched] | + +If no files were touched: "No files modified this session." + +--- + +## Decisions Made + +[Architecture choices, tradeoffs accepted, approaches chosen and why. +These prevent the next session from relitigating settled decisions.] + +- **[decision]** — reason: [why this was chosen over alternatives] + +If no significant decisions: "No major decisions made this session." + +--- + +## Blockers & Open Questions + +[Anything unresolved that the next session needs to address or investigate. +Questions that came up but weren't answered. External dependencies waiting on.] + +- [blocker / open question] + +If none: "No active blockers." + +--- + +## Exact Next Step + +[If known: The single most important thing to do when resuming. Be precise +enough that resuming requires zero thinking about where to start.] + +[If not known: "Next step not determined — review 'What Has NOT Been Tried Yet' +and 'Blockers' sections to decide on direction before starting."] + +--- + +## Environment & Setup Notes + +[Only fill this if relevant — commands needed to run the project, env vars +required, services that need to be running, etc. Skip if standard setup.] + +[If none: omit this section entirely.] 
+``` + +--- + +## Example Output + +```markdown +# Session: 2024-01-15 +**Started:** ~2pm +**Last Updated:** 5:30pm +**Project:** my-app +**Topic:** Building JWT authentication with httpOnly cookies + +--- + +## What We Are Building + +User authentication system for the Next.js app. Users register with email/password, +receive a JWT stored in an httpOnly cookie (not localStorage), and protected routes +check for a valid token via middleware. The goal is session persistence across browser +refreshes without exposing the token to JavaScript. + +--- + +## What WORKED (with evidence) + +- **`/api/auth/register` endpoint** — confirmed by: Postman POST returns 200 with user + object, row visible in Supabase dashboard, bcrypt hash stored correctly +- **JWT generation in `lib/auth.ts`** — confirmed by: unit test passes + (`npm test -- auth.test.ts`), decoded token at jwt.io shows correct payload +- **Password hashing** — confirmed by: `bcrypt.compare()` returns true in test + +--- + +## What Did NOT Work (and why) + +- **Next-Auth library** — failed because: conflicts with our custom Prisma adapter, + threw "Cannot use adapter with credentials provider in this configuration" on every + request. Not worth debugging — too opinionated for our setup. +- **Storing JWT in localStorage** — failed because: SSR renders happen before + localStorage is available, caused React hydration mismatch error on every page load. + This approach is fundamentally incompatible with Next.js SSR. 
+ +--- + +## What Has NOT Been Tried Yet + +- Store JWT as httpOnly cookie in the login route response (most likely solution) +- Use `cookies()` from `next/headers` to read token in server components +- Write middleware.ts to protect routes by checking cookie existence + +--- + +## Current State of Files + +| File | Status | Notes | +|------|--------|-------| +| `app/api/auth/register/route.ts` | ✅ Complete | Works, tested | +| `app/api/auth/login/route.ts` | 🔄 In Progress | Token generates but not setting cookie yet | +| `lib/auth.ts` | ✅ Complete | JWT helpers, all tested | +| `middleware.ts` | 🗒️ Not Started | Route protection, needs cookie read logic first | +| `app/login/page.tsx` | 🗒️ Not Started | UI not started | + +--- + +## Decisions Made + +- **httpOnly cookie over localStorage** — reason: prevents XSS token theft, works with SSR +- **Custom auth over Next-Auth** — reason: Next-Auth conflicts with our Prisma setup, not worth the fight + +--- + +## Blockers & Open Questions + +- Does `cookies().set()` work inside a Route Handler or only in Server Actions? Need to verify. + +--- + +## Exact Next Step + +In `app/api/auth/login/route.ts`, after generating the JWT, set it as an httpOnly +cookie using `cookies().set('token', jwt, { httpOnly: true, secure: true, sameSite: 'strict' })`. +Then test with Postman — the response should include a `Set-Cookie` header. 
+``` + +--- + +## Notes + +- Each session gets its own file — never append to a previous session's file +- The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it +- If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly +- The file is meant to be read by Claude at the start of the next session via `/resume-session` +- Keep this file in `.claude/sessions/` — add that folder to `.gitignore` if session logs shouldn't be committed From 043b3cd9a959d8804eae70fa3b8bb1413ff524c4 Mon Sep 17 00:00:00 2001 From: avesh-devx Date: Mon, 9 Mar 2026 13:12:36 +0530 Subject: [PATCH 33/42] fix: update session file paths to use the home directory Updated the documentation for the `/resume-session` and `/save-session` commands to reflect the correct file paths, changing references from `.claude/sessions/` to `~/.claude/sessions/`. This ensures clarity on the global directory used for session management and maintains consistency across commands. --- commands/resume-session.md | 30 +++++++++++++++++------------- commands/save-session.md | 38 ++++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/commands/resume-session.md b/commands/resume-session.md index b33da0cc..5ed86a77 100644 --- a/commands/resume-session.md +++ b/commands/resume-session.md @@ -1,5 +1,5 @@ --- -description: Load the most recent session file from .claude/sessions/ and resume work with full context from where the last session ended. +description: Load the most recent session file from ~/.claude/sessions/ and resume work with full context from where the last session ended. --- # Resume Session Command @@ -17,9 +17,9 @@ This command is the counterpart to `/save-session`. 
## Usage ``` -/resume-session # loads most recent file in .claude/sessions/ -/resume-session 2024-01-15 # loads specific date -/resume-session .claude/sessions/foo.md # loads specific file path +/resume-session # loads most recent file in ~/.claude/sessions/ +/resume-session 2024-01-15 # loads most recent session for that date +/resume-session ~/.claude/sessions/2024-01-15-abc123-session.tmp # loads specific file path ``` ## Process @@ -27,17 +27,21 @@ This command is the counterpart to `/save-session`. ### Step 1: Find the session file If no argument provided: -1. List all files in `.claude/sessions/` + +1. List all `.tmp` files in `~/.claude/sessions/` 2. Pick the most recently modified file 3. If the folder doesn't exist or is empty, tell the user: ``` - No session files found in .claude/sessions/ + No session files found in ~/.claude/sessions/ Run /save-session at the end of a session to create one. ``` Then stop. If an argument is provided: -- If it looks like a date (`YYYY-MM-DD`), look for `.claude/sessions/YYYY-MM-DD.md` + +- If it looks like a date (`YYYY-MM-DD`), find all files in `~/.claude/sessions/` matching + `YYYY-MM-DD-session.tmp` (old format) or `YYYY-MM-DD--session.tmp` (new format) + and load the most recently modified variant for that date - If it looks like a file path, read that file directly - If not found, report clearly and stop @@ -50,7 +54,7 @@ Read the complete file. Do not summarize yet. Respond with a structured briefing in this exact format: ``` -SESSION LOADED: .claude/sessions/YYYY-MM-DD.md +SESSION LOADED: ~/.claude/sessions/YYYY-MM-DD--session.tmp ════════════════════════════════════════════════ PROJECT: [project name / topic from file] @@ -89,14 +93,14 @@ If no next step is defined — ask the user where to start, and optionally sugge ## Edge Cases -**Multiple sessions for the same date** (`2024-01-15.md`, `2024-01-15-2.md`): -Load the highest-numbered file (most recent). 
+**Multiple sessions for the same date** (`2024-01-15-session.tmp`, `2024-01-15-abc123-session.tmp`): +Load the most recently modified file for that date. **Session file references files that no longer exist:** Note this during the briefing — "⚠️ `path/to/file.ts` referenced in session but not found on disk." -**Session file is from a very old date:** -Note the gap — "⚠️ This session is from X days ago. Things may have changed." — then proceed normally. +**Session file is from more than 7 days ago:** +Note the gap — "⚠️ This session is from N days ago (threshold: 7 days). Things may have changed." — then proceed normally. **User provides a file path directly (e.g., forwarded from a teammate):** Read it and follow the same briefing process — the format is the same regardless of source. @@ -109,7 +113,7 @@ Report: "Session file found but appears empty or unreadable. You may need to cre ## Example Output ``` -SESSION LOADED: .claude/sessions/2024-01-15.md +SESSION LOADED: ~/.claude/sessions/2024-01-15-abc123-session.tmp ════════════════════════════════════════════════ PROJECT: my-app — JWT Authentication diff --git a/commands/save-session.md b/commands/save-session.md index 84254e1e..a9a155cc 100644 --- a/commands/save-session.md +++ b/commands/save-session.md @@ -1,5 +1,5 @@ --- -description: Save current session state to a dated file in .claude/sessions/ so work can be resumed in a future session with full context. +description: Save current session state to a dated file in ~/.claude/sessions/ so work can be resumed in a future session with full context. --- # Save Session Command @@ -27,14 +27,14 @@ Before writing the file, collect: ### Step 2: Create the sessions folder if it doesn't exist ```bash -mkdir -p .claude/sessions +mkdir -p ~/.claude/sessions ``` ### Step 3: Write the session file -Create `.claude/sessions/YYYY-MM-DD.md` using today's actual date. 
+Create `~/.claude/sessions/YYYY-MM-DD--session.tmp` using today's actual date and a short random alphanumeric ID (e.g., `2024-01-15-abc123de-session.tmp`). -If a file for today already exists (multiple sessions in one day), name it `YYYY-MM-DD-2.md`, `YYYY-MM-DD-3.md`, etc. +Each session always gets a unique short-id, so multiple sessions on the same day never collide. ### Step 4: Populate the file with all sections below @@ -45,7 +45,7 @@ Write every section honestly. Do not skip sections — write "Nothing yet" or "N After writing, display the full contents and ask: ``` -Session saved to .claude/sessions/YYYY-MM-DD.md +Session saved to ~/.claude/sessions/YYYY-MM-DD--session.tmp Does this look accurate? Anything to correct or add before we close? ``` @@ -58,6 +58,7 @@ Wait for confirmation. Make edits if requested. ```markdown # Session: YYYY-MM-DD + **Started:** [approximate time if known] **Last Updated:** [current time] **Project:** [project name or path] @@ -116,12 +117,12 @@ If nothing is queued: "No specific untried approaches identified." [Every file touched this session. Be precise about what state each file is in.] -| File | Status | Notes | -|------|--------|-------| -| `path/to/file.ts` | ✅ Complete | [what it does] | +| File | Status | Notes | +| ----------------- | -------------- | -------------------------- | +| `path/to/file.ts` | ✅ Complete | [what it does] | | `path/to/file.ts` | 🔄 In Progress | [what's done, what's left] | -| `path/to/file.ts` | ❌ Broken | [what's wrong] | -| `path/to/file.ts` | 🗒️ Not Started | [planned but not touched] | +| `path/to/file.ts` | ❌ Broken | [what's wrong] | +| `path/to/file.ts` | 🗒️ Not Started | [planned but not touched] | If no files were touched: "No files modified this session." @@ -173,6 +174,7 @@ required, services that need to be running, etc. Skip if standard setup.] 
```markdown # Session: 2024-01-15 + **Started:** ~2pm **Last Updated:** 5:30pm **Project:** my-app @@ -220,13 +222,13 @@ refreshes without exposing the token to JavaScript. ## Current State of Files -| File | Status | Notes | -|------|--------|-------| -| `app/api/auth/register/route.ts` | ✅ Complete | Works, tested | -| `app/api/auth/login/route.ts` | 🔄 In Progress | Token generates but not setting cookie yet | -| `lib/auth.ts` | ✅ Complete | JWT helpers, all tested | -| `middleware.ts` | 🗒️ Not Started | Route protection, needs cookie read logic first | -| `app/login/page.tsx` | 🗒️ Not Started | UI not started | +| File | Status | Notes | +| -------------------------------- | -------------- | ----------------------------------------------- | +| `app/api/auth/register/route.ts` | ✅ Complete | Works, tested | +| `app/api/auth/login/route.ts` | 🔄 In Progress | Token generates but not setting cookie yet | +| `lib/auth.ts` | ✅ Complete | JWT helpers, all tested | +| `middleware.ts` | 🗒️ Not Started | Route protection, needs cookie read logic first | +| `app/login/page.tsx` | 🗒️ Not Started | UI not started | --- @@ -258,4 +260,4 @@ Then test with Postman — the response should include a `Set-Cookie` header. 
- The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it - If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly - The file is meant to be read by Claude at the start of the next session via `/resume-session` -- Keep this file in `.claude/sessions/` — add that folder to `.gitignore` if session logs shouldn't be committed +- Keep this file in `~/.claude/sessions/` — this is a global directory shared across all projects, so session logs are not committed to any repo by default From e71024c4bd886977dda0bda0d70d11e9a247cb3f Mon Sep 17 00:00:00 2001 From: avesh-devx Date: Mon, 9 Mar 2026 13:23:50 +0530 Subject: [PATCH 34/42] docs: enhance session file naming guidelines in save-session command Updated the documentation for the `/save-session` command to include detailed rules for generating the session short-id, including allowed characters, minimum length, and examples of valid and invalid formats. This improves clarity and helps users adhere to the required naming conventions. --- commands/save-session.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/commands/save-session.md b/commands/save-session.md index a9a155cc..348fd2e1 100644 --- a/commands/save-session.md +++ b/commands/save-session.md @@ -32,7 +32,16 @@ mkdir -p ~/.claude/sessions ### Step 3: Write the session file -Create `~/.claude/sessions/YYYY-MM-DD--session.tmp` using today's actual date and a short random alphanumeric ID (e.g., `2024-01-15-abc123de-session.tmp`). 
+Create `~/.claude/sessions/YYYY-MM-DD--session.tmp` using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`: + +- Allowed characters: lowercase `a-z`, digits `0-9`, hyphens `-` +- Minimum length: 8 characters +- No uppercase letters, no underscores, no spaces + +Valid examples: `abc123de`, `a1b2c3d4`, `frontend-worktree-1` +Invalid examples: `ABC123de` (uppercase), `short` (under 8 chars), `test_id1` (underscore) + +Full valid filename example: `2024-01-15-abc123de-session.tmp` Each session always gets a unique short-id, so multiple sessions on the same day never collide. From 81022fdcfe9b234decb0020e838a63e6d5e369a4 Mon Sep 17 00:00:00 2001 From: avesh-h Date: Mon, 9 Mar 2026 21:27:32 +0530 Subject: [PATCH 35/42] docs: clarify session file paths and usage in resume-session command Updated the documentation for the `/resume-session` command to specify that session files are loaded from the project-level `.claude/sessions/` directory first, with a fallback to the global `~/.claude/sessions/` directory. Enhanced usage examples and clarified the process for locating session files, improving user understanding of session management. --- commands/resume-session.md | 22 ++++++++++++---------- commands/save-session.md | 13 +++++++++---- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/commands/resume-session.md b/commands/resume-session.md index 5ed86a77..3a08831a 100644 --- a/commands/resume-session.md +++ b/commands/resume-session.md @@ -1,5 +1,5 @@ --- -description: Load the most recent session file from ~/.claude/sessions/ and resume work with full context from where the last session ended. +description: Load the most recent session file from .claude/sessions/ in the current project (or ~/.claude/sessions/ globally) and resume work with full context from where the last session ended. --- # Resume Session Command @@ -17,9 +17,10 @@ This command is the counterpart to `/save-session`. 
## Usage ``` -/resume-session # loads most recent file in ~/.claude/sessions/ -/resume-session 2024-01-15 # loads most recent session for that date -/resume-session ~/.claude/sessions/2024-01-15-abc123-session.tmp # loads specific file path +/resume-session # loads most recent file in .claude/sessions/ (project level) +/resume-session 2024-01-15 # loads most recent session for that date +/resume-session .claude/sessions/2024-01-15-abc123-session.tmp # loads specific file path +/resume-session ~/.claude/sessions/2024-01-15-abc123-session.tmp # loads from global location ``` ## Process @@ -28,18 +29,19 @@ This command is the counterpart to `/save-session`. If no argument provided: -1. List all `.tmp` files in `~/.claude/sessions/` -2. Pick the most recently modified file -3. If the folder doesn't exist or is empty, tell the user: +1. First check `.claude/sessions/` in the current project directory +2. If not found there, fall back to `~/.claude/sessions/` +3. Pick the most recently modified `.tmp` file from whichever location has files +4. If neither folder exists or both are empty, tell the user: ``` - No session files found in ~/.claude/sessions/ + No session files found in .claude/sessions/ or ~/.claude/sessions/ Run /save-session at the end of a session to create one. ``` Then stop. If an argument is provided: -- If it looks like a date (`YYYY-MM-DD`), find all files in `~/.claude/sessions/` matching +- If it looks like a date (`YYYY-MM-DD`), search `.claude/sessions/` first then `~/.claude/sessions/` for files matching `YYYY-MM-DD-session.tmp` (old format) or `YYYY-MM-DD--session.tmp` (new format) and load the most recently modified variant for that date - If it looks like a file path, read that file directly @@ -54,7 +56,7 @@ Read the complete file. Do not summarize yet. 
Respond with a structured briefing in this exact format: ``` -SESSION LOADED: ~/.claude/sessions/YYYY-MM-DD--session.tmp +SESSION LOADED: .claude/sessions/YYYY-MM-DD--session.tmp ════════════════════════════════════════════════ PROJECT: [project name / topic from file] diff --git a/commands/save-session.md b/commands/save-session.md index 348fd2e1..7c03d19c 100644 --- a/commands/save-session.md +++ b/commands/save-session.md @@ -26,13 +26,17 @@ Before writing the file, collect: ### Step 2: Create the sessions folder if it doesn't exist +Create the folder at the **project level** by default: + ```bash -mkdir -p ~/.claude/sessions +mkdir -p .claude/sessions ``` +If the user explicitly asks for global storage, use `~/.claude/sessions` instead. + ### Step 3: Write the session file -Create `~/.claude/sessions/YYYY-MM-DD--session.tmp` using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`: +Create `.claude/sessions/YYYY-MM-DD--session.tmp` in the current project directory, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`: - Allowed characters: lowercase `a-z`, digits `0-9`, hyphens `-` - Minimum length: 8 characters @@ -54,7 +58,7 @@ Write every section honestly. Do not skip sections — write "Nothing yet" or "N After writing, display the full contents and ask: ``` -Session saved to ~/.claude/sessions/YYYY-MM-DD--session.tmp +Session saved to .claude/sessions/YYYY-MM-DD--session.tmp Does this look accurate? Anything to correct or add before we close? ``` @@ -269,4 +273,5 @@ Then test with Postman — the response should include a `Set-Cookie` header. 
- The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it - If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly - The file is meant to be read by Claude at the start of the next session via `/resume-session` -- Keep this file in `~/.claude/sessions/` — this is a global directory shared across all projects, so session logs are not committed to any repo by default +- Save to `.claude/sessions/` inside the current project by default — this keeps session logs co-located with the project they belong to +- Use `~/.claude/sessions/` only if the user explicitly requests global storage or there is no active project directory From b39e25a58f2498724583866a0d40d5c960e1f77e Mon Sep 17 00:00:00 2001 From: avesh-h Date: Mon, 9 Mar 2026 21:35:57 +0530 Subject: [PATCH 36/42] docs: update session file paths in save-session and resume-session commands Revised the documentation for both the and commands to clarify that session files are saved and loaded from the project-level directory, rather than the global directory. This change enhances user understanding of session management and ensures consistency in file path references. --- commands/resume-session.md | 4 ++-- commands/save-session.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/commands/resume-session.md b/commands/resume-session.md index 3a08831a..9cba97af 100644 --- a/commands/resume-session.md +++ b/commands/resume-session.md @@ -56,7 +56,7 @@ Read the complete file. Do not summarize yet. Respond with a structured briefing in this exact format: ``` -SESSION LOADED: .claude/sessions/YYYY-MM-DD--session.tmp +SESSION LOADED: [actual resolved path to the file] ════════════════════════════════════════════════ PROJECT: [project name / topic from file] @@ -115,7 +115,7 @@ Report: "Session file found but appears empty or unreadable. 
You may need to cre ## Example Output ``` -SESSION LOADED: ~/.claude/sessions/2024-01-15-abc123-session.tmp +SESSION LOADED: .claude/sessions/2024-01-15-abc123-session.tmp ════════════════════════════════════════════════ PROJECT: my-app — JWT Authentication diff --git a/commands/save-session.md b/commands/save-session.md index 7c03d19c..7acf384a 100644 --- a/commands/save-session.md +++ b/commands/save-session.md @@ -1,5 +1,5 @@ --- -description: Save current session state to a dated file in ~/.claude/sessions/ so work can be resumed in a future session with full context. +description: Save current session state to a dated file in .claude/sessions/ (project level by default) so work can be resumed in a future session with full context. --- # Save Session Command From b365ce861ad3b17326a84b38e97c2c6a7d8277fd Mon Sep 17 00:00:00 2001 From: avesh-h Date: Mon, 9 Mar 2026 21:41:17 +0530 Subject: [PATCH 37/42] docs: update session file path in save-session command documentation Revised the documentation for the `/save-session` command to reflect the actual resolved path to the session file, enhancing clarity for users regarding where their session data is stored. This change aligns with previous updates to session file management. --- commands/save-session.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/commands/save-session.md b/commands/save-session.md index 7acf384a..0f3ceeac 100644 --- a/commands/save-session.md +++ b/commands/save-session.md @@ -58,7 +58,7 @@ Write every section honestly. Do not skip sections — write "Nothing yet" or "N After writing, display the full contents and ask: ``` -Session saved to .claude/sessions/YYYY-MM-DD--session.tmp +Session saved to [actual resolved path to the session file] Does this look accurate? Anything to correct or add before we close? 
``` From 8f87a5408ff2e12c06d54621a443cc42ff14ad6e Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Tue, 10 Mar 2026 20:20:50 -0700 Subject: [PATCH 38/42] docs: align session commands with session manager --- commands/resume-session.md | 27 +++++++++++++-------------- commands/save-session.md | 16 +++++++--------- 2 files changed, 20 insertions(+), 23 deletions(-) diff --git a/commands/resume-session.md b/commands/resume-session.md index 9cba97af..5f84cf61 100644 --- a/commands/resume-session.md +++ b/commands/resume-session.md @@ -1,5 +1,5 @@ --- -description: Load the most recent session file from .claude/sessions/ in the current project (or ~/.claude/sessions/ globally) and resume work with full context from where the last session ended. +description: Load the most recent session file from ~/.claude/sessions/ and resume work with full context from where the last session ended. --- # Resume Session Command @@ -17,10 +17,10 @@ This command is the counterpart to `/save-session`. ## Usage ``` -/resume-session # loads most recent file in .claude/sessions/ (project level) +/resume-session # loads most recent file in ~/.claude/sessions/ /resume-session 2024-01-15 # loads most recent session for that date -/resume-session .claude/sessions/2024-01-15-abc123-session.tmp # loads specific file path -/resume-session ~/.claude/sessions/2024-01-15-abc123-session.tmp # loads from global location +/resume-session ~/.claude/sessions/2024-01-15-session.tmp # loads a specific legacy-format file +/resume-session ~/.claude/sessions/2024-01-15-abc123de-session.tmp # loads a current short-id session file ``` ## Process @@ -29,20 +29,19 @@ This command is the counterpart to `/save-session`. If no argument provided: -1. First check `.claude/sessions/` in the current project directory -2. If not found there, fall back to `~/.claude/sessions/` -3. Pick the most recently modified `.tmp` file from whichever location has files -4. 
If neither folder exists or both are empty, tell the user: +1. Check `~/.claude/sessions/` +2. Pick the most recently modified `*-session.tmp` file +3. If the folder does not exist or has no matching files, tell the user: ``` - No session files found in .claude/sessions/ or ~/.claude/sessions/ + No session files found in ~/.claude/sessions/ Run /save-session at the end of a session to create one. ``` Then stop. If an argument is provided: -- If it looks like a date (`YYYY-MM-DD`), search `.claude/sessions/` first then `~/.claude/sessions/` for files matching - `YYYY-MM-DD-session.tmp` (old format) or `YYYY-MM-DD--session.tmp` (new format) +- If it looks like a date (`YYYY-MM-DD`), search `~/.claude/sessions/` for files matching + `YYYY-MM-DD-session.tmp` (legacy format) or `YYYY-MM-DD--session.tmp` (current format) and load the most recently modified variant for that date - If it looks like a file path, read that file directly - If not found, report clearly and stop @@ -95,8 +94,8 @@ If no next step is defined — ask the user where to start, and optionally sugge ## Edge Cases -**Multiple sessions for the same date** (`2024-01-15-session.tmp`, `2024-01-15-abc123-session.tmp`): -Load the most recently modified file for that date. +**Multiple sessions for the same date** (`2024-01-15-session.tmp`, `2024-01-15-abc123de-session.tmp`): +Load the most recently modified matching file for that date, regardless of whether it uses the legacy no-id format or the current short-id format. **Session file references files that no longer exist:** Note this during the briefing — "⚠️ `path/to/file.ts` referenced in session but not found on disk." @@ -115,7 +114,7 @@ Report: "Session file found but appears empty or unreadable. 
You may need to cre ## Example Output ``` -SESSION LOADED: .claude/sessions/2024-01-15-abc123-session.tmp +SESSION LOADED: /Users/you/.claude/sessions/2024-01-15-abc123de-session.tmp ════════════════════════════════════════════════ PROJECT: my-app — JWT Authentication diff --git a/commands/save-session.md b/commands/save-session.md index 0f3ceeac..676d74cd 100644 --- a/commands/save-session.md +++ b/commands/save-session.md @@ -1,5 +1,5 @@ --- -description: Save current session state to a dated file in .claude/sessions/ (project level by default) so work can be resumed in a future session with full context. +description: Save current session state to a dated file in ~/.claude/sessions/ so work can be resumed in a future session with full context. --- # Save Session Command @@ -26,17 +26,15 @@ Before writing the file, collect: ### Step 2: Create the sessions folder if it doesn't exist -Create the folder at the **project level** by default: +Create the canonical sessions folder in the user's Claude home directory: ```bash -mkdir -p .claude/sessions +mkdir -p ~/.claude/sessions ``` -If the user explicitly asks for global storage, use `~/.claude/sessions` instead. - ### Step 3: Write the session file -Create `.claude/sessions/YYYY-MM-DD--session.tmp` in the current project directory, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`: +Create `~/.claude/sessions/YYYY-MM-DD--session.tmp`, using today's actual date and a short-id that satisfies the rules enforced by `SESSION_FILENAME_REGEX` in `session-manager.js`: - Allowed characters: lowercase `a-z`, digits `0-9`, hyphens `-` - Minimum length: 8 characters @@ -47,7 +45,7 @@ Invalid examples: `ABC123de` (uppercase), `short` (under 8 chars), `test_id1` (u Full valid filename example: `2024-01-15-abc123de-session.tmp` -Each session always gets a unique short-id, so multiple sessions on the same day never collide. 
+The legacy filename `YYYY-MM-DD-session.tmp` is still valid, but new session files should prefer the short-id form to avoid same-day collisions. ### Step 4: Populate the file with all sections below @@ -273,5 +271,5 @@ Then test with Postman — the response should include a `Set-Cookie` header. - The "What Did NOT Work" section is the most critical — future sessions will blindly retry failed approaches without it - If the user asks to save mid-session (not just at the end), save what's known so far and mark in-progress items clearly - The file is meant to be read by Claude at the start of the next session via `/resume-session` -- Save to `.claude/sessions/` inside the current project by default — this keeps session logs co-located with the project they belong to -- Use `~/.claude/sessions/` only if the user explicitly requests global storage or there is no active project directory +- Use the canonical global session store: `~/.claude/sessions/` +- Prefer the short-id filename form (`YYYY-MM-DD--session.tmp`) for any new session file From ae5c9243c921d941324c53651ff2510ac489eb3f Mon Sep 17 00:00:00 2001 From: Necip Sunmaz Date: Mon, 9 Mar 2026 06:20:26 +0300 Subject: [PATCH 39/42] feat: add Perl language rules and update documentation Add rules/perl/ with 5 rule files (coding-style, testing, patterns, hooks, security) following the same structure as existing languages. Update README.md, README.zh-CN.md, and rules/README.md to document Perl support including badges, directory trees, install instructions, and rule counts. 
--- README.md | 24 ++++++++---- README.zh-CN.md | 52 ++++++++++++++++++-------- rules/README.md | 7 +++- rules/perl/coding-style.md | 46 +++++++++++++++++++++++ rules/perl/hooks.md | 22 +++++++++++ rules/perl/patterns.md | 76 ++++++++++++++++++++++++++++++++++++++ rules/perl/security.md | 69 ++++++++++++++++++++++++++++++++++ rules/perl/testing.md | 54 +++++++++++++++++++++++++++ 8 files changed, 325 insertions(+), 25 deletions(-) create mode 100644 rules/perl/coding-style.md create mode 100644 rules/perl/hooks.md create mode 100644 rules/perl/patterns.md create mode 100644 rules/perl/security.md create mode 100644 rules/perl/testing.md diff --git a/README.md b/README.md index 25d6c091..2e5c7d19 100644 --- a/README.md +++ b/README.md @@ -14,9 +14,10 @@ ![Python](https://img.shields.io/badge/-Python-3776AB?logo=python&logoColor=white) ![Go](https://img.shields.io/badge/-Go-00ADD8?logo=go&logoColor=white) ![Java](https://img.shields.io/badge/-Java-ED8B00?logo=openjdk&logoColor=white) +![Perl](https://img.shields.io/badge/-Perl-39457E?logo=perl&logoColor=white) ![Markdown](https://img.shields.io/badge/-Markdown-000000?logo=markdown&logoColor=white) -> **50K+ stars** | **6K+ forks** | **30 contributors** | **6 languages supported** | **Anthropic Hackathon Winner** +> **50K+ stars** | **6K+ forks** | **30 contributors** | **7 languages supported** | **Anthropic Hackathon Winner** --- @@ -109,7 +110,7 @@ This repo is the raw code only. The guides explain everything. - **Interactive installation wizard** — New `configure-ecc` skill provides guided setup with merge/overwrite detection - **PM2 & multi-agent orchestration** — 6 new commands (`/pm2`, `/multi-plan`, `/multi-execute`, `/multi-backend`, `/multi-frontend`, `/multi-workflow`) for managing complex multi-service workflows -- **Multi-language rules architecture** — Rules restructured from flat files into `common/` + `typescript/` + `python/` + `golang/` directories. 
Install only the languages you need +- **Multi-language rules architecture** — Rules restructured from flat files into `common/` + `typescript/` + `python/` + `golang/` + `perl/` directories. Install only the languages you need - **Chinese (zh-CN) translations** — Complete translation of all agents, commands, skills, and rules (80+ files) - **GitHub Sponsors support** — Sponsor the project via GitHub Sponsors - **Enhanced CONTRIBUTING.md** — Detailed PR templates for each contribution type @@ -155,9 +156,9 @@ git clone https://github.com/affaan-m/everything-claude-code.git cd everything-claude-code # Recommended: use the installer (handles common + language rules safely) -./install.sh typescript # or python or golang +./install.sh typescript # or python or golang or perl # You can pass multiple languages: -# ./install.sh typescript python golang +# ./install.sh typescript python golang perl # or target cursor: # ./install.sh --target cursor typescript # or target antigravity: @@ -310,6 +311,9 @@ everything-claude-code/ | |-- liquid-glass-design/ # iOS 26 Liquid Glass design system (NEW) | |-- foundation-models-on-device/ # Apple on-device LLM with FoundationModels (NEW) | |-- swift-concurrency-6-2/ # Swift 6.2 Approachable Concurrency (NEW) +| |-- perl-patterns/ # Modern Perl 5.36+ idioms and best practices (NEW) +| |-- perl-security/ # Perl security patterns, taint mode, safe I/O (NEW) +| |-- perl-testing/ # Perl TDD with Test2::V0, prove, Devel::Cover (NEW) | |-- autonomous-loops/ # Autonomous loop patterns: sequential pipelines, PR loops, DAG orchestration (NEW) | |-- plankton-code-quality/ # Write-time code quality enforcement with Plankton hooks (NEW) | @@ -361,6 +365,7 @@ everything-claude-code/ | |-- typescript/ # TypeScript/JavaScript specific | |-- python/ # Python specific | |-- golang/ # Go specific +| |-- perl/ # Perl specific (NEW) | |-- hooks/ # Trigger-based automations | |-- README.md # Hook documentation, recipes, and customization guide @@ -563,6 
+568,7 @@ This gives you instant access to all commands, agents, skills, and hooks. > cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack > cp -r everything-claude-code/rules/python/* ~/.claude/rules/ > cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ +> cp -r everything-claude-code/rules/perl/* ~/.claude/rules/ > > # Option B: Project-level rules (applies to current project only) > mkdir -p .claude/rules @@ -588,6 +594,7 @@ cp -r everything-claude-code/rules/common/* ~/.claude/rules/ cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack cp -r everything-claude-code/rules/python/* ~/.claude/rules/ cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ +cp -r everything-claude-code/rules/perl/* ~/.claude/rules/ # Copy commands cp everything-claude-code/commands/*.md ~/.claude/commands/ @@ -670,6 +677,7 @@ rules/ typescript/ # TS/JS specific patterns and tools python/ # Python specific patterns and tools golang/ # Go specific patterns and tools + perl/ # Perl specific patterns and tools ``` See [`rules/README.md`](rules/README.md) for installation and structure details. @@ -824,7 +832,7 @@ Please contribute! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines. 
### Ideas for Contributions -- Language-specific skills (Rust, C#, Swift, Kotlin) — Go, Python, Java already included +- Language-specific skills (Rust, C#, Swift, Kotlin) — Go, Python, Java, Perl already included - Framework-specific configs (Rails, Laravel, FastAPI, NestJS) — Django, Spring Boot already included - DevOps agents (Kubernetes, Terraform, AWS, Docker) - Testing strategies (different frameworks, visual regression) @@ -841,7 +849,7 @@ ECC provides **full Cursor IDE support** with hooks, rules, agents, skills, comm ```bash # Install for your language(s) ./install.sh --target cursor typescript -./install.sh --target cursor python golang swift +./install.sh --target cursor python golang swift perl ``` ### What's Included @@ -850,7 +858,7 @@ ECC provides **full Cursor IDE support** with hooks, rules, agents, skills, comm |-----------|-------|---------| | Hook Events | 15 | sessionStart, beforeShellExecution, afterFileEdit, beforeMCPExecution, beforeSubmitPrompt, and 10 more | | Hook Scripts | 16 | Thin Node.js scripts delegating to `scripts/hooks/` via shared adapter | -| Rules | 29 | 9 common (alwaysApply) + 20 language-specific (TypeScript, Python, Go, Swift) | +| Rules | 34 | 9 common (alwaysApply) + 25 language-specific (TypeScript, Python, Go, Swift, Perl) | | Agents | Shared | Via AGENTS.md at root (read by Cursor natively) | | Skills | Shared + Bundled | Via AGENTS.md at root and `.cursor/skills/` for translated additions | | Commands | Shared | `.cursor/commands/` if installed | @@ -1096,7 +1104,7 @@ ECC is the **first plugin to maximize every major AI coding tool**. 
Here's how e | **Skills** | 65 | Shared | 10 (native format) | 37 | | **Hook Events** | 8 types | 15 types | None yet | 11 types | | **Hook Scripts** | 20+ scripts | 16 scripts (DRY adapter) | N/A | Plugin hooks | -| **Rules** | 29 (common + lang) | 29 (YAML frontmatter) | Instruction-based | 13 instructions | +| **Rules** | 34 (common + lang) | 34 (YAML frontmatter) | Instruction-based | 13 instructions | | **Custom Tools** | Via hooks | Via hooks | N/A | 6 native tools | | **MCP Servers** | 14 | Shared (mcp.json) | 4 (command-based) | Full | | **Config Format** | settings.json | hooks.json + rules/ | config.toml | opencode.json | diff --git a/README.zh-CN.md b/README.zh-CN.md index ef72d195..45171f64 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -5,6 +5,7 @@ ![Shell](https://img.shields.io/badge/-Shell-4EAA25?logo=gnu-bash&logoColor=white) ![TypeScript](https://img.shields.io/badge/-TypeScript-3178C6?logo=typescript&logoColor=white) ![Go](https://img.shields.io/badge/-Go-00ADD8?logo=go&logoColor=white) +![Perl](https://img.shields.io/badge/-Perl-39457E?logo=perl&logoColor=white) ![Markdown](https://img.shields.io/badge/-Markdown-000000?logo=markdown&logoColor=white) --- @@ -81,8 +82,12 @@ # 首先克隆仓库 git clone https://github.com/affaan-m/everything-claude-code.git -# 复制规则(应用于所有项目) -cp -r everything-claude-code/rules/* ~/.claude/rules/ +# 复制规则(通用 + 语言特定) +cp -r everything-claude-code/rules/common/* ~/.claude/rules/ +cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈 +cp -r everything-claude-code/rules/python/* ~/.claude/rules/ +cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ +cp -r everything-claude-code/rules/perl/* ~/.claude/rules/ ``` ### 第三步:开始使用 @@ -175,6 +180,9 @@ everything-claude-code/ | |-- golang-patterns/ # Go 惯用语和最佳实践(新增) | |-- golang-testing/ # Go 测试模式、TDD、基准测试(新增) | |-- cpp-testing/ # C++ 测试模式、GoogleTest、CMake/CTest(新增) +| |-- perl-patterns/ # 现代 Perl 5.36+ 惯用语和最佳实践(新增) +| |-- perl-security/ # Perl 
安全模式、污染模式、安全 I/O(新增) +| |-- perl-testing/ # 使用 Test2::V0、prove、Devel::Cover 的 Perl TDD(新增) | |-- commands/ # 用于快速执行的斜杠命令 | |-- tdd.md # /tdd - 测试驱动开发 @@ -197,12 +205,20 @@ everything-claude-code/ | |-- evolve.md # /evolve - 将直觉聚类到技能中(新增) | |-- rules/ # 始终遵循的指南(复制到 ~/.claude/rules/) -| |-- security.md # 强制性安全检查 -| |-- coding-style.md # 不可变性、文件组织 -| |-- testing.md # TDD、80% 覆盖率要求 -| |-- git-workflow.md # 提交格式、PR 流程 -| |-- agents.md # 何时委托给子代理 -| |-- performance.md # 模型选择、上下文管理 +| |-- README.md # 结构概述和安装指南 +| |-- common/ # 与语言无关的原则 +| | |-- coding-style.md # 不可变性、文件组织 +| | |-- git-workflow.md # 提交格式、PR 流程 +| | |-- testing.md # TDD、80% 覆盖率要求 +| | |-- performance.md # 模型选择、上下文管理 +| | |-- patterns.md # 设计模式、骨架项目 +| | |-- hooks.md # 钩子架构、TodoWrite +| | |-- agents.md # 何时委托给子代理 +| | |-- security.md # 强制性安全检查 +| |-- typescript/ # TypeScript/JavaScript 特定 +| |-- python/ # Python 特定 +| |-- golang/ # Go 特定 +| |-- perl/ # Perl 特定(新增) | |-- hooks/ # 基于触发器的自动化 | |-- hooks.json # 所有钩子配置(PreToolUse、PostToolUse、Stop 等) @@ -356,8 +372,12 @@ git clone https://github.com/affaan-m/everything-claude-code.git # 将代理复制到你的 Claude 配置 cp everything-claude-code/agents/*.md ~/.claude/agents/ -# 复制规则 -cp everything-claude-code/rules/*.md ~/.claude/rules/ +# 复制规则(通用 + 语言特定) +cp -r everything-claude-code/rules/common/* ~/.claude/rules/ +cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择你的技术栈 +cp -r everything-claude-code/rules/python/* ~/.claude/rules/ +cp -r everything-claude-code/rules/golang/* ~/.claude/rules/ +cp -r everything-claude-code/rules/perl/* ~/.claude/rules/ # 复制命令 cp everything-claude-code/commands/*.md ~/.claude/commands/ @@ -425,13 +445,15 @@ model: opus ### 规则 -规则是始终遵循的指南。保持模块化: +规则是始终遵循的指南,分为 `common/`(通用)+ 语言特定目录: ``` ~/.claude/rules/ - security.md # 无硬编码秘密 - coding-style.md # 不可变性、文件限制 - testing.md # TDD、覆盖率要求 + common/ # 通用原则(必装) + typescript/ # TS/JS 特定模式和工具 + python/ # Python 特定模式和工具 + golang/ # Go 特定模式和工具 + perl/ # Perl 特定模式和工具 ``` --- @@ -466,7 +488,7 @@ 
node tests/hooks/hooks.test.js ### 贡献想法 -- 特定语言的技能(Python、Rust 模式)- 现已包含 Go! +- 特定语言的技能(Rust、C#、Swift、Kotlin)- 现已包含 Go、Python、Java、Perl! - 特定框架的配置(Django、Rails、Laravel) - DevOps 代理(Kubernetes、Terraform、AWS) - 测试策略(不同框架) diff --git a/rules/README.md b/rules/README.md index 0cb01485..a9c83776 100644 --- a/rules/README.md +++ b/rules/README.md @@ -17,7 +17,8 @@ rules/ ├── typescript/ # TypeScript/JavaScript specific ├── python/ # Python specific ├── golang/ # Go specific -└── swift/ # Swift specific +├── swift/ # Swift specific +└── perl/ # Perl specific ``` - **common/** contains universal principles — no language-specific code examples. @@ -33,6 +34,7 @@ rules/ ./install.sh python ./install.sh golang ./install.sh swift +./install.sh perl # Install multiple languages at once ./install.sh typescript python @@ -55,6 +57,7 @@ cp -r rules/typescript ~/.claude/rules/typescript cp -r rules/python ~/.claude/rules/python cp -r rules/golang ~/.claude/rules/golang cp -r rules/swift ~/.claude/rules/swift +cp -r rules/perl ~/.claude/rules/perl # Attention ! ! ! Configure according to your actual project requirements; the configuration here is for reference only. ``` @@ -88,7 +91,7 @@ To add support for a new language (e.g., `rust/`): When language-specific rules and common rules conflict, **language-specific rules take precedence** (specific overrides general). This follows the standard layered configuration pattern (similar to CSS specificity or `.gitignore` precedence). - `rules/common/` defines universal defaults applicable to all projects. -- `rules/golang/`, `rules/python/`, `rules/typescript/`, etc. override those defaults where language idioms differ. +- `rules/golang/`, `rules/python/`, `rules/perl/`, `rules/typescript/`, etc. override those defaults where language idioms differ. 
### Example diff --git a/rules/perl/coding-style.md b/rules/perl/coding-style.md new file mode 100644 index 00000000..6e522c65 --- /dev/null +++ b/rules/perl/coding-style.md @@ -0,0 +1,46 @@ +--- +paths: + - "**/*.pl" + - "**/*.pm" + - "**/*.t" + - "**/*.psgi" + - "**/*.cgi" +--- +# Perl Coding Style + +> This file extends [common/coding-style.md](../common/coding-style.md) with Perl specific content. + +## Standards + +- Always `use v5.36` (enables `strict`, `warnings`, `say`, subroutine signatures) +- Use subroutine signatures — never unpack `@_` manually +- Prefer `say` over `print` with explicit newlines + +## Immutability + +- Use **Moo** with `is => 'ro'` and `Types::Standard` for all attributes +- Never use blessed hashrefs directly — always use Moo/Moose accessors +- **OO override note**: Moo `has` attributes with `builder` or `default` are acceptable for computed read-only values + +## Formatting + +Use **perltidy** with these settings: + +``` +-i=4 # 4-space indent +-l=100 # 100 char line length +-ce # cuddled else +-bar # opening brace always right +``` + +## Linting + +Use **perlcritic** at severity 3 with themes: `core`, `pbp`, `security`. + +```bash +perlcritic --severity 3 --theme 'core || pbp || security' lib/ +``` + +## Reference + +See skill: `perl-patterns` for comprehensive modern Perl idioms and best practices. diff --git a/rules/perl/hooks.md b/rules/perl/hooks.md new file mode 100644 index 00000000..c8dc850b --- /dev/null +++ b/rules/perl/hooks.md @@ -0,0 +1,22 @@ +--- +paths: + - "**/*.pl" + - "**/*.pm" + - "**/*.t" + - "**/*.psgi" + - "**/*.cgi" +--- +# Perl Hooks + +> This file extends [common/hooks.md](../common/hooks.md) with Perl specific content. 
+ +## PostToolUse Hooks + +Configure in `~/.claude/settings.json`: + +- **perltidy**: Auto-format `.pl` and `.pm` files after edit +- **perlcritic**: Run lint check after editing `.pm` files + +## Warnings + +- Warn about `print` in non-script `.pm` files — use `say` or a logging module (e.g., `Log::Any`) diff --git a/rules/perl/patterns.md b/rules/perl/patterns.md new file mode 100644 index 00000000..ad03fa59 --- /dev/null +++ b/rules/perl/patterns.md @@ -0,0 +1,76 @@ +--- +paths: + - "**/*.pl" + - "**/*.pm" + - "**/*.t" + - "**/*.psgi" + - "**/*.cgi" +--- +# Perl Patterns + +> This file extends [common/patterns.md](../common/patterns.md) with Perl specific content. + +## Repository Pattern + +Use **DBI** or **DBIx::Class** behind an interface: + +```perl +package MyApp::Repo::User; +use Moo; + +has dbh => (is => 'ro', required => 1); + +sub find_by_id ($self, $id) { + my $sth = $self->dbh->prepare('SELECT * FROM users WHERE id = ?'); + $sth->execute($id); + return $sth->fetchrow_hashref; +} +``` + +## DTOs / Value Objects + +Use **Moo** classes with **Types::Standard** (equivalent to Python dataclasses): + +```perl +package MyApp::DTO::User; +use Moo; +use Types::Standard qw(Str Int); + +has name => (is => 'ro', isa => Str, required => 1); +has email => (is => 'ro', isa => Str, required => 1); +has age => (is => 'ro', isa => Int); +``` + +## Resource Management + +- Always use **three-arg open** with `autodie` +- Use **Path::Tiny** for file operations + +```perl +use autodie; +use Path::Tiny; + +my $content = path('config.json')->slurp_utf8; +``` + +## Module Interface + +Use `Exporter 'import'` with `@EXPORT_OK` — never `@EXPORT`: + +```perl +use Exporter 'import'; +our @EXPORT_OK = qw(parse_config validate_input); +``` + +## Dependency Management + +Use **cpanfile** + **carton** for reproducible installs: + +```bash +carton install +carton exec prove -lr t/ +``` + +## Reference + +See skill: `perl-patterns` for comprehensive modern Perl patterns and idioms. 
diff --git a/rules/perl/security.md b/rules/perl/security.md new file mode 100644 index 00000000..3df3cd75 --- /dev/null +++ b/rules/perl/security.md @@ -0,0 +1,69 @@ +--- +paths: + - "**/*.pl" + - "**/*.pm" + - "**/*.t" + - "**/*.psgi" + - "**/*.cgi" +--- +# Perl Security + +> This file extends [common/security.md](../common/security.md) with Perl specific content. + +## Taint Mode + +- Use `-T` flag on all CGI/web-facing scripts +- Sanitize `%ENV` (`$ENV{PATH}`, `$ENV{CDPATH}`, etc.) before any external command + +## Input Validation + +- Use allowlist regex for untainting — never `/(.*)/s` +- Validate all user input with explicit patterns: + +```perl +if ($input =~ /\A([a-zA-Z0-9_-]+)\z/) { + my $clean = $1; +} +``` + +## File I/O + +- **Three-arg open only** — never two-arg open +- Prevent path traversal with `Cwd::realpath`: + +```perl +use Cwd 'realpath'; +my $safe_path = realpath($user_path); +die "Path traversal" unless $safe_path =~ m{\A/allowed/directory/}; +``` + +## Process Execution + +- Use **list-form `system()`** — never single-string form +- Use **IPC::Run3** for capturing output +- Never use backticks with variable interpolation + +```perl +system('grep', '-r', $pattern, $directory); # safe +``` + +## SQL Injection Prevention + +Always use DBI placeholders — never interpolate into SQL: + +```perl +my $sth = $dbh->prepare('SELECT * FROM users WHERE email = ?'); +$sth->execute($email); +``` + +## Security Scanning + +Run **perlcritic** with the security theme at severity 4+: + +```bash +perlcritic --severity 4 --theme security lib/ +``` + +## Reference + +See skill: `perl-security` for comprehensive Perl security patterns, taint mode, and safe I/O. 
diff --git a/rules/perl/testing.md b/rules/perl/testing.md new file mode 100644 index 00000000..c1755ada --- /dev/null +++ b/rules/perl/testing.md @@ -0,0 +1,54 @@ +--- +paths: + - "**/*.pl" + - "**/*.pm" + - "**/*.t" + - "**/*.psgi" + - "**/*.cgi" +--- +# Perl Testing + +> This file extends [common/testing.md](../common/testing.md) with Perl specific content. + +## Framework + +Use **Test2::V0** for new projects (not Test::More): + +```perl +use Test2::V0; + +is($result, 42, 'answer is correct'); + +done_testing; +``` + +## Runner + +```bash +prove -l t/ # adds lib/ to @INC +prove -lr -j8 t/ # recursive, 8 parallel jobs +``` + +Always use `-l` to ensure `lib/` is on `@INC`. + +## Coverage + +Use **Devel::Cover** — target 80%+: + +```bash +cover -test +``` + +## Mocking + +- **Test::MockModule** — mock methods on existing modules +- **Test::MockObject** — create test doubles from scratch + +## Pitfalls + +- Always end test files with `done_testing` +- Never forget the `-l` flag with `prove` + +## Reference + +See skill: `perl-testing` for detailed Perl TDD patterns with Test2::V0, prove, and Devel::Cover. 
From b2a7bae5db48f7d2364d5b7068ac8f92ef27a8ca Mon Sep 17 00:00:00 2001 From: Necip Sunmaz Date: Mon, 9 Mar 2026 06:24:50 +0300 Subject: [PATCH 40/42] feat: add Perl skills (patterns, security, testing) --- skills/perl-patterns/SKILL.md | 500 ++++++++++++++++++++++++++++++++++ skills/perl-security/SKILL.md | 499 +++++++++++++++++++++++++++++++++ skills/perl-testing/SKILL.md | 475 ++++++++++++++++++++++++++++++++ 3 files changed, 1474 insertions(+) create mode 100644 skills/perl-patterns/SKILL.md create mode 100644 skills/perl-security/SKILL.md create mode 100644 skills/perl-testing/SKILL.md diff --git a/skills/perl-patterns/SKILL.md b/skills/perl-patterns/SKILL.md new file mode 100644 index 00000000..eafd245c --- /dev/null +++ b/skills/perl-patterns/SKILL.md @@ -0,0 +1,500 @@ +--- +name: perl-patterns +description: Modern Perl 5.36+ idioms, best practices, and conventions for building robust, maintainable Perl applications. +origin: ECC +--- + +# Modern Perl Development Patterns + +Idiomatic Perl 5.36+ patterns and best practices for building robust, maintainable applications. + +## When to Activate + +- Writing new Perl code or modules +- Reviewing Perl code for idiom compliance +- Refactoring legacy Perl to modern standards +- Designing Perl module architecture +- Migrating pre-5.36 code to modern Perl + +## Core Principles + +### 1. Use `v5.36` Pragma + +A single `use v5.36` replaces the old boilerplate and enables strict, warnings, and subroutine signatures. + +```perl +# Good: Modern preamble +use v5.36; + +sub greet($name) { + say "Hello, $name!"; +} + +# Bad: Legacy boilerplate +use strict; +use warnings; +use feature 'say', 'signatures'; +no warnings 'experimental::signatures'; + +sub greet { + my ($name) = @_; + say "Hello, $name!"; +} +``` + +### 2. Subroutine Signatures + +Use signatures for clarity and automatic arity checking. 
+ +```perl +use v5.36; + +# Good: Signatures with defaults +sub connect_db($host, $port = 5432, $timeout = 30) { + # $host is required, others have defaults + return DBI->connect("dbi:Pg:host=$host;port=$port", undef, undef, { + RaiseError => 1, + PrintError => 0, + }); +} + +# Good: Slurpy parameter for variable args +sub log_message($level, @details) { + say "[$level] " . join(' ', @details); +} + +# Bad: Manual argument unpacking +sub connect_db { + my ($host, $port, $timeout) = @_; + $port //= 5432; + $timeout //= 30; + # ... +} +``` + +### 3. Context Sensitivity + +Understand scalar vs list context — a core Perl concept. + +```perl +use v5.36; + +my @items = (1, 2, 3, 4, 5); + +my @copy = @items; # List context: all elements +my $count = @items; # Scalar context: count (5) +say "Items: " . scalar @items; # Force scalar context +``` + +### 4. Postfix Dereferencing + +Use postfix dereference syntax for readability with nested structures. + +```perl +use v5.36; + +my $data = { + users => [ + { name => 'Alice', roles => ['admin', 'user'] }, + { name => 'Bob', roles => ['user'] }, + ], +}; + +# Good: Postfix dereferencing +my @users = $data->{users}->@*; +my @roles = $data->{users}[0]{roles}->@*; +my %first = $data->{users}[0]->%*; + +# Bad: Circumfix dereferencing (harder to read in chains) +my @users = @{ $data->{users} }; +my @roles = @{ $data->{users}[0]{roles} }; +``` + +### 5. The `isa` Operator (5.32+) + +Infix type-check — replaces `blessed($o) && $o->isa('X')`. 
+ +```perl +use v5.36; +if ($obj isa 'My::Class') { $obj->do_something } +``` + +## Error Handling + +### eval/die Pattern + +```perl +use v5.36; + +sub parse_config($path) { + my $content = eval { path($path)->slurp_utf8 }; + die "Config error: $@" if $@; + return decode_json($content); +} +``` + +### Try::Tiny (Reliable Exception Handling) + +```perl +use v5.36; +use Try::Tiny; + +sub fetch_user($id) { + my $user = try { + $db->resultset('User')->find($id) + // die "User $id not found\n"; + } + catch { + warn "Failed to fetch user $id: $_"; + undef; + }; + return $user; +} +``` + +### Native try/catch (5.40+) + +```perl +use v5.40; + +sub divide($a, $b) { + try { + die "Division by zero" if $b == 0; + return $a / $b; + } + catch ($e) { + warn "Error: $e"; + return undef; + } +} +``` + +## Modern OO with Moo + +Prefer Moo for lightweight, modern OO. Use Moose only when its metaprotocol is needed. + +```perl +# Good: Moo class +package User; +use Moo; +use Types::Standard qw(Str Int ArrayRef); +use namespace::autoclean; + +has name => (is => 'ro', isa => Str, required => 1); +has email => (is => 'ro', isa => Str, required => 1); +has age => (is => 'ro', isa => Int, default => sub { 0 }); +has roles => (is => 'ro', isa => ArrayRef[Str], default => sub { [] }); + +sub is_admin($self) { + return grep { $_ eq 'admin' } $self->roles->@*; +} + +sub greet($self) { + return "Hello, I'm " . 
$self->name; +} + +1; + +# Usage +my $user = User->new( + name => 'Alice', + email => 'alice@example.com', + roles => ['admin', 'user'], +); + +# Bad: Blessed hashref (no validation, no accessors) +package User; +sub new { + my ($class, %args) = @_; + return bless \%args, $class; +} +sub name { return $_[0]->{name} } +1; +``` + +### Moo Roles + +```perl +package Role::Serializable; +use Moo::Role; +use JSON::MaybeXS qw(encode_json); +requires 'TO_HASH'; +sub to_json($self) { encode_json($self->TO_HASH) } +1; + +package User; +use Moo; +with 'Role::Serializable'; +has name => (is => 'ro', required => 1); +has email => (is => 'ro', required => 1); +sub TO_HASH($self) { { name => $self->name, email => $self->email } } +1; +``` + +### Native `class` Keyword (5.38+, Corinna) + +```perl +use v5.38; +use feature 'class'; +no warnings 'experimental::class'; + +class Point { + field $x :param; + field $y :param; + method magnitude() { sqrt($x**2 + $y**2) } +} + +my $p = Point->new(x => 3, y => 4); +say $p->magnitude; # 5 +``` + +## Regular Expressions + +### Named Captures and `/x` Flag + +```perl +use v5.36; + +# Good: Named captures with /x for readability +my $log_re = qr{ + ^ (?<timestamp> \d{4}-\d{2}-\d{2} \s \d{2}:\d{2}:\d{2} ) + \s+ \[ (?<level> \w+ ) \] + \s+ (?<message>
.+ ) $ +}x; + +if ($line =~ $log_re) { + say "Time: $+{timestamp}, Level: $+{level}"; + say "Message: $+{message}"; +} + +# Bad: Positional captures (hard to maintain) +if ($line =~ /^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})\s+\[(\w+)\]\s+(.+)$/) { + say "Time: $1, Level: $2"; +} +``` + +### Precompiled Patterns + +```perl +use v5.36; + +# Good: Compile once, use many +my $email_re = qr/^[A-Za-z0-9._%+-]+\@[A-Za-z0-9.-]+\.[A-Za-z]{2,}$/; + +sub validate_emails(@emails) { + return grep { $_ =~ $email_re } @emails; +} +``` + +## Data Structures + +### References and Safe Deep Access + +```perl +use v5.36; + +# Hash and array references +my $config = { + database => { + host => 'localhost', + port => 5432, + options => ['utf8', 'sslmode=require'], + }, +}; + +# Safe deep access (returns undef if any level missing) +my $port = $config->{database}{port}; # 5432 +my $missing = $config->{cache}{host}; # undef, no error + +# Hash slices +my %subset; +@subset{qw(host port)} = @{$config->{database}}{qw(host port)}; + +# Array slices +my @first_two = $config->{database}{options}->@[0, 1]; + +# Multi-variable for loop (experimental in 5.36, stable in 5.40) +use feature 'for_list'; +no warnings 'experimental::for_list'; +for my ($key, $val) (%$config) { + say "$key => $val"; +} +``` + +## File I/O + +### Three-Argument Open + +```perl +use v5.36; + +# Good: Three-arg open with autodie (core module, eliminates 'or die') +use autodie; + +sub read_file($path) { + open my $fh, '<:encoding(UTF-8)', $path; + local $/; + my $content = <$fh>; + close $fh; + return $content; +} + +# Bad: Two-arg open (shell injection risk, see perl-security) +open FH, $path; # NEVER do this +open FH, "< $path"; # Still bad — user data in mode string +``` + +### Path::Tiny for File Operations + +```perl +use v5.36; +use Path::Tiny; + +my $file = path('config', 'app.json'); +my $content = $file->slurp_utf8; +$file->spew_utf8($new_content); + +# Iterate directory +for my $child 
(path('src')->children(qr/\.pl$/)) { + say $child->basename; +} +``` + +## Module Organization + +### Standard Project Layout + +```text +MyApp/ +├── lib/ +│ └── MyApp/ +│ ├── App.pm # Main module +│ ├── Config.pm # Configuration +│ ├── DB.pm # Database layer +│ └── Util.pm # Utilities +├── bin/ +│ └── myapp # Entry-point script +├── t/ +│ ├── 00-load.t # Compilation tests +│ ├── unit/ # Unit tests +│ └── integration/ # Integration tests +├── cpanfile # Dependencies +├── Makefile.PL # Build system +└── .perlcriticrc # Linting config +``` + +### Exporter Patterns + +```perl +package MyApp::Util; +use v5.36; +use Exporter 'import'; + +our @EXPORT_OK = qw(trim); +our %EXPORT_TAGS = (all => \@EXPORT_OK); + +sub trim($str) { $str =~ s/^\s+|\s+$//gr } + +1; +``` + +## Tooling + +### perltidy Configuration (.perltidyrc) + +```text +-i=4 # 4-space indent +-l=100 # 100-char line length +-ci=4 # continuation indent +-ce # cuddled else +-bar # opening brace on same line +-nolq # don't outdent long quoted strings +``` + +### perlcritic Configuration (.perlcriticrc) + +```ini +severity = 3 +theme = core + pbp + security + +[InputOutput::RequireCheckedSyscalls] +functions = :builtins +exclude_functions = say print + +[Subroutines::ProhibitExplicitReturnUndef] +severity = 4 + +[ValuesAndExpressions::ProhibitMagicNumbers] +allowed_values = 0 1 2 -1 +``` + +### Dependency Management (cpanfile + carton) + +```bash +cpanm App::cpanminus Carton # Install tools +carton install # Install deps from cpanfile +carton exec -- perl bin/myapp # Run with local deps +``` + +```perl +# cpanfile +requires 'Moo', '>= 2.005'; +requires 'Path::Tiny'; +requires 'JSON::MaybeXS'; +requires 'Try::Tiny'; + +on test => sub { + requires 'Test2::V0'; + requires 'Test::MockModule'; +}; +``` + +## Quick Reference: Modern Perl Idioms + +| Legacy Pattern | Modern Replacement | +|---|---| +| `use strict; use warnings;` | `use v5.36;` | +| `my ($x, $y) = @_;` | `sub foo($x, $y) { ... 
}` | +| `@{ $ref }` | `$ref->@*` | +| `%{ $ref }` | `$ref->%*` | +| `open FH, "< $file"` | `open my $fh, '<:encoding(UTF-8)', $file` | +| `blessed hashref` | `Moo` class with types | +| `$1, $2, $3` | `$+{name}` (named captures) | +| `eval { }; if ($@)` | `Try::Tiny` or native `try/catch` (5.40+) | +| `BEGIN { require Exporter; }` | `use Exporter 'import';` | +| Manual file ops | `Path::Tiny` | +| `blessed($o) && $o->isa('X')` | `$o isa 'X'` (5.32+) | +| `builtin::true / false` | `use builtin 'true', 'false';` (5.36+, experimental) | + +## Anti-Patterns + +```perl +# 1. Two-arg open (security risk) +open FH, $filename; # NEVER + +# 2. Indirect object syntax (ambiguous parsing) +my $obj = new Foo(bar => 1); # Bad +my $obj = Foo->new(bar => 1); # Good + +# 3. Excessive reliance on $_ +map { process($_) } grep { validate($_) } @items; # Hard to follow +my @valid = grep { validate($_) } @items; # Better: break it up +my @results = map { process($_) } @valid; + +# 4. Disabling strict refs +no strict 'refs'; # Almost always wrong +${"My::Package::$var"} = $value; # Use a hash instead + +# 5. Global variables as configuration +our $TIMEOUT = 30; # Bad: mutable global +use constant TIMEOUT => 30; # Better: constant +# Best: Moo attribute with default + +# 6. String eval for module loading +eval "require $module"; # Bad: code injection risk +eval "use $module"; # Bad +use Module::Runtime 'require_module'; # Good: safe module loading +require_module($module); +``` + +**Remember**: Modern Perl is clean, readable, and safe. Let `use v5.36` handle the boilerplate, use Moo for objects, and prefer CPAN's battle-tested modules over hand-rolled solutions. 
diff --git a/skills/perl-security/SKILL.md b/skills/perl-security/SKILL.md new file mode 100644 index 00000000..113ff5bb --- /dev/null +++ b/skills/perl-security/SKILL.md @@ -0,0 +1,499 @@ +--- +name: perl-security +description: Comprehensive Perl security covering taint mode, input validation, safe process execution, DBI parameterized queries, web security (XSS/SQLi/CSRF), and perlcritic security policies. +origin: ECC +--- + +# Perl Security Patterns + +Comprehensive security guidelines for Perl applications covering input validation, injection prevention, and secure coding practices. + +## When to Activate + +- Handling user input in Perl applications +- Building Perl web applications (CGI, Mojolicious, Dancer2, Catalyst) +- Reviewing Perl code for security vulnerabilities +- Performing file operations with user-supplied paths +- Executing system commands from Perl +- Writing DBI database queries + +## Taint Mode + +Perl's taint mode (`-T`) tracks data from external sources and prevents it from being used in unsafe operations without explicit validation. 
+ +### Enabling Taint Mode + +```perl +#!/usr/bin/perl -T +use v5.36; + +# Tainted: anything from outside the program +my $input = $ARGV[0]; # Tainted +my $env_path = $ENV{PATH}; # Tainted +my $form = <STDIN>; # Tainted +my $query = $ENV{QUERY_STRING}; # Tainted + +# Sanitize PATH early (required in taint mode) +$ENV{PATH} = '/usr/local/bin:/usr/bin:/bin'; +delete @ENV{qw(IFS CDPATH ENV BASH_ENV)}; +``` + +### Untainting Pattern + +```perl +use v5.36; + +# Good: Validate and untaint with a specific regex +sub untaint_username($input) { + if ($input =~ /^([a-zA-Z0-9_]{3,30})$/) { + return $1; # $1 is untainted + } + die "Invalid username: must be 3-30 alphanumeric characters\n"; +} + +# Good: Validate and untaint a file path +sub untaint_filename($input) { + if ($input =~ m{^([a-zA-Z0-9._-]+)$}) { + return $1; + } + die "Invalid filename: contains unsafe characters\n"; +} + +# Bad: Overly permissive untainting (defeats the purpose) +sub bad_untaint($input) { + $input =~ /^(.*)$/s; + return $1; # Accepts ANYTHING — pointless +} +``` + +## Input Validation + +### Allowlist Over Blocklist + +```perl +use v5.36; + +# Good: Allowlist — define exactly what's permitted +sub validate_sort_field($field) { + my %allowed = map { $_ => 1 } qw(name email created_at updated_at); + die "Invalid sort field: $field\n" unless $allowed{$field}; + return $field; +} + +# Good: Validate with specific patterns +sub validate_email($email) { + if ($email =~ /^([a-zA-Z0-9._%+-]+\@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})$/) { + return $1; + } + die "Invalid email address\n"; +} + +sub validate_integer($input) { + if ($input =~ /^(-?\d{1,10})$/) { + return $1 + 0; # Coerce to number + } + die "Invalid integer\n"; +} + +# Bad: Blocklist — always incomplete +sub bad_validate($input) { + die "Invalid" if $input =~ /[<>"';&|]/; # Misses encoded attacks + return $input; +} +``` + +### Length Constraints + +```perl +use v5.36; + +sub validate_comment($text) { + die "Comment is required\n" unless length($text) > 0;
die "Comment exceeds 10000 chars\n" if length($text) > 10_000; + return $text; +} +``` + +## Safe Regular Expressions + +### ReDoS Prevention + +Catastrophic backtracking occurs with nested quantifiers on overlapping patterns. + +```perl +use v5.36; + +# Bad: Vulnerable to ReDoS (exponential backtracking) +my $bad_re = qr/^(a+)+$/; # Nested quantifiers +my $bad_re2 = qr/^([a-zA-Z]+)*$/; # Nested quantifiers on class +my $bad_re3 = qr/^(.*?,){10,}$/; # Repeated greedy/lazy combo + +# Good: Rewrite without nesting +my $good_re = qr/^a+$/; # Single quantifier +my $good_re2 = qr/^[a-zA-Z]+$/; # Single quantifier on class + +# Good: Use possessive quantifiers or atomic groups to prevent backtracking +my $safe_re = qr/^[a-zA-Z]++$/; # Possessive (5.10+) +my $safe_re2 = qr/^(?>a+)$/; # Atomic group + +# Good: Enforce timeout on untrusted patterns +use POSIX qw(alarm); +sub safe_match($string, $pattern, $timeout = 2) { + my $matched; + eval { + local $SIG{ALRM} = sub { die "Regex timeout\n" }; + alarm($timeout); + $matched = $string =~ $pattern; + alarm(0); + }; + alarm(0); + die $@ if $@; + return $matched; +} +``` + +## Safe File Operations + +### Three-Argument Open + +```perl +use v5.36; + +# Good: Three-arg open, lexical filehandle, check return +sub read_file($path) { + open my $fh, '<:encoding(UTF-8)', $path + or die "Cannot open '$path': $!\n"; + local $/; + my $content = <$fh>; + close $fh; + return $content; +} + +# Bad: Two-arg open with user data (command injection) +sub bad_read($path) { + open my $fh, $path; # If $path = "|rm -rf /", runs command! 
+ open my $fh, "< $path"; # Shell metacharacter injection +} +``` + +### TOCTOU Prevention and Path Traversal + +```perl +use v5.36; +use Fcntl qw(:DEFAULT :flock); +use File::Spec; +use Cwd qw(realpath); + +# Atomic file creation +sub create_file_safe($path) { + sysopen(my $fh, $path, O_WRONLY | O_CREAT | O_EXCL, 0600) + or die "Cannot create '$path': $!\n"; + return $fh; +} + +# Validate path stays within allowed directory +sub safe_path($base_dir, $user_path) { + my $real = realpath(File::Spec->catfile($base_dir, $user_path)) + // die "Path does not exist\n"; + my $base_real = realpath($base_dir) + // die "Base dir does not exist\n"; + die "Path traversal blocked\n" unless $real =~ /^\Q$base_real\E/; + return $real; +} +``` + +Use `File::Temp` for temporary files (`tempfile(UNLINK => 1)`) and `flock(LOCK_EX)` to prevent race conditions. + +## Safe Process Execution + +### List-Form system and exec + +```perl +use v5.36; + +# Good: List form — no shell interpolation +sub run_command(@cmd) { + system(@cmd) == 0 + or die "Command failed: @cmd\n"; +} + +run_command('grep', '-r', $user_pattern, '/var/log/app/'); + +# Good: Capture output safely with IPC::Run3 +use IPC::Run3; +sub capture_output(@cmd) { + my ($stdout, $stderr); + run3(\@cmd, \undef, \$stdout, \$stderr); + if ($?) { + die "Command failed (exit $?): $stderr\n"; + } + return $stdout; +} + +# Bad: String form — shell injection! +sub bad_search($pattern) { + system("grep -r '$pattern' /var/log/app/"); # If $pattern = "'; rm -rf / #" +} + +# Bad: Backticks with interpolation +my $output = `ls $user_dir`; # Shell injection risk +``` + +Also use `Capture::Tiny` for capturing stdout/stderr from external commands safely. 
+ +## SQL Injection Prevention + +### DBI Placeholders + +```perl +use v5.36; +use DBI; + +my $dbh = DBI->connect($dsn, $user, $pass, { + RaiseError => 1, + PrintError => 0, + AutoCommit => 1, +}); + +# Good: Parameterized queries — always use placeholders +sub find_user($dbh, $email) { + my $sth = $dbh->prepare('SELECT * FROM users WHERE email = ?'); + $sth->execute($email); + return $sth->fetchrow_hashref; +} + +sub search_users($dbh, $name, $status) { + my $sth = $dbh->prepare( + 'SELECT * FROM users WHERE name LIKE ? AND status = ? ORDER BY name' + ); + $sth->execute("%$name%", $status); + return $sth->fetchall_arrayref({}); +} + +# Bad: String interpolation in SQL (SQLi vulnerability!) +sub bad_find($dbh, $email) { + my $sth = $dbh->prepare("SELECT * FROM users WHERE email = '$email'"); + # If $email = "' OR 1=1 --", returns all users + $sth->execute; + return $sth->fetchrow_hashref; +} +``` + +### Dynamic Column Allowlists + +```perl +use v5.36; + +# Good: Validate column names against an allowlist +sub order_by($dbh, $column, $direction) { + my %allowed_cols = map { $_ => 1 } qw(name email created_at); + my %allowed_dirs = map { $_ => 1 } qw(ASC DESC); + + die "Invalid column: $column\n" unless $allowed_cols{$column}; + die "Invalid direction: $direction\n" unless $allowed_dirs{uc $direction}; + + my $sth = $dbh->prepare("SELECT * FROM users ORDER BY $column $direction"); + $sth->execute; + return $sth->fetchall_arrayref({}); +} + +# Bad: Directly interpolating user-chosen column +sub bad_order($dbh, $column) { + $dbh->prepare("SELECT * FROM users ORDER BY $column"); # SQLi! 
+} +``` + +### DBIx::Class (ORM Safety) + +```perl +use v5.36; + +# DBIx::Class generates safe parameterized queries +my @users = $schema->resultset('User')->search({ + status => 'active', + email => { -like => '%@example.com' }, +}, { + order_by => { -asc => 'name' }, + rows => 50, +}); +``` + +## Web Security + +### XSS Prevention + +```perl +use v5.36; +use HTML::Entities qw(encode_entities); +use URI::Escape qw(uri_escape_utf8); + +# Good: Encode output for HTML context +sub safe_html($user_input) { + return encode_entities($user_input); +} + +# Good: Encode for URL context +sub safe_url_param($value) { + return uri_escape_utf8($value); +} + +# Good: Encode for JSON context +use JSON::MaybeXS qw(encode_json); +sub safe_json($data) { + return encode_json($data); # Handles escaping +} + +# Template auto-escaping (Mojolicious) +# <%= $user_input %> — auto-escaped (safe) +# <%== $raw_html %> — raw output (dangerous, use only for trusted content) + +# Template auto-escaping (Template Toolkit) +# [% user_input | html %] — explicit HTML encoding + +# Bad: Raw output in HTML +sub bad_html($input) { + print "
<p>$input</p>
"; # XSS if $input contains