mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-05-15 21:33:04 +08:00
fix: integrate recent hook and docs PRs (#1905)
Integrates useful changes from #1882, #1884, #1889, #1893, #1898, #1899, and #1903:

- fix rule install docs to preserve language directories
- correct Ruby security command examples
- harden dev-server hook command-substitution parsing
- add Prisma patterns skill and catalog/package surfaces
- allow first-time protected config creation while blocking existing configs
- read cost metrics from Stop hook transcripts
- emit suggest-compact additionalContext on stdout

Co-authored-by: Jamkris <dltmdgus1412@gmail.com>
Co-authored-by: Levi-Evan <levishantz@gmail.com>
Co-authored-by: gaurav0107 <gauravdubey0107@gmail.com>
Co-authored-by: richm-spp <richard.millar@salarypackagingplus.com.au>
Co-authored-by: zomia <zomians@outlook.jp>
Co-authored-by: donghyeun02 <donghyeun02@gmail.com>
This commit is contained in:
@@ -7,12 +7,13 @@
|
||||
* the actual code. This hook steers the agent back to fixing the source.
|
||||
*
|
||||
* Exit codes:
|
||||
* 0 = allow (not a config file)
|
||||
* 2 = block (config file modification attempted)
|
||||
* 0 = allow (not a config file, or first-time creation of one)
|
||||
* 2 = block (existing config file modification attempted)
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
@@ -58,7 +59,7 @@ const PROTECTED_FILES = new Set([
|
||||
'.stylelintrc.yml',
|
||||
'.markdownlint.json',
|
||||
'.markdownlint.yaml',
|
||||
'.markdownlintrc',
|
||||
'.markdownlintrc'
|
||||
]);
|
||||
|
||||
function parseInput(inputOrRaw) {
|
||||
@@ -94,13 +95,41 @@ function run(inputOrRaw, options = {}) {
|
||||
|
||||
const basename = path.basename(filePath);
|
||||
if (PROTECTED_FILES.has(basename)) {
|
||||
// Allow first-time creation — there's no existing config to weaken.
|
||||
// The hook's purpose is blocking modifications; writing a brand-new
|
||||
// config file in a project that has none is a legitimate bootstrap
|
||||
// path (e.g. scaffolding ESLint into a fresh repo).
|
||||
//
|
||||
// Fail closed on any stat error other than ENOENT. Use lstatSync so a
|
||||
// symlink at the protected path is treated as present even if its target
|
||||
// is missing — a dangling symlink at e.g. .eslintrc.js still represents
|
||||
// an existing config entry that an agent should not silently replace.
|
||||
// fs.existsSync would swallow EACCES/EPERM as false; lstatSync exposes
|
||||
// the error code so we can treat only genuine "path not found" (ENOENT)
|
||||
// as absent.
|
||||
let exists = true;
|
||||
try {
|
||||
fs.lstatSync(filePath);
|
||||
// lstat succeeded — something (file, dir, or symlink) exists here.
|
||||
} catch (err) {
|
||||
if (err && err.code === 'ENOENT') {
|
||||
exists = false;
|
||||
}
|
||||
// Any other error (EACCES, EPERM, ELOOP, etc.) leaves exists=true
|
||||
// so the guard is never silently weakened.
|
||||
}
|
||||
|
||||
if (!exists) {
|
||||
return { exitCode: 0 };
|
||||
}
|
||||
|
||||
return {
|
||||
exitCode: 2,
|
||||
stderr:
|
||||
`BLOCKED: Modifying ${basename} is not allowed. ` +
|
||||
'Fix the source code to satisfy linter/formatter rules instead of ' +
|
||||
'weakening the config. If this is a legitimate config change, ' +
|
||||
'disable the config-protection hook temporarily.',
|
||||
'disable the config-protection hook temporarily.'
|
||||
};
|
||||
}
|
||||
|
||||
@@ -125,7 +154,7 @@ process.stdin.on('data', chunk => {
|
||||
process.stdin.on('end', () => {
|
||||
const result = run(raw, {
|
||||
truncated,
|
||||
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN,
|
||||
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN
|
||||
});
|
||||
|
||||
if (result.stderr) {
|
||||
|
||||
@@ -1,63 +1,157 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Cost Tracker Hook
|
||||
* Cost Tracker Hook (v2)
|
||||
*
|
||||
* Appends lightweight session usage metrics to ~/.claude/metrics/costs.jsonl.
|
||||
* Reads transcript_path from Stop hook stdin, sums usage across all
|
||||
* assistant turns in the session JSONL, and appends one row to
|
||||
* ~/.claude/metrics/costs.jsonl.
|
||||
*
|
||||
* Stop hook stdin payload: { session_id, transcript_path, cwd, hook_event_name, ... }
|
||||
* The Stop payload does NOT include `usage` or `model` directly. The previous
|
||||
* version of this hook expected those fields and silently produced zero-filled
|
||||
* rows (verified: 2,340 rows captured with 0.0% non-zero token rate over 52
|
||||
* days). The fix is to read the transcript file Claude Code already passes us.
|
||||
*
|
||||
* JSONL assistant entry shape (per Claude Code):
|
||||
* { type: "assistant", message: { model, usage: { input_tokens, output_tokens,
|
||||
* cache_creation_input_tokens, cache_read_input_tokens } } }
|
||||
*
|
||||
* Cumulative behavior: Stop fires per assistant response, not per session.
|
||||
* Each row therefore represents the cumulative session total up to that point.
|
||||
* To get per-session cost, take the last row per session_id. To get per-day
|
||||
* spend, aggregate.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { ensureDir, appendFile, getClaudeDir } = require('../lib/utils');
|
||||
const { estimateCost } = require('../lib/cost-estimate');
|
||||
const { sanitizeSessionId } = require('../lib/session-bridge');
|
||||
|
||||
// (stale duplicate removed — MAX_STDIN and the `raw` stdin accumulator are
// declared once, immediately before the stdin handlers below)
|
||||
// Approximate per-1M-token billing rates (USD).
// Cache creation: 1.25x input rate. Cache read: 0.1x input rate.
const RATE_TABLE = {
  haiku: { in: 0.80, out: 4.0, cacheWrite: 1.00, cacheRead: 0.08 },
  sonnet: { in: 3.00, out: 15.0, cacheWrite: 3.75, cacheRead: 0.30 },
  opus: { in: 15.00, out: 75.0, cacheWrite: 18.75, cacheRead: 1.50 }
};

/**
 * Map a model identifier to its billing-rate row.
 *
 * Matching is a case-insensitive substring check so full model ids
 * (e.g. "claude-haiku-...") resolve correctly. Unknown or missing models
 * fall back to the sonnet rates.
 *
 * @param {string} model - Model id from the transcript (may be falsy).
 * @returns {{in: number, out: number, cacheWrite: number, cacheRead: number}}
 */
function getRates(model) {
  const m = String(model || '').toLowerCase();
  if (m.includes('haiku')) return RATE_TABLE.haiku;
  if (m.includes('opus')) return RATE_TABLE.opus;
  return RATE_TABLE.sonnet;
}
|
||||
|
||||
/**
 * Coerce a value to a finite number; anything non-numeric (NaN, Infinity,
 * undefined, objects) becomes 0 so the token sums stay well-defined.
 *
 * @param {*} v
 * @returns {number}
 */
function toNumber(v) {
  const n = Number(v);
  return Number.isFinite(n) ? n : 0;
}

/**
 * Scan the session JSONL and sum token usage across all assistant turns.
 *
 * Blank lines, malformed JSON lines, non-assistant entries, and assistant
 * entries without a usage object are skipped. The reported model is the
 * last non-"unknown" model seen in the transcript.
 *
 * @param {string} transcriptPath - Path to the session transcript JSONL.
 * @returns {{inputTokens: number, outputTokens: number,
 *            cacheWriteTokens: number, cacheReadTokens: number,
 *            model: string} | null} Totals, or null when the transcript
 *          cannot be read.
 */
function sumUsageFromTranscript(transcriptPath) {
  let content;
  try {
    content = fs.readFileSync(transcriptPath, 'utf8');
  } catch {
    // Unreadable transcript (missing, permission, etc.) — signal "no data".
    return null;
  }

  let inputTokens = 0;
  let outputTokens = 0;
  let cacheWriteTokens = 0;
  let cacheReadTokens = 0;
  let model = 'unknown';

  for (const line of content.split('\n')) {
    if (!line.trim()) continue;
    let entry;
    try { entry = JSON.parse(line); } catch { continue; }

    if (entry.type !== 'assistant') continue;
    const msg = entry.message;
    if (!msg || !msg.usage) continue;

    const u = msg.usage;
    inputTokens += toNumber(u.input_tokens);
    outputTokens += toNumber(u.output_tokens);
    cacheWriteTokens += toNumber(u.cache_creation_input_tokens);
    cacheReadTokens += toNumber(u.cache_read_input_tokens);

    if (msg.model && msg.model !== 'unknown') model = msg.model;
  }

  return { inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens, model };
}
|
||||
|
||||
// Cap buffered stdin so a pathological payload cannot grow memory without
// bound; 64 KiB is ample for a Stop-hook JSON envelope.
const MAX_STDIN = 64 * 1024;
let raw = '';

process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
  // Append at most the remaining capacity. The merged diff previously
  // carried two copies of this clamp; the second copy could append the
  // same chunk twice whenever it was smaller than the remaining room.
  if (raw.length < MAX_STDIN) {
    raw += chunk.substring(0, MAX_STDIN - raw.length);
  }
});
|
||||
|
||||
process.stdin.on('end', () => {
  try {
    const input = raw.trim() ? JSON.parse(raw) : {};

    // The Stop payload carries transcript_path, not usage/model directly;
    // token totals are summed from the session transcript below.
    const transcriptPath = (typeof input.transcript_path === 'string' && input.transcript_path)
      ? input.transcript_path
      : process.env.CLAUDE_TRANSCRIPT_PATH || null;

    const sessionId =
      sanitizeSessionId(input.session_id) ||
      sanitizeSessionId(process.env.ECC_SESSION_ID) ||
      sanitizeSessionId(process.env.CLAUDE_SESSION_ID) ||
      'default';

    let usageTotals = null;
    if (transcriptPath && fs.existsSync(transcriptPath)) {
      usageTotals = sumUsageFromTranscript(transcriptPath);
    }

    const {
      inputTokens = 0,
      outputTokens = 0,
      cacheWriteTokens = 0,
      cacheReadTokens = 0,
      model = 'unknown'
    } = usageTotals || {};

    // Dollar cost from the per-1M rate table, rounded to 6 decimal places.
    const rates = getRates(model);
    const estimatedCostUsd = Math.round((
      (inputTokens / 1e6) * rates.in +
      (outputTokens / 1e6) * rates.out +
      (cacheWriteTokens / 1e6) * rates.cacheWrite +
      (cacheReadTokens / 1e6) * rates.cacheRead
    ) * 1e6) / 1e6;

    const metricsDir = path.join(getClaudeDir(), 'metrics');
    ensureDir(metricsDir);

    // Each row is the cumulative session total up to this Stop event;
    // take the last row per session_id for the per-session figure.
    const row = {
      timestamp: new Date().toISOString(),
      session_id: sessionId,
      transcript_path: transcriptPath || '',
      model,
      input_tokens: inputTokens,
      output_tokens: outputTokens,
      cache_write_tokens: cacheWriteTokens,
      cache_read_tokens: cacheReadTokens,
      estimated_cost_usd: estimatedCostUsd
    };

    appendFile(path.join(metricsDir, 'costs.jsonl'), `${JSON.stringify(row)}\n`);
  } catch {
    // Non-blocking — never fail the Stop hook.
  }

  // Pass stdin through (required by ECC hook convention).
  process.stdout.write(raw);
});
|
||||
|
||||
@@ -4,6 +4,10 @@
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
const path = require('path');
|
||||
const { splitShellSegments } = require('../lib/shell-split');
|
||||
const {
|
||||
extractCommandSubstitutions,
|
||||
extractSubshellGroups
|
||||
} = require('../lib/shell-substitution');
|
||||
|
||||
const DEV_COMMAND_WORDS = new Set([
|
||||
'npm',
|
||||
@@ -123,6 +127,8 @@ function getLeadingCommandWord(segment) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (token === '{' || token === '}') continue;
|
||||
|
||||
if (/^[A-Za-z_][A-Za-z0-9_]*=.*/.test(token)) continue;
|
||||
|
||||
const normalizedToken = normalizeCommandWord(token);
|
||||
@@ -154,23 +160,55 @@ process.stdin.on('data', chunk => {
|
||||
}
|
||||
});
|
||||
|
||||
// tmux launch wrappers are allowed — running a dev server inside tmux keeps
// its logs accessible to the agent.
const TMUX_LAUNCHER = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
// Dev-server invocations: `npm run dev`, and pnpm/yarn/bun `dev` with or
// without the optional `run` subcommand.
const DEV_PATTERN = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn(?:\s+run)?\s+dev|bun(?:\s+run)?\s+dev)\b/;
|
||||
|
||||
/**
 * Collect every command-line segment we should evaluate. Returns the
 * top-level segments first, then segments harvested from `$(...)` /
 * backtick command substitutions and plain `(...)` subshell groups,
 * recursively.
 *
 * Without this expansion the leading-command and dev-pattern checks only
 * see the outermost command, so wrappers like `$(npm run dev)` and
 * `(npm run dev)` (which still spawn a dev server) sneak past.
 *
 * @param {string} cmd - Full shell command from the tool input.
 * @returns {string[]} Segments to run the dev-server checks against.
 */
function collectCheckSegments(cmd) {
  const segments = [...splitShellSegments(cmd)];
  const queue = [cmd];
  // Guards against re-expanding identical bodies (and thus unbounded work
  // on self-similar input).
  const seen = new Set();

  while (queue.length) {
    const current = queue.shift();
    if (seen.has(current)) continue;
    seen.add(current);

    for (const body of extractCommandSubstitutions(current)) {
      for (const seg of splitShellSegments(body)) segments.push(seg);
      queue.push(body);
    }
    for (const body of extractSubshellGroups(current)) {
      for (const seg of splitShellSegments(body)) segments.push(seg);
      queue.push(body);
    }
  }

  return segments;
}
|
||||
|
||||
/**
 * True when a single shell segment launches a dev server outside tmux.
 *
 * A segment is blocked only when its leading command word is a known dev
 * runner AND it matches the dev-server pattern AND it is not a tmux launch
 * wrapper (tmux keeps server logs accessible, so those are allowed).
 *
 * @param {string} segment - One command segment.
 * @returns {boolean}
 */
function isBlockedDevSegment(segment) {
  const commandWord = getLeadingCommandWord(segment);
  if (!commandWord || !DEV_COMMAND_WORDS.has(commandWord)) return false;
  if (TMUX_LAUNCHER.test(segment)) return false;
  return DEV_PATTERN.test(segment);
}
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
try {
|
||||
const input = JSON.parse(raw);
|
||||
const cmd = String(input.tool_input?.command || '');
|
||||
|
||||
if (process.platform !== 'win32') {
|
||||
const segments = splitShellSegments(cmd);
|
||||
const tmuxLauncher = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
|
||||
const devPattern = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn\s+dev|bun\s+run\s+dev)\b/;
|
||||
|
||||
const hasBlockedDev = segments.some(segment => {
|
||||
const commandWord = getLeadingCommandWord(segment);
|
||||
if (!commandWord || !DEV_COMMAND_WORDS.has(commandWord)) {
|
||||
return false;
|
||||
}
|
||||
return devPattern.test(segment) && !tmuxLauncher.test(segment);
|
||||
});
|
||||
const segments = collectCheckSegments(cmd);
|
||||
const hasBlockedDev = segments.some(isBlockedDevSegment);
|
||||
|
||||
if (hasBlockedDev) {
|
||||
console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');
|
||||
|
||||
@@ -19,7 +19,8 @@ const {
|
||||
getTempDir,
|
||||
writeFile,
|
||||
readStdinJson,
|
||||
log
|
||||
log,
|
||||
output
|
||||
} = require('../lib/utils');
|
||||
|
||||
async function resolveSessionId() {
|
||||
@@ -77,14 +78,25 @@ async function main() {
|
||||
writeFile(counterFile, String(count));
|
||||
}
|
||||
|
||||
// Suggest compact after threshold tool calls
|
||||
// Suggest compact after threshold tool calls.
|
||||
//
|
||||
// log() writes to stderr (debug log). Per the Claude Code hooks guide,
|
||||
// non-blocking PreToolUse stderr (exit 0) is only written to the debug log;
|
||||
// it does not reach the model. To inject a user-facing suggestion without
|
||||
// blocking the tool call, emit structured JSON to stdout with
|
||||
// hookSpecificOutput.additionalContext — the documented mechanism for
|
||||
// PreToolUse hooks to add context to the next model turn.
|
||||
if (count === threshold) {
|
||||
log(`[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`);
|
||||
const msg = `[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`;
|
||||
log(msg);
|
||||
output({ hookSpecificOutput: { hookEventName: 'PreToolUse', additionalContext: msg } });
|
||||
}
|
||||
|
||||
// Suggest at regular intervals after threshold (every 25 calls from threshold)
|
||||
if (count > threshold && (count - threshold) % 25 === 0) {
|
||||
log(`[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`);
|
||||
const msg = `[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`;
|
||||
log(msg);
|
||||
output({ hookSpecificOutput: { hookEventName: 'PreToolUse', additionalContext: msg } });
|
||||
}
|
||||
|
||||
process.exit(0);
|
||||
|
||||
Reference in New Issue
Block a user