Compare commits

..

8 Commits

Author SHA1 Message Date
Affaan Mustafa
45a9bcf295 test: lift harness manifest branch coverage 2026-04-29 18:38:48 -04:00
Affaan Mustafa
ebf0d4322b test: support windows pre-bash linter shims 2026-04-29 18:36:33 -04:00
Affaan Mustafa
015b00b8fc test: stabilize mcp health crash probes 2026-04-29 18:29:02 -04:00
Affaan Mustafa
51511461f6 test: cover pre-bash commit quality edges 2026-04-29 18:28:56 -04:00
Affaan Mustafa
aaaf52fb1e test: cover session adapter edge cases 2026-04-29 18:21:31 -04:00
Affaan Mustafa
33edfd3bb3 test: cover session activity tracker edge paths 2026-04-29 18:15:51 -04:00
Affaan Mustafa
f92dc544c4 test: cover mcp health edge paths 2026-04-29 18:08:45 -04:00
Affaan Mustafa
1c2d5dd389 fix: fail open on insaits monitor errors 2026-04-29 18:03:33 -04:00
10 changed files with 1790 additions and 32 deletions

View File

@@ -98,6 +98,16 @@ process.stdin.on('end', () => {
process.exit(0); process.exit(0);
} }
// The monitor only uses 0 (pass) and 2 (block). Other statuses usually
// mean Python launcher/dependency/runtime failure, so keep the hook fail-open.
if (result.status !== 0 && result.status !== 2) {
const detail = (result.stderr || result.stdout || '').trim();
const suffix = detail ? `: ${detail}` : '';
process.stderr.write(`[InsAIts] Security monitor exited with status ${result.status}${suffix}\n`);
process.stdout.write(raw);
process.exit(0);
}
if (result.stdout) { if (result.stdout) {
process.stdout.write(result.stdout); process.stdout.write(result.stdout);
} else if (result.status === 0) { } else if (result.status === 0) {

View File

@@ -166,6 +166,29 @@ function runTests() {
} }
})) passed++; else failed++; })) passed++; else failed++;
if (test('enabled monitor unexpected failure fails open with warning and raw stdin', () => {
const tempDir = createTempDir();
try {
writeFakePython(path.join(tempDir, 'bin'));
const result = run({
input: 'raw-input',
env: {
ECC_ENABLE_INSAITS: '1',
FAKE_INSAITS_MODE: 'error',
PATH: path.join(tempDir, 'bin'),
},
});
assert.strictEqual(result.status, 0);
assert.strictEqual(result.stdout, 'raw-input');
assert.ok(result.stderr.includes('Security monitor exited with status 1'));
assert.ok(result.stderr.includes('spawned but failed'));
} finally {
cleanup(tempDir);
}
})) passed++; else failed++;
if (test('missing Python fails open with warning and raw stdin', () => { if (test('missing Python fails open with warning and raw stdin', () => {
const result = run({ const result = run({
input: 'raw-input', input: 'raw-input',
@@ -177,7 +200,10 @@ function runTests() {
assert.strictEqual(result.status, 0); assert.strictEqual(result.status, 0);
assert.strictEqual(result.stdout, 'raw-input'); assert.strictEqual(result.stdout, 'raw-input');
assert.ok(result.stderr.includes('python3/python not found')); assert.ok(
result.stderr.includes('python3/python not found')
|| result.stderr.includes('Security monitor exited with status')
);
})) passed++; else failed++; })) passed++; else failed++;
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);

View File

@@ -61,15 +61,29 @@ function createCommandConfig(scriptPath) {
}; };
} }
function runHook(input, env = {}) { function buildHookEnv(env = {}) {
const merged = {
...process.env,
ECC_HOOK_PROFILE: 'standard'
};
for (const [key, value] of Object.entries(env)) {
if (value === null || value === undefined) {
delete merged[key];
} else {
merged[key] = value;
}
}
return merged;
}
function runHook(input, env = {}, options = {}) {
const result = spawnSync('node', [script], { const result = spawnSync('node', [script], {
input: JSON.stringify(input), input: JSON.stringify(input),
encoding: 'utf8', encoding: 'utf8',
env: { cwd: options.cwd || process.cwd(),
...process.env, env: buildHookEnv(env),
ECC_HOOK_PROFILE: 'standard',
...env
},
timeout: 15000, timeout: 15000,
stdio: ['pipe', 'pipe', 'pipe'] stdio: ['pipe', 'pipe', 'pipe']
}); });
@@ -81,15 +95,12 @@ function runHook(input, env = {}) {
}; };
} }
function runRawHook(rawInput, env = {}) { function runRawHook(rawInput, env = {}, options = {}) {
const result = spawnSync('node', [script], { const result = spawnSync('node', [script], {
input: rawInput, input: rawInput,
encoding: 'utf8', encoding: 'utf8',
env: { cwd: options.cwd || process.cwd(),
...process.env, env: buildHookEnv(env),
ECC_HOOK_PROFILE: 'standard',
...env
},
timeout: 15000, timeout: 15000,
stdio: ['pipe', 'pipe', 'pipe'] stdio: ['pipe', 'pipe', 'pipe']
}); });
@@ -173,6 +184,192 @@ async function runTests() {
assert.ok(result.stderr.includes('Hook input exceeded 512 bytes'), `Expected size warning, got: ${result.stderr}`); assert.ok(result.stderr.includes('Hook input exceeded 512 bytes'), `Expected size warning, got: ${result.stderr}`);
assert.ok(/blocking search/i.test(result.stderr), `Expected blocking message, got: ${result.stderr}`); assert.ok(/blocking search/i.test(result.stderr), `Expected blocking message, got: ${result.stderr}`);
})) passed++; else failed++; })) passed++; else failed++;
if (test('allows truncated MCP hook input when fail-open mode is enabled', () => {
const rawInput = JSON.stringify({ tool_name: 'mcp__flaky__search', tool_input: {} });
const result = runRawHook(rawInput, {
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_HOOK_INPUT_TRUNCATED: 'true',
ECC_HOOK_INPUT_MAX_BYTES: '256',
ECC_MCP_HEALTH_FAIL_OPEN: 'yes'
});
assert.strictEqual(result.code, 0, 'Expected fail-open mode to allow truncated MCP input');
assert.strictEqual(result.stdout, rawInput, 'Expected raw input passthrough on stdout');
assert.ok(result.stderr.includes('Hook input exceeded 256 bytes'), `Expected size warning, got: ${result.stderr}`);
assert.ok(/fail-open mode is enabled/i.test(result.stderr), `Expected fail-open log, got: ${result.stderr}`);
})) passed++; else failed++;
if (await asyncTest('uses default cwd config path and default home state path', async () => {
const tempDir = createTempDir();
const homeDir = path.join(tempDir, 'home');
const configDir = path.join(tempDir, '.claude');
const configPath = path.join(configDir, 'settings.json');
const expectedStatePath = path.join(homeDir, '.claude', 'mcp-health-cache.json');
const serverScript = path.join(tempDir, 'default-path-server.js');
try {
fs.mkdirSync(configDir, { recursive: true });
fs.mkdirSync(homeDir, { recursive: true });
fs.writeFileSync(serverScript, "setInterval(() => {}, 1000);\n");
writeConfig(configPath, {
mcpServers: {
cwddefault: createCommandConfig(serverScript)
}
});
const input = { tool_name: 'mcp__cwddefault__list', tool_input: {} };
const result = runHook(
input,
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: null,
ECC_MCP_HEALTH_STATE_PATH: null,
ECC_MCP_HEALTH_TIMEOUT_MS: '100',
HOME: homeDir,
USERPROFILE: homeDir
},
{ cwd: tempDir }
);
assert.strictEqual(result.code, 0, `Expected default-path server to pass, got ${result.code}: ${result.stderr}`);
assert.strictEqual(result.stdout.trim(), JSON.stringify(input), 'Expected original JSON on stdout');
const state = readState(expectedStatePath);
assert.strictEqual(state.servers.cwddefault.status, 'healthy', 'Expected default home state path to be used');
assert.strictEqual(
fs.realpathSync(state.servers.cwddefault.source),
fs.realpathSync(configPath),
'Expected cwd .claude/settings.json config source'
);
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (test('uses cached healthy and unhealthy states without probing configs', () => {
const tempDir = createTempDir();
const now = Date.now();
const healthyStatePath = path.join(tempDir, 'healthy-state.json');
const unhealthyStatePath = path.join(tempDir, 'unhealthy-state.json');
try {
fs.writeFileSync(healthyStatePath, JSON.stringify({
version: 1,
servers: {
cached: {
status: 'healthy',
checkedAt: now,
expiresAt: now + 60000,
failureCount: 0,
nextRetryAt: now
}
}
}));
fs.writeFileSync(unhealthyStatePath, JSON.stringify({
version: 1,
servers: {
blocked: {
status: 'unhealthy',
checkedAt: now,
expiresAt: now,
failureCount: 1,
nextRetryAt: now + 60000,
lastError: 'cached outage'
}
}
}));
const healthy = runHook(
{ tool_name: 'mcp__cached__list', tool_input: {} },
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: path.join(tempDir, 'missing.json'),
ECC_MCP_HEALTH_STATE_PATH: healthyStatePath
}
);
const unhealthy = runHook(
{ tool_name: 'mcp__blocked__query', tool_input: {} },
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: path.join(tempDir, 'missing.json'),
ECC_MCP_HEALTH_STATE_PATH: unhealthyStatePath
}
);
assert.strictEqual(healthy.code, 0, 'Expected cached healthy server to pass without config lookup');
assert.strictEqual(healthy.stderr, '', 'Expected cached healthy server to skip logging');
assert.strictEqual(unhealthy.code, 2, 'Expected cached unhealthy server to block before retry time');
assert.ok(unhealthy.stderr.includes('marked unhealthy until'), `Expected cached unhealthy log, got: ${unhealthy.stderr}`);
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (test('ignores malformed state files and allows missing MCP configs', () => {
const tempDir = createTempDir();
const statePath = path.join(tempDir, 'malformed-state.json');
try {
fs.writeFileSync(statePath, '[]');
const result = runHook(
{
tool_name: 'Invoke',
server: 'ghost',
tool: 'lookup',
tool_input: {}
},
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: path.join(tempDir, 'missing.json'),
ECC_MCP_HEALTH_STATE_PATH: statePath
}
);
assert.strictEqual(result.code, 0, 'Expected missing config to be non-blocking');
assert.ok(result.stderr.includes('No MCP config found for ghost'), `Expected missing config log, got: ${result.stderr}`);
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (await asyncTest('supports explicit tool_input server targets and mcp_servers config aliases', async () => {
const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json');
const statePath = path.join(tempDir, 'mcp-health.json');
const serverScript = path.join(tempDir, 'alias-server.js');
try {
fs.writeFileSync(serverScript, "setInterval(() => {}, 1000);\n");
writeConfig(configPath, {
mcp_servers: {
alias: createCommandConfig(serverScript)
}
});
const input = {
tool_name: 'GenericMcpTool',
tool_input: {
connector: 'alias',
mcp_tool: 'lookup'
}
};
const result = runHook(input, {
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_TIMEOUT_MS: '100'
});
assert.strictEqual(result.code, 0, `Expected explicit MCP target to pass, got ${result.code}: ${result.stderr}`);
const state = readState(statePath);
assert.strictEqual(state.servers.alias.status, 'healthy', 'Expected alias server to be marked healthy');
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (await asyncTest('marks healthy command MCP servers and allows the tool call', async () => { if (await asyncTest('marks healthy command MCP servers and allows the tool call', async () => {
const tempDir = createTempDir(); const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json'); const configPath = path.join(tempDir, 'claude.json');
@@ -225,7 +422,7 @@ async function runTests() {
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse', CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath, ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath, ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_TIMEOUT_MS: '100' ECC_MCP_HEALTH_TIMEOUT_MS: '1000'
} }
); );
@@ -261,7 +458,7 @@ async function runTests() {
ECC_MCP_CONFIG_PATH: configPath, ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath, ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_FAIL_OPEN: '1', ECC_MCP_HEALTH_FAIL_OPEN: '1',
ECC_MCP_HEALTH_TIMEOUT_MS: '100' ECC_MCP_HEALTH_TIMEOUT_MS: '1000'
} }
); );
@@ -272,6 +469,151 @@ async function runTests() {
} }
})) passed++; else failed++; })) passed++; else failed++;
if (await asyncTest('blocks unsupported MCP configs and command spawn failures', async () => {
const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json');
const statePath = path.join(tempDir, 'mcp-health.json');
try {
writeConfig(configPath, {
mcpServers: {
unsupported: {},
missingcmd: {
command: path.join(tempDir, 'missing-mcp-server')
}
}
});
const unsupported = runHook(
{ tool_name: 'mcp__unsupported__search', tool_input: {} },
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_TIMEOUT_MS: '1000'
}
);
const missingCommand = runHook(
{ tool_name: 'mcp__missingcmd__search', tool_input: {} },
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_TIMEOUT_MS: '1000'
}
);
assert.strictEqual(unsupported.code, 2, 'Expected unsupported config to block');
assert.ok(unsupported.stderr.includes('unsupported MCP server config'), `Expected unsupported reason, got: ${unsupported.stderr}`);
assert.strictEqual(missingCommand.code, 2, 'Expected missing command to block');
assert.ok(/ENOENT|spawn/i.test(missingCommand.stderr), `Expected spawn failure reason, got: ${missingCommand.stderr}`);
const state = readState(statePath);
assert.strictEqual(state.servers.unsupported.status, 'unhealthy', 'Expected unsupported server state');
assert.strictEqual(state.servers.missingcmd.status, 'unhealthy', 'Expected missing command server state');
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (await asyncTest('includes command stderr and config env in unhealthy probe reasons', async () => {
const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json');
const statePath = path.join(tempDir, 'mcp-health.json');
const serverScript = path.join(tempDir, 'stderr-server.js');
try {
fs.writeFileSync(
serverScript,
"console.error(`probe failed with ${process.env.ECC_MCP_TEST_MARKER}`); process.exit(1);\n"
);
writeConfig(configPath, {
mcpServers: {
stderrprobe: {
command: process.execPath,
args: [serverScript],
env: {
ECC_MCP_TEST_MARKER: 'marker-from-config'
}
}
}
});
const result = runHook(
{ tool_name: 'mcp__stderrprobe__search', tool_input: {} },
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_TIMEOUT_MS: '100'
}
);
assert.strictEqual(result.code, 2, 'Expected stderr probe failure to block');
assert.ok(result.stderr.includes('marker-from-config'), `Expected command stderr in reason, got: ${result.stderr}`);
const state = readState(statePath);
assert.ok(state.servers.stderrprobe.lastError.includes('marker-from-config'), 'Expected stderr reason in state');
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (await asyncTest('records reconnect reprobe failures for previously unhealthy servers', async () => {
const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json');
const statePath = path.join(tempDir, 'mcp-health.json');
const serverScript = path.join(tempDir, 'still-down-server.js');
const reconnectScript = path.join(tempDir, 'noop-reconnect.js');
const now = Date.now();
try {
fs.writeFileSync(serverScript, "console.error('503 Service Unavailable'); process.exit(1);\n");
fs.writeFileSync(reconnectScript, "process.exit(0);\n");
fs.writeFileSync(statePath, JSON.stringify({
version: 1,
servers: {
sticky: {
status: 'unhealthy',
checkedAt: now - 60000,
expiresAt: now - 60000,
failureCount: 2,
lastError: 'previous outage',
nextRetryAt: now - 1000,
lastRestoredAt: now - 120000
}
}
}));
writeConfig(configPath, {
mcpServers: {
sticky: createCommandConfig(serverScript)
}
});
const result = runHook(
{ tool_name: 'mcp__sticky__search', tool_input: {} },
{
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_RECONNECT_COMMAND: `${JSON.stringify(process.execPath)} ${JSON.stringify(reconnectScript)}`,
ECC_MCP_HEALTH_TIMEOUT_MS: '1000',
ECC_MCP_HEALTH_BACKOFF_MS: '10'
}
);
assert.strictEqual(result.code, 2, 'Expected still-unhealthy server to block');
assert.ok(result.stderr.includes('reconnect reprobe failed'), `Expected reprobe failure reason, got: ${result.stderr}`);
assert.ok(result.stderr.includes('Reconnect attempt: ok'), `Expected reconnect attempt suffix, got: ${result.stderr}`);
const state = readState(statePath);
assert.strictEqual(state.servers.sticky.failureCount, 3, 'Expected failure count to increment');
assert.strictEqual(state.servers.sticky.lastRestoredAt, now - 120000, 'Expected previous restore timestamp to survive');
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (await asyncTest('post-failure reconnect command restores server health when a reprobe succeeds', async () => { if (await asyncTest('post-failure reconnect command restores server health when a reprobe succeeds', async () => {
const tempDir = createTempDir(); const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json'); const configPath = path.join(tempDir, 'claude.json');
@@ -318,7 +660,7 @@ async function runTests() {
ECC_MCP_CONFIG_PATH: configPath, ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath, ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_RECONNECT_COMMAND: `node ${JSON.stringify(reconnectScript)}`, ECC_MCP_RECONNECT_COMMAND: `node ${JSON.stringify(reconnectScript)}`,
ECC_MCP_HEALTH_TIMEOUT_MS: '100' ECC_MCP_HEALTH_TIMEOUT_MS: '1000'
} }
); );
@@ -334,6 +676,131 @@ async function runTests() {
} }
})) passed++; else failed++; })) passed++; else failed++;
if (test('ignores post-failure events without a reconnect-worthy failure code', () => {
const tempDir = createTempDir();
const statePath = path.join(tempDir, 'mcp-health.json');
try {
const result = runHook(
{
tool_name: 'mcp__quiet__messages',
tool_input: {},
error: 'tool returned an application-level validation error'
},
{
CLAUDE_HOOK_EVENT_NAME: 'PostToolUseFailure',
ECC_MCP_HEALTH_STATE_PATH: statePath
}
);
assert.strictEqual(result.code, 0, 'Expected unmatched post-failure to remain non-blocking');
assert.strictEqual(result.stderr, '', 'Expected no logs for unmatched post-failure');
assert.strictEqual(fs.existsSync(statePath), false, 'Expected no state write for unmatched post-failure');
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (test('post-failure marks servers unhealthy and skips reconnect when no command is configured', () => {
const tempDir = createTempDir();
const statePath = path.join(tempDir, 'mcp-health.json');
try {
const result = runHook(
{
tool_name: 'mcp__noplan__messages',
tool_input: {},
tool_output: {
stderr: '403 Forbidden from upstream MCP'
}
},
{
CLAUDE_HOOK_EVENT_NAME: 'PostToolUseFailure',
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_RECONNECT_COMMAND: null
}
);
assert.strictEqual(result.code, 0, 'Expected post-failure hook to remain non-blocking');
assert.ok(result.stderr.includes('reported 403'), `Expected detected failure code log, got: ${result.stderr}`);
assert.ok(result.stderr.includes('reconnect skipped'), `Expected reconnect skipped log, got: ${result.stderr}`);
const state = readState(statePath);
assert.strictEqual(state.servers.noplan.status, 'unhealthy', 'Expected post-failure to mark server unhealthy');
assert.strictEqual(state.servers.noplan.lastFailureCode, 403, 'Expected detected status code in state');
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (test('post-failure reports failed reconnect commands', () => {
const tempDir = createTempDir();
const statePath = path.join(tempDir, 'mcp-health.json');
const reconnectScript = path.join(tempDir, 'failed-reconnect.js');
try {
fs.writeFileSync(reconnectScript, "console.error('cannot reconnect'); process.exit(7);\n");
const result = runHook(
{
tool_name: 'mcp__badreconnect__messages',
tool_input: {},
tool_response: 'service unavailable 503'
},
{
CLAUDE_HOOK_EVENT_NAME: 'PostToolUseFailure',
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_RECONNECT_COMMAND: `${JSON.stringify(process.execPath)} ${JSON.stringify(reconnectScript)}`
}
);
assert.strictEqual(result.code, 0, 'Expected reconnect failure hook to remain non-blocking');
assert.ok(result.stderr.includes('reported 503'), `Expected detected failure code log, got: ${result.stderr}`);
assert.ok(result.stderr.includes('reconnect failed: cannot reconnect'), `Expected reconnect failure reason, got: ${result.stderr}`);
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (test('post-failure expands per-server reconnect commands before follow-up config checks', () => {
const tempDir = createTempDir();
const statePath = path.join(tempDir, 'mcp-health.json');
const reconnectScript = path.join(tempDir, 'server-reconnect.js');
const markerFile = path.join(tempDir, 'server-name.txt');
try {
fs.writeFileSync(
reconnectScript,
[
"const fs = require('fs');",
"fs.writeFileSync(process.argv[2], process.argv[3]);"
].join('\n')
);
const result = runHook(
{
tool_name: 'mcp__foo-bar__messages',
tool_input: {},
message: 'transport connection reset'
},
{
CLAUDE_HOOK_EVENT_NAME: 'PostToolUseFailure',
ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_CONFIG_PATH: path.join(tempDir, 'missing.json'),
ECC_MCP_RECONNECT_COMMAND: null,
ECC_MCP_RECONNECT_FOO_BAR: `${JSON.stringify(process.execPath)} ${JSON.stringify(reconnectScript)} ${JSON.stringify(markerFile)} {server}`
}
);
assert.strictEqual(result.code, 0, 'Expected per-server reconnect hook to remain non-blocking');
assert.strictEqual(fs.readFileSync(markerFile, 'utf8'), 'foo-bar', 'Expected {server} token expansion');
assert.ok(result.stderr.includes('reported transport'), `Expected transport failure log, got: ${result.stderr}`);
assert.ok(result.stderr.includes('no config was available'), `Expected missing config follow-up log, got: ${result.stderr}`);
} finally {
cleanupTempDir(tempDir);
}
})) passed++; else failed++;
if (await asyncTest('treats HTTP 400 probe responses as healthy reachable servers', async () => { if (await asyncTest('treats HTTP 400 probe responses as healthy reachable servers', async () => {
const tempDir = createTempDir(); const tempDir = createTempDir();
const configPath = path.join(tempDir, 'claude.json'); const configPath = path.join(tempDir, 'claude.json');

View File

@@ -40,6 +40,65 @@ function inTempRepo(fn) {
} }
} }
function captureConsoleError(fn) {
  // Temporarily intercept console.error so a synchronous callback's stderr
  // output can be captured and returned alongside its return value.
  const original = console.error;
  const captured = [];
  console.error = function (...parts) {
    captured.push(parts.join(' '));
  };
  try {
    return { result: fn(), stderr: captured.join('\n') };
  } finally {
    // Always restore the real console.error, even if fn throws.
    console.error = original;
  }
}
function writeAndStage(repoDir, relativePath, content) {
  // Create the file (and any missing parent directories) inside the repo,
  // then stage it so the hook under test sees it in the git index.
  const target = path.join(repoDir, relativePath);
  const parent = path.dirname(target);
  fs.mkdirSync(parent, { recursive: true });
  fs.writeFileSync(target, content, 'utf8');
  // Best-effort staging: spawnSync does not throw on a nonzero git exit.
  spawnSync('git', ['add', relativePath], {
    cwd: repoDir,
    stdio: 'pipe',
    encoding: 'utf8'
  });
}
function executableName(name) {
  // Fake tool shims are .cmd batch files on Windows, bare scripts elsewhere.
  if (process.platform === 'win32') {
    return `${name}.cmd`;
  }
  return name;
}
function writeFakeExecutable(filePath, output, exitCode) {
  // Emit a tiny platform-appropriate script that prints `output` and exits
  // with `exitCode`, then mark it executable so PATH lookups can run it.
  let source;
  if (process.platform === 'win32') {
    source = `@echo off\r\necho ${output}\r\nexit /b ${exitCode}\r\n`;
  } else {
    source = `#!/bin/sh\necho "${output}"\nexit ${exitCode}\n`;
  }
  fs.writeFileSync(filePath, source, 'utf8');
  fs.chmodSync(filePath, 0o755);
}
function pathEnvKey() {
  // Windows may expose the PATH variable under a different casing (e.g.
  // "Path"); return the first matching key, falling back to "PATH".
  for (const key of Object.keys(process.env)) {
    if (key.toLowerCase() === 'path') {
      return key;
    }
  }
  return 'PATH';
}
function withEnv(overrides, fn) {
  // Apply environment-variable overrides for the duration of fn, then
  // restore prior values (deleting keys that did not exist beforehand).
  const saved = new Map();
  for (const [key, value] of Object.entries(overrides)) {
    saved.set(key, process.env[key]);
    process.env[key] = value;
  }
  try {
    return fn();
  } finally {
    for (const [key, prior] of saved) {
      if (typeof prior === 'string') {
        process.env[key] = prior;
      } else {
        delete process.env[key];
      }
    }
  }
}
let passed = 0; let passed = 0;
let failed = 0; let failed = 0;
@@ -77,5 +136,157 @@ if (test('evaluate inspects staged snapshot instead of newer working tree conten
}); });
})) passed++; else failed++; })) passed++; else failed++;
if (test('passes through non-commit amend malformed JSON and run wrapper paths', () => {
const readInput = JSON.stringify({ tool_input: { command: 'git status --short' } });
assert.deepStrictEqual(hook.evaluate(readInput), { output: readInput, exitCode: 0 });
const amendInput = JSON.stringify({ tool_input: { command: 'git commit --amend -m "fix: update"' } });
assert.deepStrictEqual(hook.evaluate(amendInput), { output: amendInput, exitCode: 0 });
const malformed = 'not json {{{';
const malformedResult = captureConsoleError(() => hook.run(malformed));
assert.deepStrictEqual(malformedResult.result, { stdout: malformed, exitCode: 0 });
assert.ok(malformedResult.stderr.includes('[Hook] Error:'), 'should log JSON parse errors without blocking');
})) passed++; else failed++;
if (test('allows git commit when no files are staged', () => {
inTempRepo(() => {
const input = JSON.stringify({ tool_input: { command: 'git commit -m "fix: no staged files"' } });
const { result, stderr } = captureConsoleError(() => hook.evaluate(input));
assert.strictEqual(result.output, input);
assert.strictEqual(result.exitCode, 0);
assert.ok(stderr.includes('No staged files found'), `expected no-staged warning, got: ${stderr}`);
});
})) passed++; else failed++;
if (test('allows warning-only issues while reporting console TODO and message warnings', () => {
inTempRepo(repoDir => {
writeAndStage(repoDir, 'index.js', [
'console.log("debug only");',
'// TODO: clean this up',
'// TODO: tracked in issue #123',
'// console.log("commented out");',
'* console.log("doc comment");',
'const ok = true;',
''
].join('\n'));
const input = JSON.stringify({
tool_input: {
command: 'git commit -m "fix: Uppercase subject."'
}
});
const { result, stderr } = captureConsoleError(() => hook.evaluate(input));
assert.strictEqual(result.output, input);
assert.strictEqual(result.exitCode, 0, 'warning-only issues should not block');
assert.ok(stderr.includes('WARNING Line 1'), `expected console warning, got: ${stderr}`);
assert.ok(stderr.includes('INFO Line 2'), `expected TODO info warning, got: ${stderr}`);
assert.ok(stderr.includes('Subject should start with lowercase'), `expected capitalization warning, got: ${stderr}`);
assert.ok(stderr.includes('should not end with a period'), `expected punctuation warning, got: ${stderr}`);
assert.ok(stderr.includes('Warnings found'), `expected warning summary, got: ${stderr}`);
});
})) passed++; else failed++;
if (test('reports invalid and long commit messages without blocking when files are clean', () => {
inTempRepo(repoDir => {
writeAndStage(repoDir, 'index.js', 'const clean = true;\n');
const longMessage = `Bad message ${'x'.repeat(80)}`;
const input = JSON.stringify({
tool_input: {
command: `git commit --message="${longMessage}"`
}
});
const { result, stderr } = captureConsoleError(() => hook.evaluate(input));
assert.strictEqual(result.output, input);
assert.strictEqual(result.exitCode, 0);
assert.ok(stderr.includes('does not follow conventional commit format'), `expected format warning, got: ${stderr}`);
assert.ok(stderr.includes('Commit message too long'), `expected length warning, got: ${stderr}`);
});
})) passed++; else failed++;
if (test('blocks commits with staged secret patterns across checkable files', () => {
inTempRepo(repoDir => {
writeAndStage(repoDir, 'index.js', [
"const openai = 'sk-abcdefghijklmnopqrstuvwxyz';",
"const token = 'ghp_abcdefghijklmnopqrstuvwxyzABCDEFGHIJ';",
''
].join('\n'));
writeAndStage(repoDir, 'app.py', [
'aws = "AKIAABCDEFGHIJKLMNOP"',
'api_key = "secret-value"',
''
].join('\n'));
const input = JSON.stringify({ tool_input: { command: 'git commit -m "fix: block secrets"' } });
const { result, stderr } = captureConsoleError(() => hook.evaluate(input));
assert.strictEqual(result.output, input);
assert.strictEqual(result.exitCode, 2);
assert.ok(stderr.includes('Potential OpenAI API key'), `expected OpenAI secret warning, got: ${stderr}`);
assert.ok(stderr.includes('Potential GitHub PAT'), `expected GitHub PAT warning, got: ${stderr}`);
assert.ok(stderr.includes('Potential AWS Access Key'), `expected AWS key warning, got: ${stderr}`);
assert.ok(stderr.includes('Potential API key'), `expected generic API key warning, got: ${stderr}`);
});
})) passed++; else failed++;
if (test('reports eslint pylint and golint failures from staged files', () => {
inTempRepo(repoDir => {
writeAndStage(repoDir, 'index.js', 'const lint = true;\n');
writeAndStage(repoDir, 'app.py', 'print("lint")\n');
writeAndStage(repoDir, 'main.go', 'package main\n');
const eslintPath = path.join(repoDir, 'node_modules', '.bin', executableName('eslint'));
fs.mkdirSync(path.dirname(eslintPath), { recursive: true });
writeFakeExecutable(eslintPath, 'eslint failed', 1);
const binDir = path.join(repoDir, 'fake-bin');
fs.mkdirSync(binDir, { recursive: true });
const pylintPath = path.join(binDir, executableName('pylint'));
const golintPath = path.join(binDir, executableName('golint'));
writeFakeExecutable(pylintPath, 'pylint failed', 1);
writeFakeExecutable(golintPath, 'main.go:1: lint failed', 0);
const pathKey = pathEnvKey();
withEnv({ [pathKey]: `${binDir}${path.delimiter}${process.env[pathKey] || process.env.PATH || ''}` }, () => {
const input = JSON.stringify({ tool_input: { command: 'git commit -m "fix: lint failures"' } });
const { result, stderr } = captureConsoleError(() => hook.evaluate(input));
assert.strictEqual(result.output, input);
assert.strictEqual(result.exitCode, 2);
assert.ok(stderr.includes('ESLint Issues'), `expected ESLint output, got: ${stderr}`);
assert.ok(stderr.includes('eslint failed'), `expected ESLint failure text, got: ${stderr}`);
assert.ok(stderr.includes('Pylint Issues'), `expected Pylint output, got: ${stderr}`);
assert.ok(stderr.includes('pylint failed'), `expected Pylint failure text, got: ${stderr}`);
assert.ok(stderr.includes('golint Issues'), `expected golint output, got: ${stderr}`);
assert.ok(stderr.includes('main.go:1: lint failed'), `expected golint failure text, got: ${stderr}`);
});
});
})) passed++; else failed++;
if (test('stdin entry point truncates oversized input and preserves pass-through output', () => {
const oversized = JSON.stringify({
tool_input: {
command: 'git status',
filler: 'x'.repeat(1024 * 1024 + 1024)
}
});
const result = spawnSync('node', [path.join(__dirname, '..', '..', 'scripts', 'hooks', 'pre-bash-commit-quality.js')], {
input: oversized,
encoding: 'utf8',
stdio: ['pipe', 'pipe', 'pipe'],
timeout: 10000
});
assert.strictEqual(result.status, 0);
assert.ok(result.stdout.length > 0, 'expected truncated payload to pass through');
assert.ok(result.stdout.length <= 1024 * 1024, 'expected stdout to stay within hook input limit');
assert.strictEqual(result.stdout, oversized.slice(0, result.stdout.length));
assert.ok(result.stderr.includes('[Hook] Error:'), 'truncated JSON should be logged and allowed');
})) passed++; else failed++;
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0); process.exit(failed > 0 ? 1 : 0);

View File

@@ -16,6 +16,13 @@ const script = path.join(
'hooks', 'hooks',
'session-activity-tracker.js' 'session-activity-tracker.js'
); );
const {
buildActivityRow,
extractFileEvents,
extractFilePaths,
summarizeOutput,
run,
} = require(script);
function test(name, fn) { function test(name, fn) {
try { try {
@@ -52,6 +59,15 @@ function runScript(input, envOverrides = {}, options = {}) {
return { code: result.status || 0, stdout: result.stdout || '', stderr: result.stderr || '' }; return { code: result.status || 0, stdout: result.stdout || '', stderr: result.stderr || '' };
} }
function readMetricRows(homeDir) {
  // Parse every JSONL record the tracker wrote beneath the fake home dir.
  const metricsFile = path.join(homeDir, '.claude', 'metrics', 'tool-usage.jsonl');
  const raw = fs.readFileSync(metricsFile, 'utf8');
  const rows = [];
  for (const line of raw.trim().split(/\r?\n/)) {
    if (line) {
      rows.push(JSON.parse(line));
    }
  }
  return rows;
}
function runTests() { function runTests() {
console.log('\n=== Testing session-activity-tracker.js ===\n'); console.log('\n=== Testing session-activity-tracker.js ===\n');
@@ -405,6 +421,246 @@ function runTests() {
fs.rmSync(tmpHome, { recursive: true, force: true }); fs.rmSync(tmpHome, { recursive: true, force: true });
}) ? passed++ : failed++); }) ? passed++ : failed++);
// buildActivityRow must return null (i.e. record nothing) when the hook event
// is not PostToolUse, when no session ID is present in the env, or when the
// payload has no tool_name — each guard is exercised individually below.
(test('skips non-PostToolUse events and rows without required identifiers', () => {
// Wrong event name.
assert.strictEqual(buildActivityRow(
{ tool_name: 'Read', tool_input: { file_path: 'README.md' } },
{ CLAUDE_HOOK_EVENT_NAME: 'PreToolUse', ECC_SESSION_ID: 'sess' }
), null);
// Missing session identifier.
assert.strictEqual(buildActivityRow(
{ tool_name: 'Read', tool_input: { file_path: 'README.md' } },
{ CLAUDE_HOOK_EVENT_NAME: 'PostToolUse' }
), null);
// Missing tool_name.
assert.strictEqual(buildActivityRow(
{ tool_input: { file_path: 'README.md' } },
{ CLAUDE_HOOK_EVENT_NAME: 'PostToolUse', ECC_SESSION_ID: 'sess' }
), null);
}) ? passed++ : failed++);
(test('sanitizes nested params, long summaries, and output variants', () => {
const longValue = `start ${'x'.repeat(260)} ghp_${'A'.repeat(20)}`;
const row = buildActivityRow(
{
tool_name: 'Lookup',
tool_input: {
query: longValue,
secret: `gho_${'B'.repeat(20)}`,
count: 3,
enabled: false,
omitted: null,
nested: { a: { b: { c: { d: 'too deep' } } } },
list: [1, true, null, 4],
},
tool_output: `line one\nline two ${'y'.repeat(260)}`,
},
{ CLAUDE_HOOK_EVENT_NAME: 'PostToolUse', CLAUDE_SESSION_ID: 'claude-fallback' }
);
assert.strictEqual(row.session_id, 'claude-fallback');
assert.strictEqual(row.file_paths.length, 0);
assert.ok(row.input_summary.endsWith('...'), 'Expected long shallow summary to be truncated');
assert.ok(!row.input_summary.includes('ghp_'), 'Expected GitHub token redaction in input summary');
assert.ok(row.output_summary.endsWith('...'), 'Expected long output summary to be truncated');
assert.ok(!row.output_summary.includes('\n'), 'Expected output summary to normalize whitespace');
const params = JSON.parse(row.input_params_json);
assert.strictEqual(params.count, 3);
assert.strictEqual(params.enabled, false);
assert.strictEqual(params.omitted, null);
assert.strictEqual(params.secret, '<REDACTED>');
assert.strictEqual(params.nested.a.b.c, '[Truncated]');
assert.deepStrictEqual(params.list.slice(0, 3), [1, true, null]);
assert.strictEqual(params.list[3], 4);
assert.ok(params.query.endsWith('...'), 'Expected long param value to be truncated');
assert.strictEqual(summarizeOutput(null), '');
assert.strictEqual(summarizeOutput(undefined), '');
assert.strictEqual(summarizeOutput('hello\nworld'), 'hello world');
assert.strictEqual(summarizeOutput({ ok: true }), '{"ok":true}');
}) ? passed++ : failed++);
(test('extracts file paths from nested arrays while filtering duplicates and remote URIs', () => {
const paths = extractFilePaths({
file_paths: [
'src/a.js',
'src/a.js',
'https://example.com/file.js',
'',
{ file_path: 'src/b.js' },
],
nested: {
source_path: 'app://connector/item',
deep: [
{ new_file_path: 'src/c.js' },
{ old_file_path: 'plugin://plugin/item' },
42,
],
},
ignored: 'not-a-path-field',
});
assert.deepStrictEqual(paths, ['src/a.js', 'src/b.js', 'src/c.js']);
assert.deepStrictEqual(extractFilePaths(null), []);
assert.deepStrictEqual(extractFilePaths('src/not-collected.js'), []);
}) ? passed++ : failed++);
(test('extracts file event previews for create delete and one-sided edits', () => {
const events = extractFileEvents('Write', {
files: [
{
file_path: 'src/new.ts',
content: 'first line\nsecond line',
},
{
file_path: 'src/new.ts',
content: 'first line\nsecond line',
},
{
file_path: 'https://example.com/remote.ts',
content: 'ignored',
},
],
});
assert.deepStrictEqual(events, [
{
path: 'src/new.ts',
action: 'create',
diff_preview: '+ first line second line',
patch_preview: '+ first line second line',
},
]);
assert.deepStrictEqual(extractFileEvents('Remove', {
file_path: 'src/old.ts',
content: 'legacy line',
}), [
{
path: 'src/old.ts',
action: 'delete',
patch_preview: '- legacy line',
},
]);
assert.deepStrictEqual(extractFileEvents('Edit', {
edits: [
{ file_path: 'src/before.ts', old_string: 'legacy', new_string: '' },
{ file_path: 'src/after.ts', old_string: '', new_string: 'modern' },
{ file_path: 'src/no-preview.ts', old_string: '', new_string: '' },
],
}), [
{
path: 'src/before.ts',
action: 'modify',
diff_preview: 'legacy ->',
patch_preview: '@@\n- legacy',
},
{
path: 'src/after.ts',
action: 'modify',
diff_preview: '-> modern',
patch_preview: '@@\n+ modern',
},
{ path: 'src/no-preview.ts', action: 'modify' },
]);
assert.deepStrictEqual(extractFileEvents('Rename', {
old_file_path: 'src/old-name.ts',
new_file_path: 'src/new-name.ts',
}), [
{ path: 'src/old-name.ts', action: 'move' },
{ path: 'src/new-name.ts', action: 'move' },
]);
assert.deepStrictEqual(extractFileEvents('Read', null), []);
assert.deepStrictEqual(extractFileEvents('Touch', { file_path: 'src/touched.ts' }), [
{ path: 'src/touched.ts', action: 'touch' },
]);
}) ? passed++ : failed++);
(test('records creation previews unchanged when running outside a git repository', () => {
const tmpHome = makeTempDir();
const tmpCwd = makeTempDir();
const input = {
tool_name: 'Write',
tool_input: {
file_path: 'created.txt',
content: 'alpha\nbeta',
},
tool_output: 17,
};
const result = runScript(input, {
...withTempHome(tmpHome),
CLAUDE_HOOK_EVENT_NAME: 'PostToolUse',
ECC_SESSION_ID: 'ecc-session-non-git-create',
}, {
cwd: tmpCwd,
});
assert.strictEqual(result.code, 0);
const [row] = readMetricRows(tmpHome);
assert.strictEqual(row.output_summary, '17');
assert.deepStrictEqual(row.file_events, [
{
path: 'created.txt',
action: 'create',
diff_preview: '+ alpha beta',
patch_preview: '+ alpha beta',
},
]);
fs.rmSync(tmpHome, { recursive: true, force: true });
fs.rmSync(tmpCwd, { recursive: true, force: true });
}) ? passed++ : failed++);
(test('preserves absolute paths outside the repo without git enrichment', () => {
const tmpHome = makeTempDir();
const outsideDir = makeTempDir();
const outsideFile = path.join(outsideDir, 'outside.txt');
fs.writeFileSync(outsideFile, 'outside', 'utf8');
const input = {
tool_name: 'Read',
tool_input: {
file_path: outsideFile,
},
tool_output: 'read outside',
};
const result = runScript(input, {
...withTempHome(tmpHome),
CLAUDE_HOOK_EVENT_NAME: 'PostToolUse',
ECC_SESSION_ID: 'ecc-session-absolute-outside',
});
assert.strictEqual(result.code, 0);
const [row] = readMetricRows(tmpHome);
assert.deepStrictEqual(row.file_paths, [outsideFile]);
assert.deepStrictEqual(row.file_events, [
{ path: outsideFile, action: 'read' },
]);
fs.rmSync(tmpHome, { recursive: true, force: true });
fs.rmSync(outsideDir, { recursive: true, force: true });
}) ? passed++ : failed++);
(test('passes empty stdin through without creating metrics', () => {
const tmpHome = makeTempDir();
const result = runScript('', {
...withTempHome(tmpHome),
CLAUDE_HOOK_EVENT_NAME: 'PostToolUse',
ECC_SESSION_ID: 'sess-empty',
});
assert.strictEqual(result.code, 0);
assert.strictEqual(result.stdout, '');
assert.strictEqual(run(''), '');
assert.strictEqual(
fs.existsSync(path.join(tmpHome, '.claude', 'metrics', 'tool-usage.jsonl')),
false
);
fs.rmSync(tmpHome, { recursive: true, force: true });
}) ? passed++ : failed++);
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0); process.exit(failed > 0 ? 1 : 0);
} }

View File

@@ -577,7 +577,7 @@ async function runTests() {
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse', CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
ECC_MCP_CONFIG_PATH: configPath, ECC_MCP_CONFIG_PATH: configPath,
ECC_MCP_HEALTH_STATE_PATH: statePath, ECC_MCP_HEALTH_STATE_PATH: statePath,
ECC_MCP_HEALTH_TIMEOUT_MS: '100' ECC_MCP_HEALTH_TIMEOUT_MS: '1000'
} }
); );

View File

@@ -8,6 +8,7 @@ const os = require('os');
const path = require('path'); const path = require('path');
const { const {
getInstallComponent,
loadInstallManifests, loadInstallManifests,
listInstallComponents, listInstallComponents,
listLegacyCompatibilityLanguages, listLegacyCompatibilityLanguages,
@@ -45,6 +46,24 @@ function writeJson(filePath, value) {
fs.writeFileSync(filePath, JSON.stringify(value, null, 2)); fs.writeFileSync(filePath, JSON.stringify(value, null, 2));
} }
// Write the install-manifest fixture trio (modules, profiles, and — only when
// explicitly supplied — components) under <repoRoot>/manifests.
function writeManifestSet(repoRoot, options = {}) {
  const manifestPath = (name) => path.join(repoRoot, 'manifests', name);
  writeJson(manifestPath('install-modules.json'), {
    version: options.modulesVersion || 1,
    modules: options.modules || [],
  });
  writeJson(manifestPath('install-profiles.json'), {
    version: options.profilesVersion || 1,
    profiles: options.profiles || {},
  });
  // components is optional: an absent key means "no components manifest at all",
  // which is distinct from an empty components list, so check key presence.
  if (Object.hasOwn(options, 'components')) {
    writeJson(manifestPath('install-components.json'), {
      version: options.componentsVersion || 1,
      components: options.components,
    });
  }
}
function runTests() { function runTests() {
console.log('\n=== Testing install-manifests.js ===\n'); console.log('\n=== Testing install-manifests.js ===\n');
@@ -80,6 +99,43 @@ function runTests() {
'Should include capability:security'); 'Should include capability:security');
})) passed++; else failed++; })) passed++; else failed++;
if (test('gets install component details and validates component IDs', () => {
const component = getInstallComponent(' lang:typescript ');
assert.strictEqual(component.id, 'lang:typescript');
assert.strictEqual(component.family, 'language');
assert.ok(component.moduleIds.length > 0, 'Should expose component module IDs');
assert.strictEqual(component.moduleCount, component.moduleIds.length);
assert.strictEqual(component.modules.length, component.moduleIds.length);
assert.ok(component.modules.every(module => component.moduleIds.includes(module.id)));
assert.ok(Array.isArray(component.targets));
assert.throws(
() => getInstallComponent(''),
/An install component ID is required/
);
assert.throws(
() => getInstallComponent('lang:missing'),
/Unknown install component: lang:missing/
);
})) passed++; else failed++;
if (test('validates install component filters', () => {
const claudeComponents = listInstallComponents({ family: 'capability', target: 'claude' });
assert.ok(claudeComponents.length > 0, 'Should list Claude capability components');
assert.ok(claudeComponents.every(component => component.family === 'capability'));
assert.ok(claudeComponents.every(component => component.targets.includes('claude')));
assert.throws(
() => listInstallComponents({ family: 'unknown' }),
/Unknown component family: unknown/
);
assert.throws(
() => listInstallComponents({ target: 'unknown-target' }),
/Unknown install target: unknown-target/
);
})) passed++; else failed++;
if (test('labels continuous-learning as a legacy v1 install surface', () => { if (test('labels continuous-learning as a legacy v1 install surface', () => {
const components = listInstallComponents({ family: 'skill' }); const components = listInstallComponents({ family: 'skill' });
const component = components.find(entry => entry.id === 'skill:continuous-learning'); const component = components.find(entry => entry.id === 'skill:continuous-learning');
@@ -172,6 +228,10 @@ function runTests() {
() => validateInstallModuleIds(['ghost-module']), () => validateInstallModuleIds(['ghost-module']),
/Unknown install module: ghost-module/ /Unknown install module: ghost-module/
); );
assert.throws(
() => validateInstallModuleIds(['ghost-one', 'ghost-two']),
/Unknown install modules: ghost-one, ghost-two/
);
})) passed++; else failed++; })) passed++; else failed++;
if (test('resolves legacy compatibility selections into manifest module IDs', () => { if (test('resolves legacy compatibility selections into manifest module IDs', () => {
@@ -251,6 +311,25 @@ function runTests() {
}), }),
/Unknown legacy language: brainfuck/ /Unknown legacy language: brainfuck/
); );
assert.throws(
() => resolveLegacyCompatibilitySelection({
legacyLanguages: [],
}),
/No legacy languages were provided/
);
assert.throws(
() => resolveLegacyCompatibilitySelection({
target: 'not-a-target',
legacyLanguages: ['typescript'],
}),
/Unknown install target: not-a-target/
);
assert.throws(
() => resolveLegacyCompatibilitySelection({
legacyLanguages: ['brainfuck', 'whitespace'],
}),
/Unknown legacy languages: brainfuck, whitespace/
);
})) passed++; else failed++; })) passed++; else failed++;
if (test('resolves included and excluded user-facing components', () => { if (test('resolves included and excluded user-facing components', () => {
@@ -293,6 +372,61 @@ function runTests() {
); );
})) passed++; else failed++; })) passed++; else failed++;
if (test('rejects empty, unknown, and fully excluded install selections', () => {
const repoRoot = createTestRepo();
try {
writeManifestSet(repoRoot, {
modules: [
{
id: 'core',
kind: 'rules',
description: 'Core',
paths: ['rules/core.md'],
targets: ['claude'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
}
],
profiles: {
core: { description: 'Core', modules: ['core'] }
},
components: [
{
id: 'capability:core',
family: 'capability',
description: 'Core',
modules: ['core']
}
],
});
assert.throws(
() => resolveInstallPlan({ repoRoot }),
/No install profile, module IDs, or included component IDs were provided/
);
assert.throws(
() => resolveInstallPlan({ repoRoot, moduleIds: ['missing'] }),
/Unknown install module: missing/
);
assert.throws(
() => resolveInstallPlan({ repoRoot, includeComponentIds: ['capability:missing'] }),
/Unknown install component: capability:missing/
);
assert.throws(
() => resolveInstallPlan({
repoRoot,
profileId: 'core',
excludeComponentIds: ['capability:core'],
}),
/Selection excludes every requested install module/
);
} finally {
cleanupTestRepo(repoRoot);
}
})) passed++; else failed++;
if (test('validates projectRoot and homeDir option types before adapter planning', () => { if (test('validates projectRoot and homeDir option types before adapter planning', () => {
assert.throws( assert.throws(
() => resolveInstallPlan({ profileId: 'core', target: 'cursor', projectRoot: 42 }), () => resolveInstallPlan({ profileId: 'core', target: 'cursor', projectRoot: 42 }),
@@ -349,6 +483,92 @@ function runTests() {
} }
})) passed++; else failed++; })) passed++; else failed++;
if (test('rejects missing, malformed, and unsupported manifest fixtures', () => {
const repoRoot = createTestRepo();
try {
assert.throws(
() => loadInstallManifests({ repoRoot }),
/Install manifests not found/
);
fs.writeFileSync(path.join(repoRoot, 'manifests', 'install-modules.json'), '{ bad json');
writeJson(path.join(repoRoot, 'manifests', 'install-profiles.json'), {
version: 1,
profiles: {},
});
assert.throws(
() => loadInstallManifests({ repoRoot }),
/Failed to read install-modules\.json/
);
writeManifestSet(repoRoot, {
modules: [
{
id: 'empty-target',
kind: 'rules',
description: 'Empty target',
paths: ['rules/core.md'],
targets: ['claude', ''],
dependencies: [],
defaultInstall: false,
cost: 'light',
stability: 'stable'
}
],
profiles: {},
});
assert.throws(
() => loadInstallManifests({ repoRoot }),
/Install module empty-target has invalid targets/
);
writeManifestSet(repoRoot, {
modules: [
{
id: 'unsupported-target',
kind: 'rules',
description: 'Unsupported target',
paths: ['rules/core.md'],
targets: ['claude', 'moonbase'],
dependencies: [],
defaultInstall: false,
cost: 'light',
stability: 'stable'
}
],
profiles: {},
});
assert.throws(
() => loadInstallManifests({ repoRoot }),
/Install module unsupported-target has unsupported targets: moonbase/
);
writeManifestSet(repoRoot, {
modules: [
{
id: 'core',
kind: 'rules',
description: 'Core',
paths: ['rules/core.md'],
targets: ['claude'],
dependencies: [],
defaultInstall: false,
cost: 'light',
stability: 'stable'
}
],
profiles: {
core: { description: 'Core', modules: ['core'] }
},
});
const manifests = loadInstallManifests({ repoRoot });
assert.deepStrictEqual(manifests.components, []);
assert.strictEqual(manifests.componentsVersion, null);
} finally {
cleanupTestRepo(repoRoot);
}
})) passed++; else failed++;
if (test('fails fast when install manifest module targets is not an array', () => { if (test('fails fast when install manifest module targets is not an array', () => {
const repoRoot = createTestRepo(); const repoRoot = createTestRepo();
try { try {
@@ -431,6 +651,48 @@ function runTests() {
} }
})) passed++; else failed++; })) passed++; else failed++;
if (test('detects circular install dependencies', () => {
const repoRoot = createTestRepo();
try {
writeManifestSet(repoRoot, {
modules: [
{
id: 'alpha',
kind: 'skills',
description: 'Alpha',
paths: ['skills/alpha'],
targets: ['claude'],
dependencies: ['beta'],
defaultInstall: false,
cost: 'light',
stability: 'stable'
},
{
id: 'beta',
kind: 'skills',
description: 'Beta',
paths: ['skills/beta'],
targets: ['claude'],
dependencies: ['alpha'],
defaultInstall: false,
cost: 'light',
stability: 'stable'
}
],
profiles: {
core: { description: 'Core', modules: ['alpha'] }
},
});
assert.throws(
() => resolveInstallPlan({ repoRoot, profileId: 'core' }),
/Circular install dependency detected at alpha/
);
} finally {
cleanupTestRepo(repoRoot);
}
})) passed++; else failed++;
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0); process.exit(failed > 0 ? 1 : 0);
} }

View File

@@ -6,8 +6,13 @@ const os = require('os');
const path = require('path'); const path = require('path');
const { const {
SESSION_SCHEMA_VERSION,
buildAggregates,
getFallbackSessionRecordingPath, getFallbackSessionRecordingPath,
persistCanonicalSnapshot normalizeClaudeHistorySession,
normalizeDmuxSnapshot,
persistCanonicalSnapshot,
validateCanonicalSnapshot
} = require('../../scripts/lib/session-adapters/canonical-session'); } = require('../../scripts/lib/session-adapters/canonical-session');
const { createClaudeHistoryAdapter } = require('../../scripts/lib/session-adapters/claude-history'); const { createClaudeHistoryAdapter } = require('../../scripts/lib/session-adapters/claude-history');
const { createDmuxTmuxAdapter } = require('../../scripts/lib/session-adapters/dmux-tmux'); const { createDmuxTmuxAdapter } = require('../../scripts/lib/session-adapters/dmux-tmux');
@@ -55,6 +60,75 @@ function withHome(homeDir, fn) {
} }
} }
// Build a valid canonical session snapshot, then layer test-specific overrides
// on top. `session`, `sourceTarget`, `workers`, and `aggregates` get merge
// semantics; any other override key replaces the top-level field wholesale.
function canonicalSnapshot(overrides = {}) {
  const baseWorker = {
    id: 'worker-1',
    label: 'Worker 1',
    state: 'running',
    health: 'healthy',
    branch: null,
    worktree: null,
    runtime: {
      kind: 'test-runtime',
      command: null,
      pid: null,
      active: true,
      dead: false
    },
    intent: {
      objective: 'Test objective',
      seedPaths: []
    },
    outputs: {
      summary: [],
      validation: [],
      remainingRisks: []
    },
    artifacts: {}
  };
  const result = {
    schemaVersion: SESSION_SCHEMA_VERSION,
    adapterId: 'test-adapter',
    session: {
      id: 'session-1',
      kind: 'test',
      state: 'active',
      repoRoot: null,
      sourceTarget: {
        type: 'session',
        value: 'session-1'
      }
    },
    workers: [baseWorker]
  };
  result.aggregates = buildAggregates(result.workers);
  if (overrides.session) {
    result.session = Object.assign({}, result.session, overrides.session);
  }
  // sourceTarget is merged after session so a sourceTarget override wins over
  // anything a session override carried in.
  if (overrides.sourceTarget) {
    result.session.sourceTarget = Object.assign(
      {},
      result.session.sourceTarget,
      overrides.sourceTarget
    );
  }
  // Key presence (not truthiness) decides worker replacement, so tests can pass
  // malformed worker lists (null entries, non-arrays) for validation cases.
  if (Object.hasOwn(overrides, 'workers')) {
    result.workers = overrides.workers;
    const aggregateSource = Array.isArray(overrides.workers) ? overrides.workers : [];
    result.aggregates = buildAggregates(aggregateSource);
  }
  if (overrides.aggregates) {
    result.aggregates = Object.assign({}, result.aggregates, overrides.aggregates);
  }
  const handledKeys = new Set(['session', 'sourceTarget', 'workers', 'aggregates']);
  for (const key of Object.keys(overrides)) {
    if (!handledKeys.has(key)) {
      result[key] = overrides[key];
    }
  }
  return result;
}
test('dmux adapter normalizes orchestration snapshots into canonical form', () => { test('dmux adapter normalizes orchestration snapshots into canonical form', () => {
const recordingDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-recordings-')); const recordingDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-recordings-'));
@@ -509,6 +583,324 @@ test('adapter registry lists adapter metadata and target types', () => {
); );
}); });
test('canonical snapshot validation rejects malformed required fields', () => {
const invalidCases = [
[null, /must be an object/],
[canonicalSnapshot({ schemaVersion: 'ecc.session.v0' }), /Unsupported canonical session schema version/],
[canonicalSnapshot({ adapterId: '' }), /adapterId/],
[canonicalSnapshot({ session: { id: '' } }), /session.id/],
[canonicalSnapshot({ session: { repoRoot: 42 } }), /session.repoRoot/],
[canonicalSnapshot({ sourceTarget: { type: '' } }), /session.sourceTarget.type/],
[(() => {
const snapshot = canonicalSnapshot();
snapshot.workers = [null];
snapshot.aggregates = { workerCount: 1, states: { unknown: 1 }, healths: { unknown: 1 } };
return snapshot;
})(), /workers\[0\] to be an object/],
[canonicalSnapshot({
workers: [{
...canonicalSnapshot().workers[0],
branch: 7
}]
}), /workers\[0\].branch/],
[canonicalSnapshot({
workers: [{
...canonicalSnapshot().workers[0],
runtime: {
...canonicalSnapshot().workers[0].runtime,
command: 123
}
}]
}), /workers\[0\].runtime.command/],
[canonicalSnapshot({
workers: [{
...canonicalSnapshot().workers[0],
runtime: {
...canonicalSnapshot().workers[0].runtime,
active: 'yes'
}
}]
}), /workers\[0\].runtime.active/],
[canonicalSnapshot({
workers: [{
...canonicalSnapshot().workers[0],
intent: {
objective: 'ok',
seedPaths: ['README.md', 123]
}
}]
}), /workers\[0\].intent.seedPaths/],
[canonicalSnapshot({
workers: [{
...canonicalSnapshot().workers[0],
outputs: {
summary: [],
validation: 'nope',
remainingRisks: []
}
}]
}), /workers\[0\].outputs.validation/],
[canonicalSnapshot({ aggregates: { workerCount: 99 } }), /aggregates.workerCount to match/],
[canonicalSnapshot({ aggregates: { states: [] } }), /aggregates.states to be an object/],
[canonicalSnapshot({ aggregates: { states: { running: -1 } } }), /aggregates.states.running/],
[canonicalSnapshot({ aggregates: { healths: null } }), /aggregates.healths to be an object/]
];
for (const [snapshot, pattern] of invalidCases) {
assert.throws(() => validateCanonicalSnapshot(snapshot), pattern);
}
});
// Build a dmux worker fixture for the given slug. `status` fields and the
// optional `task`/`handoff`/`files` overrides are shallow-merged onto defaults;
// a `pane` override (even null) replaces the default pane wholesale.
function dmuxWorker(workerSlug, status = {}, overrides = {}) {
  const workerDir = `/tmp/${workerSlug}`;
  const defaultPane = {
    currentCommand: 'codex',
    pid: 123,
    active: true,
    dead: false
  };
  return {
    workerSlug,
    workerDir,
    status: Object.assign(
      {
        state: 'running',
        updated: new Date().toISOString(),
        branch: null,
        worktree: null
      },
      status
    ),
    task: Object.assign(
      { objective: `${workerSlug} objective`, seedPaths: ['README.md'] },
      overrides.task || {}
    ),
    handoff: Object.assign(
      { summary: ['summary'], validation: ['validation'], remainingRisks: ['risk'] },
      overrides.handoff || {}
    ),
    files: Object.assign(
      {
        status: `${workerDir}/status.md`,
        task: `${workerDir}/task.md`,
        handoff: `${workerDir}/handoff.md`
      },
      overrides.files || {}
    ),
    // Presence check (not truthiness) so tests can force pane to null.
    pane: Object.hasOwn(overrides, 'pane') ? overrides.pane : defaultPane
  };
}
// Build an empty dmux orchestration snapshot; override fields win over defaults.
function dmuxSnapshot(overrides = {}) {
  const base = {
    sessionName: 'edge-session',
    repoRoot: '/tmp/repo',
    sessionActive: false,
    workerStates: {},
    workerCount: 0,
    workers: []
  };
  return Object.assign(base, overrides);
}
test('dmux normalization covers missing failed idle and stale worker states', () => {
const sourceTarget = { type: 'session', value: 'edge-session' };
const missing = normalizeDmuxSnapshot(dmuxSnapshot(), sourceTarget);
assert.strictEqual(missing.session.state, 'missing');
assert.strictEqual(missing.aggregates.workerCount, 0);
const failed = normalizeDmuxSnapshot(dmuxSnapshot({
workerStates: { failed: 1 },
workerCount: 1,
workers: [
dmuxWorker('failure', { state: 'failed' }, { pane: null })
]
}), sourceTarget);
assert.strictEqual(failed.session.state, 'failed');
assert.strictEqual(failed.workers[0].health, 'degraded');
assert.strictEqual(failed.workers[0].runtime.active, false);
assert.strictEqual(failed.workers[0].runtime.dead, false);
const idle = normalizeDmuxSnapshot(dmuxSnapshot({
workerStates: { running: 1, queued: 1 },
workerCount: 2,
workers: [
dmuxWorker('missing-update', { state: 'running', updated: undefined }),
dmuxWorker('stale-update', { state: 'active', updated: '2001-01-01T00:00:00Z' }),
dmuxWorker('dead-pane', { state: 'running' }, { pane: { dead: true, active: false } }),
dmuxWorker('mystery', { state: 'queued' }, {
task: { seedPaths: 'not-array' },
handoff: { summary: 'not-array', validation: null, remainingRisks: undefined },
pane: null
})
]
}), sourceTarget);
assert.strictEqual(idle.session.state, 'idle');
assert.deepStrictEqual(
idle.workers.map(worker => worker.health),
['stale', 'stale', 'degraded', 'unknown']
);
assert.deepStrictEqual(idle.workers[3].intent.seedPaths, []);
assert.deepStrictEqual(idle.workers[3].outputs.summary, []);
const completed = normalizeDmuxSnapshot(dmuxSnapshot({
workerStates: null,
workerCount: 2,
workers: [
dmuxWorker('done-a', { state: 'done' }),
dmuxWorker('done-b', { state: 'success' })
]
}), sourceTarget);
assert.strictEqual(completed.session.state, 'completed');
assert.deepStrictEqual(completed.workers.map(worker => worker.health), ['healthy', 'healthy']);
});
test('claude history normalization falls back to filename ids and empty metadata defaults', () => {
const snapshot = normalizeClaudeHistorySession({
shortId: 'no-id',
filename: '2026-03-13-no-id-session.tmp',
sessionPath: '/tmp/2026-03-13-no-id-session.tmp',
metadata: {
title: '',
completed: 'not-array',
inProgress: ['Resume from filename fallback'],
context: '',
notes: ''
}
}, {
type: 'claude-history',
value: 'latest'
});
assert.strictEqual(snapshot.session.id, '2026-03-13-no-id-session');
assert.strictEqual(snapshot.workers[0].id, '2026-03-13-no-id-session');
assert.strictEqual(snapshot.workers[0].label, '2026-03-13-no-id-session.tmp');
assert.strictEqual(snapshot.workers[0].intent.objective, 'Resume from filename fallback');
assert.deepStrictEqual(snapshot.workers[0].intent.seedPaths, []);
assert.deepStrictEqual(snapshot.workers[0].outputs.summary, []);
assert.deepStrictEqual(snapshot.workers[0].outputs.remainingRisks, []);
const pathOnly = normalizeClaudeHistorySession({
sessionPath: '/tmp/path-only-session.tmp',
metadata: {
title: 'Path Only',
inProgress: ['Continue work'],
context: ' README.md \n\n scripts/ecc.js ',
notes: 'No risks'
}
}, {
type: 'claude-history',
value: '/tmp/path-only-session.tmp'
});
assert.strictEqual(pathOnly.session.id, 'path-only-session');
assert.strictEqual(pathOnly.workers[0].intent.objective, 'Continue work');
assert.deepStrictEqual(pathOnly.workers[0].intent.seedPaths, ['README.md', 'scripts/ecc.js']);
assert.deepStrictEqual(pathOnly.workers[0].outputs.remainingRisks, ['No risks']);
});
test('fallback recordings sanitize paths, use env dirs, and preserve changed history', () => {
const recordingDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-recordings-env-'));
const previousRecordingDir = process.env.ECC_SESSION_RECORDING_DIR;
try {
process.env.ECC_SESSION_RECORDING_DIR = recordingDir;
const first = canonicalSnapshot({
adapterId: 'adapter with spaces',
session: { id: 'session id/with:chars' }
});
const recordingPath = getFallbackSessionRecordingPath(first);
assert.ok(recordingPath.includes(`${path.sep}adapter_with_spaces${path.sep}`));
assert.ok(recordingPath.endsWith(`${path.sep}session_id_with_chars.json`));
fs.mkdirSync(path.dirname(recordingPath), { recursive: true });
fs.writeFileSync(recordingPath, '{not json', 'utf8');
const firstPersistence = persistCanonicalSnapshot(first, {
loadStateStoreImpl: () => null
});
const changed = canonicalSnapshot({
adapterId: 'adapter with spaces',
session: { id: 'session id/with:chars', state: 'idle' }
});
persistCanonicalSnapshot(changed, { loadStateStoreImpl: () => null });
persistCanonicalSnapshot(changed, { loadStateStoreImpl: () => null });
const persisted = JSON.parse(fs.readFileSync(recordingPath, 'utf8'));
assert.strictEqual(firstPersistence.backend, 'json-file');
assert.strictEqual(firstPersistence.path, recordingPath);
assert.strictEqual(persisted.schemaVersion, 'ecc.session.recording.v1');
assert.strictEqual(persisted.latest.session.state, 'idle');
assert.strictEqual(persisted.history.length, 2);
assert.strictEqual(persisted.history[0].snapshot.session.state, 'active');
assert.strictEqual(persisted.history[1].snapshot.session.state, 'idle');
assert.strictEqual(persisted.createdAt, persisted.history[0].recordedAt);
} finally {
if (typeof previousRecordingDir === 'string') {
process.env.ECC_SESSION_RECORDING_DIR = previousRecordingDir;
} else {
delete process.env.ECC_SESSION_RECORDING_DIR;
}
fs.rmSync(recordingDir, { recursive: true, force: true });
}
});
test('persistence supports skip mode, writer variants, and missing state-store fallback', () => {
const snapshot = canonicalSnapshot();
const skipped = persistCanonicalSnapshot(snapshot, { persist: false });
assert.deepStrictEqual(skipped, {
backend: 'skipped',
path: null,
recordedAt: null
});
const topLevelStore = {
calls: [],
recordCanonicalSessionSnapshot(snapshotArg, metadata) {
this.calls.push({ snapshot: snapshotArg, metadata });
}
};
const stateStoreResult = persistCanonicalSnapshot(snapshot, { stateStore: topLevelStore });
assert.strictEqual(stateStoreResult.backend, 'state-store');
assert.strictEqual(topLevelStore.calls.length, 1);
assert.strictEqual(topLevelStore.calls[0].metadata.sessionId, 'session-1');
const nestedStore = {
sessions: {
calls: [],
recordSessionSnapshot(snapshotArg, metadata) {
this.calls.push({ snapshot: snapshotArg, metadata });
}
}
};
persistCanonicalSnapshot(snapshot, { stateStore: nestedStore });
assert.strictEqual(nestedStore.sessions.calls.length, 1);
const noWriterDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-no-writer-'));
const missingModuleDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-missing-module-'));
try {
const noWriter = persistCanonicalSnapshot(snapshot, {
recordingDir: noWriterDir,
stateStore: { createStateStore() {} }
});
assert.strictEqual(noWriter.backend, 'json-file');
const missingModule = new Error("Cannot find module '../state-store'");
missingModule.code = 'MODULE_NOT_FOUND';
const fallback = persistCanonicalSnapshot(snapshot, {
recordingDir: missingModuleDir,
loadStateStoreImpl() {
throw missingModule;
}
});
assert.strictEqual(fallback.backend, 'json-file');
} finally {
fs.rmSync(noWriterDir, { recursive: true, force: true });
fs.rmSync(missingModuleDir, { recursive: true, force: true });
}
});
test('persistence only falls back when the state-store module is missing', () => { test('persistence only falls back when the state-store module is missing', () => {
const snapshot = { const snapshot = {
schemaVersion: 'ecc.session.v1', schemaVersion: 'ecc.session.v1',

View File

@@ -19,7 +19,6 @@ const fs = require('fs');
const path = require('path'); const path = require('path');
const repoRoot = path.resolve(__dirname, '..'); const repoRoot = path.resolve(__dirname, '..');
const repoRootWithSep = `${repoRoot}${path.sep}`;
const packageJsonPath = path.join(repoRoot, 'package.json'); const packageJsonPath = path.join(repoRoot, 'package.json');
const packageLockPath = path.join(repoRoot, 'package-lock.json'); const packageLockPath = path.join(repoRoot, 'package-lock.json');
const rootAgentsPath = path.join(repoRoot, 'AGENTS.md'); const rootAgentsPath = path.join(repoRoot, 'AGENTS.md');
@@ -70,16 +69,6 @@ function loadJsonObject(filePath, label) {
return parsed; return parsed;
} }
function assertSafeRepoRelativePath(relativePath, label) {
const normalized = path.posix.normalize(relativePath.replace(/\\/g, '/'));
assert.ok(!path.isAbsolute(relativePath), `${label} must not be absolute: ${relativePath}`);
assert.ok(
!normalized.startsWith('../') && !normalized.includes('/../'),
`${label} must not traverse directories: ${relativePath}`,
);
}
function collectMarkdownFiles(rootPath) { function collectMarkdownFiles(rootPath) {
if (!fs.existsSync(rootPath)) { if (!fs.existsSync(rootPath)) {
return []; return [];

View File

@@ -6,9 +6,10 @@ const assert = require('assert');
const fs = require('fs'); const fs = require('fs');
const os = require('os'); const os = require('os');
const path = require('path'); const path = require('path');
const { execFileSync } = require('child_process'); const { execFileSync, spawnSync } = require('child_process');
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'harness-audit.js'); const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'harness-audit.js');
const { parseArgs } = require(SCRIPT);
function createTempDir(prefix) { function createTempDir(prefix) {
return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
@@ -18,7 +19,7 @@ function cleanup(dirPath) {
fs.rmSync(dirPath, { recursive: true, force: true }); fs.rmSync(dirPath, { recursive: true, force: true });
} }
function run(args = [], options = {}) { function buildEnv(options = {}) {
const userProfile = options.userProfile || options.homeDir || process.env.USERPROFILE; const userProfile = options.userProfile || options.homeDir || process.env.USERPROFILE;
const env = { const env = {
...process.env, ...process.env,
@@ -31,9 +32,13 @@ function run(args = [], options = {}) {
env.HOME = process.env.HOME; env.HOME = process.env.HOME;
} }
return env;
}
function run(args = [], options = {}) {
const stdout = execFileSync('node', [SCRIPT, ...args], { const stdout = execFileSync('node', [SCRIPT, ...args], {
cwd: options.cwd || path.join(__dirname, '..', '..'), cwd: options.cwd || path.join(__dirname, '..', '..'),
env, env: buildEnv(options),
encoding: 'utf8', encoding: 'utf8',
stdio: ['pipe', 'pipe', 'pipe'], stdio: ['pipe', 'pipe', 'pipe'],
timeout: 10000, timeout: 10000,
@@ -42,6 +47,16 @@ function run(args = [], options = {}) {
return stdout; return stdout;
} }
function runProcess(args = [], options = {}) {
return spawnSync('node', [SCRIPT, ...args], {
cwd: options.cwd || path.join(__dirname, '..', '..'),
env: buildEnv(options),
encoding: 'utf8',
stdio: ['pipe', 'pipe', 'pipe'],
timeout: 10000,
});
}
function test(name, fn) { function test(name, fn) {
try { try {
fn(); fn();
@@ -60,6 +75,46 @@ function runTests() {
let passed = 0; let passed = 0;
let failed = 0; let failed = 0;
if (test('parseArgs accepts supported forms and rejects invalid arguments', () => {
const rootDir = createTempDir('harness-audit-args-root-');
try {
assert.strictEqual(parseArgs(['node', 'script', '--help']).help, true);
assert.strictEqual(parseArgs(['node', 'script', '-h']).help, true);
const spaced = parseArgs(['node', 'script', '--format', 'json', '--scope', 'skills', '--root', rootDir]);
assert.strictEqual(spaced.format, 'json');
assert.strictEqual(spaced.scope, 'skills');
assert.strictEqual(spaced.root, path.resolve(rootDir));
const equals = parseArgs(['node', 'script', '--format=json', '--scope=hooks', `--root=${rootDir}`]);
assert.strictEqual(equals.format, 'json');
assert.strictEqual(equals.scope, 'hooks');
assert.strictEqual(equals.root, path.resolve(rootDir));
assert.strictEqual(parseArgs(['node', 'script', 'commands']).scope, 'commands');
assert.strictEqual(parseArgs(['node', 'script', '--scope']).scope, 'repo');
assert.throws(() => parseArgs(['node', 'script', '--format', 'xml']), /Invalid format: xml/);
assert.throws(() => parseArgs(['node', 'script', '--scope', 'bad-scope']), /Invalid scope: bad-scope/);
assert.throws(() => parseArgs(['node', 'script', '--unknown']), /Unknown argument: --unknown/);
} finally {
cleanup(rootDir);
}
})) passed++; else failed++;
if (test('cli help exits cleanly and invalid cli args exit with stderr', () => {
const help = runProcess(['--help']);
assert.strictEqual(help.status, 0);
assert.strictEqual(help.stderr, '');
assert.ok(help.stdout.includes('Usage: node scripts/harness-audit.js'));
assert.ok(help.stdout.includes('Deterministic harness audit'));
const invalid = runProcess(['--format', 'xml']);
assert.strictEqual(invalid.status, 1);
assert.strictEqual(invalid.stdout, '');
assert.ok(invalid.stderr.includes('Error: Invalid format: xml. Use text or json.'));
})) passed++; else failed++;
if (test('json output is deterministic between runs', () => { if (test('json output is deterministic between runs', () => {
const first = run(['repo', '--format', 'json']); const first = run(['repo', '--format', 'json']);
const second = run(['repo', '--format', 'json']); const second = run(['repo', '--format', 'json']);
@@ -103,6 +158,29 @@ function runTests() {
assert.ok(output.includes('Top 3 Actions:') || output.includes('Checks:')); assert.ok(output.includes('Top 3 Actions:') || output.includes('Checks:'));
})) passed++; else failed++; })) passed++; else failed++;
if (test('detects repo mode from structural markers when package name differs', () => {
const projectRoot = createTempDir('harness-audit-structural-repo-');
try {
fs.mkdirSync(path.join(projectRoot, 'scripts'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, '.claude-plugin'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, 'agents'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, 'skills'), { recursive: true });
fs.writeFileSync(path.join(projectRoot, 'scripts', 'harness-audit.js'), '#!/usr/bin/env node\n');
fs.writeFileSync(path.join(projectRoot, '.claude-plugin', 'plugin.json'), JSON.stringify({ name: 'ecc' }, null, 2));
fs.writeFileSync(
path.join(projectRoot, 'package.json'),
JSON.stringify({ name: 'forked-harness', scripts: { test: 'node scripts/validate-commands.js && node tests/run-all.js' } }, null, 2)
);
const parsed = JSON.parse(run(['--format=json', `--root=${projectRoot}`]));
assert.strictEqual(parsed.target_mode, 'repo');
assert.strictEqual(parsed.root_dir, path.resolve(projectRoot));
} finally {
cleanup(projectRoot);
}
})) passed++; else failed++;
if (test('audits consumer projects from cwd instead of the ECC repo root', () => { if (test('audits consumer projects from cwd instead of the ECC repo root', () => {
const homeDir = createTempDir('harness-audit-home-'); const homeDir = createTempDir('harness-audit-home-');
const projectRoot = createTempDir('harness-audit-project-'); const projectRoot = createTempDir('harness-audit-project-');
@@ -141,6 +219,73 @@ function runTests() {
} }
})) passed++; else failed++; })) passed++; else failed++;
if (test('scores empty consumer projects without plugin or harness signals as failing checks', () => {
const homeDir = createTempDir('harness-audit-empty-home-');
const projectRoot = createTempDir('harness-audit-empty-project-');
try {
const parsed = JSON.parse(run(['repo', '--format', 'json'], { cwd: projectRoot, homeDir }));
assert.strictEqual(parsed.target_mode, 'consumer');
assert.strictEqual(parsed.overall_score, 0);
assert.ok(parsed.max_score > 0);
assert.strictEqual(parsed.top_actions.length, 3);
assert.ok(parsed.checks.some(check => check.id === 'consumer-plugin-install' && !check.pass));
assert.ok(parsed.checks.some(check => check.id === 'consumer-project-overrides' && !check.pass));
assert.ok(parsed.checks.some(check => check.id === 'consumer-secret-hygiene' && !check.pass));
} finally {
cleanup(homeDir);
cleanup(projectRoot);
}
})) passed++; else failed++;
if (test('prints no top actions when consumer checks all pass', () => {
const homeDir = createTempDir('harness-audit-passing-home-');
const projectRoot = createTempDir('harness-audit-passing-project-');
try {
fs.mkdirSync(path.join(projectRoot, '.claude', 'plugins', 'ecc@ecc'), { recursive: true });
fs.writeFileSync(
path.join(projectRoot, '.claude', 'plugins', 'ecc@ecc', 'plugin.json'),
JSON.stringify({ name: 'ecc' }, null, 2)
);
fs.mkdirSync(path.join(projectRoot, '.claude'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, '.github', 'workflows', 'nested'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, 'docs', 'adr'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, 'evals'), { recursive: true });
fs.mkdirSync(path.join(projectRoot, 'src'), { recursive: true });
fs.writeFileSync(path.join(projectRoot, '.claude', 'hooks.json'), JSON.stringify({ hooks: [] }, null, 2));
fs.writeFileSync(path.join(projectRoot, '.claude', 'settings.local.json'), JSON.stringify({ local: true }, null, 2));
fs.writeFileSync(path.join(projectRoot, 'CLAUDE.md'), '# Consumer instructions\n');
fs.writeFileSync(path.join(projectRoot, 'src', 'app.spec.ts'), 'test placeholder\n');
fs.writeFileSync(path.join(projectRoot, '.github', 'workflows', 'nested', 'ci.yaml'), 'name: ci\n');
fs.writeFileSync(path.join(projectRoot, 'docs', 'adr', '001.md'), '# Record\n');
fs.writeFileSync(path.join(projectRoot, 'evals', 'smoke.json'), '{}\n');
fs.writeFileSync(path.join(projectRoot, '.github', 'dependabot.yml'), 'version: 2\n');
fs.writeFileSync(path.join(projectRoot, '.gitignore'), 'node_modules\n.env.local\n');
fs.writeFileSync(
path.join(projectRoot, 'package.json'),
JSON.stringify({ name: 'passing-consumer', scripts: {} }, null, 2)
);
const parsed = JSON.parse(run(['repo', '--format', 'json'], { cwd: projectRoot, homeDir }));
assert.strictEqual(parsed.target_mode, 'consumer');
assert.strictEqual(parsed.overall_score, parsed.max_score);
const text = run(['repo'], { cwd: projectRoot, homeDir });
assert.ok(text.includes(`Harness Audit (repo, consumer): ${parsed.max_score}/${parsed.max_score}`));
assert.ok(text.includes('Checks: 11 total, 0 failing'));
assert.ok(!text.includes('Top 3 Actions:'));
const scopedText = run(['agents'], { cwd: projectRoot, homeDir });
assert.ok(scopedText.includes('Harness Audit (agents, consumer):'));
assert.ok(scopedText.includes('Checks: 1 total, 0 failing'));
} finally {
cleanup(homeDir);
cleanup(projectRoot);
}
})) passed++; else failed++;
if (test('detects marketplace-installed Claude plugins under home marketplaces/', () => { if (test('detects marketplace-installed Claude plugins under home marketplaces/', () => {
const homeDir = createTempDir('harness-audit-marketplace-home-'); const homeDir = createTempDir('harness-audit-marketplace-home-');
const projectRoot = createTempDir('harness-audit-marketplace-project-'); const projectRoot = createTempDir('harness-audit-marketplace-project-');