mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-03-31 06:03:29 +08:00
Implements three roadmap features: - Agent description compression (#491): New `agent-compress` module with catalog/summary/full compression modes and lazy-loading. Reduces ~26k token agent descriptions to ~2-3k catalog entries for context efficiency. - Inspection logic (#485): New `inspection` module that detects recurring failure patterns in skill_runs. Groups by skill + normalized failure reason, generates structured reports with suggested remediation actions. Configurable threshold (default: 3 failures). - Governance event capture hook (#482): PreToolUse/PostToolUse hook that detects secrets, policy violations, approval-required commands, and elevated privilege usage. Gated behind ECC_GOVERNANCE_CAPTURE=1 flag. Writes to governance_events table via JSON-line stderr output. 59 new tests (16 + 16 + 27), all passing.
This commit is contained in:
280
scripts/hooks/governance-capture.js
Normal file
280
scripts/hooks/governance-capture.js
Normal file
@@ -0,0 +1,280 @@
|
||||
#!/usr/bin/env node

/**
 * Governance Event Capture Hook
 *
 * PreToolUse/PostToolUse hook that detects governance-relevant events
 * and writes them to the governance_events table in the state store.
 *
 * Captured event types:
 * - secret_detected: Hardcoded secrets in tool input/output
 * - policy_violation: Actions that violate configured policies
 * - security_finding: Security-relevant tool invocations
 * - approval_requested: Operations requiring explicit approval
 *
 * Enable: Set ECC_GOVERNANCE_CAPTURE=1
 * Configure session: Set ECC_SESSION_ID for session correlation
 */

'use strict';

const crypto = require('crypto');

// Cap on buffered stdin (1 MiB) — input beyond this is truncated, not an error.
const MAX_STDIN = 1024 * 1024;

// Patterns that indicate potential hardcoded secrets.
// Note: none use the /g flag, so .test() is stateless and safe to reuse.
const SECRET_PATTERNS = [
  { name: 'aws_key', pattern: /(?:AKIA|ASIA)[A-Z0-9]{16}/i },
  { name: 'generic_secret', pattern: /(?:secret|password|token|api[_-]?key)\s*[:=]\s*["'][^"']{8,}/i },
  { name: 'private_key', pattern: /-----BEGIN (?:RSA |EC |DSA )?PRIVATE KEY-----/ },
  { name: 'jwt', pattern: /eyJ[A-Za-z0-9_-]{10,}\.eyJ[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}/ },
  { name: 'github_token', pattern: /gh[pousr]_[A-Za-z0-9_]{36,}/ },
];

// Tool names that represent security-relevant operations
const SECURITY_RELEVANT_TOOLS = new Set([
  'Bash', // Could execute arbitrary commands
]);

// Commands that require governance approval
const APPROVAL_COMMANDS = [
  /git\s+push\s+.*--force/,
  /git\s+reset\s+--hard/,
  /rm\s+-rf?\s/,
  /DROP\s+(?:TABLE|DATABASE)/i,
  /DELETE\s+FROM\s+\w+\s*(?:;|$)/i,
];

// File patterns that indicate policy-sensitive paths
const SENSITIVE_PATHS = [
  /\.env(?:\.|$)/, // .env, .env.local, .env.production, ...
  /credentials/i,
  /secrets?\./i,
  /\.pem$/,
  /\.key$/,
  /id_rsa/,
];
|
||||
|
||||
/**
 * Build a unique identifier for a governance event.
 * Combines the epoch-millisecond timestamp with 4 random bytes (8 hex
 * chars) so events created in the same millisecond remain distinct.
 * @returns {string} ID of the form `gov-<millis>-<8 hex chars>`
 */
function generateEventId() {
  const millis = Date.now();
  const randomSuffix = crypto.randomBytes(4).toString('hex');
  return `gov-${millis}-${randomSuffix}`;
}
|
||||
|
||||
/**
 * Scan text content for hardcoded secrets using SECRET_PATTERNS.
 * @param {string} text - Content to scan; non-strings and '' yield no findings
 * @returns {Array<{name: string}>} One entry per secret pattern that matched
 */
function detectSecrets(text) {
  if (typeof text !== 'string' || text === '') return [];

  return SECRET_PATTERNS
    .filter(({ pattern }) => pattern.test(text))
    .map(({ name }) => ({ name }));
}
|
||||
|
||||
/**
 * List the approval-required command patterns matched by a shell command.
 * @param {string} command - Shell command text; non-strings and '' yield []
 * @returns {Array<{pattern: string}>} Source text of each matched pattern
 */
function detectApprovalRequired(command) {
  if (typeof command !== 'string' || command === '') return [];

  return APPROVAL_COMMANDS
    .filter((pattern) => pattern.test(command))
    .map((pattern) => ({ pattern: pattern.source }));
}
|
||||
|
||||
/**
 * Decide whether a file path matches any policy-sensitive pattern
 * (env files, credentials, key material, etc.).
 * @param {string} filePath - Path to check; non-strings and '' are not sensitive
 * @returns {boolean} True when the path is policy-sensitive
 */
function detectSensitivePath(filePath) {
  if (typeof filePath !== 'string' || filePath === '') return false;

  for (const pattern of SENSITIVE_PATHS) {
    if (pattern.test(filePath)) return true;
  }
  return false;
}
|
||||
|
||||
/**
 * Analyze a hook input payload and return governance events to capture.
 *
 * Four checks run in order: secret detection (input + output), approval-
 * required Bash commands, writes to policy-sensitive paths, and elevated-
 * privilege Bash usage (post phase only).
 *
 * @param {Object} input - Parsed hook input (tool_name, tool_input, tool_output)
 * @param {Object} [context] - Additional context (sessionId, hookPhase)
 * @returns {Array<Object>} Array of governance event objects
 */
function analyzeForGovernanceEvents(input, context = {}) {
  const events = [];
  const toolName = input.tool_name || '';
  const toolInput = input.tool_input || {};
  const toolOutput = typeof input.tool_output === 'string' ? input.tool_output : '';
  const sessionId = context.sessionId || null;
  const hookPhase = context.hookPhase || 'unknown';

  // 1. Secret detection in tool input/output content
  const inputText = typeof toolInput === 'object'
    ? JSON.stringify(toolInput)
    : String(toolInput);

  const inputSecrets = detectSecrets(inputText);
  const outputSecrets = detectSecrets(toolOutput);
  const allSecrets = [...inputSecrets, ...outputSecrets];

  if (allSecrets.length > 0) {
    // Fix: previously reported 'input' even when secrets appeared in BOTH
    // streams; now 'both' is reported in that case. Also dedupe type names
    // so the same pattern matching both streams is listed once.
    const location = inputSecrets.length > 0 && outputSecrets.length > 0
      ? 'both'
      : (inputSecrets.length > 0 ? 'input' : 'output');

    events.push({
      id: generateEventId(),
      sessionId,
      eventType: 'secret_detected',
      payload: {
        toolName,
        hookPhase,
        secretTypes: [...new Set(allSecrets.map(s => s.name))],
        location,
        severity: 'critical',
      },
      resolvedAt: null,
      resolution: null,
    });
  }

  // 2. Approval-required commands (Bash only)
  if (toolName === 'Bash') {
    const command = toolInput.command || '';
    const approvalFindings = detectApprovalRequired(command);

    if (approvalFindings.length > 0) {
      events.push({
        id: generateEventId(),
        sessionId,
        eventType: 'approval_requested',
        payload: {
          toolName,
          hookPhase,
          // Commands are truncated to 200 chars to bound payload size.
          command: command.slice(0, 200),
          matchedPatterns: approvalFindings.map(f => f.pattern),
          severity: 'high',
        },
        resolvedAt: null,
        resolution: null,
      });
    }
  }

  // 3. Policy violation: writing to sensitive paths
  const filePath = toolInput.file_path || toolInput.path || '';
  if (filePath && detectSensitivePath(filePath)) {
    events.push({
      id: generateEventId(),
      sessionId,
      eventType: 'policy_violation',
      payload: {
        toolName,
        hookPhase,
        filePath: filePath.slice(0, 200),
        reason: 'sensitive_file_access',
        severity: 'warning',
      },
      resolvedAt: null,
      resolution: null,
    });
  }

  // 4. Security-relevant tool usage tracking (only after the tool ran,
  // i.e. post phase, so we record commands that actually executed)
  if (SECURITY_RELEVANT_TOOLS.has(toolName) && hookPhase === 'post') {
    const command = toolInput.command || '';
    const hasElevated = /sudo\s/.test(command) || /chmod\s/.test(command) || /chown\s/.test(command);

    if (hasElevated) {
      events.push({
        id: generateEventId(),
        sessionId,
        eventType: 'security_finding',
        payload: {
          toolName,
          hookPhase,
          command: command.slice(0, 200),
          reason: 'elevated_privilege_command',
          severity: 'medium',
        },
        resolvedAt: null,
        resolution: null,
      });
    }
  }

  return events;
}
|
||||
|
||||
/**
 * Core hook logic — exported so run-with-flags.js can call directly.
 *
 * Pass-through by design: the raw input is always returned unchanged so
 * the tool pipeline is never blocked, even on malformed JSON.
 *
 * @param {string} rawInput - Raw JSON string from stdin
 * @returns {string} The original input (pass-through)
 */
function run(rawInput) {
  // Feature-flag gate: only active when ECC_GOVERNANCE_CAPTURE=1.
  const flag = String(process.env.ECC_GOVERNANCE_CAPTURE || '').toLowerCase();
  if (flag !== '1') return rawInput;

  try {
    const parsed = JSON.parse(rawInput);
    const rawPhase = process.env.CLAUDE_HOOK_EVENT_NAME || 'unknown';

    const events = analyzeForGovernanceEvents(parsed, {
      sessionId: process.env.ECC_SESSION_ID || null,
      hookPhase: rawPhase.startsWith('Pre') ? 'pre' : 'post',
    });

    // Write events to stderr as JSON-lines for the caller to capture.
    // The state store write is async and handled by a separate process
    // to avoid blocking the hook pipeline.
    events.forEach((event) => {
      process.stderr.write(`[governance] ${JSON.stringify(event)}\n`);
    });
  } catch {
    // Silently ignore parse errors — never block the tool pipeline.
  }

  return rawInput;
}
|
||||
|
||||
// ── stdin entry point ────────────────────────────────
// When executed directly (not require()d), read the hook payload from
// stdin, run the governance analysis, and echo the input to stdout so
// the hook acts as a pass-through filter.
if (require.main === module) {
  let raw = '';
  process.stdin.setEncoding('utf8');
  process.stdin.on('data', chunk => {
    // Buffer at most MAX_STDIN bytes; excess input is truncated rather
    // than treated as an error, to bound memory usage.
    if (raw.length < MAX_STDIN) {
      const remaining = MAX_STDIN - raw.length;
      raw += chunk.substring(0, remaining);
    }
  });

  process.stdin.on('end', () => {
    const result = run(raw);
    process.stdout.write(result);
  });
}
|
||||
|
||||
// Public API — detectors and pattern tables are exported individually so
// tests can exercise them without going through the stdin entry point.
module.exports = {
  APPROVAL_COMMANDS,
  SECRET_PATTERNS,
  SECURITY_RELEVANT_TOOLS,
  SENSITIVE_PATHS,
  analyzeForGovernanceEvents,
  detectApprovalRequired,
  detectSecrets,
  detectSensitivePath,
  generateEventId,
  run,
};
|
||||
230
scripts/lib/agent-compress.js
Normal file
230
scripts/lib/agent-compress.js
Normal file
@@ -0,0 +1,230 @@
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
/**
 * Parse YAML frontmatter from a markdown string.
 *
 * Supports the minimal subset used by agent files: `key: value` pairs,
 * JSON-style arrays, and quoted scalars. Not a full YAML parser.
 *
 * @param {string} content - Markdown, optionally starting with a `---` block
 * @returns {{frontmatter: Object, body: string}} Parsed keys plus the body;
 *   when no frontmatter delimiters are found, frontmatter is {} and body
 *   is the whole input
 */
function parseFrontmatter(content) {
  const match = content.match(/^---\r?\n([\s\S]*?)\r?\n---\r?\n([\s\S]*)$/);
  if (!match) {
    return { frontmatter: {}, body: content };
  }

  const frontmatter = {};
  for (const line of match[1].split('\n')) {
    const colonIdx = line.indexOf(':');
    if (colonIdx === -1) continue;

    const key = line.slice(0, colonIdx).trim();
    let value = line.slice(colonIdx + 1).trim();

    // Handle JSON arrays (e.g. tools: ["Read", "Grep"])
    if (value.startsWith('[') && value.endsWith(']')) {
      try {
        value = JSON.parse(value);
      } catch {
        // keep as string
      }
    }

    // Strip surrounding quotes.
    // Fix: also handle single-quoted YAML scalars (e.g. model: 'opus'),
    // which previously kept their quotes; the length guard also prevents
    // a lone quote character from collapsing to ''.
    if (typeof value === 'string' && value.length >= 2) {
      const quote = value[0];
      if ((quote === '"' || quote === "'") && value.endsWith(quote)) {
        value = value.slice(1, -1);
      }
    }

    frontmatter[key] = value;
  }

  return { frontmatter, body: match[2] };
}
|
||||
|
||||
/**
 * Extract the first meaningful paragraph from agent body as a summary.
 * Headings and blank lines terminate paragraphs; code-fence markers,
 * bold list items, and table rows are skipped. Returns up to
 * maxSentences sentences of the first remaining paragraph.
 * @param {string} body - Markdown body text
 * @param {number} [maxSentences=1] - Sentence cap for the summary
 * @returns {string} Summary text, or '' when no paragraph exists
 */
function extractSummary(body, maxSentences = 1) {
  const paragraphs = [];
  let buffer = [];

  const flush = () => {
    if (buffer.length > 0) {
      paragraphs.push(buffer.join(' '));
      buffer = [];
    }
  };

  for (const rawLine of body.split('\n')) {
    const line = rawLine.trim();

    // Blank lines and headings end the current paragraph.
    if (line === '' || line.startsWith('#')) {
      flush();
      continue;
    }

    // Skip structural markdown without ending the paragraph.
    const isStructural =
      line.startsWith('```') || line.startsWith('- **') || line.startsWith('|');
    if (isStructural) continue;

    buffer.push(line);
  }
  flush();

  const firstParagraph = paragraphs.find(p => p.length > 0);
  if (!firstParagraph) {
    return '';
  }

  // Split on sentence terminators; fall back to the whole paragraph
  // when it contains none.
  const sentences = firstParagraph.match(/[^.!?]+[.!?]+/g) || [firstParagraph];
  return sentences.slice(0, maxSentences).join(' ').trim();
}
|
||||
|
||||
/**
 * Load and parse a single agent markdown file.
 * @param {string} filePath - Path to the agent .md file
 * @returns {Object} Agent record: fileName (stem), name, description,
 *   tools, model (default 'sonnet'), body, and byteSize of the raw file
 */
function loadAgent(filePath) {
  const raw = fs.readFileSync(filePath, 'utf8');
  const parsed = parseFrontmatter(raw);
  const stem = path.basename(filePath, '.md');
  const meta = parsed.frontmatter;

  return {
    fileName: stem,
    // Frontmatter name wins; fall back to the file stem.
    name: meta.name || stem,
    description: meta.description || '',
    tools: Array.isArray(meta.tools) ? meta.tools : [],
    model: meta.model || 'sonnet',
    body: parsed.body,
    byteSize: Buffer.byteLength(raw, 'utf8'),
  };
}
|
||||
|
||||
/**
 * Load all agent definitions (*.md) from a directory, sorted by file name.
 * @param {string} agentsDir - Directory containing agent .md files
 * @returns {Array<Object>} Parsed agents; empty when the directory is missing
 */
function loadAgents(agentsDir) {
  if (!fs.existsSync(agentsDir)) {
    return [];
  }

  const markdownFiles = fs
    .readdirSync(agentsDir)
    .filter(entry => entry.endsWith('.md'))
    .sort();

  return markdownFiles.map(entry => loadAgent(path.join(agentsDir, entry)));
}
|
||||
|
||||
/**
 * Reduce an agent to its catalog entry — the minimal metadata needed
 * for agent selection (no body text).
 * @param {Object} agent - Full agent record from loadAgent()
 * @returns {{name, description, tools, model}} Catalog entry
 */
function compressToCatalog(agent) {
  const { name, description, tools, model } = agent;
  return { name, description, tools, model };
}
|
||||
|
||||
/**
 * Reduce an agent to a summary entry: catalog metadata plus the first
 * paragraph of its body. More context than 'catalog', less than 'full'.
 * @param {Object} agent - Full agent record from loadAgent()
 * @returns {{name, description, tools, model, summary}} Summary entry
 */
function compressToSummary(agent) {
  const { name, description, tools, model } = agent;
  return {
    name,
    description,
    tools,
    model,
    summary: extractSummary(agent.body),
  };
}
|
||||
|
||||
/**
 * Build a full compressed catalog from a directory of agents.
 *
 * Modes:
 * - 'catalog': name, description, tools, model only (~2-3k tokens for 27 agents)
 * - 'summary': catalog + first paragraph summary (~4-5k tokens)
 * - 'full': no compression, full body included
 *
 * @param {string} agentsDir - Directory containing agent .md files
 * @param {Object} [options]
 * @param {string} [options.mode='catalog'] - Compression mode
 * @param {Function} [options.filter] - Optional predicate over loaded agents
 * @returns {{agents: Array, stats: Object}} Compressed entries plus
 *   { totalAgents, originalBytes, compressedBytes, compressedTokenEstimate, mode }
 */
function buildAgentCatalog(agentsDir, options = {}) {
  const mode = options.mode || 'catalog';
  const filter = options.filter || null;

  let loaded = loadAgents(agentsDir);
  if (typeof filter === 'function') {
    loaded = loaded.filter(filter);
  }

  const originalBytes = loaded.reduce((total, agent) => total + agent.byteSize, 0);

  let compressed;
  switch (mode) {
    case 'catalog':
      compressed = loaded.map(compressToCatalog);
      break;
    case 'summary':
      compressed = loaded.map(compressToSummary);
      break;
    default:
      // 'full' (and any unrecognized mode): keep the complete body.
      compressed = loaded.map(({ name, description, tools, model, body }) => ({
        name,
        description,
        tools,
        model,
        body,
      }));
  }

  const serialized = JSON.stringify(compressed);

  return {
    agents: compressed,
    stats: {
      totalAgents: loaded.length,
      originalBytes,
      compressedBytes: Buffer.byteLength(serialized, 'utf8'),
      // Rough token estimate: ~4 chars per token for English text.
      compressedTokenEstimate: Math.ceil(serialized.length / 4),
      mode,
    },
  };
}
|
||||
|
||||
/**
 * Lazy-load a single agent's full content by name from a directory.
 *
 * Hardened against path traversal: names containing path separators or
 * '..' are rejected (null) instead of being joined into a path that
 * could escape agentsDir.
 *
 * @param {string} agentsDir - Directory containing agent .md files
 * @param {string} agentName - Agent file stem (without the .md extension)
 * @returns {Object|null} Full agent record, or null if invalid/not found
 */
function lazyLoadAgent(agentsDir, agentName) {
  // Fix: reject names that could resolve outside agentsDir
  // (e.g. '../secrets' or 'a/b').
  if (
    typeof agentName !== 'string' ||
    agentName === '' ||
    agentName.includes('/') ||
    agentName.includes('\\') ||
    agentName.includes('..')
  ) {
    return null;
  }

  const filePath = path.join(agentsDir, `${agentName}.md`);
  if (!fs.existsSync(filePath)) {
    return null;
  }
  return loadAgent(filePath);
}
|
||||
|
||||
// Public API — parsing and compression primitives are exported
// individually so callers can compose them (e.g. catalog now, lazy-load
// a full agent later).
module.exports = {
  buildAgentCatalog,
  compressToCatalog,
  compressToSummary,
  extractSummary,
  lazyLoadAgent,
  loadAgent,
  loadAgents,
  parseFrontmatter,
};
|
||||
212
scripts/lib/inspection.js
Normal file
212
scripts/lib/inspection.js
Normal file
@@ -0,0 +1,212 @@
|
||||
'use strict';

// Minimum number of grouped failures before a pattern is reported.
const DEFAULT_FAILURE_THRESHOLD = 3;
// Number of most-recent skill runs pulled from the store per inspection.
const DEFAULT_WINDOW_SIZE = 50;

// Outcome values (compared lowercased) that count as a failed run.
const FAILURE_OUTCOMES = new Set(['failure', 'failed', 'error']);
|
||||
|
||||
/**
 * Normalize a failure reason string so semantically-identical failures
 * group together: lowercases, masks timestamps/UUIDs/file paths with
 * placeholder tokens, and collapses whitespace.
 * @param {string} reason - Raw failure reason (may be null/non-string)
 * @returns {string} Normalized reason, or 'unknown' for empty input
 */
function normalizeFailureReason(reason) {
  if (typeof reason !== 'string' || reason === '') {
    return 'unknown';
  }

  let normalized = reason.trim().toLowerCase();
  // ISO timestamps → <timestamp> (input already lowercased, so t/z not T/Z).
  normalized = normalized.replace(/\d{4}-\d{2}-\d{2}[t ]\d{2}:\d{2}:\d{2}[.\dz]*/g, '<timestamp>');
  // UUIDs → <uuid> (already lowercased).
  normalized = normalized.replace(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/g, '<uuid>');
  // Absolute file paths → <path>.
  normalized = normalized.replace(/\/[\w./-]+/g, '<path>');
  // Collapse whitespace runs and trim again.
  return normalized.replace(/\s+/g, ' ').trim();
}
|
||||
|
||||
/**
 * Group skill runs by skill ID and normalized failure reason.
 * Runs whose outcome is not a failure outcome are ignored.
 *
 * @param {Array} skillRuns - Skill run objects
 * @returns {Map<string, { skillId: string, normalizedReason: string, runs: Array }>}
 *   Keyed by `${skillId}::${normalizedReason}`
 */
function groupFailures(skillRuns) {
  const groups = new Map();

  for (const run of skillRuns) {
    const outcome = String(run.outcome || '').toLowerCase();
    if (!FAILURE_OUTCOMES.has(outcome)) continue;

    const normalizedReason = normalizeFailureReason(run.failureReason);
    const key = `${run.skillId}::${normalizedReason}`;

    let group = groups.get(key);
    if (!group) {
      group = { skillId: run.skillId, normalizedReason, runs: [] };
      groups.set(key, group);
    }
    group.runs.push(run);
  }

  return groups;
}
|
||||
|
||||
/**
 * Detect recurring failure patterns from skill runs.
 *
 * Failures are grouped by skill + normalized reason; groups below the
 * threshold are dropped. Results sort by count descending, ties broken
 * by most recent occurrence.
 *
 * @param {Array} skillRuns - Array of skill run objects (newest first)
 * @param {Object} [options]
 * @param {number} [options.threshold=3] - Minimum failure count to trigger pattern detection
 * @returns {Array<Object>} Array of detected patterns sorted by count descending
 */
function detectPatterns(skillRuns, options = {}) {
  const threshold = options.threshold ?? DEFAULT_FAILURE_THRESHOLD;
  const patterns = [];

  for (const group of groupFailures(skillRuns).values()) {
    if (group.runs.length < threshold) continue;

    // Newest-first by createdAt (ISO strings compare lexicographically).
    const byRecency = [...group.runs].sort(
      (a, b) => (b.createdAt || '').localeCompare(a.createdAt || '')
    );

    // Deduplicate a mapped field, dropping falsy entries first.
    const unique = (values) => [...new Set(values.filter(Boolean))];

    patterns.push({
      skillId: group.skillId,
      normalizedReason: group.normalizedReason,
      count: group.runs.length,
      firstSeen: byRecency[byRecency.length - 1].createdAt || null,
      lastSeen: byRecency[0].createdAt || null,
      sessionIds: unique(byRecency.map(r => r.sessionId)),
      versions: unique(byRecency.map(r => r.skillVersion)),
      // Distinct raw reasons behind this normalized group.
      rawReasons: unique(byRecency.map(r => r.failureReason)),
      runIds: byRecency.map(r => r.id),
    });
  }

  // Count descending, then lastSeen descending.
  return patterns.sort((a, b) => {
    if (b.count !== a.count) return b.count - a.count;
    return (b.lastSeen || '').localeCompare(a.lastSeen || '');
  });
}
|
||||
|
||||
/**
 * Generate an inspection report from detected patterns.
 *
 * @param {Array} patterns - Output from detectPatterns()
 * @param {Object} [options]
 * @param {string} [options.generatedAt] - ISO timestamp for the report
 * @returns {Object} Report: status 'clean' when no patterns exist,
 *   otherwise 'attention_needed' with per-pattern detail + suggestions
 */
function generateReport(patterns, options = {}) {
  const generatedAt = options.generatedAt || new Date().toISOString();

  if (patterns.length === 0) {
    return {
      generatedAt,
      status: 'clean',
      patternCount: 0,
      patterns: [],
      summary: 'No recurring failure patterns detected.',
    };
  }

  const totalFailures = patterns.reduce((sum, p) => sum + p.count, 0);
  const affectedSkills = [...new Set(patterns.map(p => p.skillId))];

  const detail = patterns.map(p => ({
    skillId: p.skillId,
    normalizedReason: p.normalizedReason,
    count: p.count,
    firstSeen: p.firstSeen,
    lastSeen: p.lastSeen,
    sessionIds: p.sessionIds,
    versions: p.versions,
    // Cap raw-reason samples at 5 to keep the report compact.
    rawReasons: p.rawReasons.slice(0, 5),
    suggestedAction: suggestAction(p),
  }));

  return {
    generatedAt,
    status: 'attention_needed',
    patternCount: patterns.length,
    totalFailures,
    affectedSkills,
    patterns: detail,
    summary: `Found ${patterns.length} recurring failure pattern(s) across ${affectedSkills.length} skill(s) (${totalFailures} total failures).`,
  };
}
|
||||
|
||||
/**
 * Suggest a remediation action based on pattern characteristics.
 * Keyword heuristics run in priority order; multi-version spread is the
 * last heuristic before the generic fallback.
 * @param {Object} pattern - Detected pattern ({ normalizedReason, versions })
 * @returns {string} Human-readable remediation suggestion
 */
function suggestAction(pattern) {
  const reason = pattern.normalizedReason;
  const mentions = (...terms) => terms.some(term => reason.includes(term));

  if (mentions('timeout')) {
    return 'Increase timeout or optimize skill execution time.';
  }
  if (mentions('permission', 'denied', 'auth')) {
    return 'Check tool permissions and authentication configuration.';
  }
  if (mentions('not found', 'missing')) {
    return 'Verify required files/dependencies exist before skill execution.';
  }
  if (mentions('parse', 'syntax', 'json')) {
    return 'Review input/output format expectations and add validation.';
  }
  if (pattern.versions.length > 1) {
    return 'Failure spans multiple versions. Consider rollback to last stable version.';
  }
  return 'Investigate root cause and consider adding error handling.';
}
|
||||
|
||||
/**
 * Run full inspection pipeline: query skill runs, detect patterns, generate report.
 *
 * @param {Object} store - State store instance exposing getStatus()
 *   (previous JSDoc named listRecentSessions/getSessionDetail, which the
 *   code never calls — corrected to match the actual contract)
 * @param {Object} [options]
 * @param {number} [options.threshold] - Minimum failure count
 * @param {number} [options.windowSize] - Number of recent skill runs to analyze
 * @returns {Object} Inspection report
 */
function inspect(store, options = {}) {
  const windowSize = options.windowSize ?? DEFAULT_WINDOW_SIZE;
  const threshold = options.threshold ?? DEFAULT_FAILURE_THRESHOLD;

  const status = store.getStatus({ recentSkillRunLimit: windowSize });
  // Fix: guard against a status payload without a skillRuns section —
  // `status.skillRuns.recent` previously threw a TypeError.
  const skillRuns = status.skillRuns?.recent ?? [];

  const patterns = detectPatterns(skillRuns, { threshold });
  return generateReport(patterns, { generatedAt: status.generatedAt });
}
|
||||
|
||||
// Public API — pipeline stages are exported individually so tests can
// drive detectPatterns/generateReport without a live state store.
module.exports = {
  DEFAULT_FAILURE_THRESHOLD,
  DEFAULT_WINDOW_SIZE,
  detectPatterns,
  generateReport,
  groupFailures,
  inspect,
  normalizeFailureReason,
  suggestAction,
};
|
||||
Reference in New Issue
Block a user