Mirror of https://github.com/affaan-m/everything-claude-code.git (synced 2026-04-12 12:43:32 +08:00)

Compare commits: 6f94c2e28f...v1.6.0 (5 commits)

- db27ba1eb2
- 3c833d8922
- 156b89ed30
- 41ce1a52e5
- 91b7ccf56f
README.md (16 lines changed)

@@ -13,7 +13,7 @@

-> **42K+ stars** | **5K+ forks** | **24 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**
+> **50K+ stars** | **6K+ forks** | **30 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**

 ---

@@ -143,7 +143,7 @@ For manual install instructions see the README in the `rules/` folder.
 /plugin list everything-claude-code@everything-claude-code
 ```

-✨ **That's it!** You now have access to 13 agents, 43 skills, and 31 commands.
+✨ **That's it!** You now have access to 13 agents, 44 skills, and 32 commands.

 ---

@@ -251,6 +251,7 @@ everything-claude-code/
 |   |-- regex-vs-llm-structured-text/ # Decision framework: regex vs LLM for text parsing (NEW)
 |   |-- swift-actor-persistence/      # Thread-safe Swift data persistence with actors (NEW)
 |   |-- swift-protocol-di-testing/    # Protocol-based DI for testable Swift code (NEW)
+|   |-- search-first/                 # Research-before-coding workflow (NEW)

 |-- commands/                         # Slash commands for quick execution
 |   |-- tdd.md                        # /tdd - Test-driven development
@@ -260,6 +261,7 @@ everything-claude-code/
 |   |-- build-fix.md                  # /build-fix - Fix build errors
 |   |-- refactor-clean.md             # /refactor-clean - Dead code removal
 |   |-- learn.md                      # /learn - Extract patterns mid-session (Longform Guide)
+|   |-- learn-eval.md                 # /learn-eval - Extract, evaluate, and save patterns (NEW)
 |   |-- checkpoint.md                 # /checkpoint - Save verification state (Longform Guide)
 |   |-- verify.md                     # /verify - Run verification loop (Longform Guide)
 |   |-- setup-pm.md                   # /setup-pm - Configure package manager
@@ -698,11 +700,12 @@ Each component is fully independent.
 </details>

 <details>
-<summary><b>Does this work with Cursor / OpenCode?</b></summary>
+<summary><b>Does this work with Cursor / OpenCode / Codex?</b></summary>

 Yes. ECC is cross-platform:
 - **Cursor**: Pre-translated configs in `.cursor/`. See [Cursor IDE Support](#cursor-ide-support).
 - **OpenCode**: Full plugin support in `.opencode/`. See [OpenCode Support](#-opencode-support).
+- **Codex**: First-class support with adapter drift guards and SessionStart fallback. See PR [#257](https://github.com/affaan-m/everything-claude-code/pull/257).
 - **Claude Code**: Native — this is the primary target.
 </details>

@@ -807,8 +810,8 @@ The configuration is automatically detected from `.opencode/opencode.json`.
 | Feature | Claude Code | OpenCode | Status |
 |---------|-------------|----------|--------|
 | Agents | ✅ 13 agents | ✅ 12 agents | **Claude Code leads** |
-| Commands | ✅ 31 commands | ✅ 24 commands | **Claude Code leads** |
-| Skills | ✅ 43 skills | ✅ 16 skills | **Claude Code leads** |
+| Commands | ✅ 32 commands | ✅ 24 commands | **Claude Code leads** |
+| Skills | ✅ 44 skills | ✅ 16 skills | **Claude Code leads** |
 | Hooks | ✅ 3 phases | ✅ 20+ events | **OpenCode has more!** |
 | Rules | ✅ 8 rules | ✅ 8 rules | **Full parity** |
 | MCP Servers | ✅ Full | ✅ Full | **Full parity** |
@@ -828,7 +831,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t

 **Additional OpenCode events**: `file.edited`, `file.watcher.updated`, `message.updated`, `lsp.client.diagnostics`, `tui.toast.show`, and more.

-### Available Commands (31)
+### Available Commands (32)

 | Command | Description |
 |---------|-------------|
@@ -862,6 +865,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
 | `/instinct-import` | Import instincts |
 | `/instinct-export` | Export instincts |
 | `/evolve` | Cluster instincts into skills |
+| `/learn-eval` | Extract and evaluate patterns before saving |
 | `/setup-pm` | Configure package manager |

 ### Plugin Installation
scripts/codemaps/generate.ts (new file, 330 lines)
@@ -0,0 +1,330 @@
#!/usr/bin/env node
/**
 * scripts/codemaps/generate.ts
 *
 * Codemap Generator for everything-claude-code (ECC)
 *
 * Scans the current working directory and generates architectural
 * codemap documentation under docs/CODEMAPS/ as specified by the
 * doc-updater agent.
 *
 * Usage:
 *   npx tsx scripts/codemaps/generate.ts [srcDir]
 *
 * Output:
 *   docs/CODEMAPS/INDEX.md
 *   docs/CODEMAPS/frontend.md
 *   docs/CODEMAPS/backend.md
 *   docs/CODEMAPS/database.md
 *   docs/CODEMAPS/integrations.md
 *   docs/CODEMAPS/workers.md
 */

import fs from 'fs';
import path from 'path';

// ---------------------------------------------------------------------------
// Config
// ---------------------------------------------------------------------------

const ROOT = process.cwd();
const SRC_DIR = process.argv[2] ? path.resolve(process.argv[2]) : ROOT;
const OUTPUT_DIR = path.join(ROOT, 'docs', 'CODEMAPS');
const TODAY = new Date().toISOString().split('T')[0];

// Patterns used to classify files into codemap areas
const AREA_PATTERNS: Record<string, RegExp[]> = {
  frontend: [
    /\/(app|pages|components|hooks|contexts|ui|views|layouts|styles)\//i,
    /\.(tsx|jsx|css|scss|sass|less|vue|svelte)$/i,
  ],
  backend: [
    /\/(api|routes|controllers|middleware|server|services|handlers)\//i,
    /\.(route|controller|handler|middleware|service)\.(ts|js)$/i,
  ],
  database: [
    /\/(models|schemas|migrations|prisma|drizzle|db|database|repositories)\//i,
    /\.(model|schema|migration|seed)\.(ts|js)$/i,
    /prisma\/schema\.prisma$/,
    /schema\.sql$/,
  ],
  integrations: [
    /\/(integrations?|third-party|external|plugins?|adapters?|connectors?)\//i,
    /\.(integration|adapter|connector)\.(ts|js)$/i,
  ],
  workers: [
    /\/(workers?|jobs?|queues?|tasks?|cron|background)\//i,
    /\.(worker|job|queue|task|cron)\.(ts|js)$/i,
  ],
};

// ---------------------------------------------------------------------------
// File System Helpers
// ---------------------------------------------------------------------------

/** Recursively collect all files under a directory, skipping common noise dirs. */
function walkDir(dir: string, results: string[] = []): string[] {
  const SKIP = new Set([
    'node_modules', '.git', '.next', '.nuxt', 'dist', 'build', 'out',
    '.turbo', 'coverage', '.cache', '__pycache__', '.venv', 'venv',
  ]);

  let entries: fs.Dirent[];
  try {
    entries = fs.readdirSync(dir, { withFileTypes: true });
  } catch {
    return results;
  }

  for (const entry of entries) {
    if (SKIP.has(entry.name)) continue;
    const fullPath = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      walkDir(fullPath, results);
    } else if (entry.isFile()) {
      results.push(fullPath);
    }
  }
  return results;
}

/** Return path relative to ROOT, always using forward slashes. */
function rel(p: string): string {
  return path.relative(ROOT, p).replace(/\\/g, '/');
}

// ---------------------------------------------------------------------------
// Analysis
// ---------------------------------------------------------------------------

interface AreaInfo {
  name: string;
  files: string[];
  entryPoints: string[];
  directories: string[];
}

function classifyFiles(allFiles: string[]): Record<string, AreaInfo> {
  const areas: Record<string, AreaInfo> = {
    frontend: { name: 'Frontend', files: [], entryPoints: [], directories: [] },
    backend: { name: 'Backend/API', files: [], entryPoints: [], directories: [] },
    database: { name: 'Database', files: [], entryPoints: [], directories: [] },
    integrations: { name: 'Integrations', files: [], entryPoints: [], directories: [] },
    workers: { name: 'Workers', files: [], entryPoints: [], directories: [] },
  };

  for (const file of allFiles) {
    const relPath = rel(file);
    for (const [area, patterns] of Object.entries(AREA_PATTERNS)) {
      if (patterns.some((p) => p.test(relPath))) {
        areas[area].files.push(relPath);
        break;
      }
    }
  }

  // Derive unique directories and entry points per area
  for (const area of Object.values(areas)) {
    const dirs = new Set(area.files.map((f) => path.dirname(f)));
    area.directories = [...dirs].sort();

    area.entryPoints = area.files
      .filter((f) => /index\.(ts|tsx|js|jsx)$/.test(f) || /main\.(ts|tsx|js|jsx)$/.test(f))
      .slice(0, 10);
  }

  return areas;
}

/** Count lines in a file (returns 0 on error). */
function lineCount(p: string): number {
  try {
    const content = fs.readFileSync(p, 'utf8');
    return content.split('\n').length;
  } catch {
    return 0;
  }
}

/** Build a simple directory tree ASCII diagram (max 3 levels deep). */
function buildTree(dir: string, prefix = '', depth = 0): string {
  if (depth > 2) return '';
  const SKIP = new Set(['node_modules', '.git', 'dist', 'build', '.next', 'coverage']);

  let entries: fs.Dirent[];
  try {
    entries = fs.readdirSync(dir, { withFileTypes: true });
  } catch {
    return '';
  }

  const dirs = entries.filter((e) => e.isDirectory() && !SKIP.has(e.name));
  const files = entries.filter((e) => e.isFile());

  let result = '';
  const items = [...dirs, ...files];
  items.forEach((entry, i) => {
    const isLast = i === items.length - 1;
    const connector = isLast ? '└── ' : '├── ';
    result += `${prefix}${connector}${entry.name}\n`;
    if (entry.isDirectory()) {
      const newPrefix = prefix + (isLast ? '    ' : '│   ');
      result += buildTree(path.join(dir, entry.name), newPrefix, depth + 1);
    }
  });
  return result;
}

// ---------------------------------------------------------------------------
// Markdown Generators
// ---------------------------------------------------------------------------

function generateAreaDoc(areaKey: string, area: AreaInfo, allFiles: string[]): string {
  const fileCount = area.files.length;
  const totalLines = area.files.reduce((sum, f) => sum + lineCount(path.join(ROOT, f)), 0);

  const entrySection = area.entryPoints.length > 0
    ? area.entryPoints.map((e) => `- \`${e}\``).join('\n')
    : '- *(no index/main entry points detected)*';

  const dirSection = area.directories.slice(0, 20)
    .map((d) => `- \`${d}/\``)
    .join('\n') || '- *(no dedicated directories detected)*';

  const fileSection = area.files.slice(0, 30)
    .map((f) => `| \`${f}\` | ${lineCount(path.join(ROOT, f))} |`)
    .join('\n');

  const moreFiles = area.files.length > 30
    ? `\n*...and ${area.files.length - 30} more files*`
    : '';

  return `# ${area.name} Codemap

**Last Updated:** ${TODAY}
**Total Files:** ${fileCount}
**Total Lines:** ${totalLines}

## Entry Points

${entrySection}

## Architecture

\`\`\`
${area.name} Directory Structure
${dirSection.replace(/- `/g, '').replace(/\/`$/gm, '/')}
\`\`\`

## Key Modules

| File | Lines |
|------|-------|
${fileSection}${moreFiles}

## Data Flow

> Detected from file patterns. Review individual files for detailed data flow.

## External Dependencies

> Run \`npx jsdoc2md src/**/*.ts\` to extract JSDoc and identify external dependencies.

## Related Areas

- [INDEX](./INDEX.md) — Full overview
- [Frontend](./frontend.md)
- [Backend/API](./backend.md)
- [Database](./database.md)
- [Integrations](./integrations.md)
- [Workers](./workers.md)
`;
}

function generateIndex(areas: Record<string, AreaInfo>, allFiles: string[]): string {
  const totalFiles = allFiles.length;
  const areaRows = Object.entries(areas)
    .map(([key, area]) => `| [${area.name}](./${key}.md) | ${area.files.length} files | ${area.directories.slice(0, 3).map((d) => `\`${d}\``).join(', ') || '—'} |`)
    .join('\n');

  const topLevelTree = buildTree(SRC_DIR);

  return `# Codebase Overview — CODEMAPS Index

**Last Updated:** ${TODAY}
**Root:** \`${rel(SRC_DIR) || '.'}\`
**Total Files Scanned:** ${totalFiles}

## Areas

| Area | Size | Key Directories |
|------|------|-----------------|
${areaRows}

## Repository Structure

\`\`\`
${rel(SRC_DIR) || path.basename(SRC_DIR)}/
${topLevelTree}\`\`\`

## How to Regenerate

\`\`\`bash
npx tsx scripts/codemaps/generate.ts   # Regenerate codemaps
npx madge --image graph.svg src/       # Dependency graph (requires graphviz)
npx jsdoc2md src/**/*.ts               # Extract JSDoc
\`\`\`

## Related Documentation

- [Frontend](./frontend.md) — UI components, pages, hooks
- [Backend/API](./backend.md) — API routes, controllers, middleware
- [Database](./database.md) — Models, schemas, migrations
- [Integrations](./integrations.md) — External services & adapters
- [Workers](./workers.md) — Background jobs, queues, cron tasks
`;
}

// ---------------------------------------------------------------------------
// Main
// ---------------------------------------------------------------------------

function main(): void {
  console.log(`[generate.ts] Scanning: ${SRC_DIR}`);
  console.log(`[generate.ts] Output: ${OUTPUT_DIR}`);

  // Ensure output directory exists
  fs.mkdirSync(OUTPUT_DIR, { recursive: true });

  // Walk the directory tree
  const allFiles = walkDir(SRC_DIR);
  console.log(`[generate.ts] Found ${allFiles.length} files`);

  // Classify files into areas
  const areas = classifyFiles(allFiles);

  // Generate INDEX.md
  const indexContent = generateIndex(areas, allFiles);
  const indexPath = path.join(OUTPUT_DIR, 'INDEX.md');
  fs.writeFileSync(indexPath, indexContent, 'utf8');
  console.log(`[generate.ts] Written: ${rel(indexPath)}`);

  // Generate per-area codemaps
  for (const [key, area] of Object.entries(areas)) {
    const content = generateAreaDoc(key, area, allFiles);
    const outPath = path.join(OUTPUT_DIR, `${key}.md`);
    fs.writeFileSync(outPath, content, 'utf8');
    console.log(`[generate.ts] Written: ${rel(outPath)} (${area.files.length} files)`);
  }

  console.log('\n[generate.ts] Done! Codemaps written to docs/CODEMAPS/');
  console.log('[generate.ts] Files generated:');
  console.log('  docs/CODEMAPS/INDEX.md');
  console.log('  docs/CODEMAPS/frontend.md');
  console.log('  docs/CODEMAPS/backend.md');
  console.log('  docs/CODEMAPS/database.md');
  console.log('  docs/CODEMAPS/integrations.md');
  console.log('  docs/CODEMAPS/workers.md');
}

main();
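For reference, a typical run and the console output `main()` prints; `/path/to/repo` and the file counts below are placeholders, not captured output:

```bash
# From the repository root; scans the cwd by default, or pass a source dir.
npx tsx scripts/codemaps/generate.ts

# Expected console output (counts vary by repository):
#   [generate.ts] Scanning: /path/to/repo
#   [generate.ts] Output: /path/to/repo/docs/CODEMAPS
#   [generate.ts] Found 1234 files
#   [generate.ts] Written: docs/CODEMAPS/INDEX.md
#   [generate.ts] Written: docs/CODEMAPS/frontend.md (42 files)
#   ...
#   [generate.ts] Done! Codemaps written to docs/CODEMAPS/
```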
skills/skill-stocktake/SKILL.md (new file, 175 lines)
@@ -0,0 +1,175 @@
---
description: "Use when auditing Claude skills and commands for quality. Supports Quick Scan (changed skills only) and Full Stocktake modes with sequential subagent batch evaluation."
---

# skill-stocktake

Slash command (`/skill-stocktake`) that audits all Claude skills and commands using a quality checklist + AI holistic judgment. Supports two modes: Quick Scan for recently changed skills, and Full Stocktake for a complete review.

## Scope

The command targets the following paths **relative to the directory where it is invoked**:

| Path | Description |
|------|-------------|
| `~/.claude/skills/` | Global skills (all projects) |
| `{cwd}/.claude/skills/` | Project-level skills (if the directory exists) |

**At the start of Phase 1, the command explicitly lists which paths were found and scanned.**

### Targeting a specific project

To include project-level skills, run from that project's root directory:

```bash
cd ~/path/to/my-project
/skill-stocktake
```

If the project has no `.claude/skills/` directory, only global skills and commands are evaluated.

## Modes

| Mode | Trigger | Duration |
|------|---------|----------|
| Quick Scan | `results.json` exists (default) | 5–10 min |
| Full Stocktake | `results.json` absent, or `/skill-stocktake full` | 20–30 min |

**Results cache:** `~/.claude/skills/skill-stocktake/results.json`

## Quick Scan Flow

Re-evaluate only skills that have changed since the last run (5–10 min).

1. Read `~/.claude/skills/skill-stocktake/results.json`
2. Run: `bash ~/.claude/skills/skill-stocktake/scripts/quick-diff.sh \
   ~/.claude/skills/skill-stocktake/results.json`
   (Project dir is auto-detected from `$PWD/.claude/skills`; pass it explicitly only if needed)
3. If output is `[]`: report "No changes since last run." and stop
4. Re-evaluate only those changed files using the same Phase 2 criteria
5. Carry forward unchanged skills from previous results
6. Output only the diff
7. Run: `bash ~/.claude/skills/skill-stocktake/scripts/save-results.sh \
   ~/.claude/skills/skill-stocktake/results.json <<< "$EVAL_RESULTS"`
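A minimal end-to-end sketch of steps 2–7 above; it is illustrative, not shipped with the skill, and assumes the scripts sit at the paths shown and that the agent's re-evaluation (steps 4–6) leaves its JSON in `EVAL_RESULTS`:

```bash
#!/usr/bin/env bash
# Quick Scan driver sketch.
set -euo pipefail

RESULTS=~/.claude/skills/skill-stocktake/results.json
SCRIPTS=~/.claude/skills/skill-stocktake/scripts

# Step 2: list skills changed since the last evaluation (project dir auto-detected from $PWD).
changed=$(bash "$SCRIPTS/quick-diff.sh" "$RESULTS")

# Step 3: stop early when nothing changed.
if [[ "$changed" == "[]" ]]; then
  echo "No changes since last run."
  exit 0
fi

# Steps 4-6 happen in the agent: re-evaluate only $changed against the Phase 2 criteria.
# Step 7: merge the new verdicts back; save-results.sh stamps a fresh evaluated_at.
bash "$SCRIPTS/save-results.sh" "$RESULTS" <<< "${EVAL_RESULTS:?produced by steps 4-6}"
```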
## Full Stocktake Flow

### Phase 1 — Inventory

Run: `bash ~/.claude/skills/skill-stocktake/scripts/scan.sh`

The script enumerates skill files, extracts frontmatter, and collects UTC mtimes.
Project dir is auto-detected from `$PWD/.claude/skills`; pass it explicitly only if needed.
Present the scan summary and inventory table from the script output:

```
Scanning:
  ✓ ~/.claude/skills/ (17 files)
  ✗ {cwd}/.claude/skills/ (not found — global skills only)
```

| Skill | 7d use | 30d use | Description |
|-------|--------|---------|-------------|

### Phase 2 — Quality Evaluation

Launch a Task tool subagent (**Explore agent, model: opus**) with the full inventory and checklist.
The subagent reads each skill, applies the checklist, and returns per-skill JSON:

`{ "verdict": "Keep"|"Improve"|"Update"|"Retire"|"Merge into [X]", "reason": "..." }`

**Chunk guidance:** Process ~20 skills per subagent invocation to keep context manageable. Save intermediate results to `results.json` (`status: "in_progress"`) after each chunk.

After all skills are evaluated: set `status: "completed"`, then proceed to Phase 3.

**Resume detection:** If `status: "in_progress"` is found on startup, resume from the first unevaluated skill.
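One hedged way to express that startup check with `jq` (the `results.json` schema appears under "Results File Schema" below; this sketch treats skills present in the scan inventory but absent from `results.json` as unevaluated):

```bash
RESULTS=~/.claude/skills/skill-stocktake/results.json

# Did the previous run finish, or was it interrupted mid-batch?
status=$(jq -r '.batch_progress.status // "completed"' "$RESULTS")

if [[ "$status" == "in_progress" ]]; then
  # Paths in the inventory but not yet in results.json still need evaluation.
  comm -23 \
    <(bash ~/.claude/skills/skill-stocktake/scripts/scan.sh | jq -r '.skills[].path' | sort) \
    <(jq -r '.skills[].path' "$RESULTS" | sort)
fi
```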
Each skill is evaluated against this checklist:

```
- [ ] Content overlap with other skills checked
- [ ] Overlap with MEMORY.md / CLAUDE.md checked
- [ ] Freshness of technical references verified (use WebSearch if tool names / CLI flags / APIs are present)
- [ ] Usage frequency considered
```

Verdict criteria:

| Verdict | Meaning |
|---------|---------|
| Keep | Useful and current |
| Improve | Worth keeping, but specific improvements needed |
| Update | Referenced technology is outdated (verify with WebSearch) |
| Retire | Low quality, stale, or cost-asymmetric |
| Merge into [X] | Substantial overlap with another skill; name the merge target |

Evaluation is **holistic AI judgment** — not a numeric rubric. Guiding dimensions:

- **Actionability**: code examples, commands, or steps that let you act immediately
- **Scope fit**: name, trigger, and content are aligned; not too broad or narrow
- **Uniqueness**: value not replaceable by MEMORY.md / CLAUDE.md / another skill
- **Currency**: technical references work in the current environment

**Reason quality requirements** — the `reason` field must be self-contained and decision-enabling:

- Do NOT write "unchanged" alone — always restate the core evidence
- For **Retire**: state (1) what specific defect was found, (2) what covers the same need instead
  - Bad: `"Superseded"`
  - Good: `"disable-model-invocation: true already set; superseded by continuous-learning-v2 which covers all the same patterns plus confidence scoring. No unique content remains."`
- For **Merge**: name the target and describe what content to integrate
  - Bad: `"Overlaps with X"`
  - Good: `"42-line thin content; Step 4 of chatlog-to-article already covers the same workflow. Integrate the 'article angle' tip as a note in that skill."`
- For **Improve**: describe the specific change needed (what section, what action, target size if relevant)
  - Bad: `"Too long"`
  - Good: `"276 lines; Section 'Framework Comparison' (L80–140) duplicates ai-era-architecture-principles; delete it to reach ~150 lines."`
- For **Keep** (mtime-only change in Quick Scan): restate the original verdict rationale, do not write "unchanged"
  - Bad: `"Unchanged"`
  - Good: `"mtime updated but content unchanged. Unique Python reference explicitly imported by rules/python/; no overlap found."`

### Phase 3 — Summary Table

| Skill | 7d use | Verdict | Reason |
|-------|--------|---------|--------|

### Phase 4 — Consolidation

1. **Retire / Merge**: present detailed justification per file before confirming with the user:
   - What specific problem was found (overlap, staleness, broken references, etc.)
   - What alternative covers the same functionality (for Retire: which existing skill/rule; for Merge: the target file and what content to integrate)
   - Impact of removal (any dependent skills, MEMORY.md references, or workflows affected)
2. **Improve**: present specific improvement suggestions with rationale:
   - What to change and why (e.g., "trim 430→200 lines because sections X/Y duplicate python-patterns")
   - User decides whether to act
3. **Update**: present updated content with sources checked
4. Check MEMORY.md line count; propose compression if >100 lines

## Results File Schema

`~/.claude/skills/skill-stocktake/results.json`:

**`evaluated_at`**: Must be set to the actual UTC time of evaluation completion.
Obtain via Bash: `date -u +%Y-%m-%dT%H:%M:%SZ`. Never use a date-only approximation like `T00:00:00Z`.

```json
{
  "evaluated_at": "2026-02-21T10:00:00Z",
  "mode": "full",
  "batch_progress": {
    "total": 80,
    "evaluated": 80,
    "status": "completed"
  },
  "skills": {
    "skill-name": {
      "path": "~/.claude/skills/skill-name/SKILL.md",
      "verdict": "Keep",
      "reason": "Concrete, actionable, unique value for X workflow",
      "mtime": "2026-01-15T08:30:00Z"
    }
  }
}
```
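Two read-side one-liners against this schema, for orientation:

```bash
# When did the last evaluation finish, and did it complete?
jq -r '"\(.evaluated_at) \(.batch_progress.status)"' \
  ~/.claude/skills/skill-stocktake/results.json

# List every skill currently marked Retire, with its reason.
jq -r '.skills | to_entries[] | select(.value.verdict == "Retire")
       | "\(.key): \(.value.reason)"' \
  ~/.claude/skills/skill-stocktake/results.json
```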
## Notes

- Evaluation is blind: the same checklist applies to all skills regardless of origin (ECC, self-authored, auto-extracted)
- Archive / delete operations always require explicit user confirmation
- No verdict branching by skill origin
skills/skill-stocktake/scripts/quick-diff.sh (new executable file, 87 lines)
@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# quick-diff.sh — compare skill file mtimes against results.json evaluated_at
# Usage: quick-diff.sh RESULTS_JSON [CWD_SKILLS_DIR]
# Output: JSON array of changed/new files to stdout (empty [] if no changes)
#
# When CWD_SKILLS_DIR is omitted, defaults to $PWD/.claude/skills so the
# script always picks up project-level skills without relying on the caller.
#
# Environment:
#   SKILL_STOCKTAKE_GLOBAL_DIR   Override ~/.claude/skills (for testing only;
#                                do not set in production — intended for bats tests)
#   SKILL_STOCKTAKE_PROJECT_DIR  Override project dir detection (for testing only)

set -euo pipefail

RESULTS_JSON="${1:-}"
CWD_SKILLS_DIR="${SKILL_STOCKTAKE_PROJECT_DIR:-${2:-$PWD/.claude/skills}}"
GLOBAL_DIR="${SKILL_STOCKTAKE_GLOBAL_DIR:-$HOME/.claude/skills}"

if [[ -z "$RESULTS_JSON" || ! -f "$RESULTS_JSON" ]]; then
  echo "Error: RESULTS_JSON not found: ${RESULTS_JSON:-<empty>}" >&2
  exit 1
fi

# Validate CWD_SKILLS_DIR looks like a .claude/skills path (defense-in-depth).
# Only warn when the path exists — a nonexistent path poses no traversal risk.
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" && "$CWD_SKILLS_DIR" != */.claude/skills* ]]; then
  echo "Warning: CWD_SKILLS_DIR does not look like a .claude/skills path: $CWD_SKILLS_DIR" >&2
fi

evaluated_at=$(jq -r '.evaluated_at' "$RESULTS_JSON")

# Fail fast on a missing or malformed evaluated_at rather than producing
# unpredictable results from ISO 8601 string comparison against "null".
if [[ ! "$evaluated_at" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$ ]]; then
  echo "Error: invalid or missing evaluated_at in $RESULTS_JSON: $evaluated_at" >&2
  exit 1
fi

# Pre-extract known paths from results.json once (O(1) lookup per file instead of O(n*m))
known_paths=$(jq -r '.skills[].path' "$RESULTS_JSON" 2>/dev/null)

tmpdir=$(mktemp -d)
# Use a function to avoid embedding $tmpdir in a quoted string (prevents injection
# if TMPDIR were crafted to contain shell metacharacters).
_cleanup() { rm -rf "$tmpdir"; }
trap _cleanup EXIT

# Shared counter across process_dir calls — intentionally NOT local
i=0

process_dir() {
  local dir="$1"
  while IFS= read -r file; do
    local mtime dp is_new
    mtime=$(date -u -r "$file" +%Y-%m-%dT%H:%M:%SZ)
    dp="${file/#$HOME/~}"

    # Check if this file is known to results.json (exact whole-line match to
    # avoid substring false-positives, e.g. "python-patterns" matching "python-patterns-v2").
    if echo "$known_paths" | grep -qxF "$dp"; then
      is_new="false"
      # Known file: only emit if mtime changed (ISO 8601 string comparison is safe)
      [[ "$mtime" > "$evaluated_at" ]] || continue
    else
      is_new="true"
      # New file: always emit regardless of mtime
    fi

    jq -n \
      --arg path "$dp" \
      --arg mtime "$mtime" \
      --argjson is_new "$is_new" \
      '{path:$path,mtime:$mtime,is_new:$is_new}' \
      > "$tmpdir/$i.json"
    i=$((i+1))
  done < <(find "$dir" -name "*.md" -type f 2>/dev/null | sort)
}

[[ -d "$GLOBAL_DIR" ]] && process_dir "$GLOBAL_DIR"
[[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" ]] && process_dir "$CWD_SKILLS_DIR"

if [[ $i -eq 0 ]]; then
  echo "[]"
else
  jq -s '.' "$tmpdir"/*.json
fi
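The output shape assembled by the `jq -n` / `jq -s` calls above: one changed known file and one new file (skill names, paths, and timestamps illustrative):

```bash
$ bash quick-diff.sh ~/.claude/skills/skill-stocktake/results.json
[
  {
    "path": "~/.claude/skills/python-patterns/SKILL.md",
    "mtime": "2026-02-22T09:14:03Z",
    "is_new": false
  },
  {
    "path": "~/.claude/skills/brand-new-skill/SKILL.md",
    "mtime": "2026-02-22T09:20:41Z",
    "is_new": true
  }
]
```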
skills/skill-stocktake/scripts/save-results.sh (new executable file, 56 lines)
@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# save-results.sh — merge evaluated skills into results.json with correct UTC timestamp
# Usage: save-results.sh RESULTS_JSON <<< "$EVAL_JSON"
#
# stdin format:
#   { "skills": {...}, "mode"?: "full"|"quick", "batch_progress"?: {...} }
#
# Always sets evaluated_at to current UTC time via `date -u`.
# Merges stdin .skills into existing results.json (new entries override old).
# Optionally updates .mode and .batch_progress if present in stdin.

set -euo pipefail

RESULTS_JSON="${1:-}"

if [[ -z "$RESULTS_JSON" ]]; then
  echo "Error: RESULTS_JSON argument required" >&2
  echo "Usage: save-results.sh RESULTS_JSON <<< \"\$EVAL_JSON\"" >&2
  exit 1
fi

EVALUATED_AT=$(date -u +%Y-%m-%dT%H:%M:%SZ)

# Read eval results from stdin and validate JSON before touching the results file
input_json=$(cat)
if ! echo "$input_json" | jq empty 2>/dev/null; then
  echo "Error: stdin is not valid JSON" >&2
  exit 1
fi

if [[ ! -f "$RESULTS_JSON" ]]; then
  # Bootstrap: create new results.json from stdin JSON + current UTC timestamp
  echo "$input_json" | jq --arg ea "$EVALUATED_AT" \
    '. + { evaluated_at: $ea }' > "$RESULTS_JSON"
  exit 0
fi

# Merge: new .skills override existing ones; old skills not in input_json are kept.
# Optionally update .mode and .batch_progress if provided.
#
# Use mktemp for a collision-safe temp file (concurrent runs on the same RESULTS_JSON
# would race on a predictable ".tmp" suffix; random suffix prevents silent overwrites).
tmp=$(mktemp "${RESULTS_JSON}.XXXXXX")
trap 'rm -f "$tmp"' EXIT

jq -s \
  --arg ea "$EVALUATED_AT" \
  '.[0] as $existing | .[1] as $new |
   $existing |
   .evaluated_at = $ea |
   .skills = ($existing.skills + ($new.skills // {})) |
   if ($new | has("mode")) then .mode = $new.mode else . end |
   if ($new | has("batch_progress")) then .batch_progress = $new.batch_progress else . end' \
  "$RESULTS_JSON" <(echo "$input_json") > "$tmp"

mv "$tmp" "$RESULTS_JSON"
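A bootstrap-then-merge walkthrough, with hypothetical paths and skill names:

```bash
# First call: /tmp/results.json does not exist, so stdin becomes the file,
# plus a freshly stamped evaluated_at.
bash save-results.sh /tmp/results.json <<< '{
  "skills": { "skill-a": { "verdict": "Keep", "reason": "Unique, current" } },
  "mode": "full"
}'

# Second call: skill-b is merged in, skill-a is carried forward,
# and evaluated_at is refreshed.
bash save-results.sh /tmp/results.json <<< '{
  "skills": { "skill-b": { "verdict": "Improve", "reason": "Trim overlap with skill-a" } }
}'

jq -c '.skills | keys' /tmp/results.json   # ["skill-a","skill-b"]
```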
skills/skill-stocktake/scripts/scan.sh (new executable file, 170 lines)
@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# scan.sh — enumerate skill files, extract frontmatter and UTC mtime
# Usage: scan.sh [CWD_SKILLS_DIR]
# Output: JSON to stdout
#
# When CWD_SKILLS_DIR is omitted, defaults to $PWD/.claude/skills so the
# script always picks up project-level skills without relying on the caller.
#
# Environment:
#   SKILL_STOCKTAKE_GLOBAL_DIR   Override ~/.claude/skills (for testing only;
#                                do not set in production — intended for bats tests)
#   SKILL_STOCKTAKE_PROJECT_DIR  Override project dir detection (for testing only)

set -euo pipefail

GLOBAL_DIR="${SKILL_STOCKTAKE_GLOBAL_DIR:-$HOME/.claude/skills}"
CWD_SKILLS_DIR="${SKILL_STOCKTAKE_PROJECT_DIR:-${1:-$PWD/.claude/skills}}"
# Path to JSONL file containing tool-use observations (optional; used for usage frequency counts).
# Override via SKILL_STOCKTAKE_OBSERVATIONS env var if your setup uses a different path.
OBSERVATIONS="${SKILL_STOCKTAKE_OBSERVATIONS:-$HOME/.claude/observations.jsonl}"

# Validate CWD_SKILLS_DIR looks like a .claude/skills path (defense-in-depth).
# Only warn when the path exists — a nonexistent path poses no traversal risk.
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" && "$CWD_SKILLS_DIR" != */.claude/skills* ]]; then
  echo "Warning: CWD_SKILLS_DIR does not look like a .claude/skills path: $CWD_SKILLS_DIR" >&2
fi

# Extract a frontmatter field (handles both quoted and unquoted single-line values).
# Does NOT support multi-line YAML blocks (| or >) or nested YAML keys.
extract_field() {
  local file="$1" field="$2"
  awk -v f="$field" '
    BEGIN { fm=0 }
    /^---$/ { fm++; next }
    fm==1 {
      n = length(f) + 2
      if (substr($0, 1, n) == f ": ") {
        val = substr($0, n+1)
        gsub(/^"/, "", val)
        gsub(/"$/, "", val)
        print val
        exit
      }
    }
    fm>=2 { exit }
  ' "$file"
}

# Get UTC timestamp N days ago (supports both macOS and GNU date)
date_ago() {
  local n="$1"
  date -u -v-"${n}d" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null ||
    date -u -d "${n} days ago" +%Y-%m-%dT%H:%M:%SZ
}

# Count observations matching a file path since a cutoff timestamp
count_obs() {
  local file="$1" cutoff="$2"
  if [[ ! -f "$OBSERVATIONS" ]]; then
    echo 0
    return
  fi
  jq -r --arg p "$file" --arg c "$cutoff" \
    'select(.tool=="Read" and .path==$p and .timestamp>=$c) | 1' \
    "$OBSERVATIONS" 2>/dev/null | wc -l | tr -d ' '
}

# Scan a directory and produce a JSON array of skill objects
scan_dir_to_json() {
  local dir="$1"
  local c7 c30
  c7=$(date_ago 7)
  c30=$(date_ago 30)

  local tmpdir
  tmpdir=$(mktemp -d)
  # Use a function to avoid embedding $tmpdir in a quoted string (prevents injection
  # if TMPDIR were crafted to contain shell metacharacters).
  local _scan_tmpdir="$tmpdir"
  _scan_cleanup() { rm -rf "$_scan_tmpdir"; }
  trap _scan_cleanup RETURN

  # Pre-aggregate observation counts in two passes (one per window) instead of
  # calling jq per-file — reduces from O(n*m) to O(n+m) jq invocations.
  local obs_7d_counts obs_30d_counts
  obs_7d_counts=""
  obs_30d_counts=""
  if [[ -f "$OBSERVATIONS" ]]; then
    obs_7d_counts=$(jq -r --arg c "$c7" \
      'select(.tool=="Read" and .timestamp>=$c) | .path' \
      "$OBSERVATIONS" 2>/dev/null | sort | uniq -c)
    obs_30d_counts=$(jq -r --arg c "$c30" \
      'select(.tool=="Read" and .timestamp>=$c) | .path' \
      "$OBSERVATIONS" 2>/dev/null | sort | uniq -c)
  fi

  local i=0
  while IFS= read -r file; do
    local name desc mtime u7 u30 dp
    name=$(extract_field "$file" "name")
    desc=$(extract_field "$file" "description")
    mtime=$(date -u -r "$file" +%Y-%m-%dT%H:%M:%SZ)
    # Use awk exact field match to avoid substring false-positives from grep -F.
    # uniq -c output format: "   N /path/to/file" — path is always field 2.
    u7=$(echo "$obs_7d_counts" | awk -v f="$file" '$2 == f {print $1}' | head -1)
    u7="${u7:-0}"
    u30=$(echo "$obs_30d_counts" | awk -v f="$file" '$2 == f {print $1}' | head -1)
    u30="${u30:-0}"
    dp="${file/#$HOME/~}"

    jq -n \
      --arg path "$dp" \
      --arg name "$name" \
      --arg description "$desc" \
      --arg mtime "$mtime" \
      --argjson use_7d "$u7" \
      --argjson use_30d "$u30" \
      '{path:$path,name:$name,description:$description,use_7d:$use_7d,use_30d:$use_30d,mtime:$mtime}' \
      > "$tmpdir/$i.json"
    i=$((i+1))
  done < <(find "$dir" -name "*.md" -type f 2>/dev/null | sort)

  if [[ $i -eq 0 ]]; then
    echo "[]"
  else
    jq -s '.' "$tmpdir"/*.json
  fi
}

# --- Main ---

global_found="false"
global_count=0
global_skills="[]"

if [[ -d "$GLOBAL_DIR" ]]; then
  global_found="true"
  global_skills=$(scan_dir_to_json "$GLOBAL_DIR")
  global_count=$(echo "$global_skills" | jq 'length')
fi

project_found="false"
project_path=""
project_count=0
project_skills="[]"

if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" ]]; then
  project_found="true"
  project_path="$CWD_SKILLS_DIR"
  project_skills=$(scan_dir_to_json "$CWD_SKILLS_DIR")
  project_count=$(echo "$project_skills" | jq 'length')
fi

# Merge global + project skills into one array
all_skills=$(jq -s 'add' <(echo "$global_skills") <(echo "$project_skills"))

jq -n \
  --arg global_found "$global_found" \
  --argjson global_count "$global_count" \
  --arg project_found "$project_found" \
  --arg project_path "$project_path" \
  --argjson project_count "$project_count" \
  --argjson skills "$all_skills" \
  '{
    scan_summary: {
      global: { found: ($global_found == "true"), count: $global_count },
      project: { found: ($project_found == "true"), path: $project_path, count: $project_count }
    },
    skills: $skills
  }'
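The envelope the final `jq -n` call emits, sketched with one illustrative entry; counts and mtime are placeholders, and `name` comes out empty when the frontmatter has no `name:` field (as in SKILL.md above):

```bash
$ bash scan.sh
{
  "scan_summary": {
    "global": { "found": true, "count": 17 },
    "project": { "found": false, "path": "", "count": 0 }
  },
  "skills": [
    {
      "path": "~/.claude/skills/skill-stocktake/SKILL.md",
      "name": "",
      "description": "Use when auditing Claude skills and commands for quality. ...",
      "use_7d": 0,
      "use_30d": 0,
      "mtime": "2026-02-21T10:00:00Z"
    }
  ]
}
```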