Compare commits

...

167 Commits

Author SHA1 Message Date
Affaan Mustafa
db27ba1eb2 chore: update README stats and add Codex platform support
- Stars: 42K+ -> 50K+, forks: 5K+ -> 6K+, contributors: 24 -> 30
- Skills: 43 -> 44 (search-first), commands: 31 -> 32 (learn-eval)
- Add Codex to supported platforms in FAQ
- Add search-first skill and learn-eval command to directory listing
- Update OpenCode feature parity table counts
2026-02-23 06:56:00 -08:00
Affaan Mustafa
3c833d8922 Merge pull request #265 from shimo4228/feat/skills/skill-stocktake
feat(skills): add skill-stocktake skill
2026-02-23 06:55:38 -08:00
Affaan Mustafa
156b89ed30 Merge pull request #268 from pangerlkr/patch-6
feat: add scripts/codemaps/generate.ts — fixes #247
2026-02-23 06:55:35 -08:00
Pangerkumzuk Longkumer
41ce1a52e5 feat: add scripts/codemaps/generate.ts codemap generator Fixes #247 - The generate.ts script referenced in agents/doc-updater.md was missing from the repository. This adds the actual implementation. The script: - Recursively walks the src directory (skipping node_modules, dist, etc.) - Classifies files into 5 areas: frontend, backend, database, integrations, workers - Generates docs/CODEMAPS/INDEX.md + one .md per area - Uses the codemap format defined in doc-updater.md - Supports optional srcDir argument: npx tsx scripts/codemaps/generate.ts [srcDir]
This script scans the current working directory and generates architectural codemap documentation in the specified output directory. It classifies files into areas such as frontend, backend, database, integrations, and workers, and creates markdown files for each area along with an index.
2026-02-22 16:19:16 +05:30
Jongchan
6f94c2e28f fix(search-first): add missing skill name frontmatter (#266)
fix: add missing name frontmatter for search-first skill
2026-02-21 16:04:39 -08:00
Tatsuya Shimomoto
91b7ccf56f feat(skills): add skill-stocktake skill 2026-02-22 04:29:40 +09:00
Affaan Mustafa
7daa830da9 Merge pull request #263 from shimo4228/feat/commands/learn-eval
feat(commands): add learn-eval command
2026-02-20 21:17:57 -08:00
Affaan Mustafa
7e57d1b831 Merge pull request #262 from shimo4228/feat/skills/search-first
feat(skills): add search-first skill
2026-02-20 21:17:54 -08:00
Tatsuya Shimomoto
ff47dace11 feat(commands): add learn-eval command 2026-02-21 12:10:39 +09:00
Tatsuya Shimomoto
c9dc53e862 feat(skills): add search-first skill 2026-02-21 12:10:25 +09:00
Affaan Mustafa
c8f54481b8 chore: update Sonnet model references from 4.5 to 4.6
chore: update Sonnet model references from 4.5 to 4.6
2026-02-20 10:59:12 -08:00
Affaan Mustafa
294fc4aad8 fix: CI/Test for issue #226 (hook override bug)
Fixed CI / Test for (issue#226)
2026-02-20 10:59:10 -08:00
yptse123
81aa8a72c3 chore: update Sonnet model references from 4.5 to 4.6
Update MODEL_SONNET constant and all documentation references
to reflect the new claude-sonnet-4-6 model version.
2026-02-20 18:08:10 +08:00
Affaan Mustafa
0e9f613fd1 Revert "feat(ecc): prune plugin 43→12 items, promote 7 rules to .claude/rules/ (#245)"
This reverts commit 1bd68ff534.
2026-02-20 01:11:30 -08:00
park-kyungchan
1bd68ff534 feat(ecc): prune plugin 43→12 items, promote 7 rules to .claude/rules/ (#245)
ECC community plugin pruning: removed 530+ non-essential files
(.cursor/, .opencode/, docs/ja-JP, docs/zh-CN, docs/zh-TW,
language-specific skills/agents/rules). Retained 4 agents,
3 commands, 5 skills. Promoted 13 rule files (8 common + 5
typescript) to .claude/rules/ for CC native loading. Extracted
reusable patterns to EXTRACTED-PATTERNS.md.
2026-02-19 22:34:51 -08:00
Affaan Mustafa
24047351c2 Merge pull request #251 from gangqian68/main
docs: add CLAUDE.md for Claude Code guidance
2026-02-19 04:56:49 -08:00
qian gang
66959c1dca docs: add CLAUDE.md for Claude Code guidance
Add project-level CLAUDE.md with test commands, architecture
overview, key commands, and contribution guidelines.
2026-02-19 16:50:08 +08:00
Pangerkumzuk Longkumer
5a0f6e9e1e Merge pull request #12 from pangerlkr/copilot/fix-ci-test-failures-again
Fix Windows CI test failures - platform-specific test adjustments
2026-02-19 06:43:40 +05:30
copilot-swe-agent[bot]
cf61ef7539 Fix Windows CI test failures - add platform checks and USERPROFILE support
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 15:10:32 +00:00
copilot-swe-agent[bot]
07e23e3e64 Initial plan 2026-02-18 15:00:14 +00:00
Pangerkumzuk Longkumer
8fc49ba0e8 Merge pull request #11 from pangerlkr/copilot/fix-ci-test-failures
Fix session-manager tests failing in CI due to missing test isolation
2026-02-18 20:29:40 +05:30
copilot-swe-agent[bot]
b90448aef6 Clarify cleanup comment scope in session-manager tests
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 14:05:46 +00:00
copilot-swe-agent[bot]
caab908be8 Fix session-manager test environment for Rounds 95-98
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 14:02:11 +00:00
copilot-swe-agent[bot]
7021d1f6cf Initial plan 2026-02-18 13:51:40 +00:00
Pangerkumzuk Longkumer
3ad211b01b Merge pull request #10 from pangerlkr/copilot/fix-matrix-test-failures
Fix platform-specific hook blocking tests for CI matrix
2026-02-18 19:21:21 +05:30
copilot-swe-agent[bot]
f61c9b0caf Fix integration/hooks tests to handle Windows platform differences
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 13:42:05 +00:00
copilot-swe-agent[bot]
b682ac7d79 Initial plan 2026-02-18 13:36:31 +00:00
Pangerkumzuk Longkumer
e1fca6e84d Merge branch 'affaan-m:main' into main 2026-02-18 18:33:04 +05:30
Pangerkumzuk Longkumer
07530ace5f Merge pull request #9 from pangerlkr/claude/fix-agentshield-security-scan
Fix test failures and remove broken AgentShield workflow
2026-02-18 13:42:01 +05:30
anthropic-code-agent[bot]
00464b6f60 Fix failing workflows: trim action in getCommandPattern and remove broken AgentShield scan
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 08:06:25 +00:00
anthropic-code-agent[bot]
0c78a7c779 Initial plan 2026-02-18 08:00:23 +00:00
Pangerkumzuk Longkumer
fca997001e Merge pull request #7 from pangerlkr/copilot/fix-workflow-failures
Fix stdin size limit enforcement in hook scripts
2026-02-18 13:16:01 +05:30
copilot-swe-agent[bot]
1eca3c9130 Fix stdin overflow bug in hook scripts - truncate chunks to stay within MAX_STDIN limit
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 07:40:12 +00:00
copilot-swe-agent[bot]
defcdc356e Initial plan 2026-02-18 07:28:13 +00:00
Pangerkumzuk Longkumer
b548ce47c9 Merge pull request #6 from pangerlkr/copilot/fix-workflow-actions
Fix copilot-setup-steps.yml YAML structure and address review feedback
2026-02-18 12:56:29 +05:30
copilot-swe-agent[bot]
90e6a8c63b Fix copilot-setup-steps.yml and address PR review comments
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 07:22:05 +00:00
copilot-swe-agent[bot]
c68f7efcdc Initial plan 2026-02-18 07:16:12 +00:00
Pangerkumzuk Longkumer
aa805d5240 Merge pull request #5 from pangerlkr/claude/fix-workflow-actions
Fix ESLint errors in test files and package manager
2026-02-18 12:42:38 +05:30
anthropic-code-agent[bot]
c5ca3c698c Fix ESLint errors in test files and package-manager.js
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-18 07:04:29 +00:00
anthropic-code-agent[bot]
7e928572c7 Initial plan 2026-02-18 06:58:35 +00:00
Pangerkumzuk Longkumer
0bf47bbb41 fix: update package manager tests and add summary; update print statement from 'Hello' to 'Goodbye' 2026-02-18 07:29:16 +05:30
Pangerkumzuk Longkumer
2ad888ca82 Refactor console log formatting in tests 2026-02-18 07:21:58 +05:30
Pangerkumzuk Longkumer
8966282e48 fix: add comments to empty catch blocks (no-empty ESLint) 2026-02-18 07:18:40 +05:30
Pangerkumzuk Longkumer
3d97985559 fix: remove unused execFileSync import (no-unused-vars ESLint) 2026-02-18 07:15:53 +05:30
Pangerkumzuk Longkumer
d54124afad fix: remove useless escape characters in regex patterns (no-useless-escape ESLint) 2026-02-18 07:14:32 +05:30
Affaan Mustafa
0b11849f1e chore: update skill count from 37 to 43, add 5 new skills to directory listing
New community skills: content-hash-cache-pattern, cost-aware-llm-pipeline,
regex-vs-llm-structured-text, swift-actor-persistence, swift-protocol-di-testing
2026-02-16 20:04:57 -08:00
Affaan Mustafa
2c26d2d67c fix: add missing process.exit(0) to early return in post-edit-console-warn hook 2026-02-16 20:03:12 -08:00
Pangerkumzuk Longkumer
fdda6cbcd9 Merge branch 'main' into main 2026-02-17 07:00:12 +05:30
Affaan Mustafa
5cb9c1c2a5 Merge pull request #223 from shimo4228/feat/skills/regex-vs-llm-structured-text
feat(skills): add regex-vs-llm-structured-text skill
2026-02-16 14:19:02 -08:00
Affaan Mustafa
595127954f Merge pull request #222 from shimo4228/feat/skills/content-hash-cache-pattern
feat(skills): add content-hash-cache-pattern skill
2026-02-16 14:18:59 -08:00
Affaan Mustafa
bb084229aa Merge pull request #221 from shimo4228/feat/skills/swift-actor-persistence
feat(skills): add swift-actor-persistence skill
2026-02-16 14:18:57 -08:00
Affaan Mustafa
849bb3b425 Merge pull request #220 from shimo4228/feat/skills/swift-protocol-di-testing
feat(skills): add swift-protocol-di-testing skill
2026-02-16 14:18:55 -08:00
Affaan Mustafa
4db215f60d Merge pull request #219 from shimo4228/feat/skills/cost-aware-llm-pipeline
feat(skills): add cost-aware-llm-pipeline skill
2026-02-16 14:18:53 -08:00
Affaan Mustafa
bb1486c404 Merge pull request #229 from voidforall/feat/cpp-coding-standards
feat: add cpp coding standards skill
2026-02-16 14:18:35 -08:00
Affaan Mustafa
9339d4c88c Merge pull request #228 from hrygo/fix/observe.sh-timestamp-export
fix: correct TIMESTAMP environment variable syntax in observe.sh
2026-02-16 14:18:13 -08:00
Affaan Mustafa
2497a9b6e5 Merge pull request #241 from sungpeo/mkdir-rules-readme
docs: require default directory creation for initial user-level rules
2026-02-16 14:18:10 -08:00
Affaan Mustafa
e449471ed3 Merge pull request #230 from neal-zhu/fix/whitelist-plan-files-in-doc-hook
fix: whitelist .claude/plans/ in doc file creation hook
2026-02-16 14:18:08 -08:00
Sungpeo Kook
cad8db21b7 docs: add mkdir for rules directory in ja-JP and zh-CN READMEs 2026-02-17 01:48:37 +09:00
Sungpeo Kook
9d9258c7e1 docs: require default directory creation for initial user-level rules 2026-02-17 01:32:56 +09:00
Pangerkumzuk Longkumer
08ee723e85 Merge pull request #3 from pangerlkr/copilot/fix-markdownlint-errors-again
Fix markdownlint errors: MD038, MD058, MD025, MD034
2026-02-16 19:23:01 +05:30
Pangerkumzuk Longkumer
f11347a708 Merge pull request #4 from pangerlkr/copilot/fix-markdownlint-errors-another-one
Fix markdownlint errors (MD038, MD058, MD025, MD034)
2026-02-16 19:20:56 +05:30
copilot-swe-agent[bot]
586637f94c Revert unrelated package-lock.json changes
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-16 03:01:15 +00:00
copilot-swe-agent[bot]
2b6ff6b55e Initial plan for markdownlint error fixes
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-16 02:58:12 +00:00
copilot-swe-agent[bot]
2be6e09501 Initial plan 2026-02-16 02:55:40 +00:00
copilot-swe-agent[bot]
b1d47b22ea Initial plan 2026-02-16 02:37:38 +00:00
Pangerkumzuk Longkumer
9dd4f4409b Merge pull request #2 from pangerlkr/copilot/fix-markdownlint-errors
Fix markdownlint CI failures (MD038, MD058, MD025, MD034)
2026-02-16 07:58:23 +05:30
copilot-swe-agent[bot]
c5de2a7bf7 Remove misleading comments about trailing spaces
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-16 02:22:49 +00:00
copilot-swe-agent[bot]
af24c617bb Fix all markdownlint errors (MD038, MD058, MD025, MD034)
Co-authored-by: pangerlkr <73515951+pangerlkr@users.noreply.github.com>
2026-02-16 02:22:14 +00:00
copilot-swe-agent[bot]
2ca903d4c5 Initial plan 2026-02-16 02:20:28 +00:00
Pangerkumzuk Longkumer
4d98d9f125 Add Go environment setup step to workflow
Added a step to set up the Go environment in GitHub Actions workflow.
2026-02-16 07:10:39 +05:30
Maohua Zhu
40e80bcc61 fix: whitelist .claude/plans/ in doc file creation hook
The PreToolUse Write hook blocks creation of .md files to prevent
unnecessary documentation sprawl. However, it also blocks Claude Code's
built-in plan mode from writing plan files to .claude/plans/*.md,
causing "BLOCKED: Unnecessary documentation file creation" errors
every time a plan is created or updated.

Add .claude/plans/ path to the whitelist so plan files are not blocked.
2026-02-15 08:49:15 +08:00
Lin Yuan
eaf710847f fix syntax in illustrative example 2026-02-14 20:36:36 +00:00
voidforall
b169a2e1dd Update .cursor/skills/cpp-coding-standards/SKILL.md
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2026-02-14 20:33:59 +00:00
Lin Yuan
8b4aac4e56 feat: add cpp-coding-standards skill to skills/ and update README 2026-02-14 20:26:33 +00:00
Lin Yuan
08f60355d4 feat: add cpp-coding-standards skill based on C++ Core Guidelines
Comprehensive coding standards for modern C++ (C++17/20/23) derived
from isocpp.github.io/CppCoreGuidelines. Covers philosophy, functions,
classes, resource management, expressions, error handling, immutability,
concurrency, templates, standard library, enumerations, naming, and
performance with DO/DON'T examples and a pre-commit checklist.
2026-02-14 20:20:27 +00:00
黄飞虹
1f74889dbf fix: correct TIMESTAMP environment variable syntax in observe.sh
The inline environment variable syntax `TIMESTAMP="$timestamp" echo ...` 
does not work correctly because:
1. The pipe creates a subshell that doesn't inherit the variable
2. The environment variable is set for echo, not for the piped python

Fixed by using `export` and separating the commands:
- export TIMESTAMP="$timestamp"
- echo "$INPUT_JSON" | python3 -c "..."

This ensures the TIMESTAMP variable is available to the python subprocess.

Fixes #227

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-14 21:25:50 +08:00
Tatsuya Shimomoto
82d751556c feat(skills): add regex-vs-llm-structured-text skill
Decision framework and hybrid pipeline for choosing between regex
and LLM when parsing structured text.
2026-02-14 12:33:03 +09:00
Tatsuya Shimomoto
3847cc0e0d feat(skills): add content-hash-cache-pattern skill
SHA-256 content-hash based file caching with service layer
separation for expensive processing pipelines.
2026-02-14 12:31:30 +09:00
Tatsuya Shimomoto
94eaaad238 feat(skills): add swift-actor-persistence skill
Thread-safe data persistence patterns using Swift actors with
in-memory cache and file-backed storage.
2026-02-14 12:30:22 +09:00
Tatsuya Shimomoto
ab5be936e9 feat(skills): add swift-protocol-di-testing skill
Protocol-based dependency injection patterns for testable Swift code
with Swift Testing framework examples.
2026-02-14 12:18:48 +09:00
Tatsuya Shimomoto
219bd1ff88 feat(skills): add cost-aware-llm-pipeline skill
Cost optimization patterns for LLM API usage combining model routing,
budget tracking, retry logic, and prompt caching.
2026-02-14 12:16:05 +09:00
Affaan Mustafa
4ff6831b2b Delete llms.txt
not necessary + opencode focused, open to a PR for a new one
2026-02-13 18:47:48 -08:00
Affaan Mustafa
182e9e78b9 test: add 3 edge-case tests for readFile binary, output() NaN/Infinity, loadAliases __proto__ safety
Round 125: Tests for readFile returning garbled strings (not null) on binary
files, output() handling undefined/NaN/Infinity as non-objects logged directly
(and JSON.stringify converting NaN/Infinity to null in objects), and loadAliases
with __proto__ key in JSON proving no prototype pollution occurs.
Total: 935 tests, all passing.
2026-02-13 18:44:07 -08:00
Affaan Mustafa
0250de793a test: add 3 edge-case tests for findFiles dotfiles, getAllSessions date format, parseSessionMetadata title regex
Round 124: Tests for findFiles matching dotfiles (unlike shell glob where *
excludes hidden files), getAllSessions strict date equality filter (wrong format
silently returns empty), and parseSessionMetadata title regex edge cases
(no space after #, ## heading, multiple H1, greedy \s+ crossing newlines).
Total: 932 tests, all passing.
2026-02-13 18:40:07 -08:00
Affaan Mustafa
88fa1bdbbc test: add 3 edge-case tests for countInFile overlapping, replaceInFile $& tokens, parseSessionMetadata CRLF
Round 123: Tests for countInFile non-overlapping regex match behavior (aaa with
/aa/g returns 1 not 2), replaceInFile with $& and $$ substitution tokens in
replacement strings, and parseSessionMetadata CRLF section boundary bleed where
\n\n fails to match \r\n\r\n. Total: 929 tests, all passing.
2026-02-13 18:36:09 -08:00
Affaan Mustafa
2753db3a48 test: add 3 edge-case tests for findFiles dot escaping, listAliases limit falsy values, getSessionById old format
Round 122: Tests for findFiles glob dot escaping (*.txt must not match filetxt),
listAliases limit=0/negative/NaN returning all due to JS falsy check, and
getSessionById matching old YYYY-MM-DD-session.tmp filenames via noIdMatch path.
Total: 926 tests, all passing.
2026-02-13 18:30:42 -08:00
Affaan Mustafa
e50b05384a test: add Round 121 tests for findFiles ? glob, setAlias path validation, and time metadata extraction
- findFiles: ? glob pattern matches single character only (converted to . regex)
- setAlias: rejects null, empty, whitespace-only, and non-string sessionPath values
- parseSessionMetadata: Started/Last Updated time extraction — present, missing, loose regex

Total tests: 923
2026-02-13 18:25:56 -08:00
Affaan Mustafa
26f3c88902 test: add Round 120 tests for replaceInFile empty search, setAlias length boundary, and notes extraction
- replaceInFile: empty string search — replace prepends at pos 0, replaceAll inserts between every char
- setAlias: 128-char alias accepted (boundary), 129-char rejected (> 128 check)
- parseSessionMetadata: "Notes for Next Session" extraction — last section, empty, ### boundary, markdown

Total tests: 920
2026-02-13 18:23:55 -08:00
Affaan Mustafa
df2d3a6d54 test: add Round 119 tests for appendFile type safety, renameAlias reserved names, and context extraction
- appendFile: null/undefined/number content throws TypeError (no try/catch like writeFile)
- renameAlias: reserved names rejected for newAlias (parallel check to setAlias, case-insensitive)
- parseSessionMetadata: "Context to Load" code block extraction — missing close, nested blocks, empty

Total tests: 917
2026-02-13 18:21:39 -08:00
Affaan Mustafa
25c5d58c44 test: add Round 118 edge-case tests for writeFile type safety, renameAlias self, and reserved alias names
- writeFile: null/undefined/number content throws TypeError (no try/catch unlike replaceInFile)
- renameAlias: same-name rename returns "already exists" (no self-rename short-circuit)
- setAlias: reserved names (list, help, remove, delete, create, set) rejected case-insensitively

Total tests: 914
2026-02-13 18:19:21 -08:00
Affaan Mustafa
06af1acb8d test: add Round 117 edge-case tests for grepFile CRLF, getSessionSize boundaries, and parseSessionFilename case
- grepFile: CRLF content leaves trailing \r on lines, breaking anchored $ patterns
- getSessionSize: boundary formatting at 0B, 1023B→"1023 B", 1024B→"1.0 KB", non-existent→"0 B"
- parseSessionFilename: [a-z0-9] regex rejects uppercase short IDs (case-sensitive design)

Total tests: 911
2026-02-13 18:16:10 -08:00
Affaan Mustafa
6a0b231d34 test: add Round 116 edge-case tests for replaceInFile null coercion, loadAliases extra fields, and ensureDir null path
- replaceInFile: null/undefined replacement coerced to string "null"/"undefined" by JS String.replace ToString
- loadAliases: extra unknown JSON fields silently preserved through load/save round-trip (loose validation)
- ensureDir: null/undefined path throws wrapped Error (ERR_INVALID_ARG_TYPE → re-thrown)

Total tests: 908
2026-02-13 18:11:58 -08:00
Affaan Mustafa
a563df2a52 test: add edge-case tests for countInFile empty pattern, parseSessionMetadata CRLF, and updateAliasTitle empty string coercion (round 115) 2026-02-13 18:05:28 -08:00
Affaan Mustafa
53e06a8850 test: add edge-case tests for listAliases type coercion, replaceInFile options.all with RegExp, and output BigInt serialization (round 114) 2026-02-13 18:01:25 -08:00
Affaan Mustafa
93633e44f2 test: add 3 tests for century leap years, zero-width regex, and markdown titles (Round 113)
- parseSessionFilename rejects Feb 29 in century non-leap years (1900, 2100) but accepts 2000/2400
- replaceInFile with /(?:)/g zero-width regex inserts at every position boundary
- parseSessionMetadata preserves raw markdown formatting (**bold**, `code`, _italic_) in titles

Total: 899 tests
2026-02-13 17:54:48 -08:00
Affaan Mustafa
791da32c6b test: add 3 tests for Unicode alias rejection, newline-in-path heuristic, and read-only append (Round 112)
- resolveAlias rejects Unicode characters (accented, CJK, emoji, Cyrillic homoglyphs)
- getSessionStats treats absolute .tmp paths with embedded newlines as content, not file paths
- appendSessionContent returns false on EACCES for read-only files

Total: 896 tests
2026-02-13 17:47:50 -08:00
Affaan Mustafa
635eb108ab test: add 3 tests for nested backtick context truncation, newline args injection, alias 128-char boundary
Round 111: Tests for parseSessionMetadata context regex truncation at
nested triple backticks (lazy [\s\S]*? stops early), getExecCommand
accepting newline/tab/CR in args via \s in SAFE_ARGS_REGEX, and setAlias
accepting exactly 128-character alias (off-by-one boundary). 893 tests total.
2026-02-13 17:41:58 -08:00
Affaan Mustafa
1e740724ca test: add 3 tests for findFiles root-unreadable, parseSessionFilename year 0000, uppercase ID rejection
Round 110: Tests for findFiles with unreadable root directory returning
empty array (vs Round 71 which tested subdirectory), parseSessionFilename
year 0000 exposing JS Date 0-99→1900-1999 mapping quirk, and uppercase
session ID rejection by [a-z0-9]{8,} regex. 890 tests total.
2026-02-13 17:30:38 -08:00
Affaan Mustafa
6737f3245b test: add 3 tests for appendFile new-file creation, getExecCommand traversal, getAllSessions non-session skip
Round 109:
- appendFile creating new file in non-existent directory (ensureDir + appendFileSync)
- getExecCommand with ../ path traversal in binary (SAFE_NAME_REGEX allows ../)
- getAllSessions skips .tmp files that don't match session filename format
2026-02-13 17:24:36 -08:00
Affaan Mustafa
1b273de13f test: add 3 tests for grepFile Unicode, SAFE_NAME_REGEX traversal, getSessionSize boundary
Round 108:
- grepFile with Unicode/emoji content (UTF-16 string matching on split lines)
- getRunCommand accepts ../ path traversal via SAFE_NAME_REGEX (allows / and . individually)
- getSessionSize exact 1024-byte B→KB boundary and 1MB KB→MB boundary
2026-02-13 17:18:06 -08:00
Affaan Mustafa
882157ac09 test: add 3 tests for Round 107 (881 total)
- grepFile with ^$ pattern verifies empty line matching including trailing newline phantom
- replaceInFile with self-reintroducing replacement confirms single-pass behavior
- setAlias with whitespace-only title exposes missing trim validation vs sessionPath
2026-02-13 17:11:32 -08:00
Affaan Mustafa
69799f2f80 test: add 3 tests for Round 106 (878 total)
- countInFile with named capture groups verifies match(g) ignores group details
- grepFile with multiline (m) flag confirms flag is preserved unlike stripped g
- getAllSessions with array/object limit tests Number() coercion edge cases
2026-02-13 17:07:13 -08:00
Affaan Mustafa
b27c21732f test: add 3 edge-case tests for regex boundary, sticky flag, and type bypass (Round 105)
- parseSessionMetadata: blank line within Completed section truncates items
  due to regex lookahead (?=###|\n\n|$) stopping at \n\n boundary
- grepFile: sticky (y) flag not stripped like g flag, causing stateful
  .test() behavior that misses matching lines
- getExecCommand: object args bypass SAFE_ARGS_REGEX (typeof !== 'string')
  but coerce to "[object Object]" in command string
2026-02-13 16:59:56 -08:00
Affaan Mustafa
332d0f444b test: add Round 104 edge-case tests (detectFromLockFile null, resolveSessionAlias traversal, whitespace notes)
- detectFromLockFile(null): throws TypeError — no input validation before
  path.join (package-manager.js:95)
- resolveSessionAlias('../etc/passwd'): returns path-traversal input unchanged
  when alias lookup fails, documenting the passthrough behavior
- parseSessionMetadata with whitespace-only notes: trim() → "" → hasNotes=false,
  whitespace-only notes treated as absent

Total tests: 872 (all passing)
2026-02-13 16:45:47 -08:00
Affaan Mustafa
45a0b62fcb test: add Round 103 edge-case tests (countInFile bool, grepFile numeric, loadAliases array)
- countInFile(file, false): boolean falls to else-return-0 type guard (utils.js:443)
- grepFile(file, 0): numeric pattern implicitly coerced via RegExp constructor,
  contrasting with countInFile which explicitly rejects non-string non-RegExp
- loadAliases with array aliases: typeof [] === 'object' bypasses validation
  at session-aliases.js:58, returning array instead of plain object

Total tests: 869 (all passing)
2026-02-13 16:08:47 -08:00
Affaan Mustafa
a64a294b29 test: add 3 edge-case tests for looksLikePath heuristic, falsy title coercion, and checkbox regex (Round 102)
- getSessionStats with Unix nonexistent .tmp path triggers looksLikePath
  heuristic → readFile returns null → zeroed stats via null content path
- setAlias with title=0 silently converts to null (0 || null === null)
- parseSessionMetadata skips [x] checked items in In Progress section
  (regex only matches unchecked [ ] checkboxes)

Total tests: 866
2026-02-13 16:02:18 -08:00
Affaan Mustafa
4d016babbb test: round 101 — output() circular crash, getSessionStats type confusion, appendSessionContent null
- output() throws TypeError on circular reference object (JSON.stringify has no try/catch)
- getSessionStats(123) throws TypeError (number reaches parseSessionMetadata, .match() fails)
- appendSessionContent(null) returns false (TypeError caught by try/catch)

Total tests: 863
2026-02-13 15:54:02 -08:00
Affaan Mustafa
d2c1281e97 test: round 100 — findFiles maxAge+recursive interaction, parseSessionMetadata ### truncation, cleanupAliases falsy coercion
- findFiles with both maxAge AND recursive combined (option interaction test)
- parseSessionMetadata truncates item text at embedded ### due to lazy regex
- cleanupAliases callback returning 0 (falsy non-boolean) removes alias via !0 coercion

Total tests: 860
2026-02-13 15:49:06 -08:00
Affaan Mustafa
78ad952433 test: add 3 tests for no-match rewrite, CR-only grepFile, and null write (R99)
- replaceInFile returns true even when pattern doesn't match (silent rewrite)
- grepFile treats CR-only (\r) file as single line (splits on \n only)
- writeSessionContent(null) returns false (TypeError caught by try/catch)
2026-02-13 15:41:15 -08:00
Affaan Mustafa
274cca025e test: add 3 tests for null-input crashes and negative maxAge boundary (R98)
- getSessionById(null) throws TypeError at line 297 (null.length)
- parseSessionFilename(null) throws TypeError at line 30 (null.match())
- findFiles with maxAge: -1 deterministically excludes all files
2026-02-13 15:35:18 -08:00
Affaan Mustafa
18fcb88168 test: add 3 tests for whitespace ID, lastIndex reuse, and whitespace search (Round 97) 2026-02-13 15:28:06 -08:00
Affaan Mustafa
8604583d16 test: add 3 tests for session-manager edge cases (Round 96)
- parseSessionFilename rejects Feb 30 (Date rollover check)
- getAllSessions with limit: Infinity bypasses pagination
- getAllSessions with limit: null demonstrates destructuring default bypass (null !== undefined)

Total: 848 tests, all passing
2026-02-13 15:13:55 -08:00
Affaan Mustafa
233b341557 test: add 3 tests for alternation regex, double-negative clamping, and self-rename (Round 95) 2026-02-13 14:50:49 -08:00
Affaan Mustafa
a95fb54ee4 test: add 3 tests for scoped pkg detection, empty env var, and tools-without-files (Round 94)
- detectFromPackageJson with scoped package name (@scope/pkg@version)
  returns null because split('@')[0] yields empty string
- getPackageManager skips empty string CLAUDE_PACKAGE_MANAGER via
  falsy short-circuit (distinct from unknown PM name test)
- session-end buildSummarySection includes Tools Used but omits
  Files Modified when transcript has only Read/Grep tools

Total tests: 842
2026-02-13 14:44:40 -08:00
Affaan Mustafa
910ffa5530 test: add 3 tests for regex boundary and flag logic gaps (round 93)
- getSessionStats: drive letter without slash (Z:nosession.tmp) treated as content
- countInFile: case-insensitive regex with g flag auto-appended (/foo/i → /foo/ig)
- countInFile: case-insensitive regex with g flag preserved (/foo/gi stays /foo/gi)
2026-02-13 14:21:03 -08:00
Affaan Mustafa
b9a38b2680 test: add Round 92 tests for object pattern, UNC path, and empty packageManager
- Test countInFile returns 0 for object pattern type (non-string non-RegExp)
- Test getSessionStats treats Windows UNC path as content (not file path)
- Test detectFromPackageJson returns null for empty string packageManager field

Total tests: 836
2026-02-13 14:05:24 -08:00
Affaan Mustafa
14dfe4d110 test: add Round 91 tests for empty action pattern, whitespace PM, and mixed separators
- Test getCommandPattern('') produces valid regex for empty action string
- Test detectFromPackageJson returns null for whitespace-only packageManager
- Test getSessionStats treats mixed Windows path separators as file path

Total tests: 833
2026-02-13 14:02:41 -08:00
Affaan Mustafa
3e98be3e39 test: add Round 90 tests for readStdinJson timeout and saveAliases double failure
- Test readStdinJson timeout path when stdin never closes (resolves with {})
- Test readStdinJson timeout path with partial invalid JSON (catch resolves with {})
- Test saveAliases backup restore double failure (inner restoreErr catch at line 135)

Total tests: 830
2026-02-13 13:59:03 -08:00
Affaan Mustafa
3ec59c48bc test: add 3 tests for subdirectory skip, TypeScript error detection, and entry.name fallback (Round 89)
- getAllSessions skips subdirectories in sessions dir (!entry.isFile() branch)
- post-edit-typecheck.js error detection path when tsc reports errors (relevantLines > 0)
- extractSessionSummary extracts tools via entry.name + entry.input fallback format
2026-02-13 13:39:16 -08:00
Affaan Mustafa
e70d4d2237 test: add 3 tests for replaceInFile deletion, parseSessionMetadata null fields, countInFile zero matches (Round 88)
- replaceInFile with empty replacement string verifies text deletion works
- parseSessionMetadata asserts date/started/lastUpdated are null when fields absent
- countInFile with valid file but non-matching pattern returns 0

Total: 824 tests
2026-02-13 12:49:53 -08:00
Affaan Mustafa
9b286ab3f8 test: add 3 tests for stdin 1MB overflow and analyzePhase async method (round 87)
- post-edit-format.js: verify MAX_STDIN truncation at 1MB limit
- post-edit-typecheck.js: verify MAX_STDIN truncation at 1MB limit
- skill-create-output.js: test analyzePhase() returns Promise and writes output
2026-02-13 12:42:20 -08:00
Affaan Mustafa
b3e362105d test: add 3 tests for typeof guard, empty package.json, and learned_skills_path override (round 86)
- loadAliases resets to defaults when aliases field is a truthy non-object (string)
- detectFromPackageJson returns null for empty (0-byte) package.json
- evaluate-session uses learned_skills_path config override with ~ expansion
2026-02-13 12:23:34 -08:00
Affaan Mustafa
8cacf0f6a6 fix: use nullish coalescing for confidence default + add 3 tests (round 85)
Fix confidence=0 showing 80% instead of 0% in patterns() (|| → ??).
Test evaluate-session.js config parse error catch, getSessionIdShort
fallback at root CWD, and precise confidence=0 assertion.
2026-02-13 12:11:26 -08:00
Affaan Mustafa
cedcf9a701 test: add 3 tests for TOCTOU catch paths and NaN date sort fallback (round 84)
- getSessionById returns null for broken symlink (session-manager.js:307-310)
- findFiles skips broken symlinks matching the pattern (utils.js:170-173)
- listAliases sorts entries with invalid/missing dates via getTime() || 0 fallback
2026-02-13 11:35:22 -08:00
Affaan Mustafa
15717d6d04 test: cover whitespace-only frontmatter field, empty SKILL.md, and getAllSessions TOCTOU symlink 2026-02-13 11:20:44 -08:00
Affaan Mustafa
c8b7d41e42 test: cover tool_name OR fallback, Notification/SubagentStop events, and template regex no-match 2026-02-13 11:12:03 -08:00
Affaan Mustafa
9bec3d7625 test: cover suggest-compact upper bound, getSessionStats null input, and non-string content branch 2026-02-13 11:02:46 -08:00
Affaan Mustafa
2573cbb7b0 test: cover session-end message.role path, getExecCommand non-string args, and legacy hooks format
Round 80: Three previously untested conditional branches:
- session-end.js: entry.message?.role === 'user' third OR condition
  (fires when type is not 'user' but message.role is)
- package-manager.js: getExecCommand with truthy non-string args
  (typeof check short-circuits, value still appended via ternary)
- validate-hooks.js: legacy array format parsing path (lines 115-135)
  with 'Hook N' error labels instead of 'EventType[N]'
2026-02-13 10:39:35 -08:00
Affaan Mustafa
9dccdb9068 test: cover countInFile/grepFile string patterns and validate-commands warnings suffix
Round 79 — untested conditional branches in utils.js and validate-commands.js:
- countInFile: exercise typeof pattern === 'string' branch with valid string
- grepFile: exercise string pattern branch (not RegExp)
- validate-commands: verify (N warnings) suffix in output when warnCount > 0
2026-02-13 10:26:58 -08:00
Affaan Mustafa
f000d9b02d test: cover getSessionStats file-path read, hasContent field, and wrapped hooks format
Round 78 — shifted from catch blocks to untested conditional branches:
- getSessionStats: exercise looksLikePath → getSessionContent path (real .tmp file)
- getAllSessions: verify hasContent true/false for non-empty vs empty files
- validate-hooks: test wrapped { hooks: { PreToolUse: [...] } } production format
2026-02-13 10:21:06 -08:00
Affaan Mustafa
27ae5ea299 test: cover evaluate-session/suggest-compact main().catch and validate-hooks JSON parse
- evaluate-session: main().catch when HOME is non-directory (ENOTDIR)
- suggest-compact: main().catch double-failure when TMPDIR is non-directory
- validate-hooks: invalid JSON in hooks.json triggers error exit

Total tests: 831 → 834
2026-02-13 10:03:48 -08:00
Affaan Mustafa
723e69a621 test: cover deleteSession catch, pre-compact and session-end main().catch
- session-manager: deleteSession returns false when dir is read-only (EACCES)
- pre-compact: main().catch handler when HOME is non-directory (ENOTDIR)
- session-end: main().catch handler when HOME is non-directory (ENOTDIR)

Total tests: 828 → 831
2026-02-13 09:59:48 -08:00
Affaan Mustafa
241c35a589 test: cover setGlobal/setProject catch blocks and session-start main().catch
- setup-package-manager: setGlobal catch when HOME is non-directory (ENOTDIR)
- setup-package-manager: setProject catch when CWD is read-only (EACCES)
- session-start: main().catch handler when ensureDir throws (exit 0, don't block)

Total tests: 825 → 828
2026-02-13 09:55:00 -08:00
Affaan Mustafa
0c67e0571e test: cover cleanupAliases save failure, setAlias save failure, and validate-commands statSync catch
Round 73: Add 3 tests for genuine untested code paths:
- session-aliases cleanupAliases returns failure when save blocked after removing aliases
- session-aliases setAlias returns failure when save blocked on new alias creation
- validate-commands silently skips broken symlinks in skill directory scanning
2026-02-13 09:42:25 -08:00
Affaan Mustafa
02d5986049 test: cover setProjectPM save failure, deleteAlias save failure, hooks async/timeout validation
Round 72: Add 4 tests for untested code paths (818 → 822):
- package-manager.js: setProjectPackageManager wraps writeFile errors (lines 275-279)
- session-aliases.js: deleteAlias returns failure when saveAliases fails (line 299)
- validate-hooks.js: rejects non-boolean async field (line 28-31)
- validate-hooks.js: rejects negative timeout value (lines 32-35)
2026-02-13 08:12:27 -08:00
Affaan Mustafa
f623e3b429 test: cover findFiles unreadable subdir, session-start default PM, setPreferredPM save failure
Round 71: Add 3 tests for untested code paths (815 → 818):
- utils.js findFiles: recursive scan silently skips unreadable subdirectories (line 188 catch)
- session-start.js: shows getSelectionPrompt when pm.source is 'default' (lines 69-72)
- package-manager.js: setPreferredPackageManager wraps saveConfig errors (lines 250-254)
2026-02-13 08:01:15 -08:00
Affaan Mustafa
44b5a4f9f0 test: add 3 tests for untested fallback/skip/failure paths (Round 70)
- session-end.js: entry.name/entry.input fallback in direct tool_use entries
- validate-commands.js: "would create:" regex alternation skip line
- session-aliases.js: updateAliasTitle save failure with read-only dir
2026-02-13 07:48:39 -08:00
Affaan Mustafa
567664091d test: add 3 tests for untested code paths (Round 69, 812 total)
- getGitModifiedFiles: all-invalid patterns skip filtering (compiled.length === 0)
- getSessionById: returns null when sessions directory doesn't exist
- getPackageManager: global-config success path returns source 'global-config'
2026-02-13 07:35:20 -08:00
Affaan Mustafa
5031a84d6e test: add 3 tests for setup-pm --project success, demo export, --list marker (Round 68) 2026-02-13 07:23:16 -08:00
Affaan Mustafa
702c3f54b4 test: add 3 tests for session-aliases empty file, null resolve, metadata backfill (Round 67) 2026-02-13 07:18:28 -08:00
Affaan Mustafa
162222a46c test: add 3 tests for session-manager noIdMatch, session-end fallbacks (Round 66)
- session-manager.js: getSessionById with date-only string exercises the
  noIdMatch path for old-format sessions (2026-02-10 → 2026-02-10-session.tmp)
- session-end.js: extract user messages from role-only JSONL format
  ({"role":"user",...} without type field) exercises line 48 fallback
- session-end.js: nonexistent transcript_path triggers "Transcript not found"
  log path (lines 153-155), creates session with blank template

Total: 803 tests, all passing
2026-02-13 07:10:54 -08:00
Affaan Mustafa
485def8582 test: add 3 tests for evaluate-session regex, empty rules/skills dirs (Round 65)
- evaluate-session.js: verify regex whitespace tolerance around colon
  matches "type" : "user" (with spaces), not just compact JSON
- validate-rules.js: empty directory with no .md files yields Validated 0
- validate-skills.js: directory with only files, no subdirectories yields
  Validated 0

Total: 800 tests, all passing
2026-02-13 07:04:55 -08:00
Affaan Mustafa
cba6b44c61 test: add 3 tests for suggest-compact, session-aliases, typecheck (Round 64)
- suggest-compact: 'default' session ID fallback when CLAUDE_SESSION_ID empty
- session-aliases: loadAliases backfills missing version and metadata fields
- post-edit-typecheck: valid JSON without tool_input passes through unchanged

Total: 797 tests, all passing
2026-02-13 06:59:08 -08:00
Affaan Mustafa
1fcdf12b62 test: add 3 CI validator tests for untested code paths (Round 63)
- validate-hooks: object-format matcher missing matcher field (line 97-100)
- validate-commands: readFileSync catch block for unreadable .md files (lines 56-62)
- validate-commands: empty commands directory with no .md files (Validated 0)

Total: 794 tests, all passing
2026-02-13 06:55:30 -08:00
Affaan Mustafa
85a86f6747 test: add --global success, bare PM name, and source label tests (Round 62)
- setup-package-manager.test.js: --global npm writes config and exits 0
- setup-package-manager.test.js: bare PM name sets global preference
- setup-package-manager.test.js: --detect with env var shows source 'environment'

791 tests total, all passing.
2026-02-13 06:49:29 -08:00
Affaan Mustafa
3ec0aa7b50 test: add replaceInFile write failure, empty sessions dir, and corrupted global config tests (Round 61)
- utils.test.js: replaceInFile returns false on read-only file (catch block)
- session-manager.test.js: getAllSessions returns empty when sessions dir missing
- package-manager.test.js: getPackageManager falls through corrupted global config to npm default

788 tests total, all passing.
2026-02-13 06:44:52 -08:00
Affaan Mustafa
9afecedb21 test: add replaceInFile failure, console-warn overflow, and missing tool_input tests (Round 60) 2026-02-13 06:25:35 -08:00
Affaan Mustafa
7db0d316f5 test: add unreadable session file, stdin overflow, and read-only compact tests (Round 59) 2026-02-13 06:19:02 -08:00
Affaan Mustafa
99fc51dda7 test: add unreadable agent file, colonIdx edge case, and command-as-object tests (Round 58) 2026-02-13 06:14:06 -08:00
Affaan Mustafa
2fea46edc7 test: add SKILL.md-as-directory, broken symlink, and adjacent code block tests (Round 57) 2026-02-13 06:02:56 -08:00
Affaan Mustafa
990c08159c test: add tsconfig walk-up, compact fallback, and Windows atomic write tests (Round 56) 2026-02-13 05:59:07 -08:00
Affaan Mustafa
43808ccf78 test: add maxAge boundary, multi-session injection, and stdin overflow tests (Round 55)
- session-start.js excludes sessions older than 7 days (6.9d vs 8d boundary)
- session-start.js injects newest session when multiple recent sessions exist
- session-end.js handles stdin exceeding 1MB MAX_STDIN limit via env var fallback
2026-02-13 05:48:34 -08:00
Affaan Mustafa
3bc0929c6e test: add search scope, path utility, and zero-value analysis tests (Round 54)
- getAllSessions search matches only shortId, not title/content
- getSessionPath returns absolute path with correct directory structure
- analysisResults handles zero values for all data fields without crash
2026-02-13 05:43:29 -08:00
Affaan Mustafa
ad40bf3aad test: add env var fallback, console.log max matches, and format non-existent file tests
Round 53: Adds 3 hook tests — validates evaluate-session.js
falls back to CLAUDE_TRANSCRIPT_PATH env var when stdin JSON
is invalid, post-edit-console-warn.js truncates output to max
5 matches, and post-edit-format.js passes through data when
the target .tsx file doesn't exist.
2026-02-13 05:34:59 -08:00
Affaan Mustafa
f1a693f7cf test: add inline backtick ref, workflow whitespace, and code-only rule tests
Round 52: Adds 3 CI validator tests — validates command refs
inside inline backticks are checked (not stripped like fenced
blocks), workflow arrows with irregular whitespace pass, and
rule files containing only fenced code blocks are accepted.
2026-02-13 05:29:04 -08:00
Affaan Mustafa
4e520c6873 test: add timeout enforcement, async hook schema, and command format validation tests
Round 51: Adds 3 integration tests for hook infrastructure —
validates hanging hook timeout/kill mechanism, hooks.json async
hook configuration schema, and all hook command format consistency.
2026-02-13 05:23:16 -08:00
Affaan Mustafa
86844a305a test: add alias reporting, parallel compaction, and graceful degradation tests 2026-02-13 05:13:56 -08:00
Affaan Mustafa
b950fd7427 test: add typecheck extension edge cases and conditional summary section tests 2026-02-13 05:10:07 -08:00
Affaan Mustafa
71e86cc93f test: add packageManager version format and sequential save integrity tests 2026-02-13 05:04:58 -08:00
Affaan Mustafa
4f7b50fb78 test: add inline JS escape validation and frontmatter colon-less line tests 2026-02-13 05:01:28 -08:00
Affaan Mustafa
277006bd7f test: add Windows path heuristic and checkbox case sensitivity tests
Round 46: verify getSessionStats recognises C:/ and D:\ as file
paths but not bare C: without slash; verify parseSessionMetadata
only matches lowercase [x] checkboxes (not uppercase [X]).
2026-02-13 04:51:39 -08:00
Pangerkumzuk Longkumer
9db98673d0 Merge branch 'affaan-m:main' into main 2026-02-09 17:12:53 +05:30
Pangerkumzuk Longkumer
fab2e05ae7 Merge branch 'affaan-m:main' into main 2026-02-02 10:53:41 +05:30
Graceme Kamei
8d65c6d429 Merge branch 'affaan-m:main' into main 2026-01-30 19:32:07 +05:30
Panger Lkr
9b2233b5bc Merge branch 'affaan-m:main' into main 2026-01-27 10:15:34 +05:30
Panger Lkr
5a26daf392 Merge pull request #1 from pangerlkr/pangerlkr-patch-1
feat: add cloud infrastructure security skill
2026-01-23 23:14:43 +05:30
Panger Lkr
438d082e30 feat: add cloud infrastructure security skill
Add comprehensive cloud and infrastructure security skill covering:
- IAM & access control (least privilege, MFA)
- Secrets management & rotation
- Network security (VPC, firewalls)
- Logging & monitoring setup
- CI/CD pipeline security
- Cloudflare/CDN security
- Backup & disaster recovery
- Pre-deployment checklist

Complements existing security-review skill with cloud-specific guidance.
2026-01-23 22:50:59 +05:30
48 changed files with 10175 additions and 901 deletions

View File

@@ -8,7 +8,7 @@ Pre-translated configurations for [Cursor IDE](https://cursor.com), part of the
|----------|-------|-------------|
| Rules | 27 | Coding standards, security, testing, patterns (common + TypeScript/Python/Go) |
| Agents | 13 | Specialized AI agents (planner, architect, code-reviewer, tdd-guide, etc.) |
| Skills | 37 | Agent skills for backend, frontend, security, TDD, and more |
| Skills | 43 | Agent skills for backend, frontend, security, TDD, and more |
| Commands | 31 | Slash commands for planning, reviewing, testing, and deployment |
| MCP Config | 1 | Pre-configured MCP servers (GitHub, Supabase, Vercel, Railway, etc.) |

View File

@@ -12,7 +12,7 @@ alwaysApply: true
- Pair programming and code generation
- Worker agents in multi-agent systems
**Sonnet 4.5** (Best coding model):
**Sonnet 4.6** (Best coding model):
- Main development work
- Orchestrating multi-agent workflows
- Complex coding tasks

View File

@@ -0,0 +1,722 @@
---
name: cpp-coding-standards
description: C++ coding standards based on the C++ Core Guidelines (isocpp.github.io). Use when writing, reviewing, or refactoring C++ code to enforce modern, safe, and idiomatic practices.
---
# C++ Coding Standards (C++ Core Guidelines)
Comprehensive coding standards for modern C++ (C++17/20/23) derived from the [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines). Enforces type safety, resource safety, immutability, and clarity.
## When to Use
- Writing new C++ code (classes, functions, templates)
- Reviewing or refactoring existing C++ code
- Making architectural decisions in C++ projects
- Enforcing consistent style across a C++ codebase
- Choosing between language features (e.g., `enum` vs `enum class`, raw pointer vs smart pointer)
### When NOT to Use
- Non-C++ projects
- Legacy C codebases that cannot adopt modern C++ features
- Embedded/bare-metal contexts where specific guidelines conflict with hardware constraints (adapt selectively)
## Cross-Cutting Principles
These themes recur across the entire guidelines and form the foundation:
1. **RAII everywhere** (P.8, R.1, E.6, CP.20): Bind resource lifetime to object lifetime
2. **Immutability by default** (P.10, Con.1-5, ES.25): Start with `const`/`constexpr`; mutability is the exception
3. **Type safety** (P.4, I.4, ES.46-49, Enum.3): Use the type system to prevent errors at compile time
4. **Express intent** (P.3, F.1, NL.1-2, T.10): Names, types, and concepts should communicate purpose
5. **Minimize complexity** (F.2-3, ES.5, Per.4-5): Simple code is correct code
6. **Value semantics over pointer semantics** (C.10, R.3-5, F.20, CP.31): Prefer returning by value and scoped objects
## Philosophy & Interfaces (P.*, I.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **P.1** | Express ideas directly in code |
| **P.3** | Express intent |
| **P.4** | Ideally, a program should be statically type safe |
| **P.5** | Prefer compile-time checking to run-time checking |
| **P.8** | Don't leak any resources |
| **P.10** | Prefer immutable data to mutable data |
| **I.1** | Make interfaces explicit |
| **I.2** | Avoid non-const global variables |
| **I.4** | Make interfaces precisely and strongly typed |
| **I.11** | Never transfer ownership by a raw pointer or reference |
| **I.23** | Keep the number of function arguments low |
### DO
```cpp
// P.10 + I.4: Immutable, strongly typed interface
struct Temperature {
double kelvin;
};
Temperature boil(const Temperature& water);
```
### DON'T
```cpp
// Weak interface: unclear ownership, unclear units
double boil(double* temp);
// Non-const global variable
int g_counter = 0; // I.2 violation
```
## Functions (F.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **F.1** | Package meaningful operations as carefully named functions |
| **F.2** | A function should perform a single logical operation |
| **F.3** | Keep functions short and simple |
| **F.4** | If a function might be evaluated at compile time, declare it `constexpr` |
| **F.6** | If your function must not throw, declare it `noexcept` |
| **F.8** | Prefer pure functions |
| **F.16** | For "in" parameters, pass cheaply-copied types by value and others by `const&` |
| **F.20** | For "out" values, prefer return values to output parameters |
| **F.21** | To return multiple "out" values, prefer returning a struct |
| **F.43** | Never return a pointer or reference to a local object |
### Parameter Passing
```cpp
// F.16: Cheap types by value, others by const&
void print(int x); // cheap: by value
void analyze(const std::string& data); // expensive: by const&
void transform(std::string s); // sink: by value (will move)
// F.20 + F.21: Return values, not output parameters
struct ParseResult {
std::string token;
int position;
};
ParseResult parse(std::string_view input); // GOOD: return struct
// BAD: output parameters
void parse(std::string_view input,
std::string& token, int& pos); // avoid this
```
### Pure Functions and constexpr
```cpp
// F.4 + F.8: Pure, constexpr where possible
constexpr int factorial(int n) noexcept {
return (n <= 1) ? 1 : n * factorial(n - 1);
}
static_assert(factorial(5) == 120);
```
### Anti-Patterns
- Returning `T&&` from functions (F.45)
- Using `va_arg` / C-style variadics (F.55)
- Capturing by reference in lambdas passed to other threads (F.53)
- Returning `const T` which inhibits move semantics (F.49)
## Classes & Class Hierarchies (C.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **C.2** | Use `class` if invariant exists; `struct` if data members vary independently |
| **C.9** | Minimize exposure of members |
| **C.20** | If you can avoid defining default operations, do (Rule of Zero) |
| **C.21** | If you define or `=delete` any copy/move/destructor, handle them all (Rule of Five) |
| **C.35** | Base class destructor: public virtual or protected non-virtual |
| **C.41** | A constructor should create a fully initialized object |
| **C.46** | Declare single-argument constructors `explicit` |
| **C.67** | A polymorphic class should suppress public copy/move |
| **C.128** | Virtual functions: specify exactly one of `virtual`, `override`, or `final` |
### Rule of Zero
```cpp
// C.20: Let the compiler generate special members
struct Employee {
std::string name;
std::string department;
int id;
// No destructor, copy/move constructors, or assignment operators needed
};
```
### Rule of Five
```cpp
// C.21: If you must manage a resource, define all five
class Buffer {
public:
explicit Buffer(std::size_t size)
: data_(std::make_unique<char[]>(size)), size_(size) {}
~Buffer() = default;
Buffer(const Buffer& other)
: data_(std::make_unique<char[]>(other.size_)), size_(other.size_) {
std::copy_n(other.data_.get(), size_, data_.get());
}
Buffer& operator=(const Buffer& other) {
if (this != &other) {
auto new_data = std::make_unique<char[]>(other.size_);
std::copy_n(other.data_.get(), other.size_, new_data.get());
data_ = std::move(new_data);
size_ = other.size_;
}
return *this;
}
Buffer(Buffer&&) noexcept = default;
Buffer& operator=(Buffer&&) noexcept = default;
private:
std::unique_ptr<char[]> data_;
std::size_t size_;
};
```
### Class Hierarchy
```cpp
// C.35 + C.128: Virtual destructor, use override
class Shape {
public:
virtual ~Shape() = default;
virtual double area() const = 0; // C.121: pure interface
};
class Circle : public Shape {
public:
explicit Circle(double r) : radius_(r) {}
double area() const override { return pi * radius_ * radius_; }
private:
static constexpr double pi = 3.14159; // ES.45: named constant instead of a magic number
double radius_;
};
```
### Anti-Patterns
- Calling virtual functions in constructors/destructors (C.82)
- Using `memset`/`memcpy` on non-trivial types (C.90)
- Providing different default arguments for virtual function and overrider (C.140)
- Making data members `const` or references, which suppresses move/copy (C.12)
## Resource Management (R.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **R.1** | Manage resources automatically using RAII |
| **R.3** | A raw pointer (`T*`) is non-owning |
| **R.5** | Prefer scoped objects; don't heap-allocate unnecessarily |
| **R.10** | Avoid `malloc()`/`free()` |
| **R.11** | Avoid calling `new` and `delete` explicitly |
| **R.20** | Use `unique_ptr` or `shared_ptr` to represent ownership |
| **R.21** | Prefer `unique_ptr` over `shared_ptr` unless sharing ownership |
| **R.22** | Use `make_shared()` to make `shared_ptr`s |
### Smart Pointer Usage
```cpp
// R.11 + R.20 + R.21: RAII with smart pointers
auto widget = std::make_unique<Widget>("config"); // unique ownership
auto cache = std::make_shared<Cache>(1024); // shared ownership
// R.3: Raw pointer = non-owning observer
void render(const Widget* w) { // does NOT own w
if (w) w->draw();
}
render(widget.get());
```
### RAII Pattern
```cpp
// R.1: Resource acquisition is initialization
class FileHandle {
public:
explicit FileHandle(const std::string& path)
: handle_(std::fopen(path.c_str(), "r")) {
if (!handle_) throw std::runtime_error("Failed to open: " + path);
}
~FileHandle() {
if (handle_) std::fclose(handle_);
}
FileHandle(const FileHandle&) = delete;
FileHandle& operator=(const FileHandle&) = delete;
FileHandle(FileHandle&& other) noexcept
: handle_(std::exchange(other.handle_, nullptr)) {}
FileHandle& operator=(FileHandle&& other) noexcept {
if (this != &other) {
if (handle_) std::fclose(handle_);
handle_ = std::exchange(other.handle_, nullptr);
}
return *this;
}
private:
std::FILE* handle_;
};
```
### Anti-Patterns
- Naked `new`/`delete` (R.11)
- `malloc()`/`free()` in C++ code (R.10)
- Multiple resource allocations in a single expression (R.13 -- exception safety hazard)
- `shared_ptr` where `unique_ptr` suffices (R.21)
## Expressions & Statements (ES.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **ES.5** | Keep scopes small |
| **ES.20** | Always initialize an object |
| **ES.23** | Prefer `{}` initializer syntax |
| **ES.25** | Declare objects `const` or `constexpr` unless modification is intended |
| **ES.28** | Use lambdas for complex initialization of `const` variables |
| **ES.45** | Avoid magic constants; use symbolic constants |
| **ES.46** | Avoid narrowing/lossy arithmetic conversions |
| **ES.47** | Use `nullptr` rather than `0` or `NULL` |
| **ES.48** | Avoid casts |
| **ES.50** | Don't cast away `const` |
### Initialization
```cpp
// ES.20 + ES.23 + ES.25: Always initialize, prefer {}, default to const
const int max_retries{3};
const std::string name{"widget"};
const std::vector<int> primes{2, 3, 5, 7, 11};
// ES.28: Lambda for complex const initialization
const auto config = [&] {
Config c;
c.timeout = std::chrono::seconds{30};
c.retries = max_retries;
c.verbose = debug_mode;
return c;
}();
```
### Anti-Patterns
- Uninitialized variables (ES.20)
- Using `0` or `NULL` as pointer (ES.47 -- use `nullptr`)
- C-style casts (ES.48 -- use `static_cast`, `const_cast`, etc.)
- Casting away `const` (ES.50)
- Magic numbers without named constants (ES.45)
- Mixing signed and unsigned arithmetic (ES.100)
- Reusing names in nested scopes (ES.12)
## Error Handling (E.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **E.1** | Develop an error-handling strategy early in a design |
| **E.2** | Throw an exception to signal that a function can't perform its assigned task |
| **E.6** | Use RAII to prevent leaks |
| **E.12** | Use `noexcept` when throwing is impossible or unacceptable |
| **E.14** | Use purpose-designed user-defined types as exceptions |
| **E.15** | Throw by value, catch by reference |
| **E.16** | Destructors, deallocation, and swap must never fail |
| **E.17** | Don't try to catch every exception in every function |
### Exception Hierarchy
```cpp
// E.14 + E.15: Custom exception types, throw by value, catch by reference
class AppError : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
class NetworkError : public AppError {
public:
NetworkError(const std::string& msg, int code)
: AppError(msg), status_code(code) {}
int status_code;
};
void fetch_data(const std::string& url) {
// E.2: Throw to signal failure
throw NetworkError("connection refused", 503);
}
void run() {
try {
fetch_data("https://api.example.com");
} catch (const NetworkError& e) {
log_error(e.what(), e.status_code);
} catch (const AppError& e) {
log_error(e.what());
}
// E.17: Don't catch everything here -- let unexpected errors propagate
}
```
### Anti-Patterns
- Throwing built-in types like `int` or string literals (E.14)
- Catching by value (slicing risk) (E.15)
- Empty catch blocks that silently swallow errors
- Using exceptions for flow control (E.3)
- Error handling based on global state like `errno` (E.28)
## Constants & Immutability (Con.*)
### All Rules
| Rule | Summary |
|------|---------|
| **Con.1** | By default, make objects immutable |
| **Con.2** | By default, make member functions `const` |
| **Con.3** | By default, pass pointers and references to `const` |
| **Con.4** | Use `const` for values that don't change after construction |
| **Con.5** | Use `constexpr` for values computable at compile time |
```cpp
// Con.1 through Con.5: Immutability by default
class Sensor {
public:
explicit Sensor(std::string id) : id_(std::move(id)) {}
// Con.2: const member functions by default
const std::string& id() const { return id_; }
double last_reading() const { return reading_; }
// Only non-const when mutation is required
void record(double value) { reading_ = value; }
private:
std::string id_; // Con.4 intent: set in the constructor and never modified after (kept non-const so copy/move still work -- see C.12)
double reading_{0.0};
};
// Con.3: Pass by const reference
void display(const Sensor& s) {
std::cout << s.id() << ": " << s.last_reading() << '\n';
}
// Con.5: Compile-time constants (lower-case names per NL.9 -- ALL_CAPS is reserved for macros)
constexpr double pi = 3.14159265358979;
constexpr int max_sensors = 256;
```
## Concurrency & Parallelism (CP.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **CP.2** | Avoid data races |
| **CP.3** | Minimize explicit sharing of writable data |
| **CP.4** | Think in terms of tasks, rather than threads |
| **CP.8** | Don't use `volatile` for synchronization |
| **CP.20** | Use RAII, never plain `lock()`/`unlock()` |
| **CP.21** | Use `std::scoped_lock` to acquire multiple mutexes |
| **CP.22** | Never call unknown code while holding a lock |
| **CP.42** | Don't wait without a condition |
| **CP.44** | Remember to name your `lock_guard`s and `unique_lock`s |
| **CP.100** | Don't use lock-free programming unless you absolutely have to |
### Safe Locking
```cpp
// CP.20 + CP.44: RAII locks, always named
class ThreadSafeQueue {
public:
void push(int value) {
std::lock_guard<std::mutex> lock(mutex_); // CP.44: named!
queue_.push(value);
cv_.notify_one();
}
int pop() {
std::unique_lock<std::mutex> lock(mutex_);
// CP.42: Always wait with a condition
cv_.wait(lock, [this] { return !queue_.empty(); });
const int value = queue_.front();
queue_.pop();
return value;
}
private:
std::mutex mutex_; // CP.50: mutex with its data
std::condition_variable cv_;
std::queue<int> queue_;
};
```
### Multiple Mutexes
```cpp
// CP.21: std::scoped_lock for multiple mutexes (deadlock-free)
void transfer(Account& from, Account& to, double amount) {
std::scoped_lock lock(from.mutex_, to.mutex_);
from.balance_ -= amount;
to.balance_ += amount;
}
```
### Anti-Patterns
- `volatile` for synchronization (CP.8 -- it's for hardware I/O only)
- Detaching threads (CP.26 -- lifetime management becomes nearly impossible)
- Unnamed lock guards: `std::lock_guard<std::mutex>(m);` destroys immediately (CP.44)
- Holding locks while calling callbacks (CP.22 -- deadlock risk)
- Lock-free programming without deep expertise (CP.100)
## Templates & Generic Programming (T.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **T.1** | Use templates to raise the level of abstraction |
| **T.2** | Use templates to express algorithms for many argument types |
| **T.10** | Specify concepts for all template arguments |
| **T.11** | Use standard concepts whenever possible |
| **T.13** | Prefer shorthand notation for simple concepts |
| **T.43** | Prefer `using` over `typedef` |
| **T.120** | Use template metaprogramming only when you really need to |
| **T.144** | Don't specialize function templates (overload instead) |
### Concepts (C++20)
```cpp
#include <concepts>
// T.10 + T.11: Constrain templates with standard concepts
template<std::integral T>
T gcd(T a, T b) {
while (b != 0) {
a = std::exchange(b, a % b);
}
return a;
}
// T.13: Shorthand concept syntax
void sort(std::ranges::random_access_range auto& range) {
std::ranges::sort(range);
}
// Custom concept for domain-specific constraints
template<typename T>
concept Serializable = requires(const T& t) {
{ t.serialize() } -> std::convertible_to<std::string>;
};
template<Serializable T>
void save(const T& obj, const std::string& path);
```
### Anti-Patterns
- Unconstrained templates in visible namespaces (T.47)
- Specializing function templates instead of overloading (T.144)
- Template metaprogramming where `constexpr` suffices (T.120)
- `typedef` instead of `using` (T.43)
## Standard Library (SL.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **SL.1** | Use libraries wherever possible |
| **SL.2** | Prefer the standard library to other libraries |
| **SL.con.1** | Prefer `std::array` or `std::vector` over C arrays |
| **SL.con.2** | Prefer `std::vector` by default |
| **SL.str.1** | Use `std::string` to own character sequences |
| **SL.str.2** | Use `std::string_view` to refer to character sequences |
| **SL.io.50** | Avoid `endl` (use `'\n'` -- `endl` forces a flush) |
```cpp
// SL.con.1 + SL.con.2: Prefer vector/array over C arrays
const std::array<int, 4> fixed_data{1, 2, 3, 4};
std::vector<std::string> dynamic_data;
// SL.str.1 + SL.str.2: string owns, string_view observes
std::string build_greeting(std::string_view name) {
return "Hello, " + std::string(name) + "!";
}
// SL.io.50: Use '\n' not endl
std::cout << "result: " << value << '\n';
```
## Enumerations (Enum.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **Enum.1** | Prefer enumerations over macros |
| **Enum.3** | Prefer `enum class` over plain `enum` |
| **Enum.5** | Don't use ALL_CAPS for enumerators |
| **Enum.6** | Avoid unnamed enumerations |
```cpp
// Enum.3 + Enum.5: Scoped enum, no ALL_CAPS
enum class Color { red, green, blue };
enum class LogLevel { debug, info, warning, error };
// BAD: plain enum leaks names, ALL_CAPS clashes with macros
enum { RED, GREEN, BLUE }; // Enum.3 + Enum.5 + Enum.6 violation
#define MAX_SIZE 100 // Enum.1 violation -- use constexpr
```
## Source Files & Naming (SF.*, NL.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **SF.1** | Use `.cpp` for code files and `.h` for interface files |
| **SF.7** | Don't write `using namespace` at global scope in a header |
| **SF.8** | Use `#include` guards for all `.h` files |
| **SF.11** | Header files should be self-contained |
| **NL.5** | Avoid encoding type information in names (no Hungarian notation) |
| **NL.8** | Use a consistent naming style |
| **NL.9** | Use ALL_CAPS for macro names only |
| **NL.10** | Prefer `underscore_style` names |
### Header Guard
```cpp
// SF.8: Include guard (or #pragma once)
#ifndef PROJECT_MODULE_WIDGET_H
#define PROJECT_MODULE_WIDGET_H
// SF.11: Self-contained -- include everything this header needs
#include <string>
#include <vector>
namespace project::module {
class Widget {
public:
explicit Widget(std::string name);
const std::string& name() const;
private:
std::string name_;
};
} // namespace project::module
#endif // PROJECT_MODULE_WIDGET_H
```
### Naming Conventions
```cpp
// NL.8 + NL.10: Consistent underscore_style
namespace my_project {
constexpr int max_buffer_size = 4096; // NL.9: not ALL_CAPS (it's not a macro)
class tcp_connection { // underscore_style class
public:
void send_message(std::string_view msg);
bool is_connected() const;
private:
std::string host_; // trailing underscore for members
int port_;
};
} // namespace my_project
```
### Anti-Patterns
- `using namespace std;` in a header at global scope (SF.7)
- Headers that depend on inclusion order (SF.10, SF.11)
- Hungarian notation like `strName`, `iCount` (NL.5)
- ALL_CAPS for anything other than macros (NL.9)
## Performance (Per.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **Per.1** | Don't optimize without reason |
| **Per.2** | Don't optimize prematurely |
| **Per.6** | Don't make claims about performance without measurements |
| **Per.7** | Design to enable optimization |
| **Per.10** | Rely on the static type system |
| **Per.11** | Move computation from run time to compile time |
| **Per.19** | Access memory predictably |
### Guidelines
```cpp
// Per.11: Compile-time computation where possible
constexpr auto lookup_table = [] {
std::array<int, 256> table{};
for (int i = 0; i < 256; ++i) {
table[i] = i * i;
}
return table;
}();
// Per.19: Prefer contiguous data for cache-friendliness
std::vector<Point> points; // GOOD: contiguous
std::vector<std::unique_ptr<Point>> indirect_points; // BAD: pointer chasing
```
### Anti-Patterns
- Optimizing without profiling data (Per.1, Per.6)
- Choosing "clever" low-level code over clear abstractions (Per.4, Per.5)
- Ignoring data layout and cache behavior (Per.19)
## Quick Reference Checklist
Before marking C++ work complete:
- [ ] No raw `new`/`delete` -- use smart pointers or RAII (R.11)
- [ ] Objects initialized at declaration (ES.20)
- [ ] Variables are `const`/`constexpr` by default (Con.1, ES.25)
- [ ] Member functions are `const` where possible (Con.2)
- [ ] `enum class` instead of plain `enum` (Enum.3)
- [ ] `nullptr` instead of `0`/`NULL` (ES.47)
- [ ] No narrowing conversions (ES.46)
- [ ] No C-style casts (ES.48)
- [ ] Single-argument constructors are `explicit` (C.46)
- [ ] Rule of Zero or Rule of Five applied (C.20, C.21)
- [ ] Base class destructors are public virtual or protected non-virtual (C.35)
- [ ] Templates are constrained with concepts (T.10)
- [ ] No `using namespace` in headers at global scope (SF.7)
- [ ] Headers have include guards and are self-contained (SF.8, SF.11)
- [ ] Locks use RAII (`scoped_lock`/`lock_guard`) (CP.20)
- [ ] Exceptions are custom types, thrown by value, caught by reference (E.14, E.15)
- [ ] `'\n'` instead of `std::endl` (SL.io.50)
- [ ] No magic numbers (ES.45)

View File

@@ -0,0 +1,18 @@
steps:
- name: Setup Go environment
uses: actions/setup-go@v6.2.0
with:
# The Go version to download (if necessary) and use. Supports semver spec and ranges. Be sure to enclose this option in single quotation marks.
go-version: # optional
# Path to the go.mod, go.work, .go-version, or .tool-versions file.
go-version-file: # optional
# Set this option to true if you want the action to always check for the latest available version that satisfies the version spec
check-latest: # optional
# Used to pull Go distributions from go-versions. Since there's a default, this is typically not supplied by the user. When running this action on github.com, the default value is sufficient. When running on GHES, you can pass a personal access token for github.com if you are experiencing rate limiting.
token: # optional, default is ${{ github.server_url == 'https://github.com' && github.token || '' }}
# Used to specify whether caching is needed. Set to true, if you'd like to enable caching.
cache: # optional, default is true
# Used to specify the path to a dependency file - go.sum
cache-dependency-path: # optional
# Target architecture for Go to use. Examples: x86, x64. Will use system architecture by default.
architecture: # optional

View File

@@ -1,34 +0,0 @@
name: AgentShield Security Scan
on:
push:
branches: [main]
pull_request:
branches: [main]
# Prevent duplicate runs
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
# Minimal permissions
permissions:
contents: read
jobs:
agentshield:
name: AgentShield Scan
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Run AgentShield Security Scan
uses: affaan-m/agentshield@v1
with:
path: '.'
min-severity: 'medium'
format: 'terminal'
fail-on-findings: 'false'

60
CLAUDE.md Normal file
View File

@@ -0,0 +1,60 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
This is a **Claude Code plugin** - a collection of production-ready agents, skills, hooks, commands, rules, and MCP configurations. The project provides battle-tested workflows for software development using Claude Code.
## Running Tests
```bash
# Run all tests
node tests/run-all.js
# Run individual test files
node tests/lib/utils.test.js
node tests/lib/package-manager.test.js
node tests/hooks/hooks.test.js
```
## Architecture
The project is organized into several core components:
- **agents/** - Specialized subagents for delegation (planner, code-reviewer, tdd-guide, etc.)
- **skills/** - Workflow definitions and domain knowledge (coding standards, patterns, testing)
- **commands/** - Slash commands invoked by users (/tdd, /plan, /e2e, etc.)
- **hooks/** - Trigger-based automations (session persistence, pre/post-tool hooks)
- **rules/** - Always-follow guidelines (security, coding style, testing requirements)
- **mcp-configs/** - MCP server configurations for external integrations
- **scripts/** - Cross-platform Node.js utilities for hooks and setup
- **tests/** - Test suite for scripts and utilities
## Key Commands
- `/tdd` - Test-driven development workflow
- `/plan` - Implementation planning
- `/e2e` - Generate and run E2E tests
- `/code-review` - Quality review
- `/build-fix` - Fix build errors
- `/learn` - Extract patterns from sessions
- `/skill-create` - Generate skills from git history
## Development Notes
- Package manager detection: npm, pnpm, yarn, bun (configurable via `CLAUDE_PACKAGE_MANAGER` env var or project config)
- Cross-platform: Windows, macOS, Linux support via Node.js scripts
- Agent format: Markdown with YAML frontmatter (name, description, tools, model)
- Skill format: Markdown with clear sections for when to use, how it works, examples
- Hook format: JSON with matcher conditions and command/notification hooks
## Contributing
Follow the formats in CONTRIBUTING.md:
- Agents: Markdown with frontmatter (name, description, tools, model)
- Skills: Clear sections (When to Use, How It Works, Examples)
- Commands: Markdown with description frontmatter
- Hooks: JSON with matcher and hooks array
File naming: lowercase with hyphens (e.g., `python-reviewer.md`, `tdd-workflow.md`)

View File

@@ -13,7 +13,7 @@
![Java](https://img.shields.io/badge/-Java-ED8B00?logo=openjdk&logoColor=white)
![Markdown](https://img.shields.io/badge/-Markdown-000000?logo=markdown&logoColor=white)
> **42K+ stars** | **5K+ forks** | **24 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**
> **50K+ stars** | **6K+ forks** | **30 contributors** | **6 languages supported** | **Anthropic Hackathon Winner**
---
@@ -143,7 +143,7 @@ For manual install instructions see the README in the `rules/` folder.
/plugin list everything-claude-code@everything-claude-code
```
**That's it!** You now have access to 13 agents, 37 skills, and 31 commands.
**That's it!** You now have access to 13 agents, 44 skills, and 32 commands.
---
@@ -222,6 +222,7 @@ everything-claude-code/
| |-- verification-loop/ # Continuous verification (Longform Guide)
| |-- golang-patterns/ # Go idioms and best practices
| |-- golang-testing/ # Go testing patterns, TDD, benchmarks
| |-- cpp-coding-standards/ # C++ coding standards from C++ Core Guidelines (NEW)
| |-- cpp-testing/ # C++ testing with GoogleTest, CMake/CTest (NEW)
| |-- django-patterns/ # Django patterns, models, views (NEW)
| |-- django-security/ # Django security best practices (NEW)
@@ -245,6 +246,12 @@ everything-claude-code/
| |-- deployment-patterns/ # CI/CD, Docker, health checks, rollbacks (NEW)
| |-- docker-patterns/ # Docker Compose, networking, volumes, container security (NEW)
| |-- e2e-testing/ # Playwright E2E patterns and Page Object Model (NEW)
| |-- content-hash-cache-pattern/ # SHA-256 content hash caching for file processing (NEW)
| |-- cost-aware-llm-pipeline/ # LLM cost optimization, model routing, budget tracking (NEW)
| |-- regex-vs-llm-structured-text/ # Decision framework: regex vs LLM for text parsing (NEW)
| |-- swift-actor-persistence/ # Thread-safe Swift data persistence with actors (NEW)
| |-- swift-protocol-di-testing/ # Protocol-based DI for testable Swift code (NEW)
| |-- search-first/ # Research-before-coding workflow (NEW)
|
|-- commands/ # Slash commands for quick execution
| |-- tdd.md # /tdd - Test-driven development
@@ -254,6 +261,7 @@ everything-claude-code/
| |-- build-fix.md # /build-fix - Fix build errors
| |-- refactor-clean.md # /refactor-clean - Dead code removal
| |-- learn.md # /learn - Extract patterns mid-session (Longform Guide)
| |-- learn-eval.md # /learn-eval - Extract, evaluate, and save patterns (NEW)
| |-- checkpoint.md # /checkpoint - Save verification state (Longform Guide)
| |-- verify.md # /verify - Run verification loop (Longform Guide)
| |-- setup-pm.md # /setup-pm - Configure package manager
@@ -486,6 +494,7 @@ This gives you instant access to all commands, agents, skills, and hooks.
> git clone https://github.com/affaan-m/everything-claude-code.git
>
> # Option A: User-level rules (applies to all projects)
> mkdir -p ~/.claude/rules
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
@@ -691,11 +700,12 @@ Each component is fully independent.
</details>
<details>
<summary><b>Does this work with Cursor / OpenCode?</b></summary>
<summary><b>Does this work with Cursor / OpenCode / Codex?</b></summary>
Yes. ECC is cross-platform:
- **Cursor**: Pre-translated configs in `.cursor/`. See [Cursor IDE Support](#cursor-ide-support).
- **OpenCode**: Full plugin support in `.opencode/`. See [OpenCode Support](#-opencode-support).
- **Codex**: First-class support with adapter drift guards and SessionStart fallback. See PR [#257](https://github.com/affaan-m/everything-claude-code/pull/257).
- **Claude Code**: Native — this is the primary target.
</details>
@@ -800,8 +810,8 @@ The configuration is automatically detected from `.opencode/opencode.json`.
| Feature | Claude Code | OpenCode | Status |
|---------|-------------|----------|--------|
| Agents | ✅ 13 agents | ✅ 12 agents | **Claude Code leads** |
| Commands | ✅ 31 commands | ✅ 24 commands | **Claude Code leads** |
| Skills | ✅ 37 skills | ✅ 16 skills | **Claude Code leads** |
| Commands | ✅ 32 commands | ✅ 24 commands | **Claude Code leads** |
| Skills | ✅ 44 skills | ✅ 16 skills | **Claude Code leads** |
| Hooks | ✅ 3 phases | ✅ 20+ events | **OpenCode has more!** |
| Rules | ✅ 8 rules | ✅ 8 rules | **Full parity** |
| MCP Servers | ✅ Full | ✅ Full | **Full parity** |
@@ -821,7 +831,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
**Additional OpenCode events**: `file.edited`, `file.watcher.updated`, `message.updated`, `lsp.client.diagnostics`, `tui.toast.show`, and more.
### Available Commands (31)
### Available Commands (32)
| Command | Description |
|---------|-------------|
@@ -855,6 +865,7 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
| `/instinct-import` | Import instincts |
| `/instinct-export` | Export instincts |
| `/evolve` | Cluster instincts into skills |
| `/learn-eval` | Extract and evaluate patterns before saving |
| `/setup-pm` | Configure package manager |
### Plugin Installation

View File

@@ -95,7 +95,7 @@ cp -r everything-claude-code/rules/* ~/.claude/rules/
/plugin list everything-claude-code@everything-claude-code
```
**完成!** 你现在可以使用 13 个代理、37 个技能和 31 个命令。
**完成!** 你现在可以使用 13 个代理、44 个技能和 32 个命令。
---

91
commands/learn-eval.md Normal file
View File

@@ -0,0 +1,91 @@
---
description: Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project).
---
# /learn-eval - Extract, Evaluate, then Save
Extends `/learn` with a quality gate and save-location decision before writing any skill file.
## What to Extract
Look for:
1. **Error Resolution Patterns** — root cause + fix + reusability
2. **Debugging Techniques** — non-obvious steps, tool combinations
3. **Workarounds** — library quirks, API limitations, version-specific fixes
4. **Project-Specific Patterns** — conventions, architecture decisions, integration patterns
## Process
1. Review the session for extractable patterns
2. Identify the most valuable/reusable insight
3. **Determine save location:**
- Ask: "Would this pattern be useful in a different project?"
- **Global** (`~/.claude/skills/learned/`): Generic patterns usable across 2+ projects (bash compatibility, LLM API behavior, debugging techniques, etc.)
- **Project** (`.claude/skills/learned/` in current project): Project-specific knowledge (quirks of a particular config file, project-specific architecture decisions, etc.)
- When in doubt, choose Global (moving Global → Project is easier than the reverse)
4. Draft the skill file using this format:
```markdown
---
name: pattern-name
description: "Under 130 characters"
user-invocable: false
origin: auto-extracted
---
# [Descriptive Pattern Name]
**Extracted:** [Date]
**Context:** [Brief description of when this applies]
## Problem
[What problem this solves - be specific]
## Solution
[The pattern/technique/workaround - with code examples]
## When to Use
[Trigger conditions]
```
5. **Self-evaluate before saving** using this rubric:
| Dimension | 1 | 3 | 5 |
|-----------|---|---|---|
| Specificity | Abstract principles only, no code examples | Representative code example present | Rich examples covering all usage patterns |
| Actionability | Unclear what to do | Main steps are understandable | Immediately actionable, edge cases covered |
| Scope Fit | Too broad or too narrow | Mostly appropriate, some boundary ambiguity | Name, trigger, and content perfectly aligned |
| Non-redundancy | Nearly identical to another skill | Some overlap but unique perspective exists | Completely unique value |
| Coverage | Covers only a fraction of the target task | Main cases covered, common variants missing | Main cases, edge cases, and pitfalls covered |
- Score each dimension 1–5
- If any dimension scores 1–2, improve the draft and re-score until all dimensions are ≥ 3
- Show the user the scores table and the final draft
6. Ask user to confirm:
- Show: proposed save path + scores table + final draft
- Wait for explicit confirmation before writing
7. Save to the determined location
## Output Format for Step 5 (scores table)
| Dimension | Score | Rationale |
|-----------|-------|-----------|
| Specificity | N/5 | ... |
| Actionability | N/5 | ... |
| Scope Fit | N/5 | ... |
| Non-redundancy | N/5 | ... |
| Coverage | N/5 | ... |
| **Total** | **N/25** | |
## Notes
- Don't extract trivial fixes (typos, simple syntax errors)
- Don't extract one-time issues (specific API outages, etc.)
- Focus on patterns that will save time in future sessions
- Keep skills focused — one pattern per skill
- If Coverage score is low, add related variants before saving

View File

@@ -140,7 +140,7 @@ cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
/plugin list everything-claude-code@everything-claude-code
```
**完了です!** これで13のエージェント、37のスキル、31のコマンドにアクセスできます。
**完了です!** これで13のエージェント、44のスキル、32のコマンドにアクセスできます。
---
@@ -454,6 +454,7 @@ Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded fil
> git clone https://github.com/affaan-m/everything-claude-code.git
>
> # オプション A: ユーザーレベルルール(すべてのプロジェクトに適用)
> mkdir -p ~/.claude/rules
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # スタックを選択
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/

View File

@@ -456,6 +456,7 @@ Duplicate hooks file detected: ./hooks/hooks.json resolves to already-loaded fil
> git clone https://github.com/affaan-m/everything-claude-code.git
>
> # 选项 A:用户级规则(适用于所有项目)
> mkdir -p ~/.claude/rules
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择您的技术栈
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/

View File

@@ -13,7 +13,7 @@
"C": "Cyan - username, todos",
"T": "Gray - model name"
},
"output_example": "affoon:~/projects/myapp main* ctx:73% sonnet-4.5 14:30 todos:3",
"output_example": "affoon:~/projects/myapp main* ctx:73% sonnet-4.6 14:30 todos:3",
"usage": "Copy the statusLine object to your ~/.claude/settings.json"
}
}

View File

@@ -37,7 +37,7 @@
"hooks": [
{
"type": "command",
"command": "node -e \"const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const p=i.tool_input?.file_path||'';if(/\\.(md|txt)$/.test(p)&&!/(README|CLAUDE|AGENTS|CONTRIBUTING)\\.md$/.test(p)){console.error('[Hook] BLOCKED: Unnecessary documentation file creation');console.error('[Hook] File: '+p);console.error('[Hook] Use README.md for documentation instead');process.exit(2)}}catch{}console.log(d)})\""
"command": "node -e \"const fs=require('fs');let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{try{const i=JSON.parse(d);const p=i.tool_input?.file_path||'';if(/\\.(md|txt)$/.test(p)&&!/(README|CLAUDE|AGENTS|CONTRIBUTING)\\.md$/.test(p)&&!/\\.claude\\/plans\\//.test(p)){console.error('[Hook] BLOCKED: Unnecessary documentation file creation');console.error('[Hook] File: '+p);console.error('[Hook] Use README.md for documentation instead');process.exit(2)}}catch{}console.log(d)})\""
}
],
"description": "Block creation of random .md files - keeps docs consolidated"

642
llms.txt
View File

@@ -1,642 +0,0 @@
# OpenCode Documentation for LLMs
> OpenCode is an open-source AI coding agent available as a terminal interface, desktop application, or IDE extension. It helps developers write code, add features, and understand codebases through conversational interactions.
## Installation
Multiple installation methods are available:
```bash
# curl script (recommended)
curl -fsSL https://opencode.ai/install | bash
# Node.js package managers
npm install -g opencode
bun install -g opencode
pnpm add -g opencode
yarn global add opencode
# Homebrew (macOS/Linux)
brew install anomalyco/tap/opencode
# Arch Linux
paru -S opencode
# Windows
choco install opencode # Chocolatey
scoop install opencode # Scoop
# Or use Docker/WSL (recommended for Windows)
```
## Configuration
Configuration file: `opencode.json` or `opencode.jsonc` (with comments)
Schema: `https://opencode.ai/config.json`
### Core Settings
```json
{
"$schema": "https://opencode.ai/config.json",
"model": "anthropic/claude-sonnet-4-5",
"small_model": "anthropic/claude-haiku-4-5",
"default_agent": "build",
"instructions": [
"CONTRIBUTING.md",
"docs/guidelines.md"
],
"plugin": [
"opencode-helicone-session",
"./.opencode/plugins"
],
"agent": { /* agent definitions */ },
"command": { /* command definitions */ },
"permission": {
"edit": "ask",
"bash": "ask",
"mcp_*": "ask"
},
"tools": {
"write": true,
"bash": true
}
}
```
### Environment Variables
Use `{env:VAR_NAME}` for environment variables and `{file:path}` for file contents in configuration values.
## Agents
OpenCode supports two agent types:
### Primary Agents
Main assistants you interact with directly. Switch between them using Tab or configured keybinds.
### Subagents
Specialized assistants that primary agents invoke automatically or through `@` mentions (e.g., `@general help me search`).
### Built-in Agents
| Agent | Type | Description |
|-------|------|-------------|
| build | primary | Default agent with full tool access for development work |
| plan | primary | Restricted agent for analysis; file edits and bash set to "ask" |
| general | subagent | Full tool access for multi-step research tasks |
| explore | subagent | Read-only agent for rapid codebase exploration |
| compaction | system | Hidden agent for context compaction |
| title | system | Hidden agent for title generation |
| summary | system | Hidden agent for summarization |
### Agent Configuration
JSON format in `opencode.json`:
```json
{
"agent": {
"code-reviewer": {
"description": "Reviews code for best practices",
"mode": "subagent",
"model": "anthropic/claude-opus-4-5",
"prompt": "{file:.opencode/prompts/agents/code-reviewer.txt}",
"temperature": 0.3,
"tools": {
"write": false,
"edit": false,
"read": true,
"bash": true
},
"permission": {
"edit": "deny",
"bash": {
"*": "ask",
"git status": "allow"
}
},
"steps": 10
}
}
}
```
Markdown format in `~/.config/opencode/agents/` or `.opencode/agents/`:
```markdown
---
description: Expert code review specialist
mode: subagent
model: anthropic/claude-opus-4-5
temperature: 0.3
tools:
write: false
read: true
bash: true
permission:
edit: deny
steps: 10
---
You are an expert code reviewer. Review code for quality, security, and maintainability...
```
### Agent Configuration Options
| Option | Purpose | Values |
|--------|---------|--------|
| description | Required field explaining agent purpose | string |
| mode | Agent type | "primary", "subagent", or "all" |
| model | Override default model | "provider/model-id" |
| temperature | Control randomness | 0.0-1.0 (lower = focused) |
| tools | Enable/disable specific tools | object or wildcards |
| permission | Set tool permissions | "ask", "allow", or "deny" |
| steps | Limit agentic iterations | number |
| prompt | Reference custom prompt file | "{file:./path}" |
| top_p | Alternative randomness control | 0.0-1.0 |
## Commands
### Built-in Commands
- `/init` - Initialize project analysis (creates AGENTS.md)
- `/undo` - Undo last change
- `/redo` - Redo undone change
- `/share` - Generate shareable conversation link
- `/help` - Show help
- `/connect` - Configure API providers
### Custom Commands
**Markdown files** in `~/.config/opencode/commands/` (global) or `.opencode/commands/` (project):
```markdown
---
description: Create implementation plan
agent: planner
subtask: true
---
Create a detailed implementation plan for: $ARGUMENTS
Include:
- Requirements analysis
- Architecture review
- Step breakdown
- Testing strategy
```
**JSON configuration** in `opencode.json`:
```json
{
"command": {
"plan": {
"description": "Create implementation plan",
"template": "Create a detailed implementation plan for: $ARGUMENTS",
"agent": "planner",
"subtask": true
},
"test": {
"template": "Run tests with coverage for: $ARGUMENTS\n\nOutput:\n!`npm test`",
"description": "Run tests with coverage",
"agent": "build"
}
}
}
```
### Template Variables
| Variable | Description |
|----------|-------------|
| `$ARGUMENTS` | All command arguments |
| `$1`, `$2`, `$3` | Positional arguments |
| `` !`command` `` | Include shell command output |
| `@filepath` | Include file contents |
### Command Options
| Option | Purpose | Required |
|--------|---------|----------|
| template | Prompt text sent to LLM | Yes |
| description | UI display text | No |
| agent | Target agent for execution | No |
| model | Override default LLM | No |
| subtask | Force subagent invocation | No |
## Tools
### Built-in Tools
| Tool | Purpose | Permission Key |
|------|---------|---------------|
| bash | Execute shell commands | "bash" |
| edit | Modify existing files using exact string replacements | "edit" |
| write | Create new files or overwrite existing ones | "edit" |
| read | Read file contents from codebase | "read" |
| grep | Search file contents using regular expressions | "grep" |
| glob | Find files by pattern matching | "glob" |
| list | List files and directories | "list" |
| lsp | Access code intelligence (experimental) | "lsp" |
| patch | Apply patches to files | "edit" |
| skill | Load skill files (SKILL.md) | "skill" |
| todowrite | Manage todo lists during sessions | "todowrite" |
| todoread | Read existing todo lists | "todoread" |
| webfetch | Fetch and read web pages | "webfetch" |
| question | Ask user questions during execution | "question" |
### Tool Permissions
```json
{
"permission": {
"edit": "ask",
"bash": "allow",
"webfetch": "deny",
"mcp_*": "ask"
}
}
```
Permission levels:
- `"allow"` - Tool executes without restriction
- `"deny"` - Tool cannot be used
- `"ask"` - Requires user approval before execution
## Custom Tools
Location: `.opencode/tools/` (project) or `~/.config/opencode/tools/` (global)
### Tool Definition
```typescript
import { tool } from "@opencode-ai/plugin"
export default tool({
description: "Execute SQL query on the database",
args: {
query: tool.schema.string().describe("SQL query to execute"),
database: tool.schema.string().optional().describe("Target database")
},
async execute(args, context) {
// context.worktree - git repository root
// context.directory - current working directory
// context.sessionID - current session ID
// context.agent - active agent identifier
const result = await someDbQuery(args.query)
return { result }
}
})
```
### Multiple Tools Per File
```typescript
// math.ts - creates math_add and math_multiply tools
export const add = tool({
description: "Add two numbers",
args: {
a: tool.schema.number(),
b: tool.schema.number()
},
async execute({ a, b }) {
return { result: a + b }
}
})
export const multiply = tool({
description: "Multiply two numbers",
args: {
a: tool.schema.number(),
b: tool.schema.number()
},
async execute({ a, b }) {
return { result: a * b }
}
})
```
### Schema Types (Zod)
```typescript
tool.schema.string()
tool.schema.number()
tool.schema.boolean()
tool.schema.array(tool.schema.string())
tool.schema.object({ key: tool.schema.string() })
tool.schema.enum(["option1", "option2"])
tool.schema.optional()
tool.schema.describe("Description for LLM")
```
## Plugins
Plugins extend OpenCode with custom hooks, tools, and behaviors.
### Plugin Structure
```typescript
import { tool } from "@opencode-ai/plugin"
export const MyPlugin = async ({ project, client, $, directory, worktree }) => {
// project - Current project information
// client - OpenCode SDK client for AI interaction
// $ - Bun's shell API for command execution
// directory - Current working directory
// worktree - Git worktree path
return {
// Hook implementations
"file.edited": async (event) => {
// Handle file edit event
},
"tool.execute.before": async (input, output) => {
// Intercept before tool execution
},
"session.idle": async (event) => {
// Handle session idle
}
}
}
```
### Loading Plugins
1. **Local files**: Place in `.opencode/plugins/` (project) or `~/.config/opencode/plugins/` (global)
2. **npm packages**: Specify in `opencode.json`:
```json
{
"plugin": [
"opencode-helicone-session",
"@my-org/custom-plugin",
"./.opencode/plugins"
]
}
```
### Available Hook Events
**Command Events:**
- `command.executed` - After a command is executed
**File Events:**
- `file.edited` - After a file is edited
- `file.watcher.updated` - When file watcher detects changes
**Tool Events:**
- `tool.execute.before` - Before tool execution (can modify input)
- `tool.execute.after` - After tool execution (can modify output)
**Session Events:**
- `session.created` - When session starts
- `session.compacted` - After context compaction
- `session.deleted` - When session ends
- `session.idle` - When session becomes idle
- `session.updated` - When session is updated
- `session.status` - Session status changes
**Message Events:**
- `message.updated` - When message is updated
- `message.removed` - When message is removed
- `message.part.updated` - When message part is updated
**LSP Events:**
- `lsp.client.diagnostics` - LSP diagnostic updates
- `lsp.updated` - LSP state updates
**Shell Events:**
- `shell.env` - Modify shell environment variables
**TUI Events:**
- `tui.prompt.append` - Append to TUI prompt
- `tui.command.execute` - Execute TUI command
- `tui.toast.show` - Show toast notification
**Other Events:**
- `installation.updated` - Installation updates
- `permission.asked` - Permission request
- `server.connected` - Server connection
- `todo.updated` - Todo list updates
### Hook Event Mapping (Claude Code → OpenCode)
| Claude Code Hook | OpenCode Plugin Event |
|-----------------|----------------------|
| PreToolUse | `tool.execute.before` |
| PostToolUse | `tool.execute.after` |
| Stop | `session.idle` or `session.status` |
| SessionStart | `session.created` |
| SessionEnd | `session.deleted` |
| N/A | `file.edited`, `file.watcher.updated` |
| N/A | `message.*`, `permission.*`, `lsp.*` |
### Plugin Example: Auto-Format
```typescript
export const AutoFormatPlugin = async ({ $, directory }) => {
return {
"file.edited": async (event) => {
if (event.path.match(/\.(ts|tsx|js|jsx)$/)) {
await $`prettier --write ${event.path}`
}
}
}
}
```
### Plugin Example: TypeScript Check
```typescript
export const TypeCheckPlugin = async ({ $, client }) => {
return {
"tool.execute.after": async (input, output) => {
if (input.tool === "edit" && input.args.filePath?.match(/\.tsx?$/)) {
const result = await $`npx tsc --noEmit`.catch(e => e)
if (result.exitCode !== 0) {
client.app.log("warn", "TypeScript errors detected")
}
}
}
}
}
```
## Providers
OpenCode integrates 75+ LLM providers via AI SDK and Models.dev.
### Supported Providers
- OpenCode Zen (recommended for beginners)
- Anthropic (Claude)
- OpenAI (GPT)
- Google (Gemini)
- Amazon Bedrock
- Azure OpenAI
- GitHub Copilot
- Ollama (local)
- And 70+ more
### Provider Configuration
```json
{
"provider": {
"anthropic": {
"options": {
"baseURL": "https://api.anthropic.com/v1"
}
},
"custom-provider": {
"npm": "@ai-sdk/openai-compatible",
"name": "Display Name",
"options": {
"baseURL": "https://api.example.com/v1",
"apiKey": "{env:CUSTOM_API_KEY}"
},
"models": {
"model-id": { "name": "Model Name" }
}
}
}
}
```
### Model Naming Convention
Format: `provider/model-id`
Examples:
- `anthropic/claude-sonnet-4-5`
- `anthropic/claude-opus-4-5`
- `anthropic/claude-haiku-4-5`
- `openai/gpt-4o`
- `google/gemini-2.0-flash`
### API Key Setup
```bash
# Interactive setup
opencode
/connect
# Environment variables
export ANTHROPIC_API_KEY=sk-...
export OPENAI_API_KEY=sk-...
```
Keys stored in: `~/.local/share/opencode/auth.json`
## MCP (Model Context Protocol)
Configure MCP servers in `opencode.json`:
```json
{
"mcp": {
"github": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-github"],
"env": {
"GITHUB_PERSONAL_ACCESS_TOKEN": "{env:GITHUB_TOKEN}"
}
},
"filesystem": {
"command": "npx",
"args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/dir"]
}
}
}
```
MCP tool permissions use `mcp_*` wildcard:
```json
{
"permission": {
"mcp_*": "ask"
}
}
```
## Ecosystem Plugins
### Authentication & Provider Plugins
- Alternative model access (ChatGPT Plus, Gemini, Antigravity)
- Session tracking (Helicone headers)
- OAuth integrations
### Development Tools
- Sandbox isolation (Daytona integration)
- Type injection for TypeScript/Svelte
- DevContainer multi-branch support
- Git worktree management
### Enhancement Plugins
- Web search with citations
- Markdown table formatting
- Dynamic context token pruning
- Desktop notifications
- Persistent memory (Supermemory)
- Background process management
### Plugin Discovery
- opencode.cafe - Community plugin registry
- awesome-opencode - Curated list
- GitHub search for "opencode-plugin"
## Quick Reference
### Key Shortcuts
| Key | Action |
|-----|--------|
| Tab | Toggle between Plan and Build modes |
| @ | Reference files or mention agents |
| / | Execute commands |
### Common Commands
```bash
/init # Initialize project
/connect # Configure API providers
/share # Share conversation
/undo # Undo last change
/redo # Redo undone change
/help # Show help
```
### File Locations
| Purpose | Project | Global |
|---------|---------|--------|
| Configuration | `opencode.json` | `~/.config/opencode/config.json` |
| Agents | `.opencode/agents/` | `~/.config/opencode/agents/` |
| Commands | `.opencode/commands/` | `~/.config/opencode/commands/` |
| Plugins | `.opencode/plugins/` | `~/.config/opencode/plugins/` |
| Tools | `.opencode/tools/` | `~/.config/opencode/tools/` |
| Auth | - | `~/.local/share/opencode/auth.json` |
### Troubleshooting
```bash
# Verify credentials
opencode auth list
# Check configuration
jq . opencode.json
# Test provider connection
/connect
```
---
For more information: https://opencode.ai/docs/

16
package-lock.json generated
View File

@@ -1,14 +1,25 @@
{
"name": "everything-claude-code",
"name": "ecc-universal",
"version": "1.4.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "ecc-universal",
"version": "1.4.1",
"hasInstallScript": true,
"license": "MIT",
"bin": {
"ecc-install": "install.sh"
},
"devDependencies": {
"@eslint/js": "^9.39.2",
"eslint": "^9.39.2",
"globals": "^17.1.0",
"markdownlint-cli": "^0.47.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@eslint-community/eslint-utils": {
@@ -294,6 +305,7 @@
"integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -599,6 +611,7 @@
"integrity": "sha512-LEyamqS7W5HB3ujJyvi0HQK/dtVINZvd5mAAp9eT5S/ujByGjiZLCzPcHVzuXbpJDJF/cxwHlfceVUDZ2lnSTw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.1",
@@ -1930,6 +1943,7 @@
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": ">=12"
},

View File

@@ -7,7 +7,7 @@
- Pair programming and code generation
- Worker agents in multi-agent systems
**Sonnet 4.5** (Best coding model):
**Sonnet 4.6** (Best coding model):
- Main development work
- Orchestrating multi-agent workflows
- Complex coding tasks

View File

@@ -0,0 +1,330 @@
#!/usr/bin/env node
/**
* scripts/codemaps/generate.ts
*
* Codemap Generator for everything-claude-code (ECC)
*
* Scans the current working directory and generates architectural
* codemap documentation under docs/CODEMAPS/ as specified by the
* doc-updater agent.
*
* Usage:
* npx tsx scripts/codemaps/generate.ts [srcDir]
*
* Output:
* docs/CODEMAPS/INDEX.md
* docs/CODEMAPS/frontend.md
* docs/CODEMAPS/backend.md
* docs/CODEMAPS/database.md
* docs/CODEMAPS/integrations.md
* docs/CODEMAPS/workers.md
*/
import fs from 'fs';
import path from 'path';
// ---------------------------------------------------------------------------
// Config
// ---------------------------------------------------------------------------
// Repository root: the directory the script is invoked from.
const ROOT = process.cwd();
// Directory to scan; defaults to ROOT, overridable via the first CLI argument.
const SRC_DIR = process.argv[2] ? path.resolve(process.argv[2]) : ROOT;
// All generated markdown lands under docs/CODEMAPS/ relative to ROOT.
const OUTPUT_DIR = path.join(ROOT, 'docs', 'CODEMAPS');
// ISO date (YYYY-MM-DD) stamped into every generated document.
const TODAY = new Date().toISOString().split('T')[0];
// Patterns used to classify files into codemap areas.
// A file belongs to the FIRST area whose patterns match its ROOT-relative
// path (see classifyFiles); both directory-name and filename-suffix
// conventions are covered for each area.
const AREA_PATTERNS: Record<string, RegExp[]> = {
  frontend: [
    /\/(app|pages|components|hooks|contexts|ui|views|layouts|styles)\//i,
    /\.(tsx|jsx|css|scss|sass|less|vue|svelte)$/i,
  ],
  backend: [
    /\/(api|routes|controllers|middleware|server|services|handlers)\//i,
    /\.(route|controller|handler|middleware|service)\.(ts|js)$/i,
  ],
  database: [
    /\/(models|schemas|migrations|prisma|drizzle|db|database|repositories)\//i,
    /\.(model|schema|migration|seed)\.(ts|js)$/i,
    /prisma\/schema\.prisma$/,
    /schema\.sql$/,
  ],
  integrations: [
    /\/(integrations?|third-party|external|plugins?|adapters?|connectors?)\//i,
    /\.(integration|adapter|connector)\.(ts|js)$/i,
  ],
  workers: [
    /\/(workers?|jobs?|queues?|tasks?|cron|background)\//i,
    /\.(worker|job|queue|task|cron)\.(ts|js)$/i,
  ],
};
// ---------------------------------------------------------------------------
// File System Helpers
// ---------------------------------------------------------------------------
/** Recursively collect all files under a directory, skipping common noise dirs. */
/**
 * Recursively collect all files under `dir`, skipping common noise
 * directories (node_modules, VCS metadata, build output, caches).
 *
 * @param dir     Directory to start walking from.
 * @param results Accumulator array; the same array instance is returned.
 * @returns Absolute paths of every regular file found. Unreadable
 *          directories (permissions, races) are silently skipped.
 */
function walkDir(dir: string, results: string[] = []): string[] {
  // Hoisted out of the recursion: the Set is built once per walk
  // rather than once per directory visited.
  const SKIP = new Set([
    'node_modules', '.git', '.next', '.nuxt', 'dist', 'build', 'out',
    '.turbo', 'coverage', '.cache', '__pycache__', '.venv', 'venv',
  ]);
  const visit = (current: string): void => {
    let entries: fs.Dirent[];
    try {
      entries = fs.readdirSync(current, { withFileTypes: true });
    } catch {
      // Unreadable directory — contribute nothing, keep walking siblings.
      return;
    }
    for (const entry of entries) {
      if (SKIP.has(entry.name)) continue;
      const fullPath = path.join(current, entry.name);
      if (entry.isDirectory()) {
        visit(fullPath);
      } else if (entry.isFile()) {
        results.push(fullPath);
      }
    }
  };
  visit(dir);
  return results;
}
/** Return path relative to ROOT, always using forward slashes. */
/**
 * Convert a path to its ROOT-relative form, normalising Windows
 * backslashes to forward slashes for stable markdown output.
 */
function rel(p: string): string {
  const relative = path.relative(ROOT, p);
  return relative.replace(/\\/g, '/');
}
// ---------------------------------------------------------------------------
// Analysis
// ---------------------------------------------------------------------------
/** Aggregated classification results for one codemap area. */
interface AreaInfo {
  /** Human-readable area title used in headings (e.g. "Backend/API"). */
  name: string;
  /** ROOT-relative paths of all files matched to this area. */
  files: string[];
  /** Up to 10 detected entry points (files named index or main). */
  entryPoints: string[];
  /** Sorted, de-duplicated parent directories of `files`. */
  directories: string[];
}
/**
 * Bucket every scanned file into its codemap area via AREA_PATTERNS.
 *
 * Each file lands in the first matching area only; files matching no
 * pattern are ignored. Afterwards each area gets its sorted unique
 * directory list and up to 10 index/main entry points.
 *
 * @param allFiles Absolute file paths (as produced by walkDir).
 * @returns Area key → AreaInfo map, always containing all five areas.
 */
function classifyFiles(allFiles: string[]): Record<string, AreaInfo> {
  const areas: Record<string, AreaInfo> = {
    frontend: { name: 'Frontend', files: [], entryPoints: [], directories: [] },
    backend: { name: 'Backend/API', files: [], entryPoints: [], directories: [] },
    database: { name: 'Database', files: [], entryPoints: [], directories: [] },
    integrations: { name: 'Integrations', files: [], entryPoints: [], directories: [] },
    workers: { name: 'Workers', files: [], entryPoints: [], directories: [] },
  };
  const areaKeys = Object.keys(AREA_PATTERNS);
  for (const absolutePath of allFiles) {
    const relPath = rel(absolutePath);
    // First-match-wins, in AREA_PATTERNS declaration order.
    const matched = areaKeys.find((key) =>
      AREA_PATTERNS[key].some((pattern) => pattern.test(relPath))
    );
    if (matched !== undefined) {
      areas[matched].files.push(relPath);
    }
  }
  // Derive unique directories and entry points per area.
  const isEntryPoint = (f: string) => /(index|main)\.(ts|tsx|js|jsx)$/.test(f);
  for (const info of Object.values(areas)) {
    const uniqueDirs = new Set<string>();
    for (const file of info.files) {
      uniqueDirs.add(path.dirname(file));
    }
    info.directories = Array.from(uniqueDirs).sort();
    info.entryPoints = info.files.filter(isEntryPoint).slice(0, 10);
  }
  return areas;
}
/** Count lines in a file (returns 0 on error). */
/**
 * Count the lines in a file (number of newlines + 1).
 * Returns 0 when the file cannot be read.
 */
function lineCount(p: string): number {
  let content: string;
  try {
    content = fs.readFileSync(p, 'utf8');
  } catch {
    return 0;
  }
  const newlines = content.match(/\n/g);
  return (newlines ? newlines.length : 0) + 1;
}
/** Build a simple directory tree ASCII diagram (max 3 levels deep). */
/**
 * Build a simple directory tree ASCII diagram (max 3 levels deep).
 *
 * Directories are listed before files at each level; within each group
 * the order follows readdirSync. Returns a newline-terminated string of
 * box-drawing connectors suitable for embedding in a fenced code block.
 *
 * @param dir    Directory to render.
 * @param prefix Leading characters inherited from ancestor levels.
 * @param depth  Current recursion depth (0-based; stops after 3 levels).
 */
function buildTree(dir: string, prefix = '', depth = 0): string {
  if (depth > 2) return '';
  const SKIP = new Set(['node_modules', '.git', 'dist', 'build', '.next', 'coverage']);
  let entries: fs.Dirent[];
  try {
    entries = fs.readdirSync(dir, { withFileTypes: true });
  } catch {
    // Unreadable directory (permissions, race) — render nothing for it.
    return '';
  }
  const dirs = entries.filter((e) => e.isDirectory() && !SKIP.has(e.name));
  const files = entries.filter((e) => e.isFile());
  let result = '';
  const items = [...dirs, ...files];
  items.forEach((entry, i) => {
    const isLast = i === items.length - 1;
    const connector = isLast ? '└── ' : '├── ';
    result += `${prefix}${connector}${entry.name}\n`;
    if (entry.isDirectory()) {
      // Continue the vertical rule only while more siblings follow.
      const newPrefix = prefix + (isLast ? '    ' : '│   ');
      result += buildTree(path.join(dir, entry.name), newPrefix, depth + 1);
    }
  });
  return result;
}
// ---------------------------------------------------------------------------
// Markdown Generators
// ---------------------------------------------------------------------------
/**
 * Render the markdown codemap document for a single area.
 *
 * @param areaKey  Area identifier (currently unused in the body; kept for
 *                 signature stability with the caller in main()).
 * @param area     Classified file/directory info for the area.
 * @param allFiles All scanned files (currently unused; reserved for stats).
 * @returns Complete markdown content for docs/CODEMAPS/<areaKey>.md.
 */
function generateAreaDoc(areaKey: string, area: AreaInfo, allFiles: string[]): string {
  const fileCount = area.files.length;
  const totalLines = area.files.reduce((sum, f) => sum + lineCount(path.join(ROOT, f)), 0);
  const entrySection = area.entryPoints.length > 0
    ? area.entryPoints.map((e) => `- \`${e}\``).join('\n')
    : '- *(no index/main entry points detected)*';
  // Plain-text directory listing for the fenced Architecture block.
  // Built directly from area.directories rather than regex-stripping the
  // markdown bullet list: the old `.replace(/\`\/$/gm, '/')` tested for
  // backtick-then-slash at end-of-line while the actual lines ended in
  // slash-then-backtick, leaving stray backticks in the output.
  const dirTree = area.directories.slice(0, 20)
    .map((d) => `${d}/`)
    .join('\n') || '(no dedicated directories detected)';
  const fileSection = area.files.slice(0, 30)
    .map((f) => `| \`${f}\` | ${lineCount(path.join(ROOT, f))} |`)
    .join('\n');
  const moreFiles = area.files.length > 30
    ? `\n*...and ${area.files.length - 30} more files*`
    : '';
  return `# ${area.name} Codemap

**Last Updated:** ${TODAY}
**Total Files:** ${fileCount}
**Total Lines:** ${totalLines}

## Entry Points

${entrySection}

## Architecture

\`\`\`
${area.name} Directory Structure

${dirTree}
\`\`\`

## Key Modules

| File | Lines |
|------|-------|
${fileSection}${moreFiles}

## Data Flow

> Detected from file patterns. Review individual files for detailed data flow.

## External Dependencies

> Run \`npx jsdoc2md src/**/*.ts\` to extract JSDoc and identify external dependencies.

## Related Areas

- [INDEX](./INDEX.md) — Full overview
- [Frontend](./frontend.md)
- [Backend/API](./backend.md)
- [Database](./database.md)
- [Integrations](./integrations.md)
- [Workers](./workers.md)
`;
}
/**
 * Render docs/CODEMAPS/INDEX.md: a per-area summary table, the
 * repository tree diagram, regeneration commands, and links to the
 * per-area codemap documents.
 *
 * @param areas    Output of classifyFiles().
 * @param allFiles Every file found by walkDir() (used for the scan count).
 * @returns Complete markdown content for the index document.
 */
function generateIndex(areas: Record<string, AreaInfo>, allFiles: string[]): string {
  const totalFiles = allFiles.length;
  // One table row per area: the name links to ./<key>.md and at most
  // three key directories are shown (em dash when none detected).
  const areaRows = Object.entries(areas)
    .map(([key, area]) => `| [${area.name}](./${key}.md) | ${area.files.length} files | ${area.directories.slice(0, 3).map((d) => `\`${d}\``).join(', ') || '—'} |`)
    .join('\n');
  const topLevelTree = buildTree(SRC_DIR);
  return `# Codebase Overview — CODEMAPS Index
**Last Updated:** ${TODAY}
**Root:** \`${rel(SRC_DIR) || '.'}\`
**Total Files Scanned:** ${totalFiles}
## Areas
| Area | Size | Key Directories |
|------|------|-----------------|
${areaRows}
## Repository Structure
\`\`\`
${rel(SRC_DIR) || path.basename(SRC_DIR)}/
${topLevelTree}\`\`\`
## How to Regenerate
\`\`\`bash
npx tsx scripts/codemaps/generate.ts   # Regenerate codemaps
npx madge --image graph.svg src/       # Dependency graph (requires graphviz)
npx jsdoc2md src/**/*.ts               # Extract JSDoc
\`\`\`
## Related Documentation
- [Frontend](./frontend.md) — UI components, pages, hooks
- [Backend/API](./backend.md) — API routes, controllers, middleware
- [Database](./database.md) — Models, schemas, migrations
- [Integrations](./integrations.md) — External services & adapters
- [Workers](./workers.md) — Background jobs, queues, cron tasks
`;
}
// ---------------------------------------------------------------------------
// Main
// ---------------------------------------------------------------------------
/**
 * Entry point: scan SRC_DIR, classify every file into a codemap area,
 * and write INDEX.md plus one markdown document per area to OUTPUT_DIR.
 * Progress is reported on stdout.
 */
function main(): void {
  console.log(`[generate.ts] Scanning: ${SRC_DIR}`);
  console.log(`[generate.ts] Output: ${OUTPUT_DIR}`);
  // Ensure output directory exists (no-op if already present).
  fs.mkdirSync(OUTPUT_DIR, { recursive: true });
  // Walk the directory tree.
  const allFiles = walkDir(SRC_DIR);
  console.log(`[generate.ts] Found ${allFiles.length} files`);
  // Classify files into areas.
  const areas = classifyFiles(allFiles);
  // Generate INDEX.md first, then the per-area documents.
  const indexContent = generateIndex(areas, allFiles);
  const indexPath = path.join(OUTPUT_DIR, 'INDEX.md');
  fs.writeFileSync(indexPath, indexContent, 'utf8');
  console.log(`[generate.ts] Written: ${rel(indexPath)}`);
  // Generate per-area codemaps (one .md per AREA_PATTERNS key).
  for (const [key, area] of Object.entries(areas)) {
    const content = generateAreaDoc(key, area, allFiles);
    const outPath = path.join(OUTPUT_DIR, `${key}.md`);
    fs.writeFileSync(outPath, content, 'utf8');
    console.log(`[generate.ts] Written: ${rel(outPath)} (${area.files.length} files)`);
  }
  console.log('\n[generate.ts] Done! Codemaps written to docs/CODEMAPS/');
  console.log('[generate.ts] Files generated:');
  console.log('  docs/CODEMAPS/INDEX.md');
  console.log('  docs/CODEMAPS/frontend.md');
  console.log('  docs/CODEMAPS/backend.md');
  console.log('  docs/CODEMAPS/database.md');
  console.log('  docs/CODEMAPS/integrations.md');
  console.log('  docs/CODEMAPS/workers.md');
}
main();

View File

@@ -32,7 +32,8 @@ process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (data.length < MAX_STDIN) {
data += chunk;
const remaining = MAX_STDIN - data.length;
data += chunk.substring(0, remaining);
}
});

View File

@@ -29,7 +29,8 @@ process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (stdinData.length < MAX_STDIN) {
stdinData += chunk;
const remaining = MAX_STDIN - stdinData.length;
stdinData += chunk.substring(0, remaining);
}
});

View File

@@ -17,7 +17,8 @@ process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (data.length < MAX_STDIN) {
data += chunk;
const remaining = MAX_STDIN - data.length;
data += chunk.substring(0, remaining);
}
});
@@ -28,7 +29,7 @@ process.stdin.on('end', () => {
if (filePath && /\.(ts|tsx|js|jsx)$/.test(filePath)) {
const content = readFile(filePath);
if (!content) { process.stdout.write(data); return; }
if (!content) { process.stdout.write(data); process.exit(0); }
const lines = content.split('\n');
const matches = [];

View File

@@ -17,7 +17,8 @@ process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (data.length < MAX_STDIN) {
data += chunk;
const remaining = MAX_STDIN - data.length;
data += chunk.substring(0, remaining);
}
});

View File

@@ -19,7 +19,8 @@ process.stdin.setEncoding("utf8");
process.stdin.on("data", (chunk) => {
if (data.length < MAX_STDIN) {
data += chunk;
const remaining = MAX_STDIN - data.length;
data += chunk.substring(0, remaining);
}
});

View File

@@ -109,7 +109,8 @@ process.stdin.setEncoding('utf8');
process.stdin.on('data', chunk => {
if (stdinData.length < MAX_STDIN) {
stdinData += chunk;
const remaining = MAX_STDIN - stdinData.length;
stdinData += chunk.substring(0, remaining);
}
});

View File

@@ -282,7 +282,7 @@ function setProjectPackageManager(pmName, projectDir = process.cwd()) {
// Allowed characters in script/binary names: alphanumeric, dash, underscore, dot, slash, @
// This prevents shell metacharacter injection while allowing scoped packages (e.g., @scope/pkg)
const SAFE_NAME_REGEX = /^[@a-zA-Z0-9_.\/-]+$/;
const SAFE_NAME_REGEX = /^[@a-zA-Z0-9_./-]+$/;
/**
* Get the command to run a script
@@ -316,7 +316,7 @@ function getRunCommand(script, options = {}) {
// Allowed characters in arguments: alphanumeric, whitespace, dashes, dots, slashes,
// equals, colons, commas, quotes, @. Rejects shell metacharacters like ; | & ` $ ( ) { } < > !
const SAFE_ARGS_REGEX = /^[@a-zA-Z0-9\s_.\/:=,'"*+-]+$/;
const SAFE_ARGS_REGEX = /^[@a-zA-Z0-9\s_./:=,'"*+-]+$/;
/**
* Get the command to execute a package binary
@@ -370,28 +370,31 @@ function escapeRegex(str) {
function getCommandPattern(action) {
const patterns = [];
if (action === 'dev') {
// Trim spaces from action to handle leading/trailing whitespace gracefully
const trimmedAction = action.trim();
if (trimmedAction === 'dev') {
patterns.push(
'npm run dev',
'pnpm( run)? dev',
'yarn dev',
'bun run dev'
);
} else if (action === 'install') {
} else if (trimmedAction === 'install') {
patterns.push(
'npm install',
'pnpm install',
'yarn( install)?',
'bun install'
);
} else if (action === 'test') {
} else if (trimmedAction === 'test') {
patterns.push(
'npm test',
'pnpm test',
'yarn test',
'bun test'
);
} else if (action === 'build') {
} else if (trimmedAction === 'build') {
patterns.push(
'npm run build',
'pnpm( run)? build',
@@ -400,7 +403,7 @@ function getCommandPattern(action) {
);
} else {
// Generic run command — escape regex metacharacters in action
const escaped = escapeRegex(action);
const escaped = escapeRegex(trimmedAction);
patterns.push(
`npm run ${escaped}`,
`pnpm( run)? ${escaped}`,

View File

@@ -125,7 +125,7 @@ ${chalk.bold('Files Tracked:')} ${chalk.green(data.files)}
console.log(chalk.gray('─'.repeat(50)));
patterns.forEach((pattern, i) => {
const confidence = pattern.confidence || 0.8;
const confidence = pattern.confidence ?? 0.8;
const confidenceBar = progressBar(Math.round(confidence * 100), 15);
console.log(`
${chalk.bold(chalk.yellow(`${i + 1}.`))} ${chalk.bold(pattern.name)}

View File

@@ -0,0 +1,160 @@
---
name: content-hash-cache-pattern
description: Cache expensive file processing results using SHA-256 content hashes — path-independent, auto-invalidating, with service layer separation.
---
# Content-Hash File Cache Pattern
Cache expensive file processing results (PDF parsing, text extraction, image analysis) using SHA-256 content hashes as cache keys. Unlike path-based caching, this approach survives file moves/renames and auto-invalidates when content changes.
## When to Activate
- Building file processing pipelines (PDF, images, text extraction)
- Processing cost is high and same files are processed repeatedly
- Need a `--cache/--no-cache` CLI option
- Want to add caching to existing pure functions without modifying them
## Core Pattern
### 1. Content-Hash Based Cache Key
Use file content (not path) as the cache key:
```python
import hashlib
from pathlib import Path
_HASH_CHUNK_SIZE = 65536 # 64KB chunks for large files
def compute_file_hash(path: Path) -> str:
"""SHA-256 of file contents (chunked for large files)."""
if not path.is_file():
raise FileNotFoundError(f"File not found: {path}")
sha256 = hashlib.sha256()
with open(path, "rb") as f:
while True:
chunk = f.read(_HASH_CHUNK_SIZE)
if not chunk:
break
sha256.update(chunk)
return sha256.hexdigest()
```
**Why content hash?** File rename/move = cache hit. Content change = automatic invalidation. No index file needed.
### 2. Frozen Dataclass for Cache Entry
```python
from dataclasses import dataclass
@dataclass(frozen=True, slots=True)
class CacheEntry:
file_hash: str
source_path: str
document: ExtractedDocument # The cached result
```
### 3. File-Based Cache Storage
Each cache entry is stored as `{hash}.json` — O(1) lookup by hash, no index file required.
```python
import json
from typing import Any
def write_cache(cache_dir: Path, entry: CacheEntry) -> None:
cache_dir.mkdir(parents=True, exist_ok=True)
cache_file = cache_dir / f"{entry.file_hash}.json"
data = serialize_entry(entry)
cache_file.write_text(json.dumps(data, ensure_ascii=False), encoding="utf-8")
def read_cache(cache_dir: Path, file_hash: str) -> CacheEntry | None:
cache_file = cache_dir / f"{file_hash}.json"
if not cache_file.is_file():
return None
try:
raw = cache_file.read_text(encoding="utf-8")
data = json.loads(raw)
return deserialize_entry(data)
except (json.JSONDecodeError, ValueError, KeyError):
return None # Treat corruption as cache miss
```
### 4. Service Layer Wrapper (SRP)
Keep the processing function pure. Add caching as a separate service layer.
```python
def extract_with_cache(
file_path: Path,
*,
cache_enabled: bool = True,
cache_dir: Path = Path(".cache"),
) -> ExtractedDocument:
"""Service layer: cache check -> extraction -> cache write."""
if not cache_enabled:
return extract_text(file_path) # Pure function, no cache knowledge
file_hash = compute_file_hash(file_path)
# Check cache
cached = read_cache(cache_dir, file_hash)
if cached is not None:
logger.info("Cache hit: %s (hash=%s)", file_path.name, file_hash[:12])
return cached.document
# Cache miss -> extract -> store
logger.info("Cache miss: %s (hash=%s)", file_path.name, file_hash[:12])
doc = extract_text(file_path)
entry = CacheEntry(file_hash=file_hash, source_path=str(file_path), document=doc)
write_cache(cache_dir, entry)
return doc
```
## Key Design Decisions
| Decision | Rationale |
|----------|-----------|
| SHA-256 content hash | Path-independent, auto-invalidates on content change |
| `{hash}.json` file naming | O(1) lookup, no index file needed |
| Service layer wrapper | SRP: extraction stays pure, cache is a separate concern |
| Manual JSON serialization | Full control over frozen dataclass serialization |
| Corruption returns `None` | Graceful degradation, re-processes on next run |
| `cache_dir.mkdir(parents=True)` | Lazy directory creation on first write |
## Best Practices
- **Hash content, not paths** — paths change, content identity doesn't
- **Chunk large files** when hashing — avoid loading entire files into memory
- **Keep processing functions pure** — they should know nothing about caching
- **Log cache hit/miss** with truncated hashes for debugging
- **Handle corruption gracefully** — treat invalid cache entries as misses, never crash
## Anti-Patterns to Avoid
```python
# BAD: Path-based caching (breaks on file move/rename)
cache = {"/path/to/file.pdf": result}
# BAD: Adding cache logic inside the processing function (SRP violation)
def extract_text(path, *, cache_enabled=False, cache_dir=None):
if cache_enabled: # Now this function has two responsibilities
...
# BAD: Using dataclasses.asdict() with nested frozen dataclasses
# (can cause issues with complex nested types)
data = dataclasses.asdict(entry) # Use manual serialization instead
```
## When to Use
- File processing pipelines (PDF parsing, OCR, text extraction, image analysis)
- CLI tools that benefit from `--cache/--no-cache` options
- Batch processing where the same files appear across runs
- Adding caching to existing pure functions without modifying them
## When NOT to Use
- Data that must always be fresh (real-time feeds)
- Cache entries that would be extremely large (consider streaming instead)
- Results that depend on parameters beyond file content (e.g., different extraction configs)

View File

@@ -103,7 +103,8 @@ PARSED_OK=$(echo "$PARSED" | python3 -c "import json,sys; print(json.load(sys.st
if [ "$PARSED_OK" != "True" ]; then
# Fallback: log raw input for debugging
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
TIMESTAMP="$timestamp" echo "$INPUT_JSON" | python3 -c "
export TIMESTAMP="$timestamp"
echo "$INPUT_JSON" | python3 -c "
import json, sys, os
raw = sys.stdin.read()[:2000]
print(json.dumps({'timestamp': os.environ['TIMESTAMP'], 'event': 'parse_error', 'raw': raw}))
@@ -124,7 +125,8 @@ fi
# Build and write observation
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
TIMESTAMP="$timestamp" echo "$PARSED" | python3 -c "
export TIMESTAMP="$timestamp"
echo "$PARSED" | python3 -c "
import json, sys, os
parsed = json.load(sys.stdin)

View File

@@ -0,0 +1,182 @@
---
name: cost-aware-llm-pipeline
description: Cost optimization patterns for LLM API usage — model routing by task complexity, budget tracking, retry logic, and prompt caching.
---
# Cost-Aware LLM Pipeline
Patterns for controlling LLM API costs while maintaining quality. Combines model routing, budget tracking, retry logic, and prompt caching into a composable pipeline.
## When to Activate
- Building applications that call LLM APIs (Claude, GPT, etc.)
- Processing batches of items with varying complexity
- Need to stay within a budget for API spend
- Optimizing cost without sacrificing quality on complex tasks
## Core Concepts
### 1. Model Routing by Task Complexity
Automatically select cheaper models for simple tasks, reserving expensive models for complex ones.
```python
MODEL_SONNET = "claude-sonnet-4-6"
MODEL_HAIKU = "claude-haiku-4-5-20251001"
_SONNET_TEXT_THRESHOLD = 10_000 # chars
_SONNET_ITEM_THRESHOLD = 30 # items
def select_model(
text_length: int,
item_count: int,
force_model: str | None = None,
) -> str:
"""Select model based on task complexity."""
if force_model is not None:
return force_model
if text_length >= _SONNET_TEXT_THRESHOLD or item_count >= _SONNET_ITEM_THRESHOLD:
return MODEL_SONNET # Complex task
return MODEL_HAIKU # Simple task (3-4x cheaper)
```
### 2. Immutable Cost Tracking
Track cumulative spend with frozen dataclasses. Each API call returns a new tracker — never mutates state.
```python
from dataclasses import dataclass
@dataclass(frozen=True, slots=True)
class CostRecord:
model: str
input_tokens: int
output_tokens: int
cost_usd: float
@dataclass(frozen=True, slots=True)
class CostTracker:
budget_limit: float = 1.00
records: tuple[CostRecord, ...] = ()
def add(self, record: CostRecord) -> "CostTracker":
"""Return new tracker with added record (never mutates self)."""
return CostTracker(
budget_limit=self.budget_limit,
records=(*self.records, record),
)
@property
def total_cost(self) -> float:
return sum(r.cost_usd for r in self.records)
@property
def over_budget(self) -> bool:
return self.total_cost > self.budget_limit
```
### 3. Narrow Retry Logic
Retry only on transient errors. Fail fast on authentication or bad request errors.
```python
import time

from anthropic import (
APIConnectionError,
InternalServerError,
RateLimitError,
)
_RETRYABLE_ERRORS = (APIConnectionError, RateLimitError, InternalServerError)
_MAX_RETRIES = 3
def call_with_retry(func, *, max_retries: int = _MAX_RETRIES):
"""Retry only on transient errors, fail fast on others."""
for attempt in range(max_retries):
try:
return func()
except _RETRYABLE_ERRORS:
if attempt == max_retries - 1:
raise
time.sleep(2 ** attempt) # Exponential backoff
# AuthenticationError, BadRequestError etc. → raise immediately
```
### 4. Prompt Caching
Cache long system prompts to avoid resending them on every request.
```python
messages = [
{
"role": "user",
"content": [
{
"type": "text",
"text": system_prompt,
"cache_control": {"type": "ephemeral"}, # Cache this
},
{
"type": "text",
"text": user_input, # Variable part
},
],
}
]
```
## Composition
Combine all four techniques in a single pipeline function:
```python
def process(text: str, config: Config, tracker: CostTracker) -> tuple[Result, CostTracker]:
# 1. Route model
model = select_model(len(text), estimated_items, config.force_model)
# 2. Check budget
if tracker.over_budget:
raise BudgetExceededError(tracker.total_cost, tracker.budget_limit)
# 3. Call with retry + caching
response = call_with_retry(lambda: client.messages.create(
model=model,
messages=build_cached_messages(system_prompt, text),
))
# 4. Track cost (immutable)
record = CostRecord(model=model, input_tokens=..., output_tokens=..., cost_usd=...)
tracker = tracker.add(record)
return parse_result(response), tracker
```
## Pricing Reference (2025-2026)
| Model | Input ($/1M tokens) | Output ($/1M tokens) | Relative Cost |
|-------|---------------------|----------------------|---------------|
| Haiku 4.5 | $0.80 | $4.00 | 1x |
| Sonnet 4.6 | $3.00 | $15.00 | ~4x |
| Opus 4.5 | $15.00 | $75.00 | ~19x |
## Best Practices
- **Start with the cheapest model** and only route to expensive models when complexity thresholds are met
- **Set explicit budget limits** before processing batches — fail early rather than overspend
- **Log model selection decisions** so you can tune thresholds based on real data
- **Use prompt caching** for system prompts over 1024 tokens — saves both cost and latency
- **Never retry on authentication or validation errors** — only transient failures (network, rate limit, server error)
## Anti-Patterns to Avoid
- Using the most expensive model for all requests regardless of complexity
- Retrying on all errors (wastes budget on permanent failures)
- Mutating cost tracking state (makes debugging and auditing difficult)
- Hardcoding model names throughout the codebase (use constants or config)
- Ignoring prompt caching for repetitive system prompts
## When to Use
- Any application calling Claude, OpenAI, or similar LLM APIs
- Batch processing pipelines where cost adds up quickly
- Multi-model architectures that need intelligent routing
- Production systems that need budget guardrails

View File

@@ -0,0 +1,722 @@
---
name: cpp-coding-standards
description: C++ coding standards based on the C++ Core Guidelines (isocpp.github.io). Use when writing, reviewing, or refactoring C++ code to enforce modern, safe, and idiomatic practices.
---
# C++ Coding Standards (C++ Core Guidelines)
Comprehensive coding standards for modern C++ (C++17/20/23) derived from the [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines). Enforces type safety, resource safety, immutability, and clarity.
## When to Use
- Writing new C++ code (classes, functions, templates)
- Reviewing or refactoring existing C++ code
- Making architectural decisions in C++ projects
- Enforcing consistent style across a C++ codebase
- Choosing between language features (e.g., `enum` vs `enum class`, raw pointer vs smart pointer)
### When NOT to Use
- Non-C++ projects
- Legacy C codebases that cannot adopt modern C++ features
- Embedded/bare-metal contexts where specific guidelines conflict with hardware constraints (adapt selectively)
## Cross-Cutting Principles
These themes recur across the entire guidelines and form the foundation:
1. **RAII everywhere** (P.8, R.1, E.6, CP.20): Bind resource lifetime to object lifetime
2. **Immutability by default** (P.10, Con.1-5, ES.25): Start with `const`/`constexpr`; mutability is the exception
3. **Type safety** (P.4, I.4, ES.46-49, Enum.3): Use the type system to prevent errors at compile time
4. **Express intent** (P.3, F.1, NL.1-2, T.10): Names, types, and concepts should communicate purpose
5. **Minimize complexity** (F.2-3, ES.5, Per.4-5): Simple code is correct code
6. **Value semantics over pointer semantics** (C.10, R.3-5, F.20, CP.31): Prefer returning by value and scoped objects
## Philosophy & Interfaces (P.*, I.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **P.1** | Express ideas directly in code |
| **P.3** | Express intent |
| **P.4** | Ideally, a program should be statically type safe |
| **P.5** | Prefer compile-time checking to run-time checking |
| **P.8** | Don't leak any resources |
| **P.10** | Prefer immutable data to mutable data |
| **I.1** | Make interfaces explicit |
| **I.2** | Avoid non-const global variables |
| **I.4** | Make interfaces precisely and strongly typed |
| **I.11** | Never transfer ownership by a raw pointer or reference |
| **I.23** | Keep the number of function arguments low |
### DO
```cpp
// P.10 + I.4: Immutable, strongly typed interface
struct Temperature {
double kelvin;
};
Temperature boil(const Temperature& water);
```
### DON'T
```cpp
// Weak interface: unclear ownership, unclear units
double boil(double* temp);
// Non-const global variable
int g_counter = 0; // I.2 violation
```
## Functions (F.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **F.1** | Package meaningful operations as carefully named functions |
| **F.2** | A function should perform a single logical operation |
| **F.3** | Keep functions short and simple |
| **F.4** | If a function might be evaluated at compile time, declare it `constexpr` |
| **F.6** | If your function must not throw, declare it `noexcept` |
| **F.8** | Prefer pure functions |
| **F.16** | For "in" parameters, pass cheaply-copied types by value and others by `const&` |
| **F.20** | For "out" values, prefer return values to output parameters |
| **F.21** | To return multiple "out" values, prefer returning a struct |
| **F.43** | Never return a pointer or reference to a local object |
### Parameter Passing
```cpp
// F.16: Cheap types by value, others by const&
void print(int x); // cheap: by value
void analyze(const std::string& data); // expensive: by const&
void transform(std::string s); // sink: by value (will move)
// F.20 + F.21: Return values, not output parameters
struct ParseResult {
std::string token;
int position;
};
ParseResult parse(std::string_view input); // GOOD: return struct
// BAD: output parameters
void parse(std::string_view input,
std::string& token, int& pos); // avoid this
```
### Pure Functions and constexpr
```cpp
// F.4 + F.8: Pure, constexpr where possible
constexpr int factorial(int n) noexcept {
return (n <= 1) ? 1 : n * factorial(n - 1);
}
static_assert(factorial(5) == 120);
```
### Anti-Patterns
- Returning `T&&` from functions (F.45)
- Using `va_arg` / C-style variadics (F.55)
- Capturing by reference in lambdas passed to other threads (F.53)
- Returning `const T` which inhibits move semantics (F.49)
## Classes & Class Hierarchies (C.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **C.2** | Use `class` if invariant exists; `struct` if data members vary independently |
| **C.9** | Minimize exposure of members |
| **C.20** | If you can avoid defining default operations, do (Rule of Zero) |
| **C.21** | If you define or `=delete` any copy/move/destructor, handle them all (Rule of Five) |
| **C.35** | Base class destructor: public virtual or protected non-virtual |
| **C.41** | A constructor should create a fully initialized object |
| **C.46** | Declare single-argument constructors `explicit` |
| **C.67** | A polymorphic class should suppress public copy/move |
| **C.128** | Virtual functions: specify exactly one of `virtual`, `override`, or `final` |
### Rule of Zero
```cpp
// C.20: Let the compiler generate special members
struct Employee {
std::string name;
std::string department;
int id;
// No destructor, copy/move constructors, or assignment operators needed
};
```
### Rule of Five
```cpp
// C.21: If you must manage a resource, define all five
class Buffer {
public:
explicit Buffer(std::size_t size)
: data_(std::make_unique<char[]>(size)), size_(size) {}
~Buffer() = default;
Buffer(const Buffer& other)
: data_(std::make_unique<char[]>(other.size_)), size_(other.size_) {
std::copy_n(other.data_.get(), size_, data_.get());
}
Buffer& operator=(const Buffer& other) {
if (this != &other) {
auto new_data = std::make_unique<char[]>(other.size_);
std::copy_n(other.data_.get(), other.size_, new_data.get());
data_ = std::move(new_data);
size_ = other.size_;
}
return *this;
}
Buffer(Buffer&&) noexcept = default;
Buffer& operator=(Buffer&&) noexcept = default;
private:
std::unique_ptr<char[]> data_;
std::size_t size_;
};
```
### Class Hierarchy
```cpp
// C.35 + C.128: Virtual destructor, use override
class Shape {
public:
virtual ~Shape() = default;
virtual double area() const = 0; // C.121: pure interface
};
class Circle : public Shape {
public:
explicit Circle(double r) : radius_(r) {}
double area() const override { return 3.14159 * radius_ * radius_; }
private:
double radius_;
};
```
### Anti-Patterns
- Calling virtual functions in constructors/destructors (C.82)
- Using `memset`/`memcpy` on non-trivial types (C.90)
- Providing different default arguments for virtual function and overrider (C.140)
- Making data members `const` or references, which suppresses move/copy (C.12)
## Resource Management (R.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **R.1** | Manage resources automatically using RAII |
| **R.3** | A raw pointer (`T*`) is non-owning |
| **R.5** | Prefer scoped objects; don't heap-allocate unnecessarily |
| **R.10** | Avoid `malloc()`/`free()` |
| **R.11** | Avoid calling `new` and `delete` explicitly |
| **R.20** | Use `unique_ptr` or `shared_ptr` to represent ownership |
| **R.21** | Prefer `unique_ptr` over `shared_ptr` unless sharing ownership |
| **R.22** | Use `make_shared()` to make `shared_ptr`s |
### Smart Pointer Usage
```cpp
// R.11 + R.20 + R.21: RAII with smart pointers
auto widget = std::make_unique<Widget>("config"); // unique ownership
auto cache = std::make_shared<Cache>(1024); // shared ownership
// R.3: Raw pointer = non-owning observer
void render(const Widget* w) { // does NOT own w
if (w) w->draw();
}
render(widget.get());
```
### RAII Pattern
```cpp
// R.1: Resource acquisition is initialization
class FileHandle {
public:
explicit FileHandle(const std::string& path)
: handle_(std::fopen(path.c_str(), "r")) {
if (!handle_) throw std::runtime_error("Failed to open: " + path);
}
~FileHandle() {
if (handle_) std::fclose(handle_);
}
FileHandle(const FileHandle&) = delete;
FileHandle& operator=(const FileHandle&) = delete;
FileHandle(FileHandle&& other) noexcept
: handle_(std::exchange(other.handle_, nullptr)) {}
FileHandle& operator=(FileHandle&& other) noexcept {
if (this != &other) {
if (handle_) std::fclose(handle_);
handle_ = std::exchange(other.handle_, nullptr);
}
return *this;
}
private:
std::FILE* handle_;
};
```
### Anti-Patterns
- Naked `new`/`delete` (R.11)
- `malloc()`/`free()` in C++ code (R.10)
- Multiple resource allocations in a single expression (R.13 -- exception safety hazard)
- `shared_ptr` where `unique_ptr` suffices (R.21)
## Expressions & Statements (ES.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **ES.5** | Keep scopes small |
| **ES.20** | Always initialize an object |
| **ES.23** | Prefer `{}` initializer syntax |
| **ES.25** | Declare objects `const` or `constexpr` unless modification is intended |
| **ES.28** | Use lambdas for complex initialization of `const` variables |
| **ES.45** | Avoid magic constants; use symbolic constants |
| **ES.46** | Avoid narrowing/lossy arithmetic conversions |
| **ES.47** | Use `nullptr` rather than `0` or `NULL` |
| **ES.48** | Avoid casts |
| **ES.50** | Don't cast away `const` |
### Initialization
```cpp
// ES.20 + ES.23 + ES.25: Always initialize, prefer {}, default to const
const int max_retries{3};
const std::string name{"widget"};
const std::vector<int> primes{2, 3, 5, 7, 11};
// ES.28: Lambda for complex const initialization
const auto config = [&] {
Config c;
c.timeout = std::chrono::seconds{30};
c.retries = max_retries;
c.verbose = debug_mode;
return c;
}();
```
### Anti-Patterns
- Uninitialized variables (ES.20)
- Using `0` or `NULL` as pointer (ES.47 -- use `nullptr`)
- C-style casts (ES.48 -- use `static_cast`, `const_cast`, etc.)
- Casting away `const` (ES.50)
- Magic numbers without named constants (ES.45)
- Mixing signed and unsigned arithmetic (ES.100)
- Reusing names in nested scopes (ES.12)
## Error Handling (E.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **E.1** | Develop an error-handling strategy early in a design |
| **E.2** | Throw an exception to signal that a function can't perform its assigned task |
| **E.6** | Use RAII to prevent leaks |
| **E.12** | Use `noexcept` when throwing is impossible or unacceptable |
| **E.14** | Use purpose-designed user-defined types as exceptions |
| **E.15** | Throw by value, catch by reference |
| **E.16** | Destructors, deallocation, and swap must never fail |
| **E.17** | Don't try to catch every exception in every function |
### Exception Hierarchy
```cpp
// E.14 + E.15: Custom exception types, throw by value, catch by reference
class AppError : public std::runtime_error {
public:
using std::runtime_error::runtime_error;
};
class NetworkError : public AppError {
public:
NetworkError(const std::string& msg, int code)
: AppError(msg), status_code(code) {}
int status_code;
};
void fetch_data(const std::string& url) {
// E.2: Throw to signal failure
throw NetworkError("connection refused", 503);
}
void run() {
try {
fetch_data("https://api.example.com");
} catch (const NetworkError& e) {
log_error(e.what(), e.status_code);
} catch (const AppError& e) {
log_error(e.what());
}
// E.17: Don't catch everything here -- let unexpected errors propagate
}
```
### Anti-Patterns
- Throwing built-in types like `int` or string literals (E.14)
- Catching by value (slicing risk) (E.15)
- Empty catch blocks that silently swallow errors
- Using exceptions for flow control (E.3)
- Error handling based on global state like `errno` (E.28)
## Constants & Immutability (Con.*)
### All Rules
| Rule | Summary |
|------|---------|
| **Con.1** | By default, make objects immutable |
| **Con.2** | By default, make member functions `const` |
| **Con.3** | By default, pass pointers and references to `const` |
| **Con.4** | Use `const` for values that don't change after construction |
| **Con.5** | Use `constexpr` for values computable at compile time |
```cpp
// Con.1 through Con.5: Immutability by default
class Sensor {
public:
explicit Sensor(std::string id) : id_(std::move(id)) {}
// Con.2: const member functions by default
const std::string& id() const { return id_; }
double last_reading() const { return reading_; }
// Only non-const when mutation is required
void record(double value) { reading_ = value; }
private:
const std::string id_; // Con.4: never changes after construction
double reading_{0.0};
};
// Con.3: Pass by const reference
void display(const Sensor& s) {
std::cout << s.id() << ": " << s.last_reading() << '\n';
}
// Con.5: Compile-time constants (NL.9: not ALL_CAPS — they're not macros)
constexpr double pi = 3.14159265358979;
constexpr int max_sensors = 256;
```
## Concurrency & Parallelism (CP.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **CP.2** | Avoid data races |
| **CP.3** | Minimize explicit sharing of writable data |
| **CP.4** | Think in terms of tasks, rather than threads |
| **CP.8** | Don't use `volatile` for synchronization |
| **CP.20** | Use RAII, never plain `lock()`/`unlock()` |
| **CP.21** | Use `std::scoped_lock` to acquire multiple mutexes |
| **CP.22** | Never call unknown code while holding a lock |
| **CP.42** | Don't wait without a condition |
| **CP.44** | Remember to name your `lock_guard`s and `unique_lock`s |
| **CP.100** | Don't use lock-free programming unless you absolutely have to |
### Safe Locking
```cpp
// CP.20 + CP.44: RAII locks, always named
class ThreadSafeQueue {
public:
void push(int value) {
std::lock_guard<std::mutex> lock(mutex_); // CP.44: named!
queue_.push(value);
cv_.notify_one();
}
int pop() {
std::unique_lock<std::mutex> lock(mutex_);
// CP.42: Always wait with a condition
cv_.wait(lock, [this] { return !queue_.empty(); });
const int value = queue_.front();
queue_.pop();
return value;
}
private:
std::mutex mutex_; // CP.50: mutex with its data
std::condition_variable cv_;
std::queue<int> queue_;
};
```
### Multiple Mutexes
```cpp
// CP.21: std::scoped_lock for multiple mutexes (deadlock-free)
void transfer(Account& from, Account& to, double amount) {
std::scoped_lock lock(from.mutex_, to.mutex_);
from.balance_ -= amount;
to.balance_ += amount;
}
```
### Anti-Patterns
- `volatile` for synchronization (CP.8 -- it's for hardware I/O only)
- Detaching threads (CP.26 -- lifetime management becomes nearly impossible)
- Unnamed lock guards: `std::lock_guard<std::mutex>(m);` destroys immediately (CP.44)
- Holding locks while calling callbacks (CP.22 -- deadlock risk)
- Lock-free programming without deep expertise (CP.100)
## Templates & Generic Programming (T.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **T.1** | Use templates to raise the level of abstraction |
| **T.2** | Use templates to express algorithms for many argument types |
| **T.10** | Specify concepts for all template arguments |
| **T.11** | Use standard concepts whenever possible |
| **T.13** | Prefer shorthand notation for simple concepts |
| **T.43** | Prefer `using` over `typedef` |
| **T.120** | Use template metaprogramming only when you really need to |
| **T.144** | Don't specialize function templates (overload instead) |
### Concepts (C++20)
```cpp
#include <concepts>
// T.10 + T.11: Constrain templates with standard concepts
template<std::integral T>
T gcd(T a, T b) {
while (b != 0) {
a = std::exchange(b, a % b);
}
return a;
}
// T.13: Shorthand concept syntax
void sort(std::ranges::random_access_range auto& range) {
std::ranges::sort(range);
}
// Custom concept for domain-specific constraints
template<typename T>
concept Serializable = requires(const T& t) {
{ t.serialize() } -> std::convertible_to<std::string>;
};
template<Serializable T>
void save(const T& obj, const std::string& path);
```
### Anti-Patterns
- Unconstrained templates in visible namespaces (T.47)
- Specializing function templates instead of overloading (T.144)
- Template metaprogramming where `constexpr` suffices (T.120)
- `typedef` instead of `using` (T.43)
## Standard Library (SL.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **SL.1** | Use libraries wherever possible |
| **SL.2** | Prefer the standard library to other libraries |
| **SL.con.1** | Prefer `std::array` or `std::vector` over C arrays |
| **SL.con.2** | Prefer `std::vector` by default |
| **SL.str.1** | Use `std::string` to own character sequences |
| **SL.str.2** | Use `std::string_view` to refer to character sequences |
| **SL.io.50** | Avoid `endl` (use `'\n'` -- `endl` forces a flush) |
```cpp
// SL.con.1 + SL.con.2: Prefer vector/array over C arrays
const std::array<int, 4> fixed_data{1, 2, 3, 4};
std::vector<std::string> dynamic_data;
// SL.str.1 + SL.str.2: string owns, string_view observes
std::string build_greeting(std::string_view name) {
return "Hello, " + std::string(name) + "!";
}
// SL.io.50: Use '\n' not endl
std::cout << "result: " << value << '\n';
```
## Enumerations (Enum.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **Enum.1** | Prefer enumerations over macros |
| **Enum.3** | Prefer `enum class` over plain `enum` |
| **Enum.5** | Don't use ALL_CAPS for enumerators |
| **Enum.6** | Avoid unnamed enumerations |
```cpp
// Enum.3 + Enum.5: Scoped enum, no ALL_CAPS
enum class Color { red, green, blue };
enum class LogLevel { debug, info, warning, error };
// BAD: plain enum leaks names, ALL_CAPS clashes with macros
enum { RED, GREEN, BLUE }; // Enum.3 + Enum.5 + Enum.6 violation
#define MAX_SIZE 100 // Enum.1 violation -- use constexpr
```
## Source Files & Naming (SF.*, NL.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **SF.1** | Use `.cpp` for code files and `.h` for interface files |
| **SF.7** | Don't write `using namespace` at global scope in a header |
| **SF.8** | Use `#include` guards for all `.h` files |
| **SF.11** | Header files should be self-contained |
| **NL.5** | Avoid encoding type information in names (no Hungarian notation) |
| **NL.8** | Use a consistent naming style |
| **NL.9** | Use ALL_CAPS for macro names only |
| **NL.10** | Prefer `underscore_style` names |
### Header Guard
```cpp
// SF.8: Include guard (or #pragma once)
#ifndef PROJECT_MODULE_WIDGET_H
#define PROJECT_MODULE_WIDGET_H
// SF.11: Self-contained -- include everything this header needs
#include <string>
#include <vector>
namespace project::module {
class Widget {
public:
explicit Widget(std::string name);
const std::string& name() const;
private:
std::string name_;
};
} // namespace project::module
#endif // PROJECT_MODULE_WIDGET_H
```
### Naming Conventions
```cpp
// NL.8 + NL.10: Consistent underscore_style
namespace my_project {
constexpr int max_buffer_size = 4096; // NL.9: not ALL_CAPS (it's not a macro)
class tcp_connection { // underscore_style class
public:
void send_message(std::string_view msg);
bool is_connected() const;
private:
std::string host_; // trailing underscore for members
int port_;
};
} // namespace my_project
```
### Anti-Patterns
- `using namespace std;` in a header at global scope (SF.7)
- Headers that depend on inclusion order (SF.10, SF.11)
- Hungarian notation like `strName`, `iCount` (NL.5)
- ALL_CAPS for anything other than macros (NL.9)
## Performance (Per.*)
### Key Rules
| Rule | Summary |
|------|---------|
| **Per.1** | Don't optimize without reason |
| **Per.2** | Don't optimize prematurely |
| **Per.6** | Don't make claims about performance without measurements |
| **Per.7** | Design to enable optimization |
| **Per.10** | Rely on the static type system |
| **Per.11** | Move computation from run time to compile time |
| **Per.19** | Access memory predictably |
### Guidelines
```cpp
// Per.11: Compile-time computation where possible
constexpr auto lookup_table = [] {
std::array<int, 256> table{};
for (int i = 0; i < 256; ++i) {
table[i] = i * i;
}
return table;
}();
// Per.19: Prefer contiguous data for cache-friendliness
std::vector<Point> points; // GOOD: contiguous
std::vector<std::unique_ptr<Point>> indirect_points; // BAD: pointer chasing
```
### Anti-Patterns
- Optimizing without profiling data (Per.1, Per.6)
- Choosing "clever" low-level code over clear abstractions (Per.4, Per.5)
- Ignoring data layout and cache behavior (Per.19)
## Quick Reference Checklist
Before marking C++ work complete:
- [ ] No raw `new`/`delete` -- use smart pointers or RAII (R.11)
- [ ] Objects initialized at declaration (ES.20)
- [ ] Variables are `const`/`constexpr` by default (Con.1, ES.25)
- [ ] Member functions are `const` where possible (Con.2)
- [ ] `enum class` instead of plain `enum` (Enum.3)
- [ ] `nullptr` instead of `0`/`NULL` (ES.47)
- [ ] No narrowing conversions (ES.46)
- [ ] No C-style casts (ES.48)
- [ ] Single-argument constructors are `explicit` (C.46)
- [ ] Rule of Zero or Rule of Five applied (C.20, C.21)
- [ ] Base class destructors are public virtual or protected non-virtual (C.35)
- [ ] Templates are constrained with concepts (T.10)
- [ ] No `using namespace` in headers at global scope (SF.7)
- [ ] Headers have include guards and are self-contained (SF.8, SF.11)
- [ ] Locks use RAII (`scoped_lock`/`lock_guard`) (CP.20)
- [ ] Exceptions are custom types, thrown by value, caught by reference (E.14, E.15)
- [ ] `'\n'` instead of `std::endl` (SL.io.50)
- [ ] No magic numbers (ES.45)

View File

@@ -0,0 +1,219 @@
---
name: regex-vs-llm-structured-text
description: Decision framework for choosing between regex and LLM when parsing structured text — start with regex, add LLM only for low-confidence edge cases.
---
# Regex vs LLM for Structured Text Parsing
A practical decision framework for parsing structured text (quizzes, forms, invoices, documents). The key insight: regex handles 95-98% of cases cheaply and deterministically. Reserve expensive LLM calls for the remaining edge cases.
## When to Activate
- Parsing structured text with repeating patterns (questions, forms, tables)
- Deciding between regex and LLM for text extraction
- Building hybrid pipelines that combine both approaches
- Optimizing cost/accuracy tradeoffs in text processing
## Decision Framework
```
Is the text format consistent and repeating?
├── Yes (>90% follows a pattern) → Start with Regex
│ ├── Regex handles 95%+ → Done, no LLM needed
│ └── Regex handles <95% → Add LLM for edge cases only
└── No (free-form, highly variable) → Use LLM directly
```
## Architecture Pattern
```
Source Text
[Regex Parser] ─── Extracts structure (95-98% accuracy)
[Text Cleaner] ─── Removes noise (markers, page numbers, artifacts)
[Confidence Scorer] ─── Flags low-confidence extractions
├── High confidence (≥0.95) → Direct output
└── Low confidence (<0.95) → [LLM Validator] → Output
```
## Implementation
### 1. Regex Parser (Handles the Majority)
```python
import re
from dataclasses import dataclass
@dataclass(frozen=True)
class ParsedItem:
id: str
text: str
choices: tuple[str, ...]
answer: str
confidence: float = 1.0
def parse_structured_text(content: str) -> list[ParsedItem]:
"""Parse structured text using regex patterns."""
pattern = re.compile(
r"(?P<id>\d+)\.\s*(?P<text>.+?)\n"
r"(?P<choices>(?:[A-D]\..+?\n)+)"
r"Answer:\s*(?P<answer>[A-D])",
re.MULTILINE | re.DOTALL,
)
items = []
for match in pattern.finditer(content):
choices = tuple(
c.strip() for c in re.findall(r"[A-D]\.\s*(.+)", match.group("choices"))
)
items.append(ParsedItem(
id=match.group("id"),
text=match.group("text").strip(),
choices=choices,
answer=match.group("answer"),
))
return items
```
### 2. Confidence Scoring
Flag items that may need LLM review:
```python
@dataclass(frozen=True)
class ConfidenceFlag:
item_id: str
score: float
reasons: tuple[str, ...]
def score_confidence(item: ParsedItem) -> ConfidenceFlag:
"""Score extraction confidence and flag issues."""
reasons = []
score = 1.0
if len(item.choices) < 3:
reasons.append("few_choices")
score -= 0.3
if not item.answer:
reasons.append("missing_answer")
score -= 0.5
if len(item.text) < 10:
reasons.append("short_text")
score -= 0.2
return ConfidenceFlag(
item_id=item.id,
score=max(0.0, score),
reasons=tuple(reasons),
)
def identify_low_confidence(
items: list[ParsedItem],
threshold: float = 0.95,
) -> list[ConfidenceFlag]:
"""Return items below confidence threshold."""
flags = [score_confidence(item) for item in items]
return [f for f in flags if f.score < threshold]
```
### 3. LLM Validator (Edge Cases Only)
```python
def validate_with_llm(
item: ParsedItem,
original_text: str,
client,
) -> ParsedItem:
"""Use LLM to fix low-confidence extractions."""
response = client.messages.create(
model="claude-haiku-4-5-20251001", # Cheapest model for validation
max_tokens=500,
messages=[{
"role": "user",
"content": (
f"Extract the question, choices, and answer from this text.\n\n"
f"Text: {original_text}\n\n"
f"Current extraction: {item}\n\n"
f"Return corrected JSON if needed, or 'CORRECT' if accurate."
),
}],
)
# Parse LLM response and return corrected item...
return corrected_item
```
### 4. Hybrid Pipeline
```python
def process_document(
content: str,
*,
llm_client=None,
confidence_threshold: float = 0.95,
) -> list[ParsedItem]:
"""Full pipeline: regex -> confidence check -> LLM for edge cases."""
# Step 1: Regex extraction (handles 95-98%)
items = parse_structured_text(content)
# Step 2: Confidence scoring
low_confidence = identify_low_confidence(items, confidence_threshold)
if not low_confidence or llm_client is None:
return items
# Step 3: LLM validation (only for flagged items)
low_conf_ids = {f.item_id for f in low_confidence}
result = []
for item in items:
if item.id in low_conf_ids:
result.append(validate_with_llm(item, content, llm_client))
else:
result.append(item)
return result
```
## Real-World Metrics
From a production quiz parsing pipeline (410 items):
| Metric | Value |
|--------|-------|
| Regex success rate | 98.0% |
| Low confidence items | 8 (2.0%) |
| LLM calls needed | ~5 |
| Cost savings vs all-LLM | ~95% |
| Test coverage | 93% |
## Best Practices
- **Start with regex** — even imperfect regex gives you a baseline to improve
- **Use confidence scoring** to programmatically identify what needs LLM help
- **Use the cheapest LLM** for validation (Haiku-class models are sufficient)
- **Never mutate** parsed items — return new instances from cleaning/validation steps
- **TDD works well** for parsers — write tests for known patterns first, then edge cases
- **Log metrics** (regex success rate, LLM call count) to track pipeline health
## Anti-Patterns to Avoid
- Sending all text to an LLM when regex handles 95%+ of cases (expensive and slow)
- Using regex for free-form, highly variable text (LLM is better here)
- Skipping confidence scoring and hoping regex "just works"
- Mutating parsed objects during cleaning/validation steps
- Not testing edge cases (malformed input, missing fields, encoding issues)
## When to Use
- Quiz/exam question parsing
- Form data extraction
- Invoice/receipt processing
- Document structure parsing (headers, sections, tables)
- Any structured text with repeating patterns where cost matters

View File

@@ -0,0 +1,159 @@
---
name: search-first
description: Research-before-coding workflow. Search for existing tools, libraries, and patterns before writing custom code. Invokes the researcher agent.
---
# /search-first — Research Before You Code
Systematizes the "search for existing solutions before implementing" workflow.
## Trigger
Use this skill when:
- Starting a new feature that likely has existing solutions
- Adding a dependency or integration
- The user asks "add X functionality" and you're about to write code
- Before creating a new utility, helper, or abstraction
## Workflow
```
┌─────────────────────────────────────────────┐
│ 1. NEED ANALYSIS │
│ Define what functionality is needed │
│ Identify language/framework constraints │
├─────────────────────────────────────────────┤
│ 2. PARALLEL SEARCH (researcher agent) │
│ ┌──────────┐ ┌──────────┐ ┌──────────┐ │
│ │ npm / │ │ MCP / │ │ GitHub / │ │
│ │ PyPI │ │ Skills │ │ Web │ │
│ └──────────┘ └──────────┘ └──────────┘ │
├─────────────────────────────────────────────┤
│ 3. EVALUATE │
│ Score candidates (functionality, maint, │
│ community, docs, license, deps) │
├─────────────────────────────────────────────┤
│ 4. DECIDE │
│ ┌─────────┐ ┌──────────┐ ┌─────────┐ │
│ │ Adopt │ │ Extend │ │ Build │ │
│ │ as-is │ │ /Wrap │ │ Custom │ │
│ └─────────┘ └──────────┘ └─────────┘ │
├─────────────────────────────────────────────┤
│ 5. IMPLEMENT │
│ Install package / Configure MCP / │
│ Write minimal custom code │
└─────────────────────────────────────────────┘
```
## Decision Matrix
| Signal | Action |
|--------|--------|
| Exact match, well-maintained, MIT/Apache | **Adopt** — install and use directly |
| Partial match, good foundation | **Extend** — install + write thin wrapper |
| Multiple weak matches | **Compose** — combine 2-3 small packages |
| Nothing suitable found | **Build** — write custom, but informed by research |
## How to Use
### Quick Mode (inline)
Before writing a utility or adding functionality, mentally run through:
1. Is this a common problem? → Search npm/PyPI
2. Is there an MCP for this? → Check `~/.claude/settings.json` and search
3. Is there a skill for this? → Check `~/.claude/skills/`
4. Is there a GitHub template? → Search GitHub
### Full Mode (agent)
For non-trivial functionality, launch the researcher agent:
```
Task(subagent_type="general-purpose", prompt="
Research existing tools for: [DESCRIPTION]
Language/framework: [LANG]
Constraints: [ANY]
Search: npm/PyPI, MCP servers, Claude Code skills, GitHub
Return: Structured comparison with recommendation
")
```
## Search Shortcuts by Category
### Development Tooling
- Linting → `eslint`, `ruff`, `textlint`, `markdownlint`
- Formatting → `prettier`, `black`, `gofmt`
- Testing → `jest`, `pytest`, `go test`
- Pre-commit → `husky`, `lint-staged`, `pre-commit`
### AI/LLM Integration
- Claude SDK → Context7 for latest docs
- Prompt management → Check MCP servers
- Document processing → `unstructured`, `pdfplumber`, `mammoth`
### Data & APIs
- HTTP clients → `httpx` (Python), `ky`/`got` (Node)
- Validation → `zod` (TS), `pydantic` (Python)
- Database → Check for MCP servers first
### Content & Publishing
- Markdown processing → `remark`, `unified`, `markdown-it`
- Image optimization → `sharp`, `imagemin`
## Integration Points
### With planner agent
The planner should invoke researcher before Phase 1 (Architecture Review):
- Researcher identifies available tools
- Planner incorporates them into the implementation plan
- Avoids "reinventing the wheel" in the plan
### With architect agent
The architect should consult researcher for:
- Technology stack decisions
- Integration pattern discovery
- Existing reference architectures
### With iterative-retrieval skill
Combine for progressive discovery:
- Cycle 1: Broad search (npm, PyPI, MCP)
- Cycle 2: Evaluate top candidates in detail
- Cycle 3: Test compatibility with project constraints
## Examples
### Example 1: "Add dead link checking"
```
Need: Check markdown files for broken links
Search: npm "markdown dead link checker"
Found: textlint-rule-no-dead-link (score: 9/10)
Action: ADOPT — npm install textlint-rule-no-dead-link
Result: Zero custom code, battle-tested solution
```
### Example 2: "Add HTTP client wrapper"
```
Need: Resilient HTTP client with retries and timeout handling
Search: npm "http client retry", PyPI "httpx retry"
Found: got (Node) with retry plugin, httpx (Python) with built-in retry
Action: ADOPT — use got/httpx directly with retry config
Result: Zero custom code, production-proven libraries
```
### Example 3: "Add config file linter"
```
Need: Validate project config files against a schema
Search: npm "config linter schema", "json schema validator cli"
Found: ajv-cli (score: 8/10)
Action: ADOPT + EXTEND — install ajv-cli, write project-specific schema
Result: 1 package + 1 schema file, no custom validation logic
```
## Anti-Patterns
- **Jumping to code**: Writing a utility without checking if one exists
- **Ignoring MCP**: Not checking if an MCP server already provides the capability
- **Over-customizing**: Wrapping a library so heavily it loses its benefits
- **Dependency bloat**: Installing a massive package for one small feature

View File

@@ -0,0 +1,175 @@
---
description: "Use when auditing Claude skills and commands for quality. Supports Quick Scan (changed skills only) and Full Stocktake modes with sequential subagent batch evaluation."
---
# skill-stocktake
Slash command (`/skill-stocktake`) that audits all Claude skills and commands using a quality checklist + AI holistic judgment. Supports two modes: Quick Scan for recently changed skills, and Full Stocktake for a complete review.
## Scope
The command targets the following paths **relative to the directory where it is invoked**:
| Path | Description |
|------|-------------|
| `~/.claude/skills/` | Global skills (all projects) |
| `{cwd}/.claude/skills/` | Project-level skills (if the directory exists) |
**At the start of Phase 1, the command explicitly lists which paths were found and scanned.**
### Targeting a specific project
To include project-level skills, run from that project's root directory:
```bash
cd ~/path/to/my-project
/skill-stocktake
```
If the project has no `.claude/skills/` directory, only global skills and commands are evaluated.
## Modes
| Mode | Trigger | Duration |
|------|---------|---------|
| Quick Scan | `results.json` exists (default) | 5–10 min |
| Full Stocktake | `results.json` absent, or `/skill-stocktake full` | 20–30 min |
**Results cache:** `~/.claude/skills/skill-stocktake/results.json`
## Quick Scan Flow
Re-evaluate only skills that have changed since the last run (5–10 min).
1. Read `~/.claude/skills/skill-stocktake/results.json`
2. Run: `bash ~/.claude/skills/skill-stocktake/scripts/quick-diff.sh \
~/.claude/skills/skill-stocktake/results.json`
(Project dir is auto-detected from `$PWD/.claude/skills`; pass it explicitly only if needed)
3. If output is `[]`: report "No changes since last run." and stop
4. Re-evaluate only those changed files using the same Phase 2 criteria
5. Carry forward unchanged skills from previous results
6. Output only the diff
7. Run: `bash ~/.claude/skills/skill-stocktake/scripts/save-results.sh \
~/.claude/skills/skill-stocktake/results.json <<< "$EVAL_RESULTS"`
## Full Stocktake Flow
### Phase 1 — Inventory
Run: `bash ~/.claude/skills/skill-stocktake/scripts/scan.sh`
The script enumerates skill files, extracts frontmatter, and collects UTC mtimes.
Project dir is auto-detected from `$PWD/.claude/skills`; pass it explicitly only if needed.
Present the scan summary and inventory table from the script output:
```
Scanning:
✓ ~/.claude/skills/ (17 files)
✗ {cwd}/.claude/skills/ (not found — global skills only)
```
| Skill | 7d use | 30d use | Description |
|-------|--------|---------|-------------|
### Phase 2 — Quality Evaluation
Launch a Task tool subagent (**Explore agent, model: opus**) with the full inventory and checklist.
The subagent reads each skill, applies the checklist, and returns per-skill JSON:
`{ "verdict": "Keep"|"Improve"|"Update"|"Retire"|"Merge into [X]", "reason": "..." }`
**Chunk guidance:** Process ~20 skills per subagent invocation to keep context manageable. Save intermediate results to `results.json` (`status: "in_progress"`) after each chunk.
After all skills are evaluated: set `status: "completed"`, proceed to Phase 3.
**Resume detection:** If `status: "in_progress"` is found on startup, resume from the first unevaluated skill.
Each skill is evaluated against this checklist:
```
- [ ] Content overlap with other skills checked
- [ ] Overlap with MEMORY.md / CLAUDE.md checked
- [ ] Freshness of technical references verified (use WebSearch if tool names / CLI flags / APIs are present)
- [ ] Usage frequency considered
```
Verdict criteria:
| Verdict | Meaning |
|---------|---------|
| Keep | Useful and current |
| Improve | Worth keeping, but specific improvements needed |
| Update | Referenced technology is outdated (verify with WebSearch) |
| Retire | Low quality, stale, or cost-asymmetric |
| Merge into [X] | Substantial overlap with another skill; name the merge target |
Evaluation is **holistic AI judgment** — not a numeric rubric. Guiding dimensions:
- **Actionability**: code examples, commands, or steps that let you act immediately
- **Scope fit**: name, trigger, and content are aligned; not too broad or narrow
- **Uniqueness**: value not replaceable by MEMORY.md / CLAUDE.md / another skill
- **Currency**: technical references work in the current environment
**Reason quality requirements** — the `reason` field must be self-contained and decision-enabling:
- Do NOT write "unchanged" alone — always restate the core evidence
- For **Retire**: state (1) what specific defect was found, (2) what covers the same need instead
- Bad: `"Superseded"`
- Good: `"disable-model-invocation: true already set; superseded by continuous-learning-v2 which covers all the same patterns plus confidence scoring. No unique content remains."`
- For **Merge**: name the target and describe what content to integrate
- Bad: `"Overlaps with X"`
- Good: `"42-line thin content; Step 4 of chatlog-to-article already covers the same workflow. Integrate the 'article angle' tip as a note in that skill."`
- For **Improve**: describe the specific change needed (what section, what action, target size if relevant)
- Bad: `"Too long"`
- Good: `"276 lines; Section 'Framework Comparison' (L80–140) duplicates ai-era-architecture-principles; delete it to reach ~150 lines."`
- For **Keep** (mtime-only change in Quick Scan): restate the original verdict rationale, do not write "unchanged"
- Bad: `"Unchanged"`
- Good: `"mtime updated but content unchanged. Unique Python reference explicitly imported by rules/python/; no overlap found."`
### Phase 3 — Summary Table
| Skill | 7d use | Verdict | Reason |
|-------|--------|---------|--------|
### Phase 4 — Consolidation
1. **Retire / Merge**: present detailed justification per file before confirming with user:
- What specific problem was found (overlap, staleness, broken references, etc.)
- What alternative covers the same functionality (for Retire: which existing skill/rule; for Merge: the target file and what content to integrate)
- Impact of removal (any dependent skills, MEMORY.md references, or workflows affected)
2. **Improve**: present specific improvement suggestions with rationale:
- What to change and why (e.g., "trim 430→200 lines because sections X/Y duplicate python-patterns")
- User decides whether to act
3. **Update**: present updated content with sources checked
4. Check MEMORY.md line count; propose compression if >100 lines
## Results File Schema
`~/.claude/skills/skill-stocktake/results.json`:
**`evaluated_at`**: Must be set to the actual UTC time of evaluation completion.
Obtain via Bash: `date -u +%Y-%m-%dT%H:%M:%SZ`. Never use a date-only approximation like `T00:00:00Z`.
```json
{
"evaluated_at": "2026-02-21T10:00:00Z",
"mode": "full",
"batch_progress": {
"total": 80,
"evaluated": 80,
"status": "completed"
},
"skills": {
"skill-name": {
"path": "~/.claude/skills/skill-name/SKILL.md",
"verdict": "Keep",
"reason": "Concrete, actionable, unique value for X workflow",
"mtime": "2026-01-15T08:30:00Z"
}
}
}
```
## Notes
- Evaluation is blind: the same checklist applies to all skills regardless of origin (ECC, self-authored, auto-extracted)
- Archive / delete operations always require explicit user confirmation
- No verdict branching by skill origin

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# quick-diff.sh — compare skill file mtimes against results.json evaluated_at
# Usage: quick-diff.sh RESULTS_JSON [CWD_SKILLS_DIR]
# Output: JSON array of changed/new files to stdout (empty [] if no changes)
#
# When CWD_SKILLS_DIR is omitted, defaults to $PWD/.claude/skills so the
# script always picks up project-level skills without relying on the caller.
#
# Environment:
#   SKILL_STOCKTAKE_GLOBAL_DIR   Override ~/.claude/skills (for testing only;
#                                do not set in production — intended for bats tests)
#   SKILL_STOCKTAKE_PROJECT_DIR  Override project dir detection (for testing only)
set -euo pipefail

RESULTS_JSON="${1:-}"
CWD_SKILLS_DIR="${SKILL_STOCKTAKE_PROJECT_DIR:-${2:-$PWD/.claude/skills}}"
GLOBAL_DIR="${SKILL_STOCKTAKE_GLOBAL_DIR:-$HOME/.claude/skills}"

if [[ -z "$RESULTS_JSON" || ! -f "$RESULTS_JSON" ]]; then
echo "Error: RESULTS_JSON not found: ${RESULTS_JSON:-<empty>}" >&2
exit 1
fi

# Validate CWD_SKILLS_DIR looks like a .claude/skills path (defense-in-depth).
# Only warn when the path exists — a nonexistent path poses no traversal risk.
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" && "$CWD_SKILLS_DIR" != */.claude/skills* ]]; then
echo "Warning: CWD_SKILLS_DIR does not look like a .claude/skills path: $CWD_SKILLS_DIR" >&2
fi

evaluated_at=$(jq -r '.evaluated_at' "$RESULTS_JSON")
# Fail fast on a missing or malformed evaluated_at rather than producing
# unpredictable results from ISO 8601 string comparison against "null".
if [[ ! "$evaluated_at" =~ ^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z$ ]]; then
echo "Error: invalid or missing evaluated_at in $RESULTS_JSON: $evaluated_at" >&2
exit 1
fi

# Pre-extract known paths from results.json once (O(1) lookup per file instead of O(n*m)).
# FIX: `|| true` — under `set -e`, a results file without a .skills object made this
# jq invocation exit non-zero and abort the whole script, even though the `2>/dev/null`
# shows the failure was meant to be tolerated. With the guard, a missing .skills yields
# an empty known-path list and every discovered file is reported as new.
known_paths=$(jq -r '.skills[].path' "$RESULTS_JSON" 2>/dev/null || true)

tmpdir=$(mktemp -d)
# Use a function to avoid embedding $tmpdir in a quoted string (prevents injection
# if TMPDIR were crafted to contain shell metacharacters).
_cleanup() { rm -rf "$tmpdir"; }
trap _cleanup EXIT

# Shared counter across process_dir calls — intentionally NOT local.
# The `< <(find …)` process substitution keeps the while-loop in the current
# shell, so increments survive both iterations and successive process_dir calls.
i=0

# Emit one JSON object per changed/new *.md file under $1 into $tmpdir.
process_dir() {
local dir="$1"
while IFS= read -r file; do
local mtime dp is_new
mtime=$(date -u -r "$file" +%Y-%m-%dT%H:%M:%SZ)
# Display path: collapse $HOME prefix to ~ to match the paths stored in results.json.
dp="${file/#$HOME/~}"
# Check if this file is known to results.json (exact whole-line match to
# avoid substring false-positives, e.g. "python-patterns" matching "python-patterns-v2").
if echo "$known_paths" | grep -qxF "$dp"; then
is_new="false"
# Known file: only emit if mtime changed (ISO 8601 string comparison is safe)
[[ "$mtime" > "$evaluated_at" ]] || continue
else
is_new="true"
# New file: always emit regardless of mtime
fi
jq -n \
--arg path "$dp" \
--arg mtime "$mtime" \
--argjson is_new "$is_new" \
'{path:$path,mtime:$mtime,is_new:$is_new}' \
> "$tmpdir/$i.json"
i=$((i+1))
done < <(find "$dir" -name "*.md" -type f 2>/dev/null | sort)
}

[[ -d "$GLOBAL_DIR" ]] && process_dir "$GLOBAL_DIR"
[[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" ]] && process_dir "$CWD_SKILLS_DIR"

if [[ $i -eq 0 ]]; then
echo "[]"
else
jq -s '.' "$tmpdir"/*.json
fi

View File

@@ -0,0 +1,56 @@
#!/usr/bin/env bash
# save-results.sh — merge evaluated skills into results.json with correct UTC timestamp
# Usage: save-results.sh RESULTS_JSON <<< "$EVAL_JSON"
#
# stdin format:
#   { "skills": {...}, "mode"?: "full"|"quick", "batch_progress"?: {...} }
#
# evaluated_at is always stamped with the current UTC time (`date -u`).
# stdin .skills entries are laid over the existing ones (new wins); skills
# absent from stdin are carried forward unchanged. .mode and .batch_progress
# are rewritten only when stdin actually provides them.
set -euo pipefail

results_file="${1:-}"
if [[ -z "$results_file" ]]; then
echo "Error: RESULTS_JSON argument required" >&2
echo "Usage: save-results.sh RESULTS_JSON <<< \"\$EVAL_JSON\"" >&2
exit 1
fi

now_utc=$(date -u +%Y-%m-%dT%H:%M:%SZ)

# Slurp stdin and reject anything that is not valid JSON before the results
# file is touched in any way.
payload=$(cat)
if ! jq empty <<< "$payload" 2>/dev/null; then
echo "Error: stdin is not valid JSON" >&2
exit 1
fi

# Bootstrap case: no results file yet — stdin plus a fresh timestamp becomes
# the initial results.json.
if [[ ! -f "$results_file" ]]; then
jq --arg ea "$now_utc" '. + { evaluated_at: $ea }' <<< "$payload" > "$results_file"
exit 0
fi

# Merge case: new .skills override existing ones; old skills not present on
# stdin are kept. .mode / .batch_progress are updated only if provided.
#
# mktemp gives a collision-safe scratch file (concurrent runs on the same
# results file would race on a predictable ".tmp" suffix; the random suffix
# prevents silent overwrites).
scratch=$(mktemp "${results_file}.XXXXXX")
trap 'rm -f "$scratch"' EXIT
jq -s \
--arg ea "$now_utc" \
'.[0] as $existing | .[1] as $new |
$existing |
.evaluated_at = $ea |
.skills = ($existing.skills + ($new.skills // {})) |
if ($new | has("mode")) then .mode = $new.mode else . end |
if ($new | has("batch_progress")) then .batch_progress = $new.batch_progress else . end' \
"$results_file" <(printf '%s\n' "$payload") > "$scratch"
mv "$scratch" "$results_file"

View File

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# scan.sh — enumerate skill files, extract frontmatter and UTC mtime
# Usage: scan.sh [CWD_SKILLS_DIR]
# Output: JSON to stdout
#
# When CWD_SKILLS_DIR is omitted, defaults to $PWD/.claude/skills so the
# script always picks up project-level skills without relying on the caller.
#
# Environment:
# SKILL_STOCKTAKE_GLOBAL_DIR Override ~/.claude/skills (for testing only;
# do not set in production — intended for bats tests)
# SKILL_STOCKTAKE_PROJECT_DIR Override project dir detection (for testing only)
set -euo pipefail
GLOBAL_DIR="${SKILL_STOCKTAKE_GLOBAL_DIR:-$HOME/.claude/skills}"
# Precedence: SKILL_STOCKTAKE_PROJECT_DIR env override > positional $1 > $PWD default.
CWD_SKILLS_DIR="${SKILL_STOCKTAKE_PROJECT_DIR:-${1:-$PWD/.claude/skills}}"
# Path to JSONL file containing tool-use observations (optional; used for usage frequency counts).
# Override via SKILL_STOCKTAKE_OBSERVATIONS env var if your setup uses a different path.
OBSERVATIONS="${SKILL_STOCKTAKE_OBSERVATIONS:-$HOME/.claude/observations.jsonl}"
# Validate CWD_SKILLS_DIR looks like a .claude/skills path (defense-in-depth).
# Only warn when the path exists — a nonexistent path poses no traversal risk.
# Deliberately a warning, not an error, so unusual-but-intentional layouts still scan.
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" && "$CWD_SKILLS_DIR" != */.claude/skills* ]]; then
echo "Warning: CWD_SKILLS_DIR does not look like a .claude/skills path: $CWD_SKILLS_DIR" >&2
fi
# Extract a frontmatter field (handles both quoted and unquoted single-line values).
# Does NOT support multi-line YAML blocks (| or >) or nested YAML keys.
#
# $1 = file path, $2 = field name.
# The awk program counts `---` fences (fm); only lines between the first and
# second fence are examined. A line matches when it begins literally with
# "field: " — a substr comparison, not a regex, so field names containing
# regex metacharacters are safe. Leading and trailing double quotes are
# stripped independently, so an unbalanced value like `"foo` also loses its
# quote. Prints the first match and exits; prints nothing if the field is absent.
extract_field() {
local file="$1" field="$2"
awk -v f="$field" '
BEGIN { fm=0 }
/^---$/ { fm++; next }
fm==1 {
n = length(f) + 2
if (substr($0, 1, n) == f ": ") {
val = substr($0, n+1)
gsub(/^"/, "", val)
gsub(/"$/, "", val)
print val
exit
}
}
fm>=2 { exit }
' "$file"
}
# Print the UTC timestamp for a moment N days in the past (ISO 8601, Z suffix).
# BSD/macOS date spells the offset as `-v-Nd`; GNU date as `-d "N days ago"`.
# The BSD form is attempted first with its stderr silenced; on failure we fall
# back to the GNU form, whose own errors remain visible.
date_ago() {
local days="$1"
if ! date -u -v-"${days}d" +%Y-%m-%dT%H:%M:%SZ 2>/dev/null; then
date -u -d "${days} days ago" +%Y-%m-%dT%H:%M:%SZ
fi
}
# Count observations matching a file path since a cutoff timestamp
# $1 = absolute file path, $2 = ISO 8601 cutoff; prints an integer count.
# Missing observations file counts as 0; jq parse errors are suppressed
# (2>/dev/null), in which case wc -l reports 0 lines.
# NOTE(review): this helper appears to be unused — scan_dir_to_json below
# pre-aggregates both time windows with its own jq passes instead of calling
# count_obs per file. Candidate for removal; confirm no external caller
# (e.g. bats tests) sources this script before deleting.
count_obs() {
local file="$1" cutoff="$2"
if [[ ! -f "$OBSERVATIONS" ]]; then
echo 0
return
fi
jq -r --arg p "$file" --arg c "$cutoff" \
'select(.tool=="Read" and .path==$p and .timestamp>=$c) | 1' \
"$OBSERVATIONS" 2>/dev/null | wc -l | tr -d ' '
}
# Scan a directory and produce a JSON array of skill objects.
# $1 = directory to scan (recursively, *.md files only, sorted).
# Each object: {path, name, description, use_7d, use_30d, mtime}.
scan_dir_to_json() {
local dir="$1"
local c7 c30
c7=$(date_ago 7)
c30=$(date_ago 30)
local tmpdir
tmpdir=$(mktemp -d)
# Use a function to avoid embedding $tmpdir in a quoted string (prevents injection
# if TMPDIR were crafted to contain shell metacharacters).
# NOTE(review): _scan_cleanup reads the local _scan_tmpdir from inside a RETURN
# trap — confirm with a quick test that the local is still in scope when the
# trap fires on your target bash version; if not, temp dirs leak silently.
local _scan_tmpdir="$tmpdir"
_scan_cleanup() { rm -rf "$_scan_tmpdir"; }
trap _scan_cleanup RETURN
# Pre-aggregate observation counts in two passes (one per window) instead of
# calling jq per-file — reduces from O(n*m) to O(n+m) jq invocations.
# FIX: `|| true` on both pipelines — under `set -euo pipefail`, a corrupt or
# partially-written observations.jsonl made jq exit non-zero and abort the
# whole scan, even though the `2>/dev/null` shows the failure was meant to be
# tolerated. With the guard, unreadable observations degrade to zero counts.
local obs_7d_counts obs_30d_counts
obs_7d_counts=""
obs_30d_counts=""
if [[ -f "$OBSERVATIONS" ]]; then
obs_7d_counts=$(jq -r --arg c "$c7" \
'select(.tool=="Read" and .timestamp>=$c) | .path' \
"$OBSERVATIONS" 2>/dev/null | sort | uniq -c || true)
obs_30d_counts=$(jq -r --arg c "$c30" \
'select(.tool=="Read" and .timestamp>=$c) | .path' \
"$OBSERVATIONS" 2>/dev/null | sort | uniq -c || true)
fi
local i=0
while IFS= read -r file; do
local name desc mtime u7 u30 dp
name=$(extract_field "$file" "name")
desc=$(extract_field "$file" "description")
mtime=$(date -u -r "$file" +%Y-%m-%dT%H:%M:%SZ)
# Use awk exact field match to avoid substring false-positives from grep -F.
# uniq -c output format: " N /path/to/file" — path is always field 2.
# Limitation: a path containing whitespace splits across fields and will not
# match; such files fall back to a count of 0.
u7=$(echo "$obs_7d_counts" | awk -v f="$file" '$2 == f {print $1}' | head -1)
u7="${u7:-0}"
u30=$(echo "$obs_30d_counts" | awk -v f="$file" '$2 == f {print $1}' | head -1)
u30="${u30:-0}"
# Display path: collapse $HOME prefix to ~ for stable, portable paths in output.
dp="${file/#$HOME/~}"
jq -n \
--arg path "$dp" \
--arg name "$name" \
--arg description "$desc" \
--arg mtime "$mtime" \
--argjson use_7d "$u7" \
--argjson use_30d "$u30" \
'{path:$path,name:$name,description:$description,use_7d:$use_7d,use_30d:$use_30d,mtime:$mtime}' \
> "$tmpdir/$i.json"
i=$((i+1))
done < <(find "$dir" -name "*.md" -type f 2>/dev/null | sort)
if [[ $i -eq 0 ]]; then
echo "[]"
else
jq -s '.' "$tmpdir"/*.json
fi
}
# --- Main ---
# Scan the global dir, then the project dir (if any), and emit one JSON
# document: { scan_summary: {global, project}, skills: [...] }.
# Booleans travel through the shell as the strings "true"/"false" and are
# converted back to real JSON booleans inside the jq program via string
# comparison ($global_found == "true").
global_found="false"
global_count=0
global_skills="[]"
if [[ -d "$GLOBAL_DIR" ]]; then
global_found="true"
global_skills=$(scan_dir_to_json "$GLOBAL_DIR")
global_count=$(echo "$global_skills" | jq 'length')
fi
project_found="false"
project_path=""
project_count=0
project_skills="[]"
if [[ -n "$CWD_SKILLS_DIR" && -d "$CWD_SKILLS_DIR" ]]; then
project_found="true"
project_path="$CWD_SKILLS_DIR"
project_skills=$(scan_dir_to_json "$CWD_SKILLS_DIR")
project_count=$(echo "$project_skills" | jq 'length')
fi
# Merge global + project skills into one array
# (jq -s slurps the two arrays into [[...],[...]]; `add` concatenates them).
all_skills=$(jq -s 'add' <(echo "$global_skills") <(echo "$project_skills"))
jq -n \
--arg global_found "$global_found" \
--argjson global_count "$global_count" \
--arg project_found "$project_found" \
--arg project_path "$project_path" \
--argjson project_count "$project_count" \
--argjson skills "$all_skills" \
'{
scan_summary: {
global: { found: ($global_found == "true"), count: $global_count },
project: { found: ($project_found == "true"), path: $project_path, count: $project_count }
},
skills: $skills
}'

View File

@@ -0,0 +1,142 @@
---
name: swift-actor-persistence
description: Thread-safe data persistence in Swift using actors — in-memory cache with file-backed storage, eliminating data races by design.
---
# Swift Actors for Thread-Safe Persistence
Patterns for building thread-safe data persistence layers using Swift actors. Combines in-memory caching with file-backed storage, leveraging the actor model to eliminate data races at compile time.
## When to Activate
- Building a data persistence layer in Swift 5.5+
- Need thread-safe access to shared mutable state
- Want to eliminate manual synchronization (locks, DispatchQueues)
- Building offline-first apps with local storage
## Core Pattern
### Actor-Based Repository
The actor model guarantees serialized access — no data races, enforced by the compiler.
```swift
public actor LocalRepository<T: Codable & Identifiable> where T.ID == String {
private var cache: [String: T] = [:]
private let fileURL: URL
public init(directory: URL = .documentsDirectory, filename: String = "data.json") {
self.fileURL = directory.appendingPathComponent(filename)
// Synchronous load during init (actor isolation not yet active)
self.cache = Self.loadSynchronously(from: fileURL)
}
// MARK: - Public API
public func save(_ item: T) throws {
cache[item.id] = item
try persistToFile()
}
public func delete(_ id: String) throws {
cache[id] = nil
try persistToFile()
}
public func find(by id: String) -> T? {
cache[id]
}
public func loadAll() -> [T] {
Array(cache.values)
}
// MARK: - Private
private func persistToFile() throws {
let data = try JSONEncoder().encode(Array(cache.values))
try data.write(to: fileURL, options: .atomic)
}
private static func loadSynchronously(from url: URL) -> [String: T] {
guard let data = try? Data(contentsOf: url),
let items = try? JSONDecoder().decode([T].self, from: data) else {
return [:]
}
return Dictionary(uniqueKeysWithValues: items.map { ($0.id, $0) })
}
}
```
### Usage
All calls are automatically async due to actor isolation:
```swift
let repository = LocalRepository<Question>()
// Read — fast O(1) lookup from in-memory cache
let question = await repository.find(by: "q-001")
let allQuestions = await repository.loadAll()
// Write — updates cache and persists to file atomically
try await repository.save(newQuestion)
try await repository.delete("q-001")
```
### Combining with @Observable ViewModel
```swift
@Observable
final class QuestionListViewModel {
private(set) var questions: [Question] = []
private let repository: LocalRepository<Question>
init(repository: LocalRepository<Question> = LocalRepository()) {
self.repository = repository
}
func load() async {
questions = await repository.loadAll()
}
func add(_ question: Question) async throws {
try await repository.save(question)
questions = await repository.loadAll()
}
}
```
## Key Design Decisions
| Decision | Rationale |
|----------|-----------|
| Actor (not class + lock) | Compiler-enforced thread safety, no manual synchronization |
| In-memory cache + file persistence | Fast reads from cache, durable writes to disk |
| Synchronous init loading | Avoids async initialization complexity |
| Dictionary keyed by ID | O(1) lookups by identifier |
| Generic over `Codable & Identifiable` | Reusable across any model type |
| Atomic file writes (`.atomic`) | Prevents partial writes on crash |
## Best Practices
- **Use `Sendable` types** for all data crossing actor boundaries
- **Keep the actor's public API minimal** — only expose domain operations, not persistence details
- **Use `.atomic` writes** to prevent data corruption if the app crashes mid-write
- **Load synchronously in `init`** — async initializers add complexity with minimal benefit for local files
- **Combine with `@Observable`** ViewModels for reactive UI updates
## Anti-Patterns to Avoid
- Using `DispatchQueue` or `NSLock` instead of actors for new Swift concurrency code
- Exposing the internal cache dictionary to external callers
- Making the file URL configurable without validation
- Forgetting that all actor method calls are `await` — callers must handle async context
- Using `nonisolated` to bypass actor isolation (defeats the purpose)
## When to Use
- Local data storage in iOS/macOS apps (user data, settings, cached content)
- Offline-first architectures that sync to a server later
- Any shared mutable state that multiple parts of the app access concurrently
- Replacing legacy `DispatchQueue`-based thread safety with modern Swift concurrency

View File

@@ -0,0 +1,189 @@
---
name: swift-protocol-di-testing
description: Protocol-based dependency injection for testable Swift code — mock file system, network, and external APIs using focused protocols and Swift Testing.
---
# Swift Protocol-Based Dependency Injection for Testing
Patterns for making Swift code testable by abstracting external dependencies (file system, network, iCloud) behind small, focused protocols. Enables deterministic tests without I/O.
## When to Activate
- Writing Swift code that accesses file system, network, or external APIs
- Need to test error handling paths without triggering real failures
- Building modules that work across environments (app, test, SwiftUI preview)
- Designing testable architecture with Swift concurrency (actors, Sendable)
## Core Pattern
### 1. Define Small, Focused Protocols
Each protocol handles exactly one external concern.
```swift
// File system access
public protocol FileSystemProviding: Sendable {
func containerURL(for purpose: Purpose) -> URL?
}
// File read/write operations
public protocol FileAccessorProviding: Sendable {
func read(from url: URL) throws -> Data
func write(_ data: Data, to url: URL) throws
func fileExists(at url: URL) -> Bool
}
// Bookmark storage (e.g., for sandboxed apps)
public protocol BookmarkStorageProviding: Sendable {
func saveBookmark(_ data: Data, for key: String) throws
func loadBookmark(for key: String) throws -> Data?
}
```
### 2. Create Default (Production) Implementations
```swift
public struct DefaultFileSystemProvider: FileSystemProviding {
public init() {}
public func containerURL(for purpose: Purpose) -> URL? {
FileManager.default.url(forUbiquityContainerIdentifier: nil)
}
}
public struct DefaultFileAccessor: FileAccessorProviding {
public init() {}
public func read(from url: URL) throws -> Data {
try Data(contentsOf: url)
}
public func write(_ data: Data, to url: URL) throws {
try data.write(to: url, options: .atomic)
}
public func fileExists(at url: URL) -> Bool {
FileManager.default.fileExists(atPath: url.path)
}
}
```
### 3. Create Mock Implementations for Testing
```swift
public final class MockFileAccessor: FileAccessorProviding, @unchecked Sendable {
public var files: [URL: Data] = [:]
public var readError: Error?
public var writeError: Error?
public init() {}
public func read(from url: URL) throws -> Data {
if let error = readError { throw error }
guard let data = files[url] else {
throw CocoaError(.fileReadNoSuchFile)
}
return data
}
public func write(_ data: Data, to url: URL) throws {
if let error = writeError { throw error }
files[url] = data
}
public func fileExists(at url: URL) -> Bool {
files[url] != nil
}
}
```
### 4. Inject Dependencies with Default Parameters
Production code uses defaults; tests inject mocks.
```swift
public actor SyncManager {
private let fileSystem: FileSystemProviding
private let fileAccessor: FileAccessorProviding
public init(
fileSystem: FileSystemProviding = DefaultFileSystemProvider(),
fileAccessor: FileAccessorProviding = DefaultFileAccessor()
) {
self.fileSystem = fileSystem
self.fileAccessor = fileAccessor
}
public func sync() async throws {
guard let containerURL = fileSystem.containerURL(for: .sync) else {
throw SyncError.containerNotAvailable
}
let data = try fileAccessor.read(
from: containerURL.appendingPathComponent("data.json")
)
// Process data...
}
}
```
### 5. Write Tests with Swift Testing
```swift
import Testing
@Test("Sync manager handles missing container")
func testMissingContainer() async {
let mockFileSystem = MockFileSystemProvider(containerURL: nil)
let manager = SyncManager(fileSystem: mockFileSystem)
await #expect(throws: SyncError.containerNotAvailable) {
try await manager.sync()
}
}
@Test("Sync manager reads data correctly")
func testReadData() async throws {
let mockFileAccessor = MockFileAccessor()
mockFileAccessor.files[testURL] = testData
let manager = SyncManager(fileAccessor: mockFileAccessor)
let result = try await manager.loadData()
#expect(result == expectedData)
}
@Test("Sync manager handles read errors gracefully")
func testReadError() async {
let mockFileAccessor = MockFileAccessor()
mockFileAccessor.readError = CocoaError(.fileReadCorruptFile)
let manager = SyncManager(fileAccessor: mockFileAccessor)
await #expect(throws: SyncError.self) {
try await manager.sync()
}
}
```
## Best Practices
- **Single Responsibility**: Each protocol should handle one concern — don't create "god protocols" with many methods
- **Sendable conformance**: Required when protocols are used across actor boundaries
- **Default parameters**: Let production code use real implementations by default; only tests need to specify mocks
- **Error simulation**: Design mocks with configurable error properties for testing failure paths
- **Only mock boundaries**: Mock external dependencies (file system, network, APIs), not internal types
## Anti-Patterns to Avoid
- Creating a single large protocol that covers all external access
- Mocking internal types that have no external dependencies
- Using `#if DEBUG` conditionals instead of proper dependency injection
- Forgetting `Sendable` conformance when used with actors
- Over-engineering: if a type has no external dependencies, it doesn't need a protocol
## When to Use
- Any Swift code that touches file system, network, or external APIs
- Testing error handling paths that are hard to trigger in real environments
- Building modules that need to work in app, test, and SwiftUI preview contexts
- Apps using Swift concurrency (actors, structured concurrency) that need testable architecture

View File

@@ -1587,6 +1587,558 @@ function runTests() {
cleanupTestDir(testDir);
})) passed++; else failed++;
// ── Round 47: escape sequence and frontmatter edge cases ──
// Convention throughout: validator exit code 0 = valid input, 1 = validation
// failure; each test() call returns a boolean folded into passed/failed.
console.log('\nRound 47: validate-hooks (inline JS escape sequences):');
if (test('validates inline JS with mixed escape sequences (newline + escaped quote)', () => {
const testDir = createTestDir();
const hooksFile = path.join(testDir, 'hooks.json');
// Command value after JSON parse: node -e "var a = \"ok\"\nconsole.log(a)"
// Regex captures: var a = \"ok\"\nconsole.log(a)
// After unescape chain: var a = "ok"\nconsole.log(a) (real newline) — valid JS
fs.writeFileSync(hooksFile, JSON.stringify({
hooks: {
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command',
command: 'node -e "var a = \\"ok\\"\\nconsole.log(a)"' }] }]
}
}));
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
assert.strictEqual(result.code, 0, 'Should handle escaped quotes and newline separators');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('rejects inline JS with syntax error after unescaping', () => {
const testDir = createTestDir();
const hooksFile = path.join(testDir, 'hooks.json');
// After unescape this becomes: var x = { — missing closing brace
fs.writeFileSync(hooksFile, JSON.stringify({
hooks: {
PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command',
command: 'node -e "var x = {"' }] }]
}
}));
const result = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
assert.strictEqual(result.code, 1, 'Should reject JS syntax error after unescaping');
assert.ok(result.stderr.includes('invalid inline JS'), 'Should report inline JS error');
cleanupTestDir(testDir);
})) passed++; else failed++;
console.log('\nRound 47: validate-agents (frontmatter lines without colon):');
if (test('silently ignores frontmatter line without colon', () => {
const testDir = createTestDir();
// Line "just some text" has no colon — should be skipped, not cause crash
fs.writeFileSync(path.join(testDir, 'mixed.md'),
'---\nmodel: sonnet\njust some text without colon\ntools: Read\n---\n# Agent');
const result = runValidatorWithDir('validate-agents', 'AGENTS_DIR', testDir);
assert.strictEqual(result.code, 0, 'Should ignore lines without colon in frontmatter');
cleanupTestDir(testDir);
})) passed++; else failed++;
// ── Round 52: command inline backtick refs, workflow whitespace, code-only rules ──
console.log('\nRound 52: validate-commands (inline backtick refs):');
if (test('validates command refs inside inline backticks (not stripped by code block removal)', () => {
const testDir = createTestDir();
const agentsDir = createTestDir();
const skillsDir = createTestDir();
// deploy.md exists, so the `/deploy` reference in workflow.md must resolve.
fs.writeFileSync(path.join(testDir, 'deploy.md'), '# Deploy\nDeploy the app.');
// Inline backtick ref `/deploy` should be validated (only fenced blocks stripped)
fs.writeFileSync(path.join(testDir, 'workflow.md'),
'# Workflow\nFirst run `/deploy` to deploy the app.');
const result = runValidatorWithDirs('validate-commands', {
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
});
assert.strictEqual(result.code, 0, 'Inline backtick command refs should be validated');
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
})) passed++; else failed++;
console.log('\nRound 52: validate-commands (workflow whitespace):');
if (test('validates workflow arrows with irregular whitespace', () => {
const testDir = createTestDir();
const agentsDir = createTestDir();
const skillsDir = createTestDir();
// Both arrow endpoints must exist as agents for the workflow lines to resolve.
fs.writeFileSync(path.join(agentsDir, 'planner.md'), '# Planner');
fs.writeFileSync(path.join(agentsDir, 'reviewer.md'), '# Reviewer');
// Two workflow lines: arrow with no surrounding spaces, then with single spaces
// (an earlier comment claimed a third tab-separated line that the fixture never contained)
fs.writeFileSync(path.join(testDir, 'flow.md'),
'# Workflow\n\nplanner->reviewer\nplanner -> reviewer');
const result = runValidatorWithDirs('validate-commands', {
COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
});
assert.strictEqual(result.code, 0, 'Workflow arrows with irregular whitespace should be valid');
cleanupTestDir(testDir); cleanupTestDir(agentsDir); cleanupTestDir(skillsDir);
})) passed++; else failed++;
console.log('\nRound 52: validate-rules (code-only content):');
if (test('passes rule file containing only a fenced code block', () => {
const testDir = createTestDir();
// No prose at all — the file is one fenced block; still counts as non-empty content.
fs.writeFileSync(path.join(testDir, 'code-only.md'),
'```javascript\nfunction example() {\n return true;\n}\n```');
const result = runValidatorWithDir('validate-rules', 'RULES_DIR', testDir);
assert.strictEqual(result.code, 0, 'Rule with only code block should pass (non-empty)');
assert.ok(result.stdout.includes('Validated 1'), 'Should count the code-only file');
cleanupTestDir(testDir);
})) passed++; else failed++;
// ── Round 57: readFileSync error path, statSync catch block, adjacent code blocks ──
console.log('\nRound 57: validate-skills.js (SKILL.md is a directory — readFileSync error):');
// existsSync reports true for a directory named SKILL.md, but readFileSync
// then throws EISDIR — driving the validator's catch block (lines 33-37).
test('fails gracefully when SKILL.md is a directory instead of a file', () => {
  const skillsRoot = createTestDir();
  const skillDir = path.join(skillsRoot, 'dir-skill');
  fs.mkdirSync(skillDir);
  fs.mkdirSync(path.join(skillDir, 'SKILL.md'));
  const res = runValidatorWithDir('validate-skills', 'SKILLS_DIR', skillsRoot);
  assert.strictEqual(res.code, 1, 'Should fail when SKILL.md is a directory');
  assert.ok(res.stderr.includes('dir-skill'), 'Should report the problematic skill');
  cleanupTestDir(skillsRoot);
}) ? passed++ : failed++;
console.log('\nRound 57: validate-rules.js (broken symlink — statSync catch block):');
test('reports error for broken symlink .md file in rules directory', () => {
  const rulesDir = createTestDir();
  fs.writeFileSync(path.join(rulesDir, 'valid.md'), '# Valid Rule');
  // Dangling symlink: statSync follows it, throws ENOENT, and exercises the
  // validator's catch block (lines 35-38).
  try {
    fs.symlinkSync('/nonexistent/target.md', path.join(rulesDir, 'broken.md'));
  } catch {
    // Symlink creation unsupported on this system — treat the test as skipped.
    console.log(' (skipped — symlinks not supported)');
    cleanupTestDir(rulesDir);
    return;
  }
  const res = runValidatorWithDir('validate-rules', 'RULES_DIR', rulesDir);
  assert.strictEqual(res.code, 1, 'Should fail on broken symlink');
  assert.ok(res.stderr.includes('broken.md'), 'Should report the broken symlink file');
  cleanupTestDir(rulesDir);
}) ? passed++ : failed++;
console.log('\nRound 57: validate-commands.js (adjacent code blocks both stripped):');
// Two fenced blocks back to back, each containing refs that do not exist —
// the code-block stripper must remove BOTH before reference checking runs.
test('strips multiple adjacent code blocks before checking references', () => {
  const commandsDir = createTestDir();
  const agentsDir = createTestDir();
  const skillsDir = createTestDir();
  fs.writeFileSync(path.join(commandsDir, 'multi-blocks.md'),
    '# Multi Block\n\n' +
    '```\n`/phantom-a` in first block\n```\n\n' +
    'Content between blocks\n\n' +
    '```\n`/phantom-b` in second block\nagents/ghost-agent.md\n```\n\n' +
    'Final content');
  const outcome = runValidatorWithDirs('validate-commands', {
    COMMANDS_DIR: commandsDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
  });
  assert.strictEqual(outcome.code, 0,
    'Both code blocks should be stripped — no broken refs reported');
  assert.ok(!outcome.stderr.includes('phantom-a'), 'First block ref should be stripped');
  assert.ok(!outcome.stderr.includes('phantom-b'), 'Second block ref should be stripped');
  assert.ok(!outcome.stderr.includes('ghost-agent'), 'Agent ref in second block should be stripped');
  [commandsDir, agentsDir, skillsDir].forEach(cleanupTestDir);
}) ? passed++ : failed++;
// ── Round 58: readFileSync catch block, colonIdx edge case, command-as-object ──
console.log('\nRound 58: validate-agents.js (unreadable agent file — readFileSync catch):');
test('reports error when agent .md file is unreadable (chmod 000)', () => {
  // chmod 000 does not block reads on Windows or for root — skip there.
  if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) {
    console.log(' (skipped — not supported on this platform)');
    return;
  }
  const agentsDir = createTestDir();
  const lockedFile = path.join(agentsDir, 'locked.md');
  fs.writeFileSync(lockedFile, '---\nmodel: sonnet\ntools: Read\n---\n# Agent');
  fs.chmodSync(lockedFile, 0o000);
  try {
    const res = runValidatorWithDir('validate-agents', 'AGENTS_DIR', agentsDir);
    assert.strictEqual(res.code, 1, 'Should exit 1 on read error');
    assert.ok(res.stderr.includes('locked.md'), 'Should mention the unreadable file');
  } finally {
    // Restore permissions so the temp directory can actually be removed.
    fs.chmodSync(lockedFile, 0o644);
    cleanupTestDir(agentsDir);
  }
}) ? passed++ : failed++;
console.log('\nRound 58: validate-agents.js (frontmatter line with colon at position 0):');
// ':sonnet' puts the colon at index 0, so no key can be extracted and the
// line is skipped — the model field therefore counts as missing.
test('rejects agent when required field key has colon at position 0 (no key name)', () => {
  const agentsDir = createTestDir();
  fs.writeFileSync(path.join(agentsDir, 'bad-colon.md'),
    '---\n:sonnet\ntools: Read\n---\n# Agent with leading colon');
  const res = runValidatorWithDir('validate-agents', 'AGENTS_DIR', agentsDir);
  assert.strictEqual(res.code, 1, 'Should fail — model field is missing (colon at idx 0 skipped)');
  assert.ok(res.stderr.includes('model'), 'Should report missing model field');
  cleanupTestDir(agentsDir);
}) ? passed++ : failed++;
console.log('\nRound 58: validate-hooks.js (command is a plain object — not string or array):');
// A hook command must be a string (or array) — a plain object is invalid.
test('rejects hook entry where command is a plain object', () => {
  const dir = createTestDir();
  const hooksFile = path.join(dir, 'hooks.json');
  const payload = {
    hooks: {
      PreToolUse: [{ matcher: 'test', hooks: [{ type: 'command', command: { run: 'echo hi' } }] }]
    }
  };
  fs.writeFileSync(hooksFile, JSON.stringify(payload));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
  assert.strictEqual(res.code, 1, 'Should reject object command (not string or array)');
  assert.ok(res.stderr.includes('command'), 'Should report invalid command field');
  cleanupTestDir(dir);
}) ? passed++ : failed++;
// ── Round 63: object-format missing matcher, unreadable command file, empty commands dir ──
console.log('\nRound 63: validate-hooks.js (object-format matcher missing matcher field):');
// Object format where a matcher entry carries a hooks array but no matcher key.
test('rejects object-format matcher entry missing matcher field', () => {
  const dir = createTestDir();
  const hooksFile = path.join(dir, 'hooks.json');
  const payload = {
    hooks: {
      PreToolUse: [{ hooks: [{ type: 'command', command: 'echo ok' }] }]
    }
  };
  fs.writeFileSync(hooksFile, JSON.stringify(payload));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
  assert.strictEqual(res.code, 1, 'Should fail on missing matcher field in object format');
  assert.ok(res.stderr.includes("missing 'matcher' field"), 'Should report missing matcher field');
  cleanupTestDir(dir);
}) ? passed++ : failed++;
console.log('\nRound 63: validate-commands.js (unreadable command file):');
test('reports error when command .md file is unreadable (chmod 000)', () => {
  // chmod 000 does not block reads on Windows or for root — skip there.
  if (process.platform === 'win32' || (process.getuid && process.getuid() === 0)) {
    console.log(' (skipped — not supported on this platform)');
    return;
  }
  const commandsDir = createTestDir();
  const lockedFile = path.join(commandsDir, 'locked.md');
  fs.writeFileSync(lockedFile, '# Locked Command');
  fs.chmodSync(lockedFile, 0o000);
  try {
    const res = runValidatorWithDirs('validate-commands', {
      COMMANDS_DIR: commandsDir, AGENTS_DIR: '/nonexistent', SKILLS_DIR: '/nonexistent'
    });
    assert.strictEqual(res.code, 1, 'Should exit 1 on read error');
    assert.ok(res.stderr.includes('locked.md'), 'Should mention the unreadable file');
  } finally {
    // Restore permissions so cleanup can delete the file.
    fs.chmodSync(lockedFile, 0o644);
    cleanupTestDir(commandsDir);
  }
}) ? passed++ : failed++;
console.log('\nRound 63: validate-commands.js (empty commands directory):');
// A directory holding only non-.md files yields zero commands to validate.
test('passes on empty commands directory (no .md files)', () => {
  const commandsDir = createTestDir();
  fs.writeFileSync(path.join(commandsDir, 'readme.txt'), 'not a command');
  const res = runValidatorWithDirs('validate-commands', {
    COMMANDS_DIR: commandsDir, AGENTS_DIR: '/nonexistent', SKILLS_DIR: '/nonexistent'
  });
  assert.strictEqual(res.code, 0, 'Should pass on empty commands directory');
  assert.ok(res.stdout.includes('Validated 0'), 'Should report 0 validated');
  cleanupTestDir(commandsDir);
}) ? passed++ : failed++;
// ── Round 65: empty directories for rules and skills ──
console.log('\nRound 65: validate-rules.js (empty directory — no .md files):');
test('passes on rules directory with no .md files (Validated 0)', () => {
  const rulesDir = createTestDir();
  // Only non-.md entries — the readdirSync .md filter yields an empty list.
  fs.writeFileSync(path.join(rulesDir, 'notes.txt'), 'not a rule');
  fs.writeFileSync(path.join(rulesDir, 'config.json'), '{}');
  const res = runValidatorWithDir('validate-rules', 'RULES_DIR', rulesDir);
  assert.strictEqual(res.code, 0, 'Should pass on empty rules directory');
  assert.ok(res.stdout.includes('Validated 0'), 'Should report 0 validated rule files');
  cleanupTestDir(rulesDir);
}) ? passed++ : failed++;
console.log('\nRound 65: validate-skills.js (empty directory — no subdirectories):');
test('passes on skills directory with only files, no subdirectories (Validated 0)', () => {
  const skillsDir = createTestDir();
  // Plain files only — the isDirectory filter finds no skill directories.
  fs.writeFileSync(path.join(skillsDir, 'README.md'), '# Skills');
  fs.writeFileSync(path.join(skillsDir, '.gitkeep'), '');
  const res = runValidatorWithDir('validate-skills', 'SKILLS_DIR', skillsDir);
  assert.strictEqual(res.code, 0, 'Should pass on skills directory with no subdirectories');
  assert.ok(res.stdout.includes('Validated 0'), 'Should report 0 validated skill directories');
  cleanupTestDir(skillsDir);
}) ? passed++ : failed++;
// ── Round 70: validate-commands.js "would create:" line skip ──
console.log('\nRound 70: validate-commands.js (would create: skip):');
// The skip regex at line 80 — /creates:|would create:/i — has two alternations.
// Round 20 covered "creates:"; this test exercises the "would create:" branch.
test('skips command references on "would create:" lines', () => {
  const commandsDir = createTestDir();
  const agentsDir = createTestDir();
  const skillsDir = createTestDir();
  fs.writeFileSync(path.join(commandsDir, 'gen-cmd.md'),
    '# Generator Command\n\nWould create: `/phantom-cmd` in your project.\n\nThis is safe.');
  const res = runValidatorWithDirs('validate-commands', {
    COMMANDS_DIR: commandsDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
  });
  assert.strictEqual(res.code, 0, 'Should skip "would create:" lines');
  assert.ok(!res.stderr.includes('phantom-cmd'), 'Should not flag ref on "would create:" line');
  [commandsDir, agentsDir, skillsDir].forEach(cleanupTestDir);
}) ? passed++ : failed++;
// ── Round 72: validate-hooks.js async/timeout type validation ──
console.log('\nRound 72: validate-hooks.js (async and timeout type validation):');
test('rejects hook with non-boolean async field', () => {
  const dir = createTestDir();
  const hooksFile = path.join(dir, 'hooks.json');
  const payload = {
    PreToolUse: [{
      matcher: 'Write',
      // async must be a boolean — a string like 'yes' is invalid
      hooks: [{ type: 'intercept', command: 'echo test', async: 'yes' }]
    }]
  };
  fs.writeFileSync(hooksFile, JSON.stringify(payload));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
  assert.strictEqual(res.code, 1, 'Should fail on non-boolean async');
  assert.ok(res.stderr.includes('async'), 'Should mention async in error');
  assert.ok(res.stderr.includes('boolean'), 'Should mention boolean type');
  cleanupTestDir(dir);
}) ? passed++ : failed++;
test('rejects hook with negative timeout value', () => {
  const dir = createTestDir();
  const hooksFile = path.join(dir, 'hooks.json');
  const payload = {
    PostToolUse: [{
      matcher: 'Edit',
      // timeout must be non-negative — -5 is invalid
      hooks: [{ type: 'intercept', command: 'echo test', timeout: -5 }]
    }]
  };
  fs.writeFileSync(hooksFile, JSON.stringify(payload));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
  assert.strictEqual(res.code, 1, 'Should fail on negative timeout');
  assert.ok(res.stderr.includes('timeout'), 'Should mention timeout in error');
  assert.ok(res.stderr.includes('non-negative'), 'Should mention non-negative');
  cleanupTestDir(dir);
}) ? passed++ : failed++;
// ── Round 73: validate-commands.js skill directory statSync catch ──
console.log('\nRound 73: validate-commands.js (unreadable skill entry — statSync catch):');
if (test('skips unreadable skill directory entries without error (broken symlink)', () => {
  const testDir = createTestDir();
  const agentsDir = createTestDir();
  const skillsDir = createTestDir();
  // Create one valid skill directory and one broken symlink
  const validSkill = path.join(skillsDir, 'valid-skill');
  fs.mkdirSync(validSkill, { recursive: true });
  // Broken symlink: target does not exist — statSync will throw ENOENT
  const brokenLink = path.join(skillsDir, 'broken-skill');
  try {
    fs.symlinkSync('/nonexistent/target/path', brokenLink);
  } catch {
    // FIX: symlinkSync was previously unguarded and would throw on systems
    // without symlink support (e.g. unprivileged Windows), aborting the whole
    // run. Guard it like the Round 57 symlink test does and skip instead.
    console.log(' (skipped — symlinks not supported)');
    cleanupTestDir(testDir);
    cleanupTestDir(agentsDir);
    fs.rmSync(skillsDir, { recursive: true, force: true });
    return;
  }
  // Command that references the valid skill (should resolve)
  fs.writeFileSync(path.join(testDir, 'cmd.md'),
    '# Command\nSee skills/valid-skill/ for details.');
  const result = runValidatorWithDirs('validate-commands', {
    COMMANDS_DIR: testDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
  });
  assert.strictEqual(result.code, 0,
    'Should pass — broken symlink in skills dir should be skipped silently');
  // The broken-skill should NOT be in validSkills, so referencing it would warn
  // but the valid-skill reference should resolve fine
  cleanupTestDir(testDir);
  cleanupTestDir(agentsDir);
  // rmSync (not cleanupTestDir) because the dir contains a dangling symlink
  fs.rmSync(skillsDir, { recursive: true, force: true });
})) passed++; else failed++;
// ── Round 76: validate-hooks.js invalid JSON in hooks.json ──
console.log('\nRound 76: validate-hooks.js (invalid JSON in hooks.json):');
// Malformed JSON must produce a clear "Invalid JSON" error and exit 1.
test('reports error for invalid JSON in hooks.json', () => {
  const dir = createTestDir();
  const hooksFile = path.join(dir, 'hooks.json');
  fs.writeFileSync(hooksFile, '{not valid json!!!');
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
  assert.strictEqual(res.code, 1,
    `Expected exit 1 for invalid JSON, got ${res.code}`);
  assert.ok(res.stderr.includes('Invalid JSON'),
    `stderr should mention Invalid JSON, got: ${res.stderr}`);
  cleanupTestDir(dir);
}) ? passed++ : failed++;
// ── Round 78: validate-hooks.js wrapped { hooks: { ... } } format ──
console.log('\nRound 78: validate-hooks.js (wrapped hooks format):');
// The production hooks.json nests the event types under a top-level "hooks"
// key — data.hooks holds the event map, not data itself.
test('validates wrapped format { hooks: { PreToolUse: [...] } }', () => {
  const dir = createTestDir();
  const hooksFile = path.join(dir, 'hooks.json');
  const payload = {
    "$schema": "https://json.schemastore.org/claude-code-settings.json",
    hooks: {
      PreToolUse: [{ matcher: 'Write', hooks: [{ type: 'command', command: 'echo ok' }] }],
      PostToolUse: [{ matcher: 'Read', hooks: [{ type: 'command', command: 'echo done' }] }]
    }
  };
  fs.writeFileSync(hooksFile, JSON.stringify(payload));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksFile);
  assert.strictEqual(res.code, 0,
    `Should pass wrapped hooks format, got exit ${res.code}. stderr: ${res.stderr}`);
  assert.ok(res.stdout.includes('Validated 2'),
    `Should validate 2 matchers, got: ${res.stdout}`);
  cleanupTestDir(dir);
}) ? passed++ : failed++;
// ── Round 79: validate-commands.js warnings count suffix in output ──
console.log('\nRound 79: validate-commands.js (warnings count in output):');
// Two references to skill directories that do not exist → two WARNs (not
// errors), so exit 0 with a "(2 warnings)" suffix on the summary line.
test('output includes (N warnings) suffix when skill references produce warnings', () => {
  const commandsDir = createTestDir();
  const agentsDir = createTestDir();
  const skillsDir = createTestDir();
  fs.writeFileSync(path.join(commandsDir, 'cmd-warn.md'),
    '# Command\nSee skills/fake-skill-a/ and skills/fake-skill-b/ for details.');
  const res = runValidatorWithDirs('validate-commands', {
    COMMANDS_DIR: commandsDir, AGENTS_DIR: agentsDir, SKILLS_DIR: skillsDir
  });
  assert.strictEqual(res.code, 0, 'Skill warnings should not cause error exit');
  assert.ok(res.stdout.includes('(2 warnings)'),
    `Output should include "(2 warnings)" suffix, got: ${res.stdout}`);
  [commandsDir, agentsDir, skillsDir].forEach(cleanupTestDir);
}) ? passed++ : failed++;
// ── Round 80: validate-hooks.js legacy array format (lines 115-135) ──
console.log('\nRound 80: validate-hooks.js (legacy array format):');
// Legacy format: { hooks: [...] } where the array holds matcher objects
// directly. Exercises lines 115-135 of validate-hooks.js, which label errors
// "Hook ${i}" instead of "${eventType}[${i}]".
test('validates hooks in legacy array format (hooks is an array, not object)', () => {
  const dir = createTestDir();
  const hooksPath = path.join(dir, 'hooks.json');
  fs.writeFileSync(hooksPath, JSON.stringify({
    hooks: [
      { matcher: 'Edit', hooks: [{ type: 'command', command: 'echo legacy test' }] }
    ]
  }));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksPath);
  assert.strictEqual(res.code, 0, 'Should pass on valid legacy array format');
  assert.ok(res.stdout.includes('Validated 1 hook'),
    `Should report 1 validated matcher, got: ${res.stdout}`);
  cleanupTestDir(dir);
}) ? passed++ : failed++;
// ── Round 82: Notification and SubagentStop event types ──
console.log('\nRound 82: validate-hooks (Notification and SubagentStop event types):');
test('accepts Notification and SubagentStop as valid event types', () => {
  const dir = createTestDir();
  const hooksPath = path.join(dir, 'hooks.json');
  const payload = {
    hooks: [
      {
        matcher: { type: 'Notification' },
        hooks: [{ type: 'command', command: 'echo notification' }]
      },
      {
        matcher: { type: 'SubagentStop' },
        hooks: [{ type: 'command', command: 'echo subagent stopped' }]
      }
    ]
  };
  fs.writeFileSync(hooksPath, JSON.stringify(payload));
  const res = runValidatorWithDir('validate-hooks', 'HOOKS_FILE', hooksPath);
  assert.strictEqual(res.code, 0, 'Should pass with Notification and SubagentStop events');
  assert.ok(res.stdout.includes('Validated 2 hook'),
    `Should report 2 validated matchers, got: ${res.stdout}`);
  cleanupTestDir(dir);
}) ? passed++ : failed++;
// ── Round 83: validate-agents whitespace-only field, validate-skills empty SKILL.md ──
console.log('\nRound 83: validate-agents (whitespace-only frontmatter field value):');
// "model: " parses to a whitespace-only value; the validator's trim guard
// (typeof frontmatter[field] === 'string' && !frontmatter[field].trim())
// treats it as a missing required field.
test('rejects agent with whitespace-only model field (trim guard)', () => {
  const agentsDir = createTestDir();
  fs.writeFileSync(path.join(agentsDir, 'ws.md'), '---\nmodel: \ntools: Read\n---\n# Whitespace model');
  const res = runValidatorWithDir('validate-agents', 'AGENTS_DIR', agentsDir);
  assert.strictEqual(res.code, 1, 'Should reject whitespace-only model');
  assert.ok(res.stderr.includes('model'), 'Should report missing model field');
  assert.ok(!res.stderr.includes('tools'), 'tools field is valid and should NOT be flagged');
  cleanupTestDir(agentsDir);
}) ? passed++ : failed++;
console.log('\nRound 83: validate-skills (empty SKILL.md file):');
test('rejects skill directory with empty SKILL.md file', () => {
  const skillsDir = createTestDir();
  const emptySkill = path.join(skillsDir, 'empty-skill');
  fs.mkdirSync(emptySkill, { recursive: true });
  // Whitespace only — trims to zero length, so the file counts as empty.
  fs.writeFileSync(path.join(emptySkill, 'SKILL.md'), ' \n \n');
  const res = runValidatorWithDir('validate-skills', 'SKILLS_DIR', skillsDir);
  assert.strictEqual(res.code, 1, 'Should reject empty SKILL.md');
  assert.ok(res.stderr.includes('Empty file'),
    `Should report "Empty file", got: ${res.stderr}`);
  cleanupTestDir(skillsDir);
}) ? passed++ : failed++;
// Final tally — exit non-zero when any test failed so CI can gate on it.
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
const exitCode = failed > 0 ? 1 : 0;
process.exit(exitCode);

View File

@@ -11,7 +11,7 @@ const assert = require('assert');
const path = require('path');
const fs = require('fs');
const os = require('os');
const { spawnSync, execFileSync } = require('child_process');
const { spawnSync } = require('child_process');
const evaluateScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'evaluate-session.js');
@@ -258,6 +258,159 @@ function runTests() {
assert.strictEqual(result.status, 0, 'Should exit 0 on empty stdin');
})) passed++; else failed++;
// ── Round 53: env var fallback path ──
console.log('\nRound 53: CLAUDE_TRANSCRIPT_PATH fallback:');
if (test('falls back to CLAUDE_TRANSCRIPT_PATH env var when stdin is invalid JSON', () => {
const testDir = createTestDir();
const transcript = createTranscript(testDir, 15);
const result = spawnSync('node', [evaluateScript], {
encoding: 'utf8',
input: 'invalid json {{{',
timeout: 10000,
env: { ...process.env, CLAUDE_TRANSCRIPT_PATH: transcript }
});
assert.strictEqual(result.status, 0, 'Should exit 0');
assert.ok(
result.stderr.includes('15 messages'),
'Should evaluate using env var fallback path'
);
assert.ok(
result.stderr.includes('evaluate'),
'Should indicate session evaluation'
);
cleanupTestDir(testDir);
})) passed++; else failed++;
// ── Round 65: regex whitespace tolerance in countInFile ──
console.log('\nRound 65: regex whitespace tolerance around colon:');
if (test('counts user messages when JSON has spaces around colon ("type" : "user")', () => {
const testDir = createTestDir();
const filePath = path.join(testDir, 'spaced.jsonl');
// Manually write JSON with spaces around the colon — NOT JSON.stringify
// The regex /"type"\s*:\s*"user"/g should match these
const lines = [];
for (let i = 0; i < 12; i++) {
lines.push(`{"type" : "user", "content": "msg ${i}"}`);
lines.push(`{"type" : "assistant", "content": "resp ${i}"}`);
}
fs.writeFileSync(filePath, lines.join('\n') + '\n');
const result = runEvaluate({ transcript_path: filePath });
assert.strictEqual(result.code, 0);
// 12 user messages >= 10 threshold → should evaluate (not "too short")
assert.ok(!result.stderr.includes('too short'),
'Should NOT say too short for 12 spaced-colon user messages');
assert.ok(
result.stderr.includes('12 messages') || result.stderr.includes('evaluate'),
`Should evaluate session with spaced-colon JSON. Got stderr: ${result.stderr}`
);
cleanupTestDir(testDir);
})) passed++; else failed++;
// ── Round 85: config file parse error (corrupt JSON) ──
console.log('\nRound 85: config parse error catch block:');
if (test('falls back to defaults when config file contains invalid JSON', () => {
// The evaluate-session.js script reads config from:
// path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json')
// where __dirname = scripts/hooks/ → config = repo_root/skills/continuous-learning/config.json
const configPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json');
let originalContent = null;
try {
originalContent = fs.readFileSync(configPath, 'utf8');
} catch {
// Config file may not exist — that's fine
}
try {
// Write corrupt JSON to the config file
fs.writeFileSync(configPath, 'NOT VALID JSON {{{ corrupt data !!!', 'utf8');
// Create a transcript with 12 user messages (above default threshold of 10)
const testDir = createTestDir();
const transcript = createTranscript(testDir, 12);
const result = runEvaluate({ transcript_path: transcript });
assert.strictEqual(result.code, 0, 'Should exit 0 despite corrupt config');
// With corrupt config, defaults apply: min_session_length = 10
// 12 >= 10 → should evaluate (not "too short")
assert.ok(!result.stderr.includes('too short'),
`Should NOT say too short — corrupt config falls back to default min=10. Got: ${result.stderr}`);
assert.ok(
result.stderr.includes('12 messages') || result.stderr.includes('evaluate'),
`Should evaluate with 12 messages using default threshold. Got: ${result.stderr}`
);
// The catch block logs "Failed to parse config" — verify that log message
assert.ok(result.stderr.includes('Failed to parse config'),
`Should log config parse error. Got: ${result.stderr}`);
cleanupTestDir(testDir);
} finally {
// Restore original config file
if (originalContent !== null) {
fs.writeFileSync(configPath, originalContent, 'utf8');
} else {
// Config didn't exist before — remove the corrupt one we created
try { fs.unlinkSync(configPath); } catch { /* best-effort */ }
}
}
})) passed++; else failed++;
// ── Round 86: config learned_skills_path override with ~ expansion ──
console.log('\nRound 86: config learned_skills_path override:');
if (test('uses learned_skills_path from config with ~ expansion', () => {
// evaluate-session.js lines 69-72:
// if (config.learned_skills_path) {
// learnedSkillsPath = config.learned_skills_path.replace(/^~/, require('os').homedir());
// }
// This branch was never tested — only the parse error (Round 85) and default path.
const configPath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning', 'config.json');
let originalContent = null;
try {
originalContent = fs.readFileSync(configPath, 'utf8');
} catch {
// Config file may not exist
}
try {
// Write config with a custom learned_skills_path using ~ prefix
fs.writeFileSync(configPath, JSON.stringify({
min_session_length: 10,
learned_skills_path: '~/custom-learned-skills-dir'
}));
// Create a transcript with 12 user messages (above threshold)
const testDir = createTestDir();
const transcript = createTranscript(testDir, 12);
const result = runEvaluate({ transcript_path: transcript });
assert.strictEqual(result.code, 0, 'Should exit 0');
// The script logs "Save learned skills to: <path>" where <path> should
// be the expanded home directory, NOT the literal "~"
assert.ok(!result.stderr.includes('~/custom-learned-skills-dir'),
'Should NOT contain literal ~ in output (should be expanded)');
assert.ok(result.stderr.includes('custom-learned-skills-dir'),
`Should reference the custom learned skills dir. Got: ${result.stderr}`);
// The ~ should have been replaced with os.homedir()
assert.ok(result.stderr.includes(os.homedir()),
`Should contain expanded home directory. Got: ${result.stderr}`);
cleanupTestDir(testDir);
} finally {
// Restore original config file
if (originalContent !== null) {
fs.writeFileSync(configPath, originalContent, 'utf8');
} else {
try { fs.unlinkSync(configPath); } catch { /* best-effort */ }
}
}
})) passed++; else failed++;
// Summary
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);

File diff suppressed because it is too large Load Diff

View File

@@ -19,11 +19,11 @@ const compactScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'sugg
function test(name, fn) {
try {
fn();
console.log(` \u2713 ${name}`);
console.log(` \u2713 ${name}`);
return true;
} catch (err) {
console.log(` \u2717 ${name}`);
console.log(` Error: ${err.message}`);
} catch (_err) {
console.log(` \u2717 ${name}`);
console.log(` Error: ${_err.message}`);
return false;
}
}
@@ -66,7 +66,11 @@ function runTests() {
// Cleanup helper
function cleanupCounter() {
try { fs.unlinkSync(counterFile); } catch {}
try {
fs.unlinkSync(counterFile);
} catch (_err) {
// Ignore error
}
}
// Basic functionality
@@ -80,7 +84,8 @@ function runTests() {
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Counter should be 1 after first run');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('increments counter on subsequent runs', () => {
cleanupCounter();
@@ -90,7 +95,8 @@ function runTests() {
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 3, 'Counter should be 3 after three runs');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// Threshold suggestion
console.log('\nThreshold suggestion:');
@@ -106,7 +112,8 @@ function runTests() {
`Should suggest compact at threshold. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('does NOT suggest compact before threshold', () => {
cleanupCounter();
@@ -117,7 +124,8 @@ function runTests() {
'Should NOT suggest compact before threshold'
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// Interval suggestion (every 25 calls after threshold)
console.log('\nInterval suggestion:');
@@ -135,7 +143,8 @@ function runTests() {
`Should suggest at threshold+25 interval. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// Environment variable handling
console.log('\nEnvironment variable handling:');
@@ -151,7 +160,8 @@ function runTests() {
`Should use default threshold of 50. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('ignores invalid COMPACT_THRESHOLD (negative)', () => {
cleanupCounter();
@@ -163,7 +173,8 @@ function runTests() {
`Should fallback to 50 for negative threshold. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('ignores non-numeric COMPACT_THRESHOLD', () => {
cleanupCounter();
@@ -175,7 +186,8 @@ function runTests() {
`Should fallback to 50 for non-numeric threshold. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// Corrupted counter file
console.log('\nCorrupted counter file:');
@@ -189,7 +201,8 @@ function runTests() {
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Should reset to 1 on corrupted file');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('resets counter on extremely large value', () => {
cleanupCounter();
@@ -200,7 +213,8 @@ function runTests() {
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Should reset to 1 for value > 1000000');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('handles empty counter file', () => {
cleanupCounter();
@@ -211,7 +225,8 @@ function runTests() {
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Should start at 1 for empty file');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// Session isolation
console.log('\nSession isolation:');
@@ -230,10 +245,11 @@ function runTests() {
assert.strictEqual(countA, 2, 'Session A should have count 2');
assert.strictEqual(countB, 1, 'Session B should have count 1');
} finally {
try { fs.unlinkSync(fileA); } catch {}
try { fs.unlinkSync(fileB); } catch {}
try { fs.unlinkSync(fileA); } catch (_err) { /* ignore */ }
try { fs.unlinkSync(fileB); } catch (_err) { /* ignore */ }
}
})) passed++; else failed++;
})) passed++;
else failed++;
// Always exits 0
console.log('\nExit code:');
@@ -243,7 +259,8 @@ function runTests() {
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
assert.strictEqual(result.code, 0, 'Should always exit 0');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// ── Round 29: threshold boundary values ──
console.log('\nThreshold boundary values:');
@@ -258,7 +275,8 @@ function runTests() {
`Should fallback to 50 for threshold=0. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('accepts COMPACT_THRESHOLD=10000 (boundary max)', () => {
cleanupCounter();
@@ -270,7 +288,8 @@ function runTests() {
`Should accept threshold=10000. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('rejects COMPACT_THRESHOLD=10001 (falls back to 50)', () => {
cleanupCounter();
@@ -282,7 +301,8 @@ function runTests() {
`Should fallback to 50 for threshold=10001. Got stderr: ${result.stderr}`
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('rejects float COMPACT_THRESHOLD (e.g. 3.5)', () => {
cleanupCounter();
@@ -297,29 +317,58 @@ function runTests() {
'Float threshold should be parseInt-ed to 3, no suggestion at count=50'
);
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('counter value at exact boundary 1000000 is valid', () => {
cleanupCounter();
fs.writeFileSync(counterFile, '999999');
const result = runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
runCompact({ CLAUDE_SESSION_ID: testSession, COMPACT_THRESHOLD: '3' });
// 999999 is valid (> 0, <= 1000000), count becomes 1000000
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1000000, 'Counter at 1000000 boundary should be valid');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
if (test('counter value at 1000001 is clamped (reset to 1)', () => {
cleanupCounter();
fs.writeFileSync(counterFile, '1000001');
const result = runCompact({ CLAUDE_SESSION_ID: testSession });
runCompact({ CLAUDE_SESSION_ID: testSession });
const count = parseInt(fs.readFileSync(counterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Counter > 1000000 should be reset to 1');
cleanupCounter();
})) passed++; else failed++;
})) passed++;
else failed++;
// ── Round 64: default session ID fallback ──
console.log('\nDefault session ID fallback (Round 64):');
if (test('uses "default" session ID when CLAUDE_SESSION_ID is empty', () => {
const defaultCounterFile = getCounterFilePath('default');
try { fs.unlinkSync(defaultCounterFile); } catch (_err) { /* ignore */ }
try {
// Pass empty CLAUDE_SESSION_ID — falsy, so script uses 'default'
const env = { ...process.env, CLAUDE_SESSION_ID: '' };
const result = spawnSync('node', [compactScript], {
encoding: 'utf8',
input: '{}',
timeout: 10000,
env,
});
assert.strictEqual(result.status || 0, 0, 'Should exit 0');
assert.ok(fs.existsSync(defaultCounterFile), 'Counter file should use "default" session ID');
const count = parseInt(fs.readFileSync(defaultCounterFile, 'utf8').trim(), 10);
assert.strictEqual(count, 1, 'Counter should be 1 for first run with default session');
} finally {
try { fs.unlinkSync(defaultCounterFile); } catch (_err) { /* ignore */ }
}
})) passed++;
else failed++;
// Summary
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
console.log(`
Results: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);
}

View File

@@ -262,8 +262,13 @@ async function runTests() {
});
});
assert.ok(stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
assert.strictEqual(code, 2, 'Blocking hook should exit with code 2');
// Hook only blocks on non-Windows platforms (tmux is Unix-only)
if (process.platform === 'win32') {
assert.strictEqual(code, 0, 'On Windows, hook should not block (exit 0)');
} else {
assert.ok(stderr.includes('BLOCKED'), 'Blocking hook should output BLOCKED');
assert.strictEqual(code, 2, 'Blocking hook should exit with code 2');
}
})) passed++; else failed++;
// ==========================================
@@ -298,7 +303,12 @@ async function runTests() {
});
});
assert.strictEqual(code, 2, 'Blocking hook should exit 2');
// Hook only blocks on non-Windows platforms (tmux is Unix-only)
if (process.platform === 'win32') {
assert.strictEqual(code, 0, 'On Windows, hook should not block (exit 0)');
} else {
assert.strictEqual(code, 2, 'Blocking hook should exit 2');
}
})) passed++; else failed++;
if (await asyncTest('hooks handle missing files gracefully', async () => {
@@ -622,6 +632,76 @@ async function runTests() {
assert.strictEqual(code, 0, 'Should not crash on truncated JSON');
})) passed++; else failed++;
// ==========================================
// Round 51: Timeout Enforcement
// ==========================================
console.log('\nRound 51: Timeout Enforcement:');
if (await asyncTest('runHookWithInput kills hanging hooks after timeout', async () => {
const testDir = createTestDir();
const hangingHookPath = path.join(testDir, 'hanging-hook.js');
fs.writeFileSync(hangingHookPath, 'setInterval(() => {}, 100);');
try {
const startTime = Date.now();
let error = null;
try {
await runHookWithInput(hangingHookPath, {}, {}, 500);
} catch (err) {
error = err;
}
const elapsed = Date.now() - startTime;
assert.ok(error, 'Should throw timeout error');
assert.ok(error.message.includes('timed out'), 'Error should mention timeout');
assert.ok(elapsed >= 450, `Should wait at least ~500ms, waited ${elapsed}ms`);
assert.ok(elapsed < 2000, `Should not wait much longer than 500ms, waited ${elapsed}ms`);
} finally {
cleanupTestDir(testDir);
}
})) passed++; else failed++;
// ==========================================
// Round 51: hooks.json Schema Validation
// ==========================================
console.log('\nRound 51: hooks.json Schema Validation:');
if (await asyncTest('hooks.json async hook has valid timeout field', async () => {
const asyncHook = hooks.hooks.PostToolUse.find(h =>
h.hooks && h.hooks[0] && h.hooks[0].async === true
);
assert.ok(asyncHook, 'Should have at least one async hook defined');
assert.strictEqual(asyncHook.hooks[0].async, true, 'async field should be true');
assert.ok(asyncHook.hooks[0].timeout, 'Should have timeout field');
assert.strictEqual(typeof asyncHook.hooks[0].timeout, 'number', 'Timeout should be a number');
assert.ok(asyncHook.hooks[0].timeout > 0, 'Timeout should be positive');
const match = asyncHook.hooks[0].command.match(/^node -e "(.+)"$/s);
assert.ok(match, 'Async hook command should be node -e format');
})) passed++; else failed++;
if (await asyncTest('all hook commands in hooks.json are valid format', async () => {
for (const [hookType, hookArray] of Object.entries(hooks.hooks)) {
for (const hookDef of hookArray) {
assert.ok(hookDef.hooks, `${hookType} entry should have hooks array`);
for (const hook of hookDef.hooks) {
assert.ok(hook.command, `Hook in ${hookType} should have command field`);
const isInline = hook.command.startsWith('node -e');
const isFilePath = hook.command.startsWith('node "');
assert.ok(
isInline || isFilePath,
`Hook command in ${hookType} should be inline (node -e) or file path (node "), got: ${hook.command.substring(0, 50)}`
);
}
}
}
})) passed++; else failed++;
// Summary
console.log('\n=== Test Results ===');
console.log(`Passed: ${passed}`);

File diff suppressed because it is too large Load Diff

View File

@@ -787,7 +787,6 @@ function runTests() {
// Verify the file exists
const aliasesPath = path.join(tmpHome, '.claude', 'session-aliases.json');
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist');
const contentBefore = fs.readFileSync(aliasesPath, 'utf8');
// Attempt to save circular data — will fail
const circular = { aliases: {}, metadata: {} };
@@ -839,6 +838,989 @@ function runTests() {
// best-effort
}
// ── Round 48: rapid sequential saves data integrity ──
console.log('\nRound 48: rapid sequential saves:');
if (test('rapid sequential setAlias calls maintain data integrity', () => {
resetAliases();
for (let i = 0; i < 5; i++) {
const result = aliases.setAlias(`rapid-${i}`, `/path/${i}`, `Title ${i}`);
assert.strictEqual(result.success, true, `setAlias rapid-${i} should succeed`);
}
const data = aliases.loadAliases();
for (let i = 0; i < 5; i++) {
assert.ok(data.aliases[`rapid-${i}`], `rapid-${i} should exist after all saves`);
assert.strictEqual(data.aliases[`rapid-${i}`].sessionPath, `/path/${i}`);
}
assert.strictEqual(data.metadata.totalCount, 5, 'Metadata count should match actual aliases');
})) passed++; else failed++;
// ── Round 56: Windows platform unlink-before-rename code path ──
console.log('\nRound 56: Windows platform atomic write path:');
if (test('Windows platform mock: unlinks existing file before rename', () => {
resetAliases();
// First create an alias so the file exists
const r1 = aliases.setAlias('win-initial', '2026-01-01-abc123-session.tmp');
assert.strictEqual(r1.success, true, 'Initial alias should succeed');
const aliasesPath = aliases.getAliasesPath();
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist before win32 test');
// Mock process.platform to 'win32' to trigger the unlink-before-rename path
const origPlatform = Object.getOwnPropertyDescriptor(process, 'platform');
Object.defineProperty(process, 'platform', { value: 'win32', configurable: true });
try {
// This save triggers the Windows code path: unlink existing → rename temp
const r2 = aliases.setAlias('win-updated', '2026-02-01-def456-session.tmp');
assert.strictEqual(r2.success, true, 'setAlias should succeed under win32 mock');
// Verify data integrity after the Windows path
assert.ok(fs.existsSync(aliasesPath), 'Aliases file should exist after win32 save');
const data = aliases.loadAliases();
assert.ok(data.aliases['win-initial'], 'Original alias should still exist');
assert.ok(data.aliases['win-updated'], 'New alias should exist');
assert.strictEqual(data.aliases['win-updated'].sessionPath,
'2026-02-01-def456-session.tmp', 'Session path should match');
// No .tmp or .bak files left behind
assert.ok(!fs.existsSync(aliasesPath + '.tmp'), 'No temp file should remain');
assert.ok(!fs.existsSync(aliasesPath + '.bak'), 'No backup file should remain');
} finally {
// Restore original platform descriptor
if (origPlatform) {
Object.defineProperty(process, 'platform', origPlatform);
}
resetAliases();
}
})) passed++; else failed++;
// ── Round 64: loadAliases backfills missing version and metadata ──
console.log('\nRound 64: loadAliases version/metadata backfill:');
if (test('loadAliases backfills missing version and metadata fields', () => {
resetAliases();
const aliasesPath = aliases.getAliasesPath();
// Write a file with valid aliases but NO version and NO metadata
fs.writeFileSync(aliasesPath, JSON.stringify({
aliases: {
'backfill-test': {
sessionPath: '/sessions/backfill',
createdAt: '2026-01-15T00:00:00.000Z',
updatedAt: '2026-01-15T00:00:00.000Z',
title: 'Backfill Test'
}
}
}));
const data = aliases.loadAliases();
// Version should be backfilled to ALIAS_VERSION ('1.0')
assert.strictEqual(data.version, '1.0', 'Should backfill missing version to 1.0');
// Metadata should be backfilled with totalCount from aliases
assert.ok(data.metadata, 'Should backfill missing metadata object');
assert.strictEqual(data.metadata.totalCount, 1, 'Metadata totalCount should match alias count');
assert.ok(data.metadata.lastUpdated, 'Metadata should have lastUpdated');
// Alias data should be preserved
assert.ok(data.aliases['backfill-test'], 'Alias data should be preserved');
assert.strictEqual(data.aliases['backfill-test'].sessionPath, '/sessions/backfill');
resetAliases();
})) passed++; else failed++;
// ── Round 67: loadAliases empty file, resolveSessionAlias null, metadata-only backfill ──
console.log('\nRound 67: loadAliases (empty 0-byte file):');
if (test('loadAliases returns default structure for empty (0-byte) file', () => {
resetAliases();
const aliasesPath = aliases.getAliasesPath();
// Write a 0-byte file — readFile returns '', which is falsy → !content branch
fs.writeFileSync(aliasesPath, '');
const data = aliases.loadAliases();
assert.ok(data.aliases, 'Should have aliases key');
assert.strictEqual(Object.keys(data.aliases).length, 0, 'Should have no aliases');
assert.strictEqual(data.version, '1.0', 'Should have default version');
assert.ok(data.metadata, 'Should have metadata');
assert.strictEqual(data.metadata.totalCount, 0, 'Should have totalCount 0');
resetAliases();
})) passed++; else failed++;
console.log('\nRound 67: resolveSessionAlias (null/falsy input):');
if (test('resolveSessionAlias returns null when given null input', () => {
resetAliases();
const result = aliases.resolveSessionAlias(null);
assert.strictEqual(result, null, 'Should return null for null input');
})) passed++; else failed++;
console.log('\nRound 67: loadAliases (metadata-only backfill, version present):');
if (test('loadAliases backfills only metadata when version already present', () => {
resetAliases();
const aliasesPath = aliases.getAliasesPath();
// Write a file WITH version but WITHOUT metadata
fs.writeFileSync(aliasesPath, JSON.stringify({
version: '1.0',
aliases: {
'meta-only': {
sessionPath: '/sessions/meta-only',
createdAt: '2026-01-20T00:00:00.000Z',
updatedAt: '2026-01-20T00:00:00.000Z',
title: 'Metadata Only Test'
}
}
}));
const data = aliases.loadAliases();
// Version should remain as-is (NOT overwritten)
assert.strictEqual(data.version, '1.0', 'Version should remain 1.0');
// Metadata should be backfilled
assert.ok(data.metadata, 'Should backfill missing metadata');
assert.strictEqual(data.metadata.totalCount, 1, 'Metadata totalCount should be 1');
assert.ok(data.metadata.lastUpdated, 'Metadata should have lastUpdated');
// Alias data should be preserved
assert.ok(data.aliases['meta-only'], 'Alias should be preserved');
assert.strictEqual(data.aliases['meta-only'].title, 'Metadata Only Test');
resetAliases();
})) passed++; else failed++;
// ── Round 70: updateAliasTitle save failure path ──
console.log('\nupdateAliasTitle save failure (Round 70):');
if (test('updateAliasTitle returns failure when saveAliases fails (read-only dir)', () => {
if (process.platform === 'win32' || process.getuid?.() === 0) {
console.log(' (skipped — chmod ineffective on Windows/root)');
return;
}
// Use a fresh isolated HOME to avoid .tmp/.bak leftovers from other tests.
// On macOS, overwriting an EXISTING file in a read-only dir succeeds,
// so we must start clean with ONLY the .json file present.
const isoHome = path.join(os.tmpdir(), `ecc-alias-r70-${Date.now()}`);
const isoClaudeDir = path.join(isoHome, '.claude');
fs.mkdirSync(isoClaudeDir, { recursive: true });
const savedHome = process.env.HOME;
const savedProfile = process.env.USERPROFILE;
try {
process.env.HOME = isoHome;
process.env.USERPROFILE = isoHome;
// Re-require to pick up new HOME
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
const freshAliases = require('../../scripts/lib/session-aliases');
// Set up a valid alias
freshAliases.setAlias('title-save-fail', '/path/session', 'Original Title');
// Verify no leftover .tmp/.bak
const ap = freshAliases.getAliasesPath();
assert.ok(fs.existsSync(ap), 'Alias file should exist after setAlias');
// Make .claude dir read-only so saveAliases fails when creating .bak
fs.chmodSync(isoClaudeDir, 0o555);
const result = freshAliases.updateAliasTitle('title-save-fail', 'New Title');
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
assert.ok(result.error.includes('Failed to update alias title'),
`Should return save failure error, got: ${result.error}`);
} finally {
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
process.env.HOME = savedHome;
process.env.USERPROFILE = savedProfile;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
fs.rmSync(isoHome, { recursive: true, force: true });
}
})) passed++; else failed++;
// ── Round 72: deleteAlias save failure path ──
console.log('\nRound 72: deleteAlias (save failure):');
if (test('deleteAlias returns failure when saveAliases fails (read-only dir)', () => {
if (process.platform === 'win32' || process.getuid?.() === 0) {
console.log(' (skipped — chmod ineffective on Windows/root)');
return;
}
const isoHome = path.join(os.tmpdir(), `ecc-alias-r72-${Date.now()}`);
const isoClaudeDir = path.join(isoHome, '.claude');
fs.mkdirSync(isoClaudeDir, { recursive: true });
const savedHome = process.env.HOME;
const savedProfile = process.env.USERPROFILE;
try {
process.env.HOME = isoHome;
process.env.USERPROFILE = isoHome;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
const freshAliases = require('../../scripts/lib/session-aliases');
// Create an alias first (writes the file)
freshAliases.setAlias('to-delete', '/path/session', 'Test');
const ap = freshAliases.getAliasesPath();
assert.ok(fs.existsSync(ap), 'Alias file should exist after setAlias');
// Make .claude directory read-only — save will fail (can't create temp file)
fs.chmodSync(isoClaudeDir, 0o555);
const result = freshAliases.deleteAlias('to-delete');
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
assert.ok(result.error.includes('Failed to delete alias'),
`Should return delete failure error, got: ${result.error}`);
} finally {
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
process.env.HOME = savedHome;
process.env.USERPROFILE = savedProfile;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
fs.rmSync(isoHome, { recursive: true, force: true });
}
})) passed++; else failed++;
// ── Round 73: cleanupAliases save failure path ──
console.log('\nRound 73: cleanupAliases (save failure):');
if (test('cleanupAliases returns failure when saveAliases fails after removing aliases', () => {
if (process.platform === 'win32' || process.getuid?.() === 0) {
console.log(' (skipped — chmod ineffective on Windows/root)');
return;
}
const isoHome = path.join(os.tmpdir(), `ecc-alias-r73-cleanup-${Date.now()}`);
const isoClaudeDir = path.join(isoHome, '.claude');
fs.mkdirSync(isoClaudeDir, { recursive: true });
const savedHome = process.env.HOME;
const savedProfile = process.env.USERPROFILE;
try {
process.env.HOME = isoHome;
process.env.USERPROFILE = isoHome;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
const freshAliases = require('../../scripts/lib/session-aliases');
// Create aliases — one to keep, one to remove
freshAliases.setAlias('keep-me', '/sessions/real', 'Kept');
freshAliases.setAlias('remove-me', '/sessions/gone', 'Gone');
// Make .claude dir read-only so save will fail
fs.chmodSync(isoClaudeDir, 0o555);
// Cleanup: "gone" session doesn't exist, so remove-me should be removed
const result = freshAliases.cleanupAliases((p) => p === '/sessions/real');
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
assert.ok(result.error.includes('Failed to save after cleanup'),
`Should return cleanup save failure error, got: ${result.error}`);
assert.strictEqual(result.removed, 1, 'Should report 1 removed alias');
assert.ok(result.removedAliases.some(a => a.name === 'remove-me'),
'Should report remove-me in removedAliases');
} finally {
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
process.env.HOME = savedHome;
process.env.USERPROFILE = savedProfile;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
fs.rmSync(isoHome, { recursive: true, force: true });
}
})) passed++; else failed++;
// ── Round 73: setAlias save failure path ──
console.log('\nRound 73: setAlias (save failure):');
if (test('setAlias returns failure when saveAliases fails', () => {
if (process.platform === 'win32' || process.getuid?.() === 0) {
console.log(' (skipped — chmod ineffective on Windows/root)');
return;
}
const isoHome = path.join(os.tmpdir(), `ecc-alias-r73-set-${Date.now()}`);
const isoClaudeDir = path.join(isoHome, '.claude');
fs.mkdirSync(isoClaudeDir, { recursive: true });
const savedHome = process.env.HOME;
const savedProfile = process.env.USERPROFILE;
try {
process.env.HOME = isoHome;
process.env.USERPROFILE = isoHome;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
const freshAliases = require('../../scripts/lib/session-aliases');
// Make .claude dir read-only BEFORE any setAlias call
fs.chmodSync(isoClaudeDir, 0o555);
const result = freshAliases.setAlias('my-alias', '/sessions/test', 'Test');
assert.strictEqual(result.success, false, 'Should fail when save is blocked');
assert.ok(result.error.includes('Failed to save alias'),
`Should return save failure error, got: ${result.error}`);
} finally {
try { fs.chmodSync(isoClaudeDir, 0o755); } catch { /* best-effort */ }
process.env.HOME = savedHome;
process.env.USERPROFILE = savedProfile;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
fs.rmSync(isoHome, { recursive: true, force: true });
}
})) passed++; else failed++;
// ── Round 84: listAliases sort NaN date fallback (getTime() || 0) ──
console.log('\nRound 84: listAliases (NaN date fallback in sort comparator):');
if (test('listAliases sorts entries with invalid/missing dates to the end via || 0 fallback', () => {
// session-aliases.js line 257:
// (new Date(b.updatedAt || b.createdAt || 0).getTime() || 0) - ...
// When updatedAt and createdAt are both invalid strings, getTime() returns NaN.
// The outer || 0 converts NaN to 0 (epoch time), pushing the entry to the end.
resetAliases();
const data = aliases.loadAliases();
// Entry with valid dates — should sort first (newest)
data.aliases['valid-alias'] = {
sessionPath: '/sessions/valid',
createdAt: '2026-02-10T12:00:00.000Z',
updatedAt: '2026-02-10T12:00:00.000Z',
title: 'Valid'
};
// Entry with invalid date strings — getTime() → NaN → || 0 → epoch (oldest)
data.aliases['nan-alias'] = {
sessionPath: '/sessions/nan',
createdAt: 'not-a-date',
updatedAt: 'also-invalid',
title: 'NaN dates'
};
// Entry with missing date fields — undefined || undefined || 0 → new Date(0) → epoch
data.aliases['missing-alias'] = {
sessionPath: '/sessions/missing',
title: 'Missing dates'
// No createdAt or updatedAt
};
aliases.saveAliases(data);
const list = aliases.listAliases();
assert.strictEqual(list.length, 3, 'Should list all 3 aliases');
// Valid-dated entry should be first (newest by updatedAt)
assert.strictEqual(list[0].name, 'valid-alias',
'Entry with valid dates should sort first');
// The two invalid-dated entries sort to epoch (0), so they come after
assert.ok(
(list[1].name === 'nan-alias' || list[1].name === 'missing-alias') &&
(list[2].name === 'nan-alias' || list[2].name === 'missing-alias'),
'Entries with invalid/missing dates should sort to the end');
})) passed++; else failed++;
// ── Round 86: loadAliases with truthy non-object aliases field ──
console.log('\nRound 86: loadAliases (truthy non-object aliases field):');
if (test('loadAliases resets to defaults when aliases field is a string (typeof !== object)', () => {
// session-aliases.js line 58: if (!data.aliases || typeof data.aliases !== 'object')
// Previous tests covered !data.aliases (undefined) via { noAliasesKey: true }.
// This exercises the SECOND half: aliases is truthy but typeof !== 'object'.
const aliasesPath = aliases.getAliasesPath();
fs.writeFileSync(aliasesPath, JSON.stringify({
version: '1.0',
aliases: 'this-is-a-string-not-an-object',
metadata: { totalCount: 0 }
}));
const data = aliases.loadAliases();
assert.strictEqual(typeof data.aliases, 'object', 'Should reset aliases to object');
assert.ok(!Array.isArray(data.aliases), 'Should be a plain object, not array');
assert.strictEqual(Object.keys(data.aliases).length, 0, 'Should have no aliases');
assert.strictEqual(data.version, '1.0', 'Should have version');
resetAliases();
})) passed++; else failed++;
// ── Round 90: saveAliases backup restore double failure (inner catch restoreErr) ──
console.log('\nRound 90: saveAliases (backup restore double failure):');
if (test('saveAliases triggers inner restoreErr catch when both save and restore fail', () => {
// session-aliases.js lines 131-137: When saveAliases fails (outer catch),
// it tries to restore from backup. If the restore ALSO fails, the inner
// catch at line 135 logs restoreErr. No existing test creates this double-fault.
if (process.platform === 'win32') {
console.log(' (skipped — chmod not reliable on Windows)');
return;
}
const isoHome = path.join(os.tmpdir(), `ecc-r90-restore-fail-${Date.now()}`);
const claudeDir = path.join(isoHome, '.claude');
fs.mkdirSync(claudeDir, { recursive: true });
// Pre-create a backup file while directory is still writable
const backupPath = path.join(claudeDir, 'session-aliases.json.bak');
fs.writeFileSync(backupPath, JSON.stringify({ aliases: {}, version: '1.0' }));
// Make .claude directory read-only (0o555):
// 1. writeFileSync(tempPath) → EACCES (can't create file in read-only dir) — outer catch
// 2. copyFileSync(backupPath, aliasesPath) → EACCES (can't create target) — inner catch (line 135)
fs.chmodSync(claudeDir, 0o555);
const origH = process.env.HOME;
const origP = process.env.USERPROFILE;
process.env.HOME = isoHome;
process.env.USERPROFILE = isoHome;
try {
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
const freshAliases = require('../../scripts/lib/session-aliases');
const result = freshAliases.saveAliases({ aliases: { x: 1 }, version: '1.0' });
assert.strictEqual(result, false, 'Should return false when save fails');
// Backup should still exist (restore also failed, so backup was not consumed)
assert.ok(fs.existsSync(backupPath), 'Backup should still exist after double failure');
} finally {
process.env.HOME = origH;
process.env.USERPROFILE = origP;
delete require.cache[require.resolve('../../scripts/lib/session-aliases')];
delete require.cache[require.resolve('../../scripts/lib/utils')];
try { fs.chmodSync(claudeDir, 0o755); } catch { /* best-effort */ }
fs.rmSync(isoHome, { recursive: true, force: true });
}
})) passed++; else failed++;
// ── Round 95: renameAlias with same old and new name (self-rename) ──
console.log('\nRound 95: renameAlias (self-rename same name):');
if (test('renameAlias returns "already exists" error when renaming alias to itself', () => {
resetAliases();
// Create an alias first
const created = aliases.setAlias('self-rename', '/path/session', 'Self Rename');
assert.strictEqual(created.success, true, 'Setup: alias should be created');
// Attempt to rename to the same name
const result = aliases.renameAlias('self-rename', 'self-rename');
assert.strictEqual(result.success, false, 'Renaming to itself should fail');
assert.ok(result.error.includes('already exists'),
'Error should indicate alias already exists (line 333-334 check)');
// Verify original alias is still intact
const resolved = aliases.resolveAlias('self-rename');
assert.ok(resolved, 'Original alias should still exist after failed self-rename');
assert.strictEqual(resolved.sessionPath, '/path/session',
'Alias data should be preserved');
})) passed++; else failed++;
// ── Round 100: cleanupAliases callback returning falsy non-boolean 0 ──
console.log('\nRound 100: cleanupAliases (callback returns 0 — falsy non-boolean coercion):');
if (test('cleanupAliases removes alias when callback returns 0 (falsy coercion: !0 === true)', () => {
resetAliases();
aliases.setAlias('zero-test', '/sessions/some-session', '2026-01-15');
// callback returns 0 (a falsy value) — !0 === true → alias is removed
const result = aliases.cleanupAliases(() => 0);
assert.strictEqual(result.removed, 1,
'Alias should be removed because !0 === true (JavaScript falsy coercion)');
assert.strictEqual(result.success, true,
'Cleanup should succeed');
const resolved = aliases.resolveAlias('zero-test');
assert.strictEqual(resolved, null,
'Alias should no longer exist after removal');
})) passed++; else failed++;
// ── Round 102: setAlias with title=0 (falsy number coercion) ──
console.log('\nRound 102: setAlias (title=0 — falsy coercion silently converts to null):');
if (test('setAlias with title=0 stores null (0 || null === null due to JavaScript falsy coercion)', () => {
// session-aliases.js line 221: `title: title || null` — the value 0 is falsy
// in JavaScript, so `0 || null` evaluates to `null`. This means numeric
// titles like 0 are silently discarded.
resetAliases();
const result = aliases.setAlias('zero-title', '/sessions/test', 0);
assert.strictEqual(result.success, true,
'setAlias should succeed (0 is valid as a truthy check bypass)');
assert.strictEqual(result.title, null,
'Title should be null because 0 || null === null (falsy coercion)');
const resolved = aliases.resolveAlias('zero-title');
assert.strictEqual(resolved.title, null,
'Persisted title should be null after round-trip through saveAliases/loadAliases');
})) passed++; else failed++;
// ── Round 103: loadAliases with array aliases in JSON (typeof [] === 'object' bypass) ──
console.log('\nRound 103: loadAliases (array aliases — typeof bypass):');
if (test('loadAliases accepts array aliases because typeof [] === "object" passes validation', () => {
// session-aliases.js line 58: `typeof data.aliases !== 'object'` is the guard.
// Arrays are typeof 'object' in JavaScript, so {"aliases": [1,2,3]} passes
// validation. The returned data.aliases is an array, not a plain object.
// Downstream code (Object.keys, Object.entries, bracket access) behaves
// differently on arrays vs objects but doesn't crash — it just produces
// unexpected results like numeric string keys "0", "1", "2".
resetAliases();
const aliasesPath = aliases.getAliasesPath();
fs.writeFileSync(aliasesPath, JSON.stringify({
version: '1.0',
aliases: ['item0', 'item1', 'item2'],
metadata: { totalCount: 3, lastUpdated: new Date().toISOString() }
}));
const data = aliases.loadAliases();
// The array passes the typeof 'object' check and is returned as-is
assert.ok(Array.isArray(data.aliases),
'data.aliases should be an array (typeof [] === "object" bypasses guard)');
assert.strictEqual(data.aliases.length, 3,
'Array should have 3 elements');
// Object.keys on an array returns ["0", "1", "2"] — numeric index strings
const keys = Object.keys(data.aliases);
assert.deepStrictEqual(keys, ['0', '1', '2'],
'Object.keys of array returns numeric string indices, not named alias keys');
})) passed++; else failed++;
// ── Round 104: resolveSessionAlias with path-traversal input (passthrough without validation) ──
console.log('\nRound 104: resolveSessionAlias (path-traversal input — returned unchanged):');
if (test('resolveSessionAlias returns path-traversal input as-is when alias lookup fails', () => {
// session-aliases.js lines 365-374: resolveSessionAlias first tries resolveAlias(),
// which rejects '../etc/passwd' because the regex /^[a-zA-Z0-9_-]+$/ fails on dots
// and slashes (returns null). Then the function falls through to line 373:
// `return aliasOrId` — returning the potentially dangerous input unchanged.
// Callers that blindly use this return value could be at risk.
resetAliases();
const traversal = '../etc/passwd';
const result = aliases.resolveSessionAlias(traversal);
assert.strictEqual(result, traversal,
'Path-traversal input should be returned as-is (resolveAlias rejects it, fallback returns input)');
// Also test with another invalid alias pattern
const dotSlash = './../../secrets';
const result2 = aliases.resolveSessionAlias(dotSlash);
assert.strictEqual(result2, dotSlash,
'Another path-traversal pattern also returned unchanged');
})) passed++; else failed++;
// ── Round 107: setAlias with whitespace-only title (not trimmed unlike sessionPath) ──
// Documents an asymmetry: sessionPath is trim-validated but title is only
// checked for truthiness, so a whitespace-only title is stored verbatim.
console.log('\nRound 107: setAlias (whitespace-only title — truthy string stored as-is, unlike sessionPath which is trim-checked):');
if (test('setAlias stores whitespace-only title as-is (no trim validation, unlike sessionPath)', () => {
resetAliases();
// sessionPath with whitespace is rejected (line 195: sessionPath.trim().length === 0)
const pathResult = aliases.setAlias('ws-path', ' ');
assert.strictEqual(pathResult.success, false,
'Whitespace-only sessionPath is rejected by trim check');
// But title with whitespace is stored as-is (line 221: title || null — whitespace is truthy)
const titleResult = aliases.setAlias('ws-title', '/valid/path', ' ');
assert.strictEqual(titleResult.success, true,
'Whitespace-only title is accepted (no trim check on title)');
assert.strictEqual(titleResult.title, ' ',
'Title stored as whitespace string (truthy, so title || null returns the whitespace)');
// Verify persisted correctly
// NOTE(review): loadAliases() re-reads from disk, so this also confirms the
// whitespace survives JSON serialization, not just the in-memory result.
const loaded = aliases.loadAliases();
assert.strictEqual(loaded.aliases['ws-title'].title, ' ',
'Whitespace title persists in JSON as-is');
})) passed++; else failed++;
// ── Round 111: setAlias with exactly 128-character alias — off-by-one boundary ──
// NOTE(review): Round 120 below re-tests this same 128/129 boundary; the two
// rounds could be consolidated to keep the suite lean.
console.log('\nRound 111: setAlias (128-char alias — exact boundary of > 128 check):');
if (test('setAlias accepts alias of exactly 128 characters (128 is NOT > 128)', () => {
// session-aliases.js line 199: if (alias.length > 128)
// 128 is NOT > 128, so exactly 128 chars is ACCEPTED.
// Existing test only checks 129 (rejected).
resetAliases();
const alias128 = 'a'.repeat(128);
const result = aliases.setAlias(alias128, '/path/to/session');
assert.strictEqual(result.success, true,
'128-char alias should be accepted (128 is NOT > 128)');
assert.strictEqual(result.isNew, true);
// Verify it can be resolved
const resolved = aliases.resolveAlias(alias128);
assert.notStrictEqual(resolved, null, '128-char alias should be resolvable');
assert.strictEqual(resolved.sessionPath, '/path/to/session');
// Confirm 129 is rejected (boundary)
const result129 = aliases.setAlias('b'.repeat(129), '/path');
assert.strictEqual(result129.success, false, '129-char alias should be rejected');
assert.ok(result129.error.includes('128'),
'Error message should mention 128-char limit');
})) passed++; else failed++;
// ── Round 112: resolveAlias rejects Unicode characters in alias name ──
// The alias-name regex /^[a-zA-Z0-9_-]+$/ is ASCII-only, so every non-ASCII
// code point — accents, umlauts, CJK, emoji, even Cyrillic homoglyphs of
// Latin letters — must make resolveAlias return null.
console.log('\nRound 112: resolveAlias (Unicode rejection):');
if (test('resolveAlias returns null for alias names containing Unicode characters', () => {
  resetAliases();
  // Baseline: a plain ASCII alias resolves, proving the store itself works.
  aliases.setAlias('valid-alias', '/path/to/session');
  assert.notStrictEqual(aliases.resolveAlias('valid-alias'), null, 'Valid ASCII alias should resolve');
  // Each entry: [alias name containing a non-ASCII code point, assertion message].
  const unicodeNames = [
    ['café-session', 'Accented character "é" should be rejected by [a-zA-Z0-9_-]'],
    ['über-test', 'Umlaut "ü" should be rejected by [a-zA-Z0-9_-]'],
    ['会議-notes', 'CJK characters should be rejected'],
    ['rocket-🚀', 'Emoji should be rejected by the ASCII-only regex'],
    // 'е' below is Cyrillic U+0435, not Latin 'e'.
    ['tеst', 'Cyrillic homoglyph "е" (U+0435) should be rejected even though it looks like "e"'],
  ];
  for (const [name, message] of unicodeNames) {
    assert.strictEqual(aliases.resolveAlias(name), null, message);
  }
})) passed++; else failed++;
// ── Round 114: listAliases with non-string search (number) — TypeError on toLowerCase ──
// Pins current (arguably buggy) behavior: non-string search values crash
// instead of being coerced or rejected with a result object.
console.log('\nRound 114: listAliases (non-string search — number triggers TypeError):');
if (test('listAliases throws TypeError when search option is a number (no toLowerCase method)', () => {
resetAliases();
// Set up some aliases to search through
aliases.setAlias('alpha-session', '/path/to/alpha');
aliases.setAlias('beta-session', '/path/to/beta');
// String search works fine — baseline
const stringResult = aliases.listAliases({ search: 'alpha' });
assert.strictEqual(stringResult.length, 1, 'String search should find 1 match');
assert.strictEqual(stringResult[0].name, 'alpha-session');
// Numeric search — search.toLowerCase() at line 261 of session-aliases.js
// throws TypeError because Number.prototype has no toLowerCase method.
// The code does NOT guard against non-string search values.
assert.throws(
() => aliases.listAliases({ search: 123 }),
(err) => err instanceof TypeError && /toLowerCase/.test(err.message),
'Numeric search value should throw TypeError from toLowerCase call'
);
// Boolean search — also lacks toLowerCase
assert.throws(
() => aliases.listAliases({ search: true }),
(err) => err instanceof TypeError && /toLowerCase/.test(err.message),
'Boolean search value should also throw TypeError'
);
})) passed++; else failed++;
// ── Round 115: updateAliasTitle with empty string — stored as null via || but returned as "" ──
// Documents a return-value/stored-value mismatch caused by `title || null`:
// the success object echoes the raw parameter while storage normalizes to null.
console.log('\nRound 115: updateAliasTitle (empty string title — stored null, returned ""):');
if (test('updateAliasTitle with empty string stores null but returns empty string (|| coercion mismatch)', () => {
resetAliases();
// Create alias with a title
aliases.setAlias('r115-alias', '/path/to/session', 'Original Title');
const before = aliases.resolveAlias('r115-alias');
assert.strictEqual(before.title, 'Original Title', 'Baseline: title should be set');
// Update title with empty string
// Line 383: typeof "" === 'string' → passes validation
// Line 393: "" || null → null (empty string is falsy in JS)
// Line 400: returns { title: "" } (original parameter, not stored value)
const result = aliases.updateAliasTitle('r115-alias', '');
assert.strictEqual(result.success, true, 'Should succeed (empty string passes validation)');
assert.strictEqual(result.title, '', 'Return value reflects the input parameter (empty string)');
// But what's actually stored?
const after = aliases.resolveAlias('r115-alias');
assert.strictEqual(after.title, null,
'Stored title should be null because "" || null evaluates to null');
// Contrast: non-empty string is stored as-is
aliases.updateAliasTitle('r115-alias', 'New Title');
const withTitle = aliases.resolveAlias('r115-alias');
assert.strictEqual(withTitle.title, 'New Title', 'Non-empty string stored as-is');
// null explicitly clears title
aliases.updateAliasTitle('r115-alias', null);
const cleared = aliases.resolveAlias('r115-alias');
assert.strictEqual(cleared.title, null, 'null clears title');
})) passed++; else failed++;
// ── Round 116: loadAliases with extra unknown fields — silently preserved ──
// loadAliases only validates the `aliases` key, so unknown top-level fields
// pass through on load and — per the round-trip below — survive a save.
console.log('\nRound 116: loadAliases (extra unknown JSON fields — preserved by loose validation):');
if (test('loadAliases preserves extra unknown fields because only aliases key is validated', () => {
resetAliases();
// Manually write an aliases file with extra fields
const aliasesPath = aliases.getAliasesPath();
const customData = {
version: '1.0',
aliases: {
'test-session': {
sessionPath: '/path/to/session',
createdAt: '2026-01-01T00:00:00.000Z',
updatedAt: '2026-01-01T00:00:00.000Z',
title: 'Test'
}
},
metadata: {
totalCount: 1,
lastUpdated: '2026-01-01T00:00:00.000Z'
},
customField: 'extra data',
debugInfo: { level: 3, verbose: true },
tags: ['important', 'test']
};
fs.writeFileSync(aliasesPath, JSON.stringify(customData, null, 2), 'utf8');
// loadAliases only validates data.aliases — extra fields pass through
const loaded = aliases.loadAliases();
assert.ok(loaded.aliases['test-session'], 'Should load the valid alias');
assert.strictEqual(loaded.aliases['test-session'].title, 'Test');
assert.strictEqual(loaded.customField, 'extra data',
'Extra string field should be preserved');
assert.deepStrictEqual(loaded.debugInfo, { level: 3, verbose: true },
'Extra object field should be preserved');
assert.deepStrictEqual(loaded.tags, ['important', 'test'],
'Extra array field should be preserved');
// After saving, extra fields survive a round-trip (saveAliases only updates metadata)
aliases.setAlias('new-alias', '/path/to/new');
const reloaded = aliases.loadAliases();
assert.ok(reloaded.aliases['new-alias'], 'New alias should be saved');
assert.strictEqual(reloaded.customField, 'extra data',
'Extra field should survive save/load round-trip');
})) passed++; else failed++;
// ── Round 118: renameAlias to the same name — "already exists" because self-check ──
// NOTE(review): the next round is also labeled "Round 118" — one of the two
// labels is presumably a numbering slip; confirm before renumbering.
console.log('\nRound 118: renameAlias (same name — "already exists" because data.aliases[newAlias] is truthy):');
if (test('renameAlias to the same name returns "already exists" error (no self-rename short-circuit)', () => {
resetAliases();
aliases.setAlias('same-name', '/path/to/session');
// Rename 'same-name' → 'same-name'
// Line 333: data.aliases[newAlias] → truthy (the alias exists under that name)
// Returns error before checking if oldAlias === newAlias
const result = aliases.renameAlias('same-name', 'same-name');
assert.strictEqual(result.success, false, 'Should fail');
assert.ok(result.error.includes('already exists'),
'Error should say "already exists" (not "same name" or a no-op success)');
// Verify alias is unchanged
const resolved = aliases.resolveAlias('same-name');
assert.ok(resolved, 'Original alias should still exist');
assert.strictEqual(resolved.sessionPath, '/path/to/session');
})) passed++; else failed++;
// ── Round 118: setAlias reserved names — case-insensitive rejection ──
console.log('\nRound 118: setAlias (reserved names — case-insensitive rejection):');
if (test('setAlias rejects all reserved names case-insensitively (list, help, remove, delete, create, set)', () => {
resetAliases();
// All reserved names in lowercase
const reserved = ['list', 'help', 'remove', 'delete', 'create', 'set'];
for (const name of reserved) {
const result = aliases.setAlias(name, '/path/to/session');
assert.strictEqual(result.success, false,
`'${name}' should be rejected as reserved`);
assert.ok(result.error.includes('reserved'),
`Error for '${name}' should mention "reserved"`);
}
// Case-insensitive: uppercase variants also rejected
const upperResult = aliases.setAlias('LIST', '/path/to/session');
assert.strictEqual(upperResult.success, false,
'"LIST" (uppercase) should be rejected (toLowerCase check)');
const mixedResult = aliases.setAlias('Help', '/path/to/session');
assert.strictEqual(mixedResult.success, false,
'"Help" (mixed case) should be rejected');
const allCapsResult = aliases.setAlias('DELETE', '/path/to/session');
assert.strictEqual(allCapsResult.success, false,
'"DELETE" (all caps) should be rejected');
// Non-reserved names work fine
const validResult = aliases.setAlias('my-session', '/path/to/session');
assert.strictEqual(validResult.success, true,
'Non-reserved name should succeed');
})) passed++; else failed++;
// ── Round 119: renameAlias with reserved newAlias name — parallel reserved check ──
// renameAlias applies the same reserved-name list as setAlias to the target
// name, case-insensitively, and leaves the alias untouched on failure.
console.log('\nRound 119: renameAlias (reserved newAlias name — parallel check to setAlias):');
if (test('renameAlias rejects reserved names for newAlias (same reserved list as setAlias)', () => {
  resetAliases();
  aliases.setAlias('my-alias', '/path/to/session');
  // 'list' carries the full error-message check…
  const listAttempt = aliases.renameAlias('my-alias', 'list');
  assert.strictEqual(listAttempt.success, false, '"list" should be rejected');
  assert.ok(listAttempt.error.includes('reserved'),
    'Error should mention "reserved"');
  // …and mixed/upper-case variants are rejected the same way.
  for (const [target, message] of [
    ['Help', '"Help" should be rejected'],
    ['DELETE', '"DELETE" should be rejected'],
  ]) {
    assert.strictEqual(aliases.renameAlias('my-alias', target).success, false, message);
  }
  // Failed renames must not disturb the existing alias.
  const survivor = aliases.resolveAlias('my-alias');
  assert.ok(survivor, 'Original alias should still exist after failed renames');
  assert.strictEqual(survivor.sessionPath, '/path/to/session');
  // A non-reserved target succeeds.
  assert.strictEqual(aliases.renameAlias('my-alias', 'new-valid-name').success, true,
    'Non-reserved name should succeed');
})) passed++; else failed++;
// ── Round 120: setAlias max length boundary — 128 accepted, 129 rejected ──
// NOTE(review): this duplicates Round 111's 128/129 boundary coverage above
// (adds only the single-character case); consider merging the two rounds.
console.log('\nRound 120: setAlias (max alias length boundary — 128 ok, 129 rejected):');
if (test('setAlias accepts exactly 128-char alias name but rejects 129 chars (> 128 boundary)', () => {
resetAliases();
// 128 characters — exactly at limit (alias.length > 128 is false)
const name128 = 'a'.repeat(128);
const result128 = aliases.setAlias(name128, '/path/to/session');
assert.strictEqual(result128.success, true,
'128-char alias should be accepted (128 > 128 is false)');
// 129 characters — just over limit
const name129 = 'a'.repeat(129);
const result129 = aliases.setAlias(name129, '/path/to/session');
assert.strictEqual(result129.success, false,
'129-char alias should be rejected (129 > 128 is true)');
assert.ok(result129.error.includes('128'),
'Error should mention the 128 character limit');
// 1 character — minimum valid
const name1 = 'x';
const result1 = aliases.setAlias(name1, '/path/to/session');
assert.strictEqual(result1.success, true,
'Single character alias should be accepted');
// Verify the 128-char alias was actually stored
const resolved = aliases.resolveAlias(name128);
assert.ok(resolved, '128-char alias should be resolvable');
assert.strictEqual(resolved.sessionPath, '/path/to/session');
})) passed++; else failed++;
// ── Round 121: setAlias sessionPath validation — null, empty, whitespace, non-string ──
// setAlias refuses a sessionPath that is falsy, whitespace-only after trim,
// or not a string at all; only a non-empty string path is accepted.
console.log('\nRound 121: setAlias (sessionPath validation — null, empty, whitespace, non-string):');
if (test('setAlias rejects invalid sessionPath: null, empty, whitespace-only, and non-string types', () => {
  resetAliases();
  // null additionally pins the error wording.
  const nullAttempt = aliases.setAlias('test-alias', null);
  assert.strictEqual(nullAttempt.success, false, 'null path should fail');
  assert.ok(nullAttempt.error.includes('empty'), 'Error should mention empty');
  // Every other invalid shape just has to be refused.
  const invalidPaths = [
    [undefined, 'undefined path should fail'],
    ['', 'Empty string path should fail'],
    [' ', 'Whitespace-only path should fail'],
    [42, 'Number path should fail'],
    [true, 'Boolean path should fail'],
  ];
  for (const [value, message] of invalidPaths) {
    assert.strictEqual(aliases.setAlias('test-alias', value).success, false, message);
  }
  // A real path string goes through.
  assert.strictEqual(aliases.setAlias('test-alias', '/valid/path').success, true,
    'Valid string path should succeed');
})) passed++; else failed++;
// ── Round 122: listAliases limit edge cases — limit=0, negative, NaN bypassed (JS falsy) ──
// The implementation only slices when `limit && limit > 0`, so 0 (falsy),
// negative numbers (fail the > 0 check) and NaN (falsy) all mean "no limit".
console.log('\nRound 122: listAliases (limit edge cases — 0/negative/NaN are falsy, return all):');
if (test('listAliases limit=0 returns all aliases because 0 is falsy in JS (no slicing)', () => {
  resetAliases();
  aliases.setAlias('alias-a', '/path/a');
  aliases.setAlias('alias-b', '/path/b');
  aliases.setAlias('alias-c', '/path/c');
  // Each entry: [limit option, expected result count, assertion message].
  const limitCases = [
    [0, 3, 'limit=0 should return ALL aliases (0 is falsy in JS)'],
    [-1, 3, 'limit=-1 should return ALL aliases (-1 > 0 is false)'],
    [NaN, 3, 'limit=NaN should return ALL aliases (NaN is falsy)'],
    [1, 1, 'limit=1 should return exactly 1 alias'],
    [2, 2, 'limit=2 should return exactly 2 aliases'],
    [100, 3, 'limit > total should return all aliases'],
  ];
  for (const [limit, expectedCount, message] of limitCases) {
    assert.strictEqual(aliases.listAliases({ limit }).length, expectedCount, message);
  }
})) passed++; else failed++;
// ── Round 125: loadAliases with __proto__ key in JSON — no prototype pollution ──
console.log('\nRound 125: loadAliases (__proto__ key in JSON — safe, no prototype pollution):');
if (test('loadAliases with __proto__ alias key does not pollute Object prototype', () => {
// JSON.parse('{"__proto__":...}') creates a normal property named "__proto__",
// it does NOT modify Object.prototype. This is safe but worth documenting.
// The alias would be accessible via data.aliases['__proto__'] and iterable
// via Object.entries, but it won't affect other objects.
resetAliases();
// Write raw JSON string with __proto__ as an alias name.
// IMPORTANT: Cannot use JSON.stringify(obj) because {'__proto__':...} in JS
// sets the prototype rather than creating an own property, so stringify drops it.
// Must write the JSON string directly to simulate a maliciously crafted file.
const aliasesPath = aliases.getAliasesPath();
const now = new Date().toISOString();
const rawJson = `{
"version": "1.0.0",
"aliases": {
"__proto__": {
"sessionPath": "/evil/path",
"createdAt": "${now}",
"title": "Prototype Pollution Attempt"
},
"normal": {
"sessionPath": "/normal/path",
"createdAt": "${now}",
"title": "Normal Alias"
}
},
"metadata": { "totalCount": 2, "lastUpdated": "${now}" }
}`;
// Encoding omitted: writeFileSync defaults to utf8 for string data.
fs.writeFileSync(aliasesPath, rawJson);
// Load aliases — should NOT pollute prototype
const data = aliases.loadAliases();
// Verify __proto__ did NOT pollute Object.prototype
const freshObj = {};
assert.strictEqual(freshObj.sessionPath, undefined,
'Object.prototype should NOT have sessionPath (no pollution)');
assert.strictEqual(freshObj.title, undefined,
'Object.prototype should NOT have title (no pollution)');
// The __proto__ key IS accessible as a normal property
assert.ok(data.aliases['__proto__'],
'__proto__ key exists as normal property in parsed aliases');
assert.strictEqual(data.aliases['__proto__'].sessionPath, '/evil/path',
'__proto__ alias data is accessible normally');
// Normal alias also works
assert.ok(data.aliases['normal'],
'Normal alias coexists with __proto__ key');
// resolveAlias with '__proto__' — rejected by regex (underscores ok but __ prefix works)
// Actually ^[a-zA-Z0-9_-]+$ would ACCEPT '__proto__' since _ is allowed
// NOTE(review): because the regex accepts underscores, `resolved` should
// always be truthy here; consider asserting unconditionally so a regression
// in resolveAlias cannot silently skip the inner check below.
const resolved = aliases.resolveAlias('__proto__');
// If the regex accepts it, it should find the alias
if (resolved) {
assert.strictEqual(resolved.sessionPath, '/evil/path',
'resolveAlias can access __proto__ alias (regex allows underscores)');
}
// Object.keys should enumerate __proto__ from JSON.parse
const keys = Object.keys(data.aliases);
assert.ok(keys.includes('__proto__'),
'Object.keys includes __proto__ from JSON.parse (normal property)');
assert.ok(keys.includes('normal'),
'Object.keys includes normal alias');
})) passed++; else failed++;
// Summary
// Non-zero exit code signals failure to CI.
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -8,6 +8,8 @@
const assert = require('assert');
const path = require('path');
const fs = require('fs');
const os = require('os');
const { execFileSync } = require('child_process');
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'setup-package-manager.js');
@@ -256,6 +258,137 @@ function runTests() {
assert.strictEqual(installCount, 4, `Expected 4 "Install:" entries, found ${installCount}`);
})) passed++; else failed++;
// ── Round 62: --global success path and bare PM name ──
console.log('\n--global success path (Round 62):');
if (test('--global npm writes config and succeeds', () => {
// Point HOME (and USERPROFILE, its Windows counterpart) at a throwaway dir
// so the script writes its global config there, not in the real home dir.
const tmpDir = path.join(os.tmpdir(), `spm-test-global-${Date.now()}`);
fs.mkdirSync(tmpDir, { recursive: true });
try {
const result = run(['--global', 'npm'], { HOME: tmpDir, USERPROFILE: tmpDir });
assert.strictEqual(result.code, 0, `Expected exit 0, got ${result.code}. stderr: ${result.stderr}`);
assert.ok(result.stdout.includes('Global preference set to'), 'Should show success message');
assert.ok(result.stdout.includes('npm'), 'Should mention npm');
// Verify config file was created
const configPath = path.join(tmpDir, '.claude', 'package-manager.json');
assert.ok(fs.existsSync(configPath), 'Config file should be created');
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
assert.strictEqual(config.packageManager, 'npm', 'Config should contain npm');
} finally {
fs.rmSync(tmpDir, { recursive: true, force: true });
}
})) passed++; else failed++;
console.log('\nbare PM name success (Round 62):');
// A bare package-manager name (no flag) behaves like --global: the
// preference is written under the (redirected) home directory.
if (test('bare npm sets global preference and succeeds', () => {
  const fakeHome = path.join(os.tmpdir(), `spm-test-bare-${Date.now()}`);
  fs.mkdirSync(fakeHome, { recursive: true });
  try {
    const result = run(['npm'], { HOME: fakeHome, USERPROFILE: fakeHome });
    assert.strictEqual(result.code, 0, `Expected exit 0, got ${result.code}. stderr: ${result.stderr}`);
    assert.ok(result.stdout.includes('Global preference set to'), 'Should show success message');
    // The preference must land in <home>/.claude/package-manager.json.
    const configFile = path.join(fakeHome, '.claude', 'package-manager.json');
    assert.ok(fs.existsSync(configFile), 'Config file should be created');
    const saved = JSON.parse(fs.readFileSync(configFile, 'utf8'));
    assert.strictEqual(saved.packageManager, 'npm', 'Config should contain npm');
  } finally {
    fs.rmSync(fakeHome, { recursive: true, force: true });
  }
})) passed++; else failed++;
console.log('\n--detect source label (Round 62):');
// A CLAUDE_PACKAGE_MANAGER environment variable should be reported by
// --detect with "environment" as the preference source.
if (test('--detect with env var shows source as environment', () => {
  const detection = run(['--detect'], { CLAUDE_PACKAGE_MANAGER: 'pnpm' });
  assert.strictEqual(detection.code, 0);
  assert.ok(detection.stdout.includes('Source: environment'), 'Should show environment as source');
})) passed++; else failed++;
// ── Round 68: --project success path and --list (current) marker ──
console.log('\n--project success path (Round 68):');
if (test('--project npm writes project config and succeeds', () => {
const tmpDir = path.join(os.tmpdir(), `spm-test-project-${Date.now()}`);
fs.mkdirSync(tmpDir, { recursive: true });
try {
// spawnSync is used directly instead of the run() helper because this test
// needs to control the child's cwd — presumably run() does not expose a cwd
// option; verify against the helper before consolidating.
const result = require('child_process').spawnSync('node', [SCRIPT, '--project', 'npm'], {
encoding: 'utf8',
stdio: ['pipe', 'pipe', 'pipe'],
env: { ...process.env },
timeout: 10000,
cwd: tmpDir
});
assert.strictEqual(result.status, 0, `Expected exit 0, got ${result.status}. stderr: ${result.stderr}`);
assert.ok(result.stdout.includes('Project preference set to'), 'Should show project success message');
assert.ok(result.stdout.includes('npm'), 'Should mention npm');
// Verify config file was created in the project CWD
const configPath = path.join(tmpDir, '.claude', 'package-manager.json');
assert.ok(fs.existsSync(configPath), 'Project config file should be created in CWD');
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
assert.strictEqual(config.packageManager, 'npm', 'Config should contain npm');
} finally {
fs.rmSync(tmpDir, { recursive: true, force: true });
}
})) passed++; else failed++;
console.log('\n--list (current) marker (Round 68):');
// --list must flag exactly one package manager as the active one.
if (test('--list output includes (current) marker for active PM', () => {
  const listing = run(['--list']);
  assert.strictEqual(listing.code, 0);
  assert.ok(listing.stdout.includes('(current)'), '--list should mark the active PM with (current)');
  // Count occurrences of the literal "(current)" marker in the output.
  const currentCount = (listing.stdout.match(/\(current\)/g) || []).length;
  assert.strictEqual(currentCount, 1, `Expected exactly 1 "(current)" in --list, found ${currentCount}`);
})) passed++; else failed++;
// ── Round 74: setGlobal catch — setPreferredPackageManager throws ──
// Exercises the error path where writing the global config fails at the
// filesystem level; the CLI must exit 1 and report on stderr.
console.log('\nRound 74: setGlobal catch (save failure):');
if (test('--global npm fails when HOME is not a directory', () => {
if (process.platform === 'win32') {
console.log(' (skipped — /dev/null not available on Windows)');
return;
}
// HOME=/dev/null causes ensureDir to throw ENOTDIR when creating ~/.claude/
const result = run(['--global', 'npm'], { HOME: '/dev/null', USERPROFILE: '/dev/null' });
assert.strictEqual(result.code, 1, `Expected exit 1, got ${result.code}`);
assert.ok(result.stderr.includes('Error:'),
`stderr should contain Error:, got: ${result.stderr}`);
})) passed++; else failed++;
// ── Round 74: setProject catch — setProjectPackageManager throws ──
// Same error path for the project config: a read-only CWD makes .claude/
// creation fail with EACCES. Skipped as root because root ignores mode bits.
console.log('\nRound 74: setProject catch (save failure):');
if (test('--project npm fails when CWD is read-only', () => {
if (process.platform === 'win32' || process.getuid?.() === 0) {
console.log(' (skipped — chmod ineffective on Windows/root)');
return;
}
const tmpDir = path.join(os.tmpdir(), `spm-test-ro-${Date.now()}`);
fs.mkdirSync(tmpDir, { recursive: true });
try {
// Make CWD read-only so .claude/ dir creation fails with EACCES
fs.chmodSync(tmpDir, 0o555);
const result = require('child_process').spawnSync('node', [SCRIPT, '--project', 'npm'], {
encoding: 'utf8',
stdio: ['pipe', 'pipe', 'pipe'],
env: { ...process.env },
timeout: 10000,
cwd: tmpDir
});
assert.strictEqual(result.status, 1,
`Expected exit 1, got ${result.status}. stderr: ${result.stderr}`);
assert.ok(result.stderr.includes('Error:'),
`stderr should contain Error:, got: ${result.stderr}`);
} finally {
// Restore write permission first so rmSync can delete the directory.
try { fs.chmodSync(tmpDir, 0o755); } catch { /* best-effort */ }
fs.rmSync(tmpDir, { recursive: true, force: true });
}
})) passed++; else failed++;
// Summary
// Non-zero exit code signals failure to CI.
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);

View File

@@ -451,6 +451,80 @@ function runTests() {
});
})) passed++; else failed++;
// ── Round 54: analysisResults with zero values ──
console.log('\nanalysisResults zero values (Round 54):');
if (test('analysisResults handles zero values for all data fields', () => {
const output = new SkillCreateOutput('repo');
const logs = captureLog(() => output.analysisResults({
commits: 0, timeRange: '', contributors: 0, files: 0,
}));
const combined = logs.join('\n');
assert.ok(combined.includes('0'), 'Should display zero values');
assert.ok(logs.length > 0, 'Should produce output without crash');
// Box lines should still be 60 chars wide
// \u256D = ╭, \u2502 = │, \u2570 = ╰ (rounded-box border characters).
// NOTE(review): only the border-line count is asserted below; the 60-char
// width mentioned above is not actually checked here.
const boxLines = combined.split('\n').filter(l => {
const s = stripAnsi(l).trim();
return s.startsWith('\u256D') || s.startsWith('\u2502') || s.startsWith('\u2570');
});
assert.ok(boxLines.length >= 3, 'Should render a complete box');
})) passed++; else failed++;
// ── Round 68: demo function export ──
// The module must expose both the demo runner and the class constructor.
console.log('\ndemo export (Round 68):');
if (test('module exports demo function alongside SkillCreateOutput', () => {
  const api = require('../../scripts/skill-create-output');
  assert.ok(api.demo, 'Should export demo function');
  assert.strictEqual(typeof api.demo, 'function', 'demo should be a function');
  assert.ok(api.SkillCreateOutput, 'Should also export SkillCreateOutput');
  assert.strictEqual(typeof api.SkillCreateOutput, 'function', 'SkillCreateOutput should be a constructor');
})) passed++; else failed++;
// ── Round 85: patterns() confidence=0 uses ?? (not ||) ──
// Regression test for a ||-vs-?? default: confidence=0 is a legitimate
// explicit value and must not fall back to the 0.8 default.
console.log('\nRound 85: patterns() confidence=0 nullish coalescing:');
if (test('patterns() with confidence=0 shows 0%, not 80% (nullish coalescing fix)', () => {
const output = new SkillCreateOutput('repo');
const logs = captureLog(() => output.patterns([
{ name: 'Zero Confidence', trigger: 'never', confidence: 0, evidence: 'none' },
]));
const combined = stripAnsi(logs.join('\n'));
// With ?? operator: 0 ?? 0.8 = 0 → Math.round(0 * 100) = 0 → shows "0%"
// With || operator (bug): 0 || 0.8 = 0.8 → shows "80%"
assert.ok(combined.includes('0%'), 'Should show 0% for zero confidence');
assert.ok(!combined.includes('80%'),
'Should NOT show 80% — confidence=0 is explicitly provided, not missing');
})) passed++; else failed++;
// ── Round 87: analyzePhase() async method (untested) ──
console.log('\nRound 87: analyzePhase() async method:');
if (test('analyzePhase completes without error and writes to stdout', () => {
  // analyzePhase is async and calls animateProgress which uses sleep() and
  // process.stdout.write/clearLine/cursorTo. In non-TTY environments clearLine
  // and cursorTo are undefined, but the code uses optional chaining (?.) to
  // handle this safely. We verify it resolves without throwing.
  // Capture stdout.write to verify output was produced.
  const output = new SkillCreateOutput('test-repo');
  const writes = [];
  const origWrite = process.stdout.write;
  process.stdout.write = function(str) { writes.push(String(str)); return true; };
  try {
    // Call synchronously by accessing the returned promise — we just need to
    // verify it doesn't throw during setup. The sleeps total 1.9s so we
    // verify the promise is a thenable (async function returns Promise).
    const promise = output.analyzePhase({ commits: 42 });
    assert.ok(promise && typeof promise.then === 'function',
      'analyzePhase should return a Promise');
    // FIX: do not leave the promise floating. It keeps running (~1.9s of
    // sleeps) after this synchronous test returns; if any of that delayed
    // work rejected, Node would terminate on the unhandled rejection with a
    // failure unrelated to any assertion. Swallowing late errors is
    // deliberate — this test only checks the synchronous setup phase.
    promise.catch(() => {});
  } finally {
    process.stdout.write = origWrite;
  }
  // Verify that process.stdout.write was called (the header line is written synchronously)
  assert.ok(writes.length > 0, 'Should have written output via process.stdout.write');
  assert.ok(writes.some(w => w.includes('Analyzing')), 'Should include "Analyzing" label');
})) passed++; else failed++;
// Summary
// Non-zero exit code signals failure to CI; process.exit also tears down the
// still-pending analyzePhase timers from the test above.
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);