mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-05-15 13:23:13 +08:00
Compare commits
24 Commits
cf54c791e4
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f7315016c0 | ||
|
|
375d750b4c | ||
|
|
d1710bd2e7 | ||
|
|
7d15a2282b | ||
|
|
0e66c838c7 | ||
|
|
cb9702ca99 | ||
|
|
f9384427b8 | ||
|
|
4423f10cfb | ||
|
|
3b12fb273f | ||
|
|
4fb80d8861 | ||
|
|
a27831c13e | ||
|
|
b24d762caa | ||
|
|
f94478e524 | ||
|
|
6cdac19764 | ||
|
|
af3a206412 | ||
|
|
20f00c1410 | ||
|
|
e7a6f137e5 | ||
|
|
7596502092 | ||
|
|
c04baa8c25 | ||
|
|
9082bdedac | ||
|
|
3243a1c5d3 | ||
|
|
69401b28b3 | ||
|
|
9a5ed3223a | ||
|
|
d844bd6bfc |
@@ -11,7 +11,7 @@
|
||||
{
|
||||
"name": "ecc",
|
||||
"source": "./",
|
||||
"description": "The most comprehensive Claude Code plugin — 60 agents, 228 skills, 75 legacy command shims, selective install profiles, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
|
||||
"description": "The most comprehensive Claude Code plugin — 60 agents, 229 skills, 75 legacy command shims, selective install profiles, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
|
||||
"version": "2.0.0-rc.1",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "ecc",
|
||||
"version": "2.0.0-rc.1",
|
||||
"description": "Battle-tested Claude Code plugin for engineering teams — 60 agents, 228 skills, 75 legacy command shims, production-ready hooks, and selective install workflows evolved through continuous real-world use",
|
||||
"description": "Battle-tested Claude Code plugin for engineering teams — 60 agents, 229 skills, 75 legacy command shims, production-ready hooks, and selective install workflows evolved through continuous real-world use",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"url": "https://x.com/affaanmustafa"
|
||||
|
||||
38
.github/workflows/ci.yml
vendored
38
.github/workflows/ci.yml
vendored
@@ -220,6 +220,10 @@ jobs:
|
||||
run: node scripts/ci/catalog.js --text
|
||||
continue-on-error: false
|
||||
|
||||
- name: Validate command registry
|
||||
run: npm run command-registry:check
|
||||
continue-on-error: false
|
||||
|
||||
- name: Check unicode safety
|
||||
run: node scripts/ci/check-unicode-safety.js
|
||||
continue-on-error: false
|
||||
@@ -242,11 +246,43 @@ jobs:
|
||||
with:
|
||||
node-version: '20.x'
|
||||
|
||||
- name: Install audit dependencies
|
||||
run: npm ci --ignore-scripts
|
||||
|
||||
- name: Run npm audit
|
||||
run: |
|
||||
npm audit signatures
|
||||
npm audit --audit-level=high
|
||||
continue-on-error: true # Allows PR to proceed, but marks job as failed if vulnerabilities found
|
||||
|
||||
- name: Run supply-chain IOC scan
|
||||
run: npm run security:ioc-scan
|
||||
|
||||
coverage:
|
||||
name: Coverage
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
|
||||
with:
|
||||
node-version: '20.x'
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci --ignore-scripts
|
||||
|
||||
- name: Run coverage
|
||||
run: npm run coverage
|
||||
|
||||
- name: Upload coverage report
|
||||
if: always()
|
||||
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
|
||||
with:
|
||||
name: coverage-ubuntu-node20-npm
|
||||
path: coverage/
|
||||
|
||||
lint:
|
||||
name: Lint
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — Agent Instructions
|
||||
|
||||
This is a **production-ready AI coding plugin** providing 60 specialized agents, 228 skills, 75 commands, and automated hook workflows for software development.
|
||||
This is a **production-ready AI coding plugin** providing 60 specialized agents, 229 skills, 75 commands, and automated hook workflows for software development.
|
||||
|
||||
**Version:** 2.0.0-rc.1
|
||||
|
||||
@@ -150,7 +150,7 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
|
||||
|
||||
```
|
||||
agents/ — 60 specialized subagents
|
||||
skills/ — 228 workflow skills and domain knowledge
|
||||
skills/ — 229 workflow skills and domain knowledge
|
||||
commands/ — 75 slash commands
|
||||
hooks/ — Trigger-based automations
|
||||
rules/ — Always-follow guidelines (common + per-language)
|
||||
|
||||
@@ -89,7 +89,7 @@ This repo is the raw code only. The guides explain everything.
|
||||
### v2.0.0-rc.1 — Surface Refresh, Operator Workflows, and ECC 2.0 Alpha (Apr 2026)
|
||||
|
||||
- **Dashboard GUI** — New Tkinter-based desktop application (`ecc_dashboard.py` or `npm run dashboard`) with dark/light theme toggle, font customization, and project logo in header and taskbar.
|
||||
- **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 55 agents, 208 skills, and 72 legacy command shims.
|
||||
- **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 60 agents, 229 skills, and 75 legacy command shims.
|
||||
- **Operator and outbound workflow expansion** — `brand-voice`, `social-graph-ranker`, `connections-optimizer`, `customer-billing-ops`, `ecc-tools-cost-audit`, `google-workspace-ops`, `project-flow-ops`, and `workspace-surface-audit` round out the operator lane.
|
||||
- **Media and launch tooling** — `manim-video`, `remotion-video-creation`, and upgraded social publishing surfaces make technical explainers and launch content part of the same system.
|
||||
- **Framework and product surface growth** — `nestjs-patterns`, richer Codex/OpenCode install surfaces, and expanded cross-harness packaging keep the repo usable beyond Claude Code alone.
|
||||
@@ -358,7 +358,7 @@ If you stacked methods, clean up in this order:
|
||||
/plugin list ecc@ecc
|
||||
```
|
||||
|
||||
**That's it!** You now have access to 60 agents, 228 skills, and 75 legacy command shims.
|
||||
**That's it!** You now have access to 60 agents, 229 skills, and 75 legacy command shims.
|
||||
|
||||
### Dashboard GUI
|
||||
|
||||
@@ -1363,7 +1363,7 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | PASS: 60 agents | PASS: 12 agents | **Claude Code leads** |
|
||||
| Commands | PASS: 75 commands | PASS: 35 commands | **Claude Code leads** |
|
||||
| Skills | PASS: 228 skills | PASS: 37 skills | **Claude Code leads** |
|
||||
| Skills | PASS: 229 skills | PASS: 37 skills | **Claude Code leads** |
|
||||
| Hooks | PASS: 8 event types | PASS: 11 events | **OpenCode has more!** |
|
||||
| Rules | PASS: 29 rules | PASS: 13 instructions | **Claude Code leads** |
|
||||
| MCP Servers | PASS: 14 servers | PASS: Full | **Full parity** |
|
||||
@@ -1525,7 +1525,7 @@ ECC is the **first plugin to maximize every major AI coding tool**. Here's how e
|
||||
|---------|------------|------------|-----------|----------|----------------|
|
||||
| **Agents** | 60 | Shared (AGENTS.md) | Shared (AGENTS.md) | 12 | N/A |
|
||||
| **Commands** | 75 | Shared | Instruction-based | 35 | 6 prompts |
|
||||
| **Skills** | 228 | Shared | 10 (native format) | 37 | Via instructions |
|
||||
| **Skills** | 229 | Shared | 10 (native format) | 37 | Via instructions |
|
||||
| **Hook Events** | 8 types | 15 types | None yet | 11 types | None |
|
||||
| **Hook Scripts** | 20+ scripts | 16 scripts (DRY adapter) | N/A | Plugin hooks | N/A |
|
||||
| **Rules** | 34 (common + lang) | 34 (YAML frontmatter) | Instruction-based | 13 instructions | 1 always-on file |
|
||||
|
||||
@@ -160,7 +160,7 @@ Copy-Item -Recurse rules/typescript "$HOME/.claude/rules/"
|
||||
/plugin list ecc@ecc
|
||||
```
|
||||
|
||||
**完成!** 你现在可以使用 60 个代理、228 个技能和 75 个命令。
|
||||
**完成!** 你现在可以使用 60 个代理、229 个技能和 75 个命令。
|
||||
|
||||
### multi-* 命令需要额外配置
|
||||
|
||||
|
||||
898
docs/COMMAND-REGISTRY.json
Normal file
898
docs/COMMAND-REGISTRY.json
Normal file
@@ -0,0 +1,898 @@
|
||||
{
|
||||
"schemaVersion": 1,
|
||||
"totalCommands": 75,
|
||||
"commands": [
|
||||
{
|
||||
"command": "aside",
|
||||
"description": "Answer a quick side question without interrupting or losing context from the current task. Resume work automatically after answering.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/aside.md"
|
||||
},
|
||||
{
|
||||
"command": "auto-update",
|
||||
"description": "Pull the latest ECC repo changes and reinstall the current managed targets.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/auto-update.md"
|
||||
},
|
||||
{
|
||||
"command": "build-fix",
|
||||
"description": "Detect the project build system and incrementally fix build/type errors with minimal safe changes.",
|
||||
"type": "refactoring",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/build-fix.md"
|
||||
},
|
||||
{
|
||||
"command": "checkpoint",
|
||||
"description": "Create, verify, or list workflow checkpoints after running verification checks.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/checkpoint.md"
|
||||
},
|
||||
{
|
||||
"command": "code-review",
|
||||
"description": "Code review — local uncommitted changes or GitHub PR (pass PR number/URL for PR mode)",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/code-review.md"
|
||||
},
|
||||
{
|
||||
"command": "cost-report",
|
||||
"description": "Generate a local Claude Code cost report from a cost-tracker SQLite database.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/cost-report.md"
|
||||
},
|
||||
{
|
||||
"command": "cpp-build",
|
||||
"description": "Fix C++ build errors, CMake issues, and linker problems incrementally. Invokes the cpp-build-resolver agent for minimal, surgical fixes.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"cpp-build-resolver"
|
||||
],
|
||||
"allAgents": [
|
||||
"cpp-build-resolver"
|
||||
],
|
||||
"skills": [
|
||||
"cpp-coding-standards"
|
||||
],
|
||||
"path": "commands/cpp-build.md"
|
||||
},
|
||||
{
|
||||
"command": "cpp-review",
|
||||
"description": "Comprehensive C++ code review for memory safety, modern C++ idioms, concurrency, and security. Invokes the cpp-reviewer agent.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"cpp-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"cpp-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"cpp-coding-standards",
|
||||
"cpp-testing"
|
||||
],
|
||||
"path": "commands/cpp-review.md"
|
||||
},
|
||||
{
|
||||
"command": "cpp-test",
|
||||
"description": "Enforce TDD workflow for C++. Write GoogleTest tests first, then implement. Verify coverage with gcov/lcov.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"cpp-testing",
|
||||
"tdd-workflow"
|
||||
],
|
||||
"path": "commands/cpp-test.md"
|
||||
},
|
||||
{
|
||||
"command": "ecc-guide",
|
||||
"description": "Navigate ECC's current agents, skills, commands, hooks, install profiles, and docs from the live repository surface.",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"ecc-guide",
|
||||
"security-scan"
|
||||
],
|
||||
"path": "commands/ecc-guide.md"
|
||||
},
|
||||
{
|
||||
"command": "evolve",
|
||||
"description": "Analyze instincts and suggest or generate evolved structures",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"continuous-learning-v2"
|
||||
],
|
||||
"path": "commands/evolve.md"
|
||||
},
|
||||
{
|
||||
"command": "fastapi-review",
|
||||
"description": "Review a FastAPI application for architecture, async correctness, dependency injection, Pydantic schemas, security, performance, and testability.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/fastapi-review.md"
|
||||
},
|
||||
{
|
||||
"command": "feature-dev",
|
||||
"description": "Guided feature development with codebase understanding and architecture focus",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/feature-dev.md"
|
||||
},
|
||||
{
|
||||
"command": "flutter-build",
|
||||
"description": "Fix Dart analyzer errors and Flutter build failures incrementally. Invokes the dart-build-resolver agent for minimal, surgical fixes.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"dart-build-resolver"
|
||||
],
|
||||
"allAgents": [
|
||||
"dart-build-resolver"
|
||||
],
|
||||
"skills": [
|
||||
"flutter-dart-code-review"
|
||||
],
|
||||
"path": "commands/flutter-build.md"
|
||||
},
|
||||
{
|
||||
"command": "flutter-review",
|
||||
"description": "Review Flutter/Dart code for idiomatic patterns, widget best practices, state management, performance, accessibility, and security. Invokes the flutter-reviewer agent.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"flutter-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"flutter-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"flutter-dart-code-review"
|
||||
],
|
||||
"path": "commands/flutter-review.md"
|
||||
},
|
||||
{
|
||||
"command": "flutter-test",
|
||||
"description": "Run Flutter/Dart tests, report failures, and incrementally fix test issues. Covers unit, widget, golden, and integration tests.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"dart-build-resolver",
|
||||
"flutter-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"dart-build-resolver",
|
||||
"flutter-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"flutter-dart-code-review"
|
||||
],
|
||||
"path": "commands/flutter-test.md"
|
||||
},
|
||||
{
|
||||
"command": "gan-build",
|
||||
"description": "Run a generator/evaluator build loop for implementation tasks with bounded iterations and scoring.",
|
||||
"type": "orchestration",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/gan-build.md"
|
||||
},
|
||||
{
|
||||
"command": "gan-design",
|
||||
"description": "Run a generator/evaluator design loop for frontend or visual work with bounded iterations and scoring.",
|
||||
"type": "planning",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/gan-design.md"
|
||||
},
|
||||
{
|
||||
"command": "go-build",
|
||||
"description": "Fix Go build errors, go vet warnings, and linter issues incrementally. Invokes the go-build-resolver agent for minimal, surgical fixes.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"go-build-resolver"
|
||||
],
|
||||
"allAgents": [
|
||||
"go-build-resolver"
|
||||
],
|
||||
"skills": [
|
||||
"golang-patterns"
|
||||
],
|
||||
"path": "commands/go-build.md"
|
||||
},
|
||||
{
|
||||
"command": "go-review",
|
||||
"description": "Comprehensive Go code review for idiomatic patterns, concurrency safety, error handling, and security. Invokes the go-reviewer agent.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"go-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"go-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"golang-patterns",
|
||||
"golang-testing"
|
||||
],
|
||||
"path": "commands/go-review.md"
|
||||
},
|
||||
{
|
||||
"command": "go-test",
|
||||
"description": "Enforce TDD workflow for Go. Write table-driven tests first, then implement. Verify 80%+ coverage with go test -cover.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"golang-testing",
|
||||
"tdd-workflow"
|
||||
],
|
||||
"path": "commands/go-test.md"
|
||||
},
|
||||
{
|
||||
"command": "gradle-build",
|
||||
"description": "Fix Gradle build errors for Android and KMP projects",
|
||||
"type": "build",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/gradle-build.md"
|
||||
},
|
||||
{
|
||||
"command": "harness-audit",
|
||||
"description": "Run a deterministic repository harness audit and return a prioritized scorecard.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/harness-audit.md"
|
||||
},
|
||||
{
|
||||
"command": "hookify-configure",
|
||||
"description": "Enable or disable hookify rules interactively",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/hookify-configure.md"
|
||||
},
|
||||
{
|
||||
"command": "hookify-help",
|
||||
"description": "Get help with the hookify system",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/hookify-help.md"
|
||||
},
|
||||
{
|
||||
"command": "hookify-list",
|
||||
"description": "List all configured hookify rules",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/hookify-list.md"
|
||||
},
|
||||
{
|
||||
"command": "hookify",
|
||||
"description": "Create hooks to prevent unwanted behaviors from conversation analysis or explicit instructions",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/hookify.md"
|
||||
},
|
||||
{
|
||||
"command": "instinct-export",
|
||||
"description": "Export instincts from project/global scope to a file",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/instinct-export.md"
|
||||
},
|
||||
{
|
||||
"command": "instinct-import",
|
||||
"description": "Import instincts from file or URL into project/global scope",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"continuous-learning-v2"
|
||||
],
|
||||
"path": "commands/instinct-import.md"
|
||||
},
|
||||
{
|
||||
"command": "instinct-status",
|
||||
"description": "Show learned instincts (project + global) with confidence",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"continuous-learning-v2"
|
||||
],
|
||||
"path": "commands/instinct-status.md"
|
||||
},
|
||||
{
|
||||
"command": "jira",
|
||||
"description": "Retrieve a Jira ticket, analyze requirements, update status, or add comments. Uses the jira-integration skill and MCP or REST API.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"jira-integration"
|
||||
],
|
||||
"path": "commands/jira.md"
|
||||
},
|
||||
{
|
||||
"command": "kotlin-build",
|
||||
"description": "Fix Kotlin/Gradle build errors, compiler warnings, and dependency issues incrementally. Invokes the kotlin-build-resolver agent for minimal, surgical fixes.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"kotlin-build-resolver"
|
||||
],
|
||||
"allAgents": [
|
||||
"kotlin-build-resolver"
|
||||
],
|
||||
"skills": [
|
||||
"kotlin-patterns"
|
||||
],
|
||||
"path": "commands/kotlin-build.md"
|
||||
},
|
||||
{
|
||||
"command": "kotlin-review",
|
||||
"description": "Comprehensive Kotlin code review for idiomatic patterns, null safety, coroutine safety, and security. Invokes the kotlin-reviewer agent.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"kotlin-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"kotlin-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"kotlin-patterns",
|
||||
"kotlin-testing"
|
||||
],
|
||||
"path": "commands/kotlin-review.md"
|
||||
},
|
||||
{
|
||||
"command": "kotlin-test",
|
||||
"description": "Enforce TDD workflow for Kotlin. Write Kotest tests first, then implement. Verify 80%+ coverage with Kover.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"kotlin-testing",
|
||||
"tdd-workflow"
|
||||
],
|
||||
"path": "commands/kotlin-test.md"
|
||||
},
|
||||
{
|
||||
"command": "learn-eval",
|
||||
"description": "Extract reusable patterns from the session, self-evaluate quality before saving, and determine the right save location (Global vs Project).",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/learn-eval.md"
|
||||
},
|
||||
{
|
||||
"command": "learn",
|
||||
"description": "Extract reusable patterns from the current session and save them as candidate skills or guidance.",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/learn.md"
|
||||
},
|
||||
{
|
||||
"command": "loop-start",
|
||||
"description": "Start a managed autonomous loop pattern with safety defaults and explicit stop conditions.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/loop-start.md"
|
||||
},
|
||||
{
|
||||
"command": "loop-status",
|
||||
"description": "Inspect active loop state, progress, failure signals, and recommended intervention.",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/loop-status.md"
|
||||
},
|
||||
{
|
||||
"command": "model-route",
|
||||
"description": "Recommend the best model tier for the current task based on complexity, risk, and budget.",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/model-route.md"
|
||||
},
|
||||
{
|
||||
"command": "multi-backend",
|
||||
"description": "Run a backend-focused multi-model workflow for APIs, algorithms, data, and business logic.",
|
||||
"type": "orchestration",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/multi-backend.md"
|
||||
},
|
||||
{
|
||||
"command": "multi-execute",
|
||||
"description": "Execute a multi-model implementation plan while preserving Claude as the only filesystem writer.",
|
||||
"type": "orchestration",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/multi-execute.md"
|
||||
},
|
||||
{
|
||||
"command": "multi-frontend",
|
||||
"description": "Run a frontend-focused multi-model workflow for components, layouts, animation, and UI polish.",
|
||||
"type": "orchestration",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/multi-frontend.md"
|
||||
},
|
||||
{
|
||||
"command": "multi-plan",
|
||||
"description": "Create a multi-model implementation plan without modifying production code.",
|
||||
"type": "orchestration",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"accessibility"
|
||||
],
|
||||
"path": "commands/multi-plan.md"
|
||||
},
|
||||
{
|
||||
"command": "multi-workflow",
|
||||
"description": "Run a full multi-model development workflow with research, planning, execution, optimization, and review.",
|
||||
"type": "orchestration",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/multi-workflow.md"
|
||||
},
|
||||
{
|
||||
"command": "plan-prd",
|
||||
"description": "Generate a lean, problem-first PRD and hand off to /plan for implementation planning.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/plan-prd.md"
|
||||
},
|
||||
{
|
||||
"command": "plan",
|
||||
"description": "Restate requirements, assess risks, and create step-by-step implementation plan. WAIT for user CONFIRM before touching any code.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"planner"
|
||||
],
|
||||
"allAgents": [
|
||||
"planner"
|
||||
],
|
||||
"skills": [],
|
||||
"path": "commands/plan.md"
|
||||
},
|
||||
{
|
||||
"command": "pm2",
|
||||
"description": "Analyze a project and generate PM2 service commands for detected frontend, backend, or database services.",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/pm2.md"
|
||||
},
|
||||
{
|
||||
"command": "pr",
|
||||
"description": "Create a GitHub PR from current branch with unpushed commits — discovers templates, analyzes changes, pushes",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/pr.md"
|
||||
},
|
||||
{
|
||||
"command": "project-init",
|
||||
"description": "Detect a project's stack and produce a dry-run ECC onboarding plan using the repository's install manifests and stack mappings.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"ecc-guide"
|
||||
],
|
||||
"path": "commands/project-init.md"
|
||||
},
|
||||
{
|
||||
"command": "projects",
|
||||
"description": "List known projects and their instinct statistics",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"continuous-learning-v2"
|
||||
],
|
||||
"path": "commands/projects.md"
|
||||
},
|
||||
{
|
||||
"command": "promote",
|
||||
"description": "Promote project-scoped instincts to global scope",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"continuous-learning-v2"
|
||||
],
|
||||
"path": "commands/promote.md"
|
||||
},
|
||||
{
|
||||
"command": "prp-commit",
|
||||
"description": "Quick commit with natural language file targeting — describe what to commit in plain English",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/prp-commit.md"
|
||||
},
|
||||
{
|
||||
"command": "prp-implement",
|
||||
"description": "Execute an implementation plan with rigorous validation loops",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/prp-implement.md"
|
||||
},
|
||||
{
|
||||
"command": "prp-plan",
|
||||
"description": "Create comprehensive feature implementation plan with codebase analysis and pattern extraction",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/prp-plan.md"
|
||||
},
|
||||
{
|
||||
"command": "prp-pr",
|
||||
"description": "Create a GitHub PR from current branch with unpushed commits — discovers templates, analyzes changes, pushes",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/prp-pr.md"
|
||||
},
|
||||
{
|
||||
"command": "prp-prd",
|
||||
"description": "Interactive PRD generator - problem-first, hypothesis-driven product spec with back-and-forth questioning",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/prp-prd.md"
|
||||
},
|
||||
{
|
||||
"command": "prune",
|
||||
"description": "Delete pending instincts older than 30 days that were never promoted",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"continuous-learning-v2"
|
||||
],
|
||||
"path": "commands/prune.md"
|
||||
},
|
||||
{
|
||||
"command": "python-review",
|
||||
"description": "Comprehensive Python code review for PEP 8 compliance, type hints, security, and Pythonic idioms. Invokes the python-reviewer agent.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"python-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"python-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"python-patterns",
|
||||
"python-testing"
|
||||
],
|
||||
"path": "commands/python-review.md"
|
||||
},
|
||||
{
|
||||
"command": "quality-gate",
|
||||
"description": "Run the ECC quality pipeline for a file or project scope and report remediation steps.",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/quality-gate.md"
|
||||
},
|
||||
{
|
||||
"command": "refactor-clean",
|
||||
"description": "Safely identify and remove dead code with verification after each change.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/refactor-clean.md"
|
||||
},
|
||||
{
|
||||
"command": "resume-session",
|
||||
"description": "Load the most recent session file from ~/.claude/session-data/ and resume work with full context from where the last session ended.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/resume-session.md"
|
||||
},
|
||||
{
|
||||
"command": "review-pr",
|
||||
"description": "Comprehensive PR review using specialized agents",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/review-pr.md"
|
||||
},
|
||||
{
|
||||
"command": "rust-build",
|
||||
"description": "Fix Rust build errors, borrow checker issues, and dependency problems incrementally. Invokes the rust-build-resolver agent for minimal, surgical fixes.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"rust-build-resolver"
|
||||
],
|
||||
"allAgents": [
|
||||
"rust-build-resolver"
|
||||
],
|
||||
"skills": [
|
||||
"rust-patterns"
|
||||
],
|
||||
"path": "commands/rust-build.md"
|
||||
},
|
||||
{
|
||||
"command": "rust-review",
|
||||
"description": "Comprehensive Rust code review for ownership, lifetimes, error handling, unsafe usage, and idiomatic patterns. Invokes the rust-reviewer agent.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [
|
||||
"rust-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"rust-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"rust-patterns",
|
||||
"rust-testing"
|
||||
],
|
||||
"path": "commands/rust-review.md"
|
||||
},
|
||||
{
|
||||
"command": "rust-test",
|
||||
"description": "Enforce TDD workflow for Rust. Write tests first, then implement. Verify 80%+ coverage with cargo-llvm-cov.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [
|
||||
"rust-patterns",
|
||||
"rust-testing"
|
||||
],
|
||||
"path": "commands/rust-test.md"
|
||||
},
|
||||
{
|
||||
"command": "santa-loop",
|
||||
"description": "Adversarial dual-review convergence loop — two independent model reviewers must both approve before code ships.",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/santa-loop.md"
|
||||
},
|
||||
{
|
||||
"command": "save-session",
|
||||
"description": "Save current session state to a dated file in ~/.claude/session-data/ so work can be resumed in a future session with full context.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/save-session.md"
|
||||
},
|
||||
{
|
||||
"command": "security-scan",
|
||||
"description": "Run AgentShield against agent, hook, MCP, permission, and secret surfaces.",
|
||||
"type": "review",
|
||||
"primaryAgents": [
|
||||
"security-reviewer"
|
||||
],
|
||||
"allAgents": [
|
||||
"security-reviewer"
|
||||
],
|
||||
"skills": [
|
||||
"security-scan"
|
||||
],
|
||||
"path": "commands/security-scan.md"
|
||||
},
|
||||
{
|
||||
"command": "sessions",
|
||||
"description": "Manage Claude Code session history, aliases, and session metadata.",
|
||||
"type": "general",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/sessions.md"
|
||||
},
|
||||
{
|
||||
"command": "setup-pm",
|
||||
"description": "Configure your preferred package manager (npm/pnpm/yarn/bun)",
|
||||
"type": "build",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/setup-pm.md"
|
||||
},
|
||||
{
|
||||
"command": "skill-create",
|
||||
"description": "Analyze local git history to extract coding patterns and generate SKILL.md files. Local version of the Skill Creator GitHub App.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/skill-create.md"
|
||||
},
|
||||
{
|
||||
"command": "skill-health",
|
||||
"description": "Show skill portfolio health dashboard with charts and analytics",
|
||||
"type": "review",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/skill-health.md"
|
||||
},
|
||||
{
|
||||
"command": "test-coverage",
|
||||
"description": "Analyze coverage, identify gaps, and generate missing tests toward the target threshold.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/test-coverage.md"
|
||||
},
|
||||
{
|
||||
"command": "update-codemaps",
|
||||
"description": "Scan project structure and generate token-lean architecture codemaps.",
|
||||
"type": "planning",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/update-codemaps.md"
|
||||
},
|
||||
{
|
||||
"command": "update-docs",
|
||||
"description": "Sync documentation from source-of-truth files such as scripts, schemas, routes, and exports.",
|
||||
"type": "testing",
|
||||
"primaryAgents": [],
|
||||
"allAgents": [],
|
||||
"skills": [],
|
||||
"path": "commands/update-docs.md"
|
||||
}
|
||||
],
|
||||
"statistics": {
|
||||
"byType": {
|
||||
"build": 2,
|
||||
"general": 8,
|
||||
"orchestration": 6,
|
||||
"planning": 2,
|
||||
"refactoring": 1,
|
||||
"review": 9,
|
||||
"testing": 47
|
||||
},
|
||||
"topAgents": [
|
||||
{
|
||||
"agent": "dart-build-resolver",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"agent": "flutter-reviewer",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"agent": "cpp-build-resolver",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "cpp-reviewer",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "go-build-resolver",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "go-reviewer",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "kotlin-build-resolver",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "kotlin-reviewer",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "planner",
|
||||
"count": 1
|
||||
},
|
||||
{
|
||||
"agent": "python-reviewer",
|
||||
"count": 1
|
||||
}
|
||||
],
|
||||
"topSkills": [
|
||||
{
|
||||
"skill": "continuous-learning-v2",
|
||||
"count": 6
|
||||
},
|
||||
{
|
||||
"skill": "flutter-dart-code-review",
|
||||
"count": 3
|
||||
},
|
||||
{
|
||||
"skill": "rust-patterns",
|
||||
"count": 3
|
||||
},
|
||||
{
|
||||
"skill": "tdd-workflow",
|
||||
"count": 3
|
||||
},
|
||||
{
|
||||
"skill": "cpp-coding-standards",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"skill": "cpp-testing",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"skill": "ecc-guide",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"skill": "golang-patterns",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"skill": "golang-testing",
|
||||
"count": 2
|
||||
},
|
||||
{
|
||||
"skill": "kotlin-patterns",
|
||||
"count": 2
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -61,6 +61,14 @@ As of 2026-05-13:
|
||||
and added prioritized corpus accuracy recommendations to failed corpus gates,
|
||||
mapping misses by category, missing rule, and config ID so enterprise
|
||||
scanner-regression work has an actionable improvement plan.
|
||||
- AgentShield PR #81 merged as `6583884e74ba2e896942113e1ce3146230e6fb76`
|
||||
and added ordered remediation workflow phases to remediation plans, routing
|
||||
safe auto-fixes, manual review, and verification through stable finding
|
||||
fingerprints without copying raw evidence.
|
||||
- AgentShield PR #82 merged as `51336ba074ad5e9fed2c0aa3237422be22147e76`
|
||||
and expanded the built-in attack corpus with an env proxy hijack scenario
|
||||
covering proxy/runtime mutation, env-token exfiltration, DNS exfiltration,
|
||||
credential-store access, and clipboard access.
|
||||
- JARVIS PR #13 merged as `127efabbfb5033ae53d7a53e1546aa3c33d6f962`
|
||||
and hardened CI/deploy workflows with npm registry signature verification,
|
||||
disabled persisted checkout credentials in write-permission jobs, and pinned
|
||||
@@ -72,6 +80,115 @@ As of 2026-05-13:
|
||||
and made `/ecc-tools followups sync-linear` track copy-ready PR drafts in
|
||||
the Linear/project backlog when `open-pr-drafts` is not used, preserving
|
||||
useful stale-PR salvage work without opening extra PR shells.
|
||||
- ECC-Tools PR #55 merged as `5d8c112cce4794cfa089d5b0ea661ba87a178be1`
|
||||
and added analysis-depth readiness to `/ecc-tools analyze` comments,
|
||||
separating commit-history-only repos from evidence-backed and deep-ready repos
|
||||
using CI/CD, security, harness, reference/eval, AI routing/cost-control, and
|
||||
team handoff evidence.
|
||||
- ECC-Tools PR #56 merged as `5b729c88641eafe80f65364bab3fc74d0270f57b`
|
||||
and added the authenticated `/api/analysis/depth-plan` contract that maps
|
||||
analysis-depth readiness into concrete hosted jobs for CI diagnostics,
|
||||
security evidence review, harness compatibility, reference-set evaluation,
|
||||
AI routing/cost review, and team backlog routing.
|
||||
- ECC-Tools PR #57 merged as `4cc61112a4cc9feec7b07af09321f360e34af6a4`
|
||||
and added the first executable hosted analysis job:
|
||||
`/api/analysis/jobs/ci-diagnostics` now gates on CI/CD readiness, inspects
|
||||
workflow/test-runner/failure-evidence artifacts, returns CI hardening
|
||||
findings and next actions, and charges usage only after successful execution.
|
||||
- ECC-Tools PR #58 merged as `ce09dd8d9b46f65c6b88dc4f48cfb6b6227ae0bf`
|
||||
and added the second executable hosted analysis job:
|
||||
`/api/analysis/jobs/security-evidence-review` now gates on security-evidence
|
||||
readiness, inspects capped AgentShield evidence-pack, policy, baseline,
|
||||
SBOM, SARIF, and security-scan artifacts, returns supply-chain evidence
|
||||
findings and next actions, and charges usage only after successful execution.
|
||||
- ECC-Tools PR #59 merged as `505b372dbd8f75f996d9e2ed079effd30cec5ba5`
|
||||
and added the third executable hosted analysis job:
|
||||
`/api/analysis/jobs/harness-compatibility-audit` now gates on harness-config
|
||||
readiness, inspects capped Claude, Codex, OpenCode, MCP, plugin, and
|
||||
cross-harness documentation artifacts, excludes local secret-bearing config
|
||||
paths from fetches, returns portability findings and next actions, and
|
||||
charges usage only after successful execution.
|
||||
- ECC-Tools PR #60 merged as `b75e0a49ba5672b1ec9a2a4880ddcfa2d07dc557`
|
||||
and added the fourth executable hosted analysis job:
|
||||
`/api/analysis/jobs/reference-set-evaluation` now gates on reference-evidence
|
||||
readiness, evaluates analyzer corpus, RAG/evaluator, PR salvage/review,
|
||||
harness, security, and CI failure-mode evidence, excludes obvious
|
||||
secret-bearing fixture paths from fetches, returns reference coverage
|
||||
findings and next actions, and charges usage only after successful execution.
|
||||
- ECC-Tools PR #61 merged as `7b01b67cae0b80774b311cb515b7eca0aa038c65`
|
||||
and added the fifth executable hosted analysis job:
|
||||
`/api/analysis/jobs/ai-routing-cost-review` now gates on AI routing/cost
|
||||
readiness, evaluates model routing, token budget, usage-limit, rate-limit,
|
||||
billing/entitlement, cost-regression, and cost-policy evidence, excludes
|
||||
obvious secret-bearing paths from fetches, returns cost-control findings and
|
||||
next actions, and charges usage only after successful execution.
|
||||
- ECC-Tools PR #62 merged as `781d6733e56f7556edb43fb96bdfb00b1f0a3aa6`
|
||||
and added the sixth executable hosted analysis job:
|
||||
`/api/analysis/jobs/team-backlog-routing` now gates on team handoff/project
|
||||
tracking readiness, evaluates roadmap, runbook, handoff, release-plan,
|
||||
issue-template, ownership, project-tracker, backlog, and follow-up evidence,
|
||||
excludes obvious secret-bearing paths from fetches, returns team-routing
|
||||
findings and next actions, and charges usage only after successful execution.
|
||||
- ECC-Tools PR #63 merged as `fb9e4c5ceb9ccde50da74c7a69c3fa4bd321fc07`
|
||||
and made the hosted execution plan operator-visible on queued PR analysis:
|
||||
the queue now publishes a non-blocking `ECC Tools / Hosted Depth Plan`
|
||||
check-run on the PR head SHA with ready/blocked hosted executor commands
|
||||
and next action text, while keeping check-run publication best-effort so
|
||||
bundle generation and analysis comments are not blocked.
|
||||
- ECC-Tools PR #64 merged as `72020ef94db94840812977ea7ac37e9344036668`
|
||||
and added PR-facing hosted job dispatch controls:
|
||||
`/ecc-tools analyze --job ...` comments now queue hosted jobs against the
|
||||
PR head SHA, execute them through the existing hosted readiness/evidence
|
||||
gates, post artifacts/findings/next actions back to the PR, and scope
|
||||
idempotency keys by job id so hosted jobs do not collide with bundle
|
||||
analysis.
|
||||
- ECC-Tools PR #65 merged as `bacd4adf6a3a629e8d403865456d15f127baaf4e`
|
||||
and added hosted job result history/check-run summaries:
|
||||
queued hosted jobs now cache both the latest result and immutable run records
|
||||
for completed or blocked runs, then publish a non-blocking per-job check-run
|
||||
on the PR head SHA with artifacts, findings, readiness blockers, and next
|
||||
actions.
|
||||
- ECC-Tools PR #66 merged as `4e1db48252d068ea5dcf4308b0bc11b0dfe0c9ce`
|
||||
and added a read-only hosted status command:
|
||||
`/ecc-tools analyze --job status` now reads the #65 latest-result cache for
|
||||
the current PR head and posts a compact completed/blocked/not-run table with
|
||||
the next hosted job command, without queueing work or billing usage.
|
||||
- ECC-Tools PR #67 merged as `f20e6bec2b0bf49e4cc36e08b7285c795973b73d`
|
||||
and made the hosted depth-plan check-run status-aware:
|
||||
queued PR analysis now reads the #65/#66 latest-result cache when publishing
|
||||
`ECC Tools / Hosted Depth Plan`, includes the latest hosted run status in
|
||||
the plan table, and recommends the next unrun ready job before reruns.
|
||||
- ECC-Tools PR #68 merged as `2cde524b5ef8f34ab7bb1af973248fe4be4359f8`
|
||||
and added deterministic hosted promotion readiness:
|
||||
opened/synchronized PRs now publish a non-blocking
|
||||
`ECC Tools / Hosted Promotion Readiness` check-run that compares changed
|
||||
files against the checked-in evaluator/RAG corpus, warns on missing
|
||||
hosted-job promotion evidence, and can be disabled with
|
||||
`PR_HOSTED_PROMOTION_READINESS_CHECK_MODE=off`.
|
||||
- ECC-Tools PR #69 merged as `d0112dac7cef807ae27def41f057682ef0772cce`
|
||||
and extended hosted promotion readiness with deterministic output scoring:
|
||||
the check now reads cached completed hosted job results for the current PR
|
||||
head, scores their artifacts and findings against evaluator/RAG corpus
|
||||
expectations, and treats matching hosted artifacts as promotion evidence
|
||||
before reporting a gap.
|
||||
- ECC-Tools PR #70 merged as `7001d805ac981fe220b4575159f469fbea9dbb76`
|
||||
and added retrieval planning for hosted promotion:
|
||||
the check now emits ranked retrieval candidates from cached hosted artifacts,
|
||||
hosted findings, expected evidence paths, and changed source paths, plus a
|
||||
model prompt seed that tells the later hosted judge not to promote from
|
||||
changed paths alone.
|
||||
- ECC-Tools PR #71 merged as `d41e59ff00fe1bd0b0c96386e56bc5269d7b9c15`
|
||||
and added the first model-backed hosted promotion judge contract:
|
||||
the check now emits a provider-neutral `hosted-promotion-judge.v1` request
|
||||
contract and fails closed unless hosted retrieval evidence, entitlement,
|
||||
remaining budget, and provider configuration are present. It still does not
|
||||
make live model calls.
|
||||
- ECC-Tools PR #72 merged as `973bc51e5436dd279ae5a890cce9811485eef0b5`
|
||||
and executes the hosted promotion model judge behind explicit gates:
|
||||
`PR_HOSTED_PROMOTION_MODEL_JUDGE_MODE=execute` now calls the configured
|
||||
provider only after hosted retrieval evidence, entitlement, budget, provider,
|
||||
and executor gates pass; the check remains non-blocking, strict-JSON-only,
|
||||
and rejects uncited or non-hosted model output without echoing raw responses.
|
||||
- Handoff `ecc-supply-chain-audit-20260513-0645.md` under
|
||||
`~/.cluster-swarm/handoffs/`
|
||||
records the May 13 supply-chain sweep: no active lockfile/manifest hit for
|
||||
@@ -255,6 +372,66 @@ As of 2026-05-13:
|
||||
artifact contract so canonical bundle files now satisfy the taxonomy and
|
||||
generated follow-up PRs point maintainers at
|
||||
`agentshield scan --evidence-pack <dir>`.
|
||||
- ECC-Tools PR #55 added the first hosted/deeper-analysis readiness signal:
|
||||
analysis comments now classify a repo as commit-history-only,
|
||||
evidence-backed, or deep-ready before routing work into CI, AgentShield,
|
||||
harness, reference-set, RAG/evaluator, AI-routing, cost-control, and
|
||||
Linear/project-tracking lanes.
|
||||
- ECC-Tools PR #56 turned that signal into a hosted execution-plan contract:
|
||||
`/api/analysis/depth-plan` returns ready/blocked jobs and next action text
|
||||
without charging analysis usage or creating bundle PRs.
|
||||
- ECC-Tools PR #57 implemented the first job-specific hosted executor:
|
||||
`/api/analysis/jobs/ci-diagnostics` reuses the depth-readiness gate, internal
|
||||
API auth, installation ownership, repo-access billing checks, capped workflow
|
||||
file reads, and usage accounting to return concrete CI hardening findings.
|
||||
- ECC-Tools PR #58 implemented the second job-specific hosted executor:
|
||||
`/api/analysis/jobs/security-evidence-review` applies the same hosted gates
|
||||
to AgentShield evidence-pack, policy, baseline, SBOM, SARIF, and security
|
||||
scanner artifacts.
|
||||
- ECC-Tools PR #59 implemented the third job-specific hosted executor:
|
||||
`/api/analysis/jobs/harness-compatibility-audit` applies the same hosted
|
||||
gates to Claude, Codex, OpenCode, MCP, plugin, and cross-harness evidence
|
||||
while avoiding local secret-bearing harness config fetches.
|
||||
- ECC-Tools PR #60 implemented the fourth job-specific hosted executor:
|
||||
`/api/analysis/jobs/reference-set-evaluation` applies the same hosted gates
|
||||
to analyzer corpus, RAG/evaluator, PR salvage, harness, security, and CI
|
||||
failure-mode reference evidence while avoiding obvious secret-bearing fixture
|
||||
fetches.
|
||||
- ECC-Tools PR #61 implemented the fifth job-specific hosted executor:
|
||||
`/api/analysis/jobs/ai-routing-cost-review` applies the same hosted gates to
|
||||
model-routing, token-budget, usage-limit, rate-limit, billing/entitlement,
|
||||
cost-regression, and cost-policy evidence while avoiding obvious
|
||||
secret-bearing path fetches.
|
||||
- ECC-Tools PR #62 implemented the sixth job-specific hosted executor:
|
||||
`/api/analysis/jobs/team-backlog-routing` applies the same hosted gates to
|
||||
roadmap, runbook, handoff, release-plan, issue-template, ownership,
|
||||
project-tracker, backlog, and follow-up evidence while avoiding obvious
|
||||
secret-bearing path fetches.
|
||||
- ECC-Tools PR #63 publishes the hosted depth-plan check-run after queued PR
|
||||
analysis completes, making the six hosted executor commands visible on the
|
||||
PR head SHA without turning the check into a merge blocker.
|
||||
- ECC-Tools PR #64 wires those commands into the queue: maintainers can comment
|
||||
`/ecc-tools analyze --job ci-diagnostics`, `security-evidence`,
|
||||
`harness-compatibility`, `reference-set-evaluation`, `ai-routing-cost`, or
|
||||
`team-backlog` on a PR and receive hosted job results in a PR comment.
|
||||
- ECC-Tools PR #65 persists completed and blocked hosted job results to the
|
||||
analysis cache for 30 days and publishes non-blocking `ECC Tools / Hosted
|
||||
Job: ...` check-runs so maintainers can scan hosted outcomes from the PR
|
||||
checks surface instead of rereading older comments.
|
||||
- ECC-Tools PR #66 exposes the cached results from PR comments with
|
||||
`/ecc-tools analyze --job status`, summarizing completed, blocked, and
|
||||
not-yet-run hosted jobs for the PR head and recommending the next hosted job
|
||||
command.
|
||||
- ECC-Tools PR #67 feeds those cached results back into the hosted depth-plan
|
||||
check-run so queued analysis recommends the next unrun ready hosted job from
|
||||
cache state instead of repeating the static readiness order.
|
||||
- ECC-Tools PR #68 adds the first evaluator-backed hosted promotion gate:
|
||||
opened/synchronized PRs get a non-blocking Hosted Promotion Readiness
|
||||
check-run that turns the evaluator/RAG corpus into warnings when changed
|
||||
files match fixture scenarios without their expected evidence artifacts.
|
||||
- ECC-Tools PR #69 extends that gate to score cached completed hosted job
|
||||
outputs for the current PR head, so hosted artifacts can satisfy corpus
|
||||
evidence expectations before the check reports a promotion gap.
|
||||
- ECC PR #1803 landed the contributor Quarkus handling branch after maintainer
|
||||
cleanup, current-`main` alignment, full local validation, and preservation of
|
||||
the author's removal of incomplete ja-JP and zh-CN Quarkus translations.
|
||||
@@ -307,11 +484,11 @@ is not complete unless the evidence column exists and has been freshly verified.
|
||||
| Naming and rename readiness | Naming matrix across package/plugin/docs/social surfaces | `docs/releases/2.0.0-rc.1/naming-and-publication-matrix.md` records current package, repo, Claude plugin, Codex plugin, OpenCode, and npm availability evidence | Complete for rc.1; post-rc rename remains future work |
|
||||
| Claude and Codex plugin publication | Contact/submission path with required artifacts and status | Publication readiness, naming matrix, and May 12 dry-run evidence document plugin validation, clean-checkout Claude tag/install smoke, and Codex marketplace CLI shape | Needs explicit approval for real tag/push and marketplace submission |
|
||||
| Articles, tweets, and announcements | X thread, LinkedIn copy, GitHub release copy, push checklist | Draft launch collateral exists under rc.1 release docs | Needs URL-backed refresh |
|
||||
| AgentShield enterprise iteration | Policy gates, SARIF, packs, provenance, corpus, HTML reports, exception lifecycle audit, baseline drift Action/CLI surfaces, evidence-pack redaction, harness adapter registry, enterprise research roadmap, supply-chain hardened release path, CI-safe baseline fingerprints, corpus accuracy recommendations | PRs #53, #55-#64, #67-#69, and #78-#80 landed with test evidence; native PDF export deferred in favor of self-contained HTML plus print-to-PDF until explicit enterprise demand appears; `docs/architecture/agentshield-enterprise-research-roadmap.md` now has baseline drift, evidence-pack bundle, redaction, adapter-registry, supply-chain hardening, hashed baseline fingerprints, and corpus accuracy recommendation slices landed | Next remediation workflow depth or corpus expansion |
|
||||
| ECC Tools next-level app | Billing audit, PR checks, deep analyzer, sync backlog, evaluator/RAG corpus | PRs #26-#43 plus #53/#54 landed with test evidence, including AgentShield evidence-pack gap routing, canonical bundle recognition, supply-chain signature gates, and PR draft follow-up Linear tracking | Needs hosted/deeper analysis follow-up |
|
||||
| AgentShield enterprise iteration | Policy gates, SARIF, packs, provenance, corpus, HTML reports, exception lifecycle audit, baseline drift Action/CLI surfaces, evidence-pack redaction, harness adapter registry, enterprise research roadmap, supply-chain hardened release path, CI-safe baseline fingerprints, corpus accuracy recommendations, remediation workflow phases, env proxy hijack corpus coverage | PRs #53, #55-#64, #67-#69, and #78-#82 landed with test evidence; native PDF export deferred in favor of self-contained HTML plus print-to-PDF until explicit enterprise demand appears; `docs/architecture/agentshield-enterprise-research-roadmap.md` now has baseline drift, evidence-pack bundle, redaction, adapter-registry, supply-chain hardening, hashed baseline fingerprints, corpus accuracy recommendation, remediation workflow, and env proxy hijack corpus slices landed | Next hosted evidence-pack workflow depth |
|
||||
| ECC Tools next-level app | Billing audit, PR checks, deep analyzer, sync backlog, evaluator/RAG corpus, analysis-depth readiness, hosted execution planning, hosted CI diagnostics, hosted security evidence review, hosted harness compatibility audit, hosted reference-set evaluation, hosted AI routing/cost review, hosted team backlog routing, hosted depth-plan check-run, PR-comment hosted job dispatch, hosted job result history/check-runs, hosted result status command, status-aware depth-plan recommendations, hosted promotion readiness, hosted promotion output scoring, hosted promotion retrieval planning, hosted promotion judge contract, gated hosted promotion judge execution | PRs #26-#43 plus #53-#72 landed with test evidence, including AgentShield evidence-pack gap routing, canonical bundle recognition, supply-chain signature gates, PR draft follow-up Linear tracking, evidence-backed/deep-ready repository classification, the `/api/analysis/depth-plan` hosted job plan, `/api/analysis/jobs/ci-diagnostics`, `/api/analysis/jobs/security-evidence-review`, `/api/analysis/jobs/harness-compatibility-audit`, `/api/analysis/jobs/reference-set-evaluation`, `/api/analysis/jobs/ai-routing-cost-review`, `/api/analysis/jobs/team-backlog-routing`, the `ECC Tools / Hosted Depth Plan` check-run, `/ecc-tools analyze --job ...` PR-comment dispatch, non-blocking per-hosted-job result check-runs backed by 30-day result cache records, `/ecc-tools analyze --job status` cache lookup, cache-aware next-job recommendations in the depth-plan check-run, the `ECC Tools / Hosted Promotion Readiness` corpus-backed PR check-run, deterministic hosted-output scoring against cached completed job artifacts/findings, ranked retrieval/model-prompt planning, the fail-closed `hosted-promotion-judge.v1` request contract, and opt-in live model-judge execution behind hosted evidence, entitlement, budget, provider, executor, strict JSON, and citation gates | Next work is hosted promotion telemetry and 
operator review UX |
|
||||
| GitGuardian/Dependabot/CodeRabbit-style checks | Non-blocking taxonomy, deterministic follow-up checks, and local supply-chain gates | ECC-Tools risk taxonomy check plus follow-up signals landed, including Skill Quality, Deep Analyzer Evidence, Analyzer Corpus Evidence, RAG/Evaluator Evidence, PR Review/Salvage Evidence, and AgentShield evidence-pack evidence; #1846 added npm registry signature gates; #1848 added the supply-chain incident-response playbook and `pull_request_target` cache-poisoning validator guard; #1851 added the privileged checkout credential-persistence guard; AgentShield #78, JARVIS #13, and ECC-Tools #53 applied the same hardening outside trunk | Current supply-chain gate complete; deeper hosted review features remain future |
|
||||
| Harness-agnostic learning system | Audit, adapter matrix, observability, traces, promotion loop | Audit/adapters/observability gates plus `docs/architecture/evaluator-rag-prototype.md`, `examples/evaluator-rag-prototype/`, and ECC-Tools PR #40 define read-only stale-salvage, billing-readiness, CI-failure-diagnosis, harness-config-quality, AgentShield policy-exception, skill-quality evidence, deep-analyzer evidence, and RAG/evaluator comparison scenarios with trace, report, playbook, verifier, and predictive-check artifacts | Local corpus complete; hosted integration remains future |
|
||||
| Linear roadmap is detailed | Linear project status plus repo mirror | Repo mirror exists; issue creation was retried on 2026-05-12 and remains blocked by the workspace free issue limit; this May 13 sync adds ECC #1860, AgentShield #78/#79, JARVIS #13, ECC-Tools #53/#54, resolved queue/discussion counts, and Linear project status updates `59f630eb`/`c7ea6daf` | Needs recurring status updates after each merge batch |
|
||||
| Harness-agnostic learning system | Audit, adapter matrix, observability, traces, promotion loop | Audit/adapters/observability gates plus `docs/architecture/evaluator-rag-prototype.md`, `examples/evaluator-rag-prototype/`, and ECC-Tools PR #40 define read-only stale-salvage, billing-readiness, CI-failure-diagnosis, harness-config-quality, AgentShield policy-exception, skill-quality evidence, deep-analyzer evidence, and RAG/evaluator comparison scenarios with trace, report, playbook, verifier, and predictive-check artifacts; ECC-Tools PRs #68-#72 now turn that corpus into a deterministic PR check-run gate with cached hosted-output scoring, ranked retrieval candidates, a model prompt seed, a fail-closed hosted model-judge request contract, and opt-in live model execution behind strict hosted-evidence gates | Deterministic hosted PR check, cached output scoring, retrieval planning, judge contract, and gated model execution integrated |
|
||||
| Linear roadmap is detailed | Linear project status plus repo mirror | Repo mirror exists; issue creation was retried on 2026-05-12 and remains blocked by the workspace free issue limit; this May 13 sync adds ECC #1860, AgentShield #78-#82, JARVIS #13, ECC-Tools #53-#72, resolved queue/discussion counts, and notes that Linear connector status updates after ECC-Tools #68 remain blocked by a connector secret-owner error | Needs recurring status updates after connector recovery |
|
||||
| Flow separation and progress tracking | Flow lanes with owner artifacts and update cadence | This roadmap defines lanes below and `docs/architecture/progress-sync-contract.md` makes GitHub/Linear/handoff/roadmap sync part of the readiness gate | Active |
|
||||
| Realtime Linear sync | Project updates while issue limit is blocked; issues later | ECC-Tools #39 implements opt-in Linear API sync for deferred follow-up backlog items, and ECC-Tools #54 adds copy-ready PR drafts to that backlog when draft PR shells are not opened; `docs/architecture/progress-sync-contract.md` defines the local file-backed realtime boundary while issue capacity is blocked | Needs workspace capacity/config rollout |
|
||||
| Observability for self-use | Local readiness gate, traces, status snapshots, HUD/status contract, risk ledger, progress-sync contract | `npm run observability:ready` reports 21/21 | Complete for local gate |
|
||||
@@ -330,9 +507,9 @@ repo evidence and merge commits.
|
||||
| Queue hygiene and salvage | GitHub PR/issue state, salvage ledger | Append ledger entries for any future stale closures | Every cleanup batch |
|
||||
| Release and publication | rc.1 release docs, publication readiness doc | Naming matrix and plugin submission/contact checklist | Before any tag |
|
||||
| Harness OS core | Audit, adapter matrix, observability docs, `ecc2/` | HUD/session-control acceptance spec | Weekly until GA |
|
||||
| Evaluation and RAG | Reference-set validation, harness audit, traces, ECC-Tools corpus | Read-only evaluator/RAG prototype plus stale-salvage, billing-readiness, CI-failure-diagnosis, harness-config-quality, AgentShield policy-exception, skill-quality evidence, deep-analyzer evidence, and RAG/evaluator comparison fixtures | Hosted retrieval/check-run automation plan |
|
||||
| Evaluation and RAG | Reference-set validation, harness audit, traces, ECC-Tools corpus | Read-only evaluator/RAG prototype plus stale-salvage, billing-readiness, CI-failure-diagnosis, harness-config-quality, AgentShield policy-exception, skill-quality evidence, deep-analyzer evidence, and RAG/evaluator comparison fixtures; ECC-Tools #68 publishes the corpus as a hosted promotion readiness check-run, #69 scores cached hosted job outputs against the same corpus, #70 emits ranked retrieval candidates plus a model prompt seed, #71 adds a fail-closed hosted model-judge request contract, and #72 executes that judge only when explicitly enabled and backed by hosted retrieval citations | Hosted promotion telemetry and operator review UX |
|
||||
| AgentShield enterprise | AgentShield PR evidence and roadmap notes | Remediation workflow depth or corpus expansion follow-up | Next implementation batch |
|
||||
| ECC Tools app | ECC-Tools PR evidence, billing audit, risk taxonomy, evaluator/RAG corpus | ECC-Tools #53 published the supply-chain workflow hardening branch and #54 tracks copy-ready PR drafts in the Linear/project backlog; next work is hosted/deeper analysis follow-up | Next implementation batch |
|
||||
| ECC Tools app | ECC-Tools PR evidence, billing audit, risk taxonomy, evaluator/RAG corpus | ECC-Tools #53 published the supply-chain workflow hardening branch, #54 tracks copy-ready PR drafts in the Linear/project backlog, #55 classifies analysis-depth readiness, #56 exposes the hosted execution plan, #57 executes the first hosted CI diagnostics job, #58 executes the hosted security evidence review job, #59 executes the hosted harness compatibility audit, #60 executes the hosted reference-set evaluation, #61 executes the hosted AI routing/cost review, #62 executes hosted team backlog routing, #63 publishes the hosted depth-plan check-run, #64 dispatches hosted jobs from PR comments, #65 persists hosted result history/check-runs, #66 exposes hosted job status from PR comments, #67 makes depth-plan recommendations cache-aware, #68 publishes hosted promotion readiness from the evaluator/RAG corpus, #69 scores cached hosted job outputs against that corpus, #70 emits ranked retrieval candidates plus a model prompt seed, #71 emits the gated `hosted-promotion-judge.v1` contract without live model calls, and #72 adds opt-in live model-judge execution behind hosted-evidence and strict JSON/citation gates | Next implementation batch |
|
||||
| Linear progress | Linear project status updates, `docs/architecture/progress-sync-contract.md`, and this mirror | Status update with queue/evidence/missing gates | Every significant merge batch |
|
||||
|
||||
The project status update should always include:
|
||||
@@ -545,16 +722,16 @@ Acceptance:
|
||||
supply-chain incident class; PR #79 moved baseline/watch/remediation
|
||||
fingerprints to hashed evidence and stopped writing raw evidence into new
|
||||
baselines; PR #80 added prioritized corpus accuracy recommendations for
|
||||
failed regression gates; and ECC-Tools PRs #42/#43 now route and recognize
|
||||
evidence packs. The next slice is remediation workflow depth or corpus
|
||||
expansion.
|
||||
2. Keep ECC-Tools #53's supply-chain workflow gate and #54's PR-draft backlog
|
||||
tracking in the recurring queue evidence, and use the org-scoped GitHub auth
|
||||
path for future ECC-Tools maintenance while the narrow environment token
|
||||
remains active.
|
||||
failed regression gates; PR #81 added ordered remediation workflow phases;
|
||||
PR #82 expanded corpus coverage for env proxy hijacks and out-of-band
|
||||
exfiltration; and ECC-Tools PRs #42/#43 now route and recognize evidence
|
||||
packs. The next slice is hosted evidence-pack workflow depth.
|
||||
2. Add hosted promotion telemetry and operator review UX on top of the #72
|
||||
gated model execution path so live judgments can be audited before any
|
||||
promotion policy becomes enforceable.
|
||||
3. Enable/configure the merged Linear backlog sync path after workspace issue
|
||||
capacity clears or the Linear workspace is upgraded, then verify PR-draft
|
||||
salvage items land in the expected project.
|
||||
4. Use the ECC-Tools evaluator/RAG corpus as the promotion gate before adding
|
||||
hosted retrieval, vector storage, model-backed judging, or automated
|
||||
hosted retrieval, vector storage, live model-backed judging, or automated
|
||||
check-run promotion.
|
||||
|
||||
@@ -122,12 +122,12 @@
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
|
||||
# 共通ルールをインストール(必須)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
|
||||
# 言語固有ルールをインストール(スタックを選択)
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/python
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/golang
|
||||
```
|
||||
|
||||
### ステップ3:使用開始
|
||||
@@ -462,15 +462,15 @@ Duplicate hook file detected: ./hooks/hooks.json is already resolved to a loaded
|
||||
>
|
||||
> # オプション A:ユーザーレベルルール(すべてのプロジェクトに適用)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # スタックを選択
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # スタックを選択
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/python
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/golang
|
||||
>
|
||||
> # オプション B:プロジェクトレベルルール(現在のプロジェクトのみ)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # スタックを選択
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/common
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/typescript # スタックを選択
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -487,10 +487,10 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# ルール(共通 + 言語固有)をコピー
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # スタックを選択
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # スタックを選択
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/python
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/golang
|
||||
|
||||
# コマンドをコピー
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
@@ -169,13 +169,13 @@ Options:
|
||||
|
||||
インストールを実行:
|
||||
```bash
|
||||
# 共通ルール(rules/ にフラットコピー)
|
||||
cp -r $ECC_ROOT/rules/common/* $TARGET/rules/
|
||||
# 共通ルール
|
||||
cp -r $ECC_ROOT/rules/common $TARGET/rules/common
|
||||
|
||||
# 言語固有のルール(rules/ にフラットコピー)
|
||||
cp -r $ECC_ROOT/rules/typescript/* $TARGET/rules/ # 選択された場合
|
||||
cp -r $ECC_ROOT/rules/python/* $TARGET/rules/ # 選択された場合
|
||||
cp -r $ECC_ROOT/rules/golang/* $TARGET/rules/ # 選択された場合
|
||||
# 言語固有のルール(言語別ディレクトリを保持)
|
||||
cp -r $ECC_ROOT/rules/typescript $TARGET/rules/typescript # 選択された場合
|
||||
cp -r $ECC_ROOT/rules/python $TARGET/rules/python # 選択された場合
|
||||
cp -r $ECC_ROOT/rules/golang $TARGET/rules/golang # 選択された場合
|
||||
```
|
||||
|
||||
**重要**: ユーザーが言語固有のルールを選択したが、共通ルールを選択しなかった場合、警告します:
|
||||
|
||||
@@ -387,12 +387,12 @@ Claude Code v2.1+는 설치된 플러그인의 `hooks/hooks.json`을 **자동으
|
||||
>
|
||||
> # 옵션 A: 사용자 레벨 룰 (모든 프로젝트에 적용)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 사용하는 스택 선택
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # 사용하는 스택 선택
|
||||
>
|
||||
> # 옵션 B: 프로젝트 레벨 룰 (현재 프로젝트에만 적용)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/common
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -409,8 +409,8 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# 룰 복사 (common + 언어별)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 사용하는 스택 선택
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # 사용하는 스택 선택
|
||||
|
||||
# 커맨드 복사
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -573,7 +573,7 @@ MCP 서버가 너무 많으면 컨텍스트를 잡아먹습니다. 각 MCP 도
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# 룰만
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
```
|
||||
|
||||
각 컴포넌트는 완전히 독립적입니다.
|
||||
|
||||
@@ -342,12 +342,12 @@ Ou adicione diretamente ao seu `~/.claude/settings.json`:
|
||||
>
|
||||
> # Opção A: Regras no nível do usuário (aplica a todos os projetos)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # escolha sua stack
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # escolha sua stack
|
||||
>
|
||||
> # Opção B: Regras no nível do projeto (aplica apenas ao projeto atual)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/common
|
||||
> ```
|
||||
|
||||
---
|
||||
@@ -362,8 +362,8 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Copiar regras (comuns + específicas da linguagem)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript
|
||||
|
||||
# Copiar comandos
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
@@ -7,16 +7,24 @@ they do not prove that the workflow executed the intended code path.
|
||||
|
||||
## Current External Trigger
|
||||
|
||||
As of 2026-05-13, the active incident class is the May 2026 TanStack npm
|
||||
supply-chain compromise. ECC also keeps Mini Shai-Hulud-style npm worm IOCs in
|
||||
the same release-safety sweep because both incident classes target package
|
||||
install/publish paths and developer credentials:
|
||||
As of 2026-05-15, the active incident class is the May 2026 TanStack npm
|
||||
supply-chain compromise and broader Mini Shai-Hulud campaign. ECC keeps the
|
||||
same IOC sweep for the related npm/PyPI waves because these incidents target
|
||||
package install/publish paths, AI developer-tool configs, and developer
|
||||
credentials:
|
||||
|
||||
- TanStack reported 84 malicious versions across 42 `@tanstack/*` packages,
|
||||
published on 2026-05-11 between 19:20 and 19:26 UTC.
|
||||
- GitHub advisory `GHSA-g7cv-rxg3-hmpx` / `CVE-2026-45321` describes
|
||||
install-time malware that harvests cloud credentials, GitHub tokens, npm
|
||||
credentials, Vault tokens, Kubernetes tokens, and SSH private keys.
|
||||
- Follow-on reporting from StepSecurity, Socket, Aikido, and Wiz describes the
|
||||
same campaign expanding into packages associated with Mistral AI, UiPath,
|
||||
OpenSearch, Guardrails AI, Squawk, and other npm/PyPI packages.
|
||||
- The live IOC set includes persistence through Claude Code
|
||||
`.claude/settings.json`, VS Code `.vscode/tasks.json`, and OS-level
|
||||
`gh-token-monitor` LaunchAgent/systemd services. Remove those persistence
|
||||
hooks before rotating a stolen GitHub token.
|
||||
- The attack chain combined `pull_request_target`, GitHub Actions cache
|
||||
poisoning across a fork/base trust boundary, and OIDC token extraction from a
|
||||
GitHub Actions runner.
|
||||
@@ -38,8 +46,8 @@ Run this before a release candidate, after a broad dependency bump, and after
|
||||
any package-registry incident.
|
||||
|
||||
```bash
|
||||
rg -n '(@tanstack|mistralai|uipath|opensearch|guardrails|axios)' \
|
||||
package.json package-lock.json .opencode/package.json .opencode/package-lock.json
|
||||
npm run security:ioc-scan
|
||||
node scripts/ci/scan-supply-chain-iocs.js --home
|
||||
npm ci --ignore-scripts
|
||||
npm audit signatures
|
||||
npm audit --audit-level=high
|
||||
@@ -63,16 +71,23 @@ If ECC or a maintainer machine installed a known-bad package version:
|
||||
- npm package versions and tarball integrity hashes;
|
||||
- outbound network logs where available.
|
||||
3. Treat the install host as compromised if lifecycle scripts may have run.
|
||||
4. Rotate every credential reachable by the process:
|
||||
4. Remove persistence hooks before token revocation:
|
||||
- `~/.claude/settings.json` `SessionStart` hooks and adjacent
|
||||
`router_runtime.js` / `setup.mjs` payload files;
|
||||
- `.vscode/tasks.json` folder-open tasks and adjacent payload files;
|
||||
- `~/Library/LaunchAgents/com.user.gh-token-monitor.plist`;
|
||||
- `~/.config/systemd/user/gh-token-monitor.service`;
|
||||
- `~/.local/bin/gh-token-monitor.sh`.
|
||||
5. Rotate every credential reachable by the process:
|
||||
- npm automation tokens and maintainer tokens;
|
||||
- GitHub PATs, fine-grained tokens, deploy keys, and Actions secrets;
|
||||
- cloud credentials, Vault tokens, Kubernetes service-account tokens, SSH
|
||||
keys, and local `.npmrc` tokens;
|
||||
- any MCP, plugin, or harness credentials available in environment variables
|
||||
or user-scope config.
|
||||
5. Purge GitHub Actions caches for affected repositories.
|
||||
6. Reinstall from a clean environment with `npm ci --ignore-scripts` first.
|
||||
7. Re-enable lifecycle scripts only after the dependency tree and package
|
||||
6. Purge GitHub Actions caches for affected repositories.
|
||||
7. Reinstall from a clean environment with `npm ci --ignore-scripts` first.
|
||||
8. Re-enable lifecycle scripts only after the dependency tree and package
|
||||
versions are pinned to known-clean releases.
|
||||
|
||||
## GitHub Actions Rules
|
||||
@@ -108,6 +123,8 @@ Before tagging or publishing ECC:
|
||||
Escalate to a maintainer security review before any release or merge if:
|
||||
|
||||
- a dependency lockfile references a package named in an active advisory;
|
||||
- `node scripts/ci/scan-supply-chain-iocs.js --home` finds Claude Code,
|
||||
VS Code, or OS-level persistence indicators;
|
||||
- a workflow combines `pull_request_target` with dependency installation,
|
||||
cache restore/save, PR-head checkout, or write permissions;
|
||||
- a release workflow combines `id-token: write` with shared cache usage;
|
||||
|
||||
@@ -390,7 +390,7 @@ Evet. Seçenek 2'yi (manuel kurulum) kullanın ve yalnızca ihtiyacınız olanı
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Sadece rule'lar
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
```
|
||||
|
||||
Her component tamamen bağımsızdır.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — 智能体指令
|
||||
|
||||
这是一个**生产就绪的 AI 编码插件**,提供 60 个专业代理、228 项技能、75 条命令以及自动化钩子工作流,用于软件开发。
|
||||
这是一个**生产就绪的 AI 编码插件**,提供 60 个专业代理、229 项技能、75 条命令以及自动化钩子工作流,用于软件开发。
|
||||
|
||||
**版本:** 2.0.0-rc.1
|
||||
|
||||
@@ -147,7 +147,7 @@
|
||||
|
||||
```
|
||||
agents/ — 60 个专业子代理
|
||||
skills/ — 228 个工作流技能和领域知识
|
||||
skills/ — 229 个工作流技能和领域知识
|
||||
commands/ — 75 个斜杠命令
|
||||
hooks/ — 基于触发的自动化
|
||||
rules/ — 始终遵循的指导方针(通用 + 每种语言)
|
||||
|
||||
@@ -224,7 +224,7 @@ Copy-Item -Recurse rules/typescript "$HOME/.claude/rules/"
|
||||
/plugin list ecc@ecc
|
||||
```
|
||||
|
||||
**搞定!** 你现在可以使用 60 个智能体、228 项技能和 75 个命令了。
|
||||
**搞定!** 你现在可以使用 60 个智能体、229 项技能和 75 个命令了。
|
||||
|
||||
***
|
||||
|
||||
@@ -637,16 +637,16 @@ Claude Code v2.1+ **会自动加载** 任何已安装插件中的 `hooks/hooks.j
|
||||
>
|
||||
> # 选项 A:用户级规则(适用于所有项目)
|
||||
> mkdir -p ~/.claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # 选择您的技术栈
|
||||
> cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
> cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
> cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # 选择您的技术栈
|
||||
> cp -r everything-claude-code/rules/python ~/.claude/rules/python
|
||||
> cp -r everything-claude-code/rules/golang ~/.claude/rules/golang
|
||||
> cp -r everything-claude-code/rules/php ~/.claude/rules/php
|
||||
>
|
||||
> # 选项 B:项目级规则(仅适用于当前项目)
|
||||
> mkdir -p .claude/rules
|
||||
> cp -r everything-claude-code/rules/common/* .claude/rules/
|
||||
> cp -r everything-claude-code/rules/typescript/* .claude/rules/ # 选择您的技术栈
|
||||
> cp -r everything-claude-code/rules/common .claude/rules/common
|
||||
> cp -r everything-claude-code/rules/typescript .claude/rules/typescript # 选择您的技术栈
|
||||
> ```
|
||||
|
||||
***
|
||||
@@ -663,11 +663,11 @@ git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Copy rules (common + language-specific)
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/typescript/* ~/.claude/rules/ # pick your stack
|
||||
cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
cp -r everything-claude-code/rules/typescript ~/.claude/rules/typescript # pick your stack
|
||||
cp -r everything-claude-code/rules/python ~/.claude/rules/python
|
||||
cp -r everything-claude-code/rules/golang ~/.claude/rules/golang
|
||||
cp -r everything-claude-code/rules/php ~/.claude/rules/php
|
||||
|
||||
# Copy maintained commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
@@ -885,7 +885,7 @@ claude
|
||||
cp everything-claude-code/agents/*.md ~/.claude/agents/
|
||||
|
||||
# Just rules
|
||||
cp -r everything-claude-code/rules/common/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/common ~/.claude/rules/common
|
||||
```
|
||||
|
||||
每个组件都是完全独立的。
|
||||
@@ -1138,7 +1138,7 @@ opencode
|
||||
|---------|-------------|----------|--------|
|
||||
| 智能体 | PASS: 60 个 | PASS: 12 个 | **Claude Code 领先** |
|
||||
| 命令 | PASS: 75 个 | PASS: 35 个 | **Claude Code 领先** |
|
||||
| 技能 | PASS: 228 项 | PASS: 37 项 | **Claude Code 领先** |
|
||||
| 技能 | PASS: 229 项 | PASS: 37 项 | **Claude Code 领先** |
|
||||
| 钩子 | PASS: 8 种事件类型 | PASS: 11 种事件 | **OpenCode 更多!** |
|
||||
| 规则 | PASS: 29 条 | PASS: 13 条指令 | **Claude Code 领先** |
|
||||
| MCP 服务器 | PASS: 14 个 | PASS: 完整 | **完全对等** |
|
||||
@@ -1246,7 +1246,7 @@ ECC 是**第一个最大化利用每个主要 AI 编码工具的插件**。以
|
||||
|---------|------------|------------|-----------|----------|
|
||||
| **智能体** | 60 | 共享 (AGENTS.md) | 共享 (AGENTS.md) | 12 |
|
||||
| **命令** | 75 | 共享 | 基于指令 | 35 |
|
||||
| **技能** | 228 | 共享 | 10 (原生格式) | 37 |
|
||||
| **技能** | 229 | 共享 | 10 (原生格式) | 37 |
|
||||
| **钩子事件** | 8 种类型 | 15 种类型 | 暂无 | 11 种类型 |
|
||||
| **钩子脚本** | 20+ 个脚本 | 16 个脚本 (DRY 适配器) | N/A | 插件钩子 |
|
||||
| **规则** | 34 (通用 + 语言) | 34 (YAML 前页) | 基于指令 | 13 条指令 |
|
||||
|
||||
@@ -239,13 +239,13 @@ cp -R "${src%/}" "$TARGET/skills/$(basename "${src%/}")"
|
||||
执行安装:
|
||||
|
||||
```bash
|
||||
# Common rules (flat copy into rules/)
|
||||
cp -r $ECC_ROOT/rules/common/* $TARGET/rules/
|
||||
# Common rules
|
||||
cp -r $ECC_ROOT/rules/common $TARGET/rules/common
|
||||
|
||||
# Language-specific rules (flat copy into rules/)
|
||||
cp -r $ECC_ROOT/rules/typescript/* $TARGET/rules/ # if selected
|
||||
cp -r $ECC_ROOT/rules/python/* $TARGET/rules/ # if selected
|
||||
cp -r $ECC_ROOT/rules/golang/* $TARGET/rules/ # if selected
|
||||
# Language-specific rules (preserve per-language directories)
|
||||
cp -r $ECC_ROOT/rules/typescript $TARGET/rules/typescript # if selected
|
||||
cp -r $ECC_ROOT/rules/python $TARGET/rules/python # if selected
|
||||
cp -r $ECC_ROOT/rules/golang $TARGET/rules/golang # if selected
|
||||
```
|
||||
|
||||
**重要**:如果用户选择了任何特定语言的规则但**没有**选择通用规则,警告他们:
|
||||
|
||||
@@ -199,7 +199,8 @@
|
||||
"skills/database-migrations",
|
||||
"skills/jpa-patterns",
|
||||
"skills/mysql-patterns",
|
||||
"skills/postgres-patterns"
|
||||
"skills/postgres-patterns",
|
||||
"skills/prisma-patterns"
|
||||
],
|
||||
"targets": [
|
||||
"claude",
|
||||
|
||||
@@ -215,6 +215,7 @@
|
||||
"skills/perl-testing/",
|
||||
"skills/plankton-code-quality/",
|
||||
"skills/postgres-patterns/",
|
||||
"skills/prisma-patterns/",
|
||||
"skills/product-capability/",
|
||||
"skills/production-audit/",
|
||||
"skills/production-scheduling/",
|
||||
@@ -285,15 +286,19 @@
|
||||
"postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc typescript\\n Compat: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'",
|
||||
"catalog:check": "node scripts/ci/catalog.js --text",
|
||||
"catalog:sync": "node scripts/ci/catalog.js --write --text",
|
||||
"command-registry:generate": "node scripts/ci/generate-command-registry.js",
|
||||
"command-registry:write": "node scripts/ci/generate-command-registry.js --write",
|
||||
"command-registry:check": "node scripts/ci/generate-command-registry.js --check",
|
||||
"lint": "eslint . && markdownlint '**/*.md' --ignore node_modules",
|
||||
"harness:adapters": "node scripts/harness-adapter-compliance.js",
|
||||
"harness:audit": "node scripts/harness-audit.js",
|
||||
"observability:ready": "node scripts/observability-readiness.js",
|
||||
"security:ioc-scan": "node scripts/ci/scan-supply-chain-iocs.js",
|
||||
"claw": "node scripts/claw.js",
|
||||
"orchestrate:status": "node scripts/orchestration-status.js",
|
||||
"orchestrate:worker": "bash scripts/orchestrate-codex-worker.sh",
|
||||
"orchestrate:tmux": "node scripts/orchestrate-worktrees.js",
|
||||
"test": "node scripts/ci/check-unicode-safety.js && node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-install-manifests.js && node scripts/ci/validate-no-personal-paths.js && npm run catalog:check && node tests/run-all.js",
|
||||
"test": "node scripts/ci/check-unicode-safety.js && node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-install-manifests.js && node scripts/ci/validate-no-personal-paths.js && npm run catalog:check && npm run command-registry:check && node tests/run-all.js",
|
||||
"coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js",
|
||||
"build:opencode": "node scripts/build-opencode.js",
|
||||
"prepack": "npm run build:opencode",
|
||||
|
||||
@@ -15,7 +15,7 @@ paths:
|
||||
Configure project-local hooks to prefer binstubs and checked-in tooling:
|
||||
|
||||
- **RuboCop**: run `bundle exec rubocop -A <file>` or the project's safer formatter command after Ruby edits.
|
||||
- **Brakeman**: run `bundle exec brakeman --no-pager` after security-sensitive Rails changes.
|
||||
- **Brakeman**: run `bundle exec brakeman --no-progress` after security-sensitive Rails changes.
|
||||
- **Tests**: run the narrowest matching `bin/rails test ...` or `bundle exec rspec ...` command for touched files.
|
||||
- **Bundler audit**: run `bundle exec bundle-audit check --update` when `Gemfile` or `Gemfile.lock` changes and the project has bundler-audit installed.
|
||||
|
||||
@@ -29,7 +29,7 @@ Configure project-local hooks to prefer binstubs and checked-in tooling:
|
||||
|
||||
```bash
|
||||
bundle exec rubocop
|
||||
bundle exec brakeman --no-pager
|
||||
bundle exec brakeman --no-progress
|
||||
bin/rails test
|
||||
bundle exec rspec
|
||||
```
|
||||
|
||||
@@ -34,8 +34,8 @@ paths:
|
||||
- Run dependency checks when the lockfile changes:
|
||||
|
||||
```bash
|
||||
bundle audit check --update
|
||||
bundle exec brakeman --no-pager
|
||||
bundle exec bundle-audit check --update
|
||||
bundle exec brakeman --no-progress
|
||||
```
|
||||
|
||||
- Review new gems for maintainer activity, native extension risk, transitive dependencies, and whether the same behavior can be implemented with Rails core.
|
||||
|
||||
@@ -101,6 +101,19 @@ function parseReadmeExpectations(readmeContent) {
|
||||
{ category: 'commands', mode: 'exact', expected: Number(quickStartMatch[3]), source: 'README.md quick-start summary' }
|
||||
);
|
||||
|
||||
const releaseNoteMatch = readmeContent.match(
|
||||
/actual OSS surface:\s+(\d+)\s+agents,\s+(\d+)\s+skills,\s+and\s+(\d+)\s+legacy command shims/i
|
||||
);
|
||||
if (!releaseNoteMatch) {
|
||||
throw new Error('README.md is missing the rc.1 release-note catalog summary');
|
||||
}
|
||||
|
||||
expectations.push(
|
||||
{ category: 'agents', mode: 'exact', expected: Number(releaseNoteMatch[1]), source: 'README.md rc.1 release-note summary' },
|
||||
{ category: 'skills', mode: 'exact', expected: Number(releaseNoteMatch[2]), source: 'README.md rc.1 release-note summary' },
|
||||
{ category: 'commands', mode: 'exact', expected: Number(releaseNoteMatch[3]), source: 'README.md rc.1 release-note summary' }
|
||||
);
|
||||
|
||||
const projectTreeAgentsMatch = readmeContent.match(/^\|\s*--\s*agents\/\s*#\s*(\d+)\s+specialized subagents for delegation\s*$/im);
|
||||
if (!projectTreeAgentsMatch) {
|
||||
throw new Error('README.md project tree is missing the agents count');
|
||||
@@ -415,6 +428,13 @@ function syncEnglishReadme(content, catalog) {
|
||||
`${prefix}${catalog.agents.count}${agentsSuffix}${catalog.skills.count}${skillsSuffix}${catalog.commands.count} legacy command shims`,
|
||||
'README.md quick-start summary'
|
||||
);
|
||||
nextContent = replaceOrThrow(
|
||||
nextContent,
|
||||
/(actual OSS surface:\s+)(\d+)(\s+agents,\s+)(\d+)(\s+skills,\s+and\s+)(\d+)(\s+legacy command shims)/i,
|
||||
(_, prefix, __, agentsSuffix, ___, skillsSuffix, ____, commandsSuffix) =>
|
||||
`${prefix}${catalog.agents.count}${agentsSuffix}${catalog.skills.count}${skillsSuffix}${catalog.commands.count}${commandsSuffix}`,
|
||||
'README.md rc.1 release-note summary'
|
||||
);
|
||||
nextContent = replaceOrThrow(
|
||||
nextContent,
|
||||
/^(\|\s*--\s*agents\/\s*#\s*)(\d+)(\s+specialized subagents for delegation\s*)$/im,
|
||||
|
||||
318
scripts/ci/generate-command-registry.js
Normal file
318
scripts/ci/generate-command-registry.js
Normal file
@@ -0,0 +1,318 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Generate a deterministic command-to-agent/skill registry.
|
||||
*
|
||||
* Usage:
|
||||
* node scripts/ci/generate-command-registry.js
|
||||
* node scripts/ci/generate-command-registry.js --json
|
||||
* node scripts/ci/generate-command-registry.js --write
|
||||
* node scripts/ci/generate-command-registry.js --check
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const ROOT = path.join(__dirname, '../..');
|
||||
const DEFAULT_OUTPUT_PATH = path.join(ROOT, 'docs', 'COMMAND-REGISTRY.json');
|
||||
|
||||
/**
 * Convert an OS-specific relative path to forward-slash form for stable,
 * cross-platform registry output.
 * @param {string} relativePath - Path using the platform separator.
 * @returns {string} The same path joined with `/`.
 */
function normalizePath(relativePath) {
  const segments = relativePath.split(path.sep);
  return segments.join('/');
}
|
||||
|
||||
/**
 * List the `.md` filenames directly inside `root/relativeDir`, sorted.
 * Directories (even ones named `*.md`) are excluded; a missing directory
 * yields an empty list rather than an error.
 * @param {string} root - Base directory.
 * @param {string} relativeDir - Directory relative to `root` to scan.
 * @returns {string[]} Sorted markdown filenames.
 */
function listMarkdownFiles(root, relativeDir) {
  const directory = path.join(root, relativeDir);
  if (!fs.existsSync(directory)) {
    return [];
  }

  const markdownNames = [];
  for (const entry of fs.readdirSync(directory, { withFileTypes: true })) {
    if (entry.isFile() && entry.name.endsWith('.md')) {
      markdownNames.push(entry.name);
    }
  }
  return markdownNames.sort();
}
|
||||
|
||||
/**
 * Build the set of known agent identifiers from `agents/*.md` filenames
 * (the identifier is the filename minus its `.md` extension).
 * @param {string} root - Repository root directory.
 * @returns {Set<string>} Agent names.
 */
function listKnownAgents(root) {
  const agentNames = new Set();
  for (const filename of listMarkdownFiles(root, 'agents')) {
    agentNames.add(filename.replace(/\.md$/, ''));
  }
  return agentNames;
}
|
||||
|
||||
/**
 * Build the set of known skill names: every subdirectory of `skills/` that
 * contains a `SKILL.md` file. A missing `skills/` directory yields an empty
 * set.
 * @param {string} root - Repository root directory.
 * @returns {Set<string>} Skill directory names.
 */
function listKnownSkills(root) {
  const skillsDir = path.join(root, 'skills');
  if (!fs.existsSync(skillsDir)) {
    return new Set();
  }

  const skillNames = [];
  for (const entry of fs.readdirSync(skillsDir, { withFileTypes: true })) {
    const hasManifest = entry.isDirectory()
      && fs.existsSync(path.join(skillsDir, entry.name, 'SKILL.md'));
    if (hasManifest) {
      skillNames.push(entry.name);
    }
  }
  return new Set(skillNames.sort());
}
|
||||
|
||||
/**
 * Normalize a YAML frontmatter scalar: trim whitespace and remove one layer
 * of surrounding quotes, but only when the value is wrapped in a *matching*
 * pair ("..." or '...').
 *
 * The previous implementation stripped a leading and a trailing quote
 * independently, which mangled values with an unbalanced or mismatched
 * quote (e.g. `"foo'` became `foo`). Properly quoted and unquoted scalars
 * behave exactly as before.
 * @param {string} value - Raw scalar text after the `key:` prefix.
 * @returns {string} The cleaned scalar.
 */
function cleanYamlScalar(value) {
  const trimmed = value.trim();
  const first = trimmed[0];
  if (
    trimmed.length >= 2 &&
    (first === '"' || first === "'") &&
    trimmed[trimmed.length - 1] === first
  ) {
    return trimmed.slice(1, -1);
  }
  return trimmed;
}
|
||||
|
||||
/**
 * Extract a human-readable description from a command markdown file:
 * prefer the frontmatter `description:` field, fall back to the first
 * `#` heading, and return '' when neither exists.
 * @param {string} content - Full markdown file contents.
 * @returns {string} Description text (quotes stripped via cleanYamlScalar).
 */
function extractDescription(content) {
  const frontmatterMatch = content.match(/^---\r?\n([\s\S]*?)\r?\n---/);
  const descriptionMatch = frontmatterMatch
    ? frontmatterMatch[1].match(/^description:\s*(.+)$/m)
    : null;
  if (descriptionMatch) {
    return cleanYamlScalar(descriptionMatch[1]);
  }

  const headingMatch = content.match(/^#\s+(.+)$/m);
  if (!headingMatch) {
    return '';
  }
  return headingMatch[1].trim();
}
|
||||
|
||||
/**
 * Collect every capture-group-1 match of `patterns` in `content` that is a
 * member of `knownNames`. Unknown candidates are silently dropped.
 * @param {string} content - Text to scan.
 * @param {RegExp[]} patterns - Global regexes with one capture group.
 * @param {Set<string>} knownNames - Allow-list of valid names.
 * @returns {Set<string>} The recognized references.
 */
function collectKnownReferences(content, patterns, knownNames) {
  const found = new Set();

  for (const pattern of patterns) {
    const candidates = Array.from(content.matchAll(pattern), (m) => m[1]);
    for (const candidate of candidates) {
      if (knownNames.has(candidate)) {
        found.add(candidate);
      }
    }
  }

  return found;
}
|
||||
|
||||
/**
 * Find all agent and skill references in a command file's content,
 * validated against the known-agent and known-skill sets.
 * The regexes cover the reference styles used across commands/*.md
 * (@mentions, `agent:`/`skill:` keys, and path-style references).
 * @param {string} content - Command markdown contents.
 * @param {Set<string>} knownAgents - Valid agent names.
 * @param {Set<string>} knownSkills - Valid skill names.
 * @returns {{agents: string[], skills: string[]}} Sorted reference lists.
 */
function extractReferences(content, knownAgents, knownSkills) {
  const agentPatterns = [
    /@([a-z][a-z0-9-]*)/gi,
    /\bagent:\s*['"]?([a-z][a-z0-9-]*)/gi,
    /\bsubagent(?:_type)?:\s*['"]?([a-z][a-z0-9-]*)/gi,
    /\bagents\/([a-z][a-z0-9-]*)\.md\b/gi,
  ];

  const skillPatterns = [
    /\bskill:\s*['"]?\/?([a-z][a-z0-9-]*)/gi,
    /\bskills\/([a-z][a-z0-9-]*)\/SKILL\.md\b/gi,
    /\bskills\/([a-z][a-z0-9-]*)\b/gi,
    /\/([a-z][a-z0-9-]*)\b/gi,
  ];

  const agentRefs = collectKnownReferences(content, agentPatterns, knownAgents);
  const skillRefs = collectKnownReferences(content, skillPatterns, knownSkills);

  return {
    agents: [...agentRefs].sort(),
    skills: [...skillRefs].sort(),
  };
}
|
||||
|
||||
/**
 * Heuristically classify a command by keyword matching over its name and
 * content. Categories are checked in priority order; the first hit wins,
 * and anything unmatched falls through to 'general'.
 * @param {string} content - Command markdown contents.
 * @param {string} commandName - Command name (filename minus `.md`).
 * @returns {string} One of orchestration/testing/review/planning/refactoring/build/general.
 */
function inferCommandType(content, commandName) {
  const haystack = `${commandName}\n${content}`.toLowerCase();

  // Orchestration also keys off the `multi-` name prefix, so it is checked
  // separately ahead of the keyword table.
  if (commandName.startsWith('multi-') || haystack.includes('orchestrat')) {
    return 'orchestration';
  }

  const keywordTypes = [
    ['testing', ['test', 'tdd', 'coverage']],
    ['review', ['review', 'audit', 'security']],
    ['planning', ['plan', 'design', 'architecture']],
    ['refactoring', ['refactor', 'clean', 'simplify']],
    ['build', ['build', 'compile', 'setup']],
  ];

  for (const [type, keywords] of keywordTypes) {
    if (keywords.some((keyword) => haystack.includes(keyword))) {
      return type;
    }
  }

  return 'general';
}
|
||||
|
||||
/**
 * Read one command markdown file and produce its registry entry:
 * description, inferred type, agent/skill references, and repo-relative
 * path (forward-slash normalized).
 * @param {string} root - Repository root directory.
 * @param {string} filename - Markdown filename inside `commands/`.
 * @param {Set<string>} knownAgents - Valid agent names.
 * @param {Set<string>} knownSkills - Valid skill names.
 * @returns {object} Registry entry for this command.
 */
function processCommandFile(root, filename, knownAgents, knownSkills) {
  const commandName = filename.replace(/\.md$/, '');
  const relativePath = normalizePath(path.join('commands', filename));
  const content = fs.readFileSync(path.join(root, relativePath), 'utf8');
  const { agents, skills } = extractReferences(content, knownAgents, knownSkills);

  return {
    command: commandName,
    description: extractDescription(content),
    type: inferCommandType(content, commandName),
    // primaryAgents is a convenience view: the first three referenced agents.
    primaryAgents: agents.slice(0, 3),
    allAgents: agents,
    skills,
    path: relativePath,
  };
}
|
||||
|
||||
/**
 * Return a copy of a name→count map with keys in locale-sorted order,
 * so serialized output is deterministic.
 * @param {Object<string, number>} countMap - Unordered counts.
 * @returns {Object<string, number>} Key-sorted counts.
 */
function sortCountMap(countMap) {
  const sortedEntries = Object.entries(countMap);
  sortedEntries.sort((left, right) => left[0].localeCompare(right[0]));
  return Object.fromEntries(sortedEntries);
}
|
||||
|
||||
/**
 * Rank a name→count map by descending count (ties broken by name, ascending)
 * and return the top 10 as `{ [keyName]: name, count }` records.
 * @param {Object<string, number>} countMap - Usage counts.
 * @param {string} keyName - Property name for the entity (e.g. 'agent').
 * @returns {Array<object>} Up to 10 ranked usage records.
 */
function topUsage(countMap, keyName) {
  const ranked = Object.entries(countMap).sort((left, right) => {
    if (right[1] !== left[1]) {
      return right[1] - left[1];
    }
    return left[0].localeCompare(right[0]);
  });

  return ranked
    .slice(0, 10)
    .map(([name, count]) => ({ [keyName]: name, count }));
}
|
||||
|
||||
/**
 * Build the full command registry for a repository root: one entry per
 * `commands/*.md` file plus aggregate statistics (count by inferred type,
 * top referenced agents, top referenced skills).
 * @param {{root?: string}} [options] - Optional override for the repo root.
 * @returns {object} Registry object with schemaVersion, commands, statistics.
 */
function generateRegistry(options = {}) {
  const root = options.root || ROOT;
  const knownAgents = listKnownAgents(root);
  const knownSkills = listKnownSkills(root);

  const commands = [];
  for (const filename of listMarkdownFiles(root, 'commands')) {
    commands.push(processCommandFile(root, filename, knownAgents, knownSkills));
  }

  const byType = {};
  const agentUsage = {};
  const skillUsage = {};
  // Small shared tally helper keeps the aggregation loop flat.
  const bump = (counts, key) => {
    counts[key] = (counts[key] || 0) + 1;
  };

  for (const command of commands) {
    bump(byType, command.type);
    command.allAgents.forEach((agent) => bump(agentUsage, agent));
    command.skills.forEach((skill) => bump(skillUsage, skill));
  }

  return {
    schemaVersion: 1,
    totalCommands: commands.length,
    commands,
    statistics: {
      byType: sortCountMap(byType),
      topAgents: topUsage(agentUsage, 'agent'),
      topSkills: topUsage(skillUsage, 'skill'),
    },
  };
}
|
||||
|
||||
/** Serialize the registry as pretty-printed JSON with a trailing newline. */
function formatRegistry(registry) {
  const pretty = JSON.stringify(registry, null, 2);
  return pretty + '\n';
}
|
||||
|
||||
/**
 * Serialize `registry` (via formatRegistry) and write it to `outputPath`,
 * creating any missing parent directories first.
 */
function writeRegistry(registry, outputPath = DEFAULT_OUTPUT_PATH) {
  fs.mkdirSync(path.dirname(outputPath), { recursive: true });
  fs.writeFileSync(outputPath, formatRegistry(registry), 'utf8');
}
|
||||
|
||||
/**
 * Verify the on-disk registry file matches the freshly generated one.
 *
 * Throws with a repo-relative path in the message when the file cannot be
 * read or when its content differs from the expected serialization;
 * returns nothing on success.
 */
function checkRegistry(registry, outputPath = DEFAULT_OUTPUT_PATH) {
  const expected = formatRegistry(registry);
  let current;

  try {
    current = fs.readFileSync(outputPath, 'utf8');
  } catch (error) {
    throw new Error(`Failed to read ${normalizePath(path.relative(ROOT, outputPath))}: ${error.message}`);
  }

  // Byte-for-byte comparison: any drift (content or formatting) is stale.
  if (current !== expected) {
    throw new Error(`${normalizePath(path.relative(ROOT, outputPath))} is out of date; run npm run command-registry:write`);
  }
}
|
||||
|
||||
/**
 * Render the registry statistics as a human-readable text report:
 * total command count, commands per type, then the top agents and skills.
 * Ends with a trailing newline.
 */
function formatTextSummary(registry) {
  const out = [];
  out.push('Command registry statistics');
  out.push('');
  out.push(`Total commands: ${registry.totalCommands}`);
  out.push('');
  out.push('By type:');

  Object.entries(registry.statistics.byType).forEach(([type, count]) => {
    out.push(`  ${type}: ${count}`);
  });

  out.push('', 'Top agents:');
  registry.statistics.topAgents.forEach(({ agent, count }) => {
    out.push(`  ${agent}: ${count}`);
  });

  out.push('', 'Top skills:');
  registry.statistics.topSkills.forEach(({ skill, count }) => {
    out.push(`  ${skill}: ${count}`);
  });

  return out.join('\n') + '\n';
}
|
||||
|
||||
/**
 * Parse CLI flags. Only --json, --write, and --check are recognized;
 * anything else throws. Returns a boolean for each flag.
 */
function parseArgs(argv) {
  const recognized = ['--json', '--write', '--check'];
  const seen = new Set();

  argv.forEach(arg => {
    if (!recognized.includes(arg)) {
      throw new Error(`Unknown argument: ${arg}`);
    }
    seen.add(arg);
  });

  return {
    json: seen.has('--json'),
    write: seen.has('--write'),
    check: seen.has('--check'),
  };
}
|
||||
|
||||
/**
 * CLI entry point. Parses flags, generates the registry, and dispatches:
 *   --check  verify the on-disk registry matches the generated one
 *   --write  regenerate the registry file on disk
 *   --json   print the full registry JSON (default: text summary)
 *
 * Returns a process exit code (0 success, 1 on any error) rather than
 * exiting, and accepts injectable streams/paths so tests can drive it.
 *
 * @param {string[]} [argv] - CLI args (default: process.argv.slice(2)).
 * @param {{ stdout?, stderr?, outputPath?, root? }} [options]
 * @returns {number} Exit code.
 */
function run(argv = process.argv.slice(2), options = {}) {
  const stdout = options.stdout || process.stdout;
  const stderr = options.stderr || process.stderr;
  const outputPath = options.outputPath || DEFAULT_OUTPUT_PATH;

  try {
    const args = parseArgs(argv);
    const registry = generateRegistry({ root: options.root || ROOT });

    if (args.check) {
      checkRegistry(registry, outputPath);
      stdout.write('Command registry is up to date.\n');
      return 0;
    }

    if (args.write) {
      writeRegistry(registry, outputPath);
      stdout.write(`Command registry written to ${normalizePath(path.relative(process.cwd(), outputPath))}\n`);
      return 0;
    }

    stdout.write(args.json ? formatRegistry(registry) : formatTextSummary(registry));
    return 0;
  } catch (error) {
    // All failures (bad args, stale registry, I/O errors) surface as a
    // single-line message on stderr and exit code 1.
    stderr.write(`${error.message}\n`);
    return 1;
  }
}
|
||||
|
||||
// CLI entry point — the exit code comes straight from run().
if (require.main === module) {
  process.exit(run());
}

// Exported for unit tests and programmatic reuse.
module.exports = {
  checkRegistry,
  extractDescription,
  extractReferences,
  formatRegistry,
  generateRegistry,
  inferCommandType,
  parseArgs,
  run,
  writeRegistry,
};
|
||||
371
scripts/ci/scan-supply-chain-iocs.js
Executable file
371
scripts/ci/scan-supply-chain-iocs.js
Executable file
@@ -0,0 +1,371 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Scan dependency manifests, lockfiles, AI-tool configs, and installed package
|
||||
* payload paths for active supply-chain incident indicators.
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const DEFAULT_ROOT = path.resolve(__dirname, '../..');
|
||||
|
||||
/**
 * Known-compromised package name → list of affected published versions.
 * scanFile() matches these against dependency manifests and lockfiles.
 * Keep keys and version strings exactly as published on the npm registry.
 *
 * NOTE(review): this table is a snapshot of a specific incident — confirm
 * it is current before treating a clean scan as conclusive.
 */
const MALICIOUS_PACKAGE_VERSIONS = {
  '@mistralai/mistralai': ['2.2.3', '2.2.4'],
  '@mistralai/mistralai-azure': ['1.7.2', '1.7.3'],
  '@mistralai/mistralai-gcp': ['1.7.2', '1.7.3'],
  '@opensearch-project/opensearch': ['3.6.2', '3.8.0'],
  '@tanstack/arktype-adapter': ['1.166.12', '1.166.15'],
  '@tanstack/eslint-plugin-router': ['1.161.9', '1.161.12'],
  '@tanstack/eslint-plugin-start': ['0.0.4', '0.0.7'],
  '@tanstack/history': ['1.161.9', '1.161.12'],
  '@tanstack/nitro-v2-vite-plugin': ['1.154.12', '1.154.15'],
  '@tanstack/react-router': ['1.169.5', '1.169.8'],
  '@tanstack/react-router-devtools': ['1.166.16', '1.166.19'],
  '@tanstack/react-router-ssr-query': ['1.166.15', '1.166.18'],
  '@tanstack/react-start': ['1.167.68', '1.167.71'],
  '@tanstack/react-start-client': ['1.166.51', '1.166.54'],
  '@tanstack/react-start-rsc': ['0.0.47', '0.0.50'],
  '@tanstack/react-start-server': ['1.166.55', '1.166.58'],
  '@tanstack/router-cli': ['1.166.46', '1.166.49'],
  '@tanstack/router-core': ['1.169.5', '1.169.8'],
  '@tanstack/router-devtools': ['1.166.16', '1.166.19'],
  '@tanstack/router-devtools-core': ['1.167.6', '1.167.9'],
  '@tanstack/router-generator': ['1.166.45', '1.166.48'],
  '@tanstack/router-plugin': ['1.167.38', '1.167.41'],
  '@tanstack/router-ssr-query-core': ['1.168.3', '1.168.6'],
  '@tanstack/router-utils': ['1.161.11', '1.161.14'],
  '@tanstack/router-vite-plugin': ['1.166.53', '1.166.56'],
  '@tanstack/solid-router': ['1.169.5', '1.169.8'],
  '@tanstack/solid-router-devtools': ['1.166.16', '1.166.19'],
  '@tanstack/solid-router-ssr-query': ['1.166.15', '1.166.18'],
  '@tanstack/solid-start': ['1.167.65', '1.167.68'],
  '@tanstack/solid-start-client': ['1.166.50', '1.166.53'],
  '@tanstack/solid-start-server': ['1.166.54', '1.166.57'],
  '@tanstack/start-client-core': ['1.168.5', '1.168.8'],
  '@tanstack/start-fn-stubs': ['1.161.9', '1.161.12'],
  '@tanstack/start-plugin-core': ['1.169.23', '1.169.26'],
  '@tanstack/start-server-core': ['1.167.33', '1.167.36'],
  '@tanstack/start-static-server-functions': ['1.166.44', '1.166.47'],
  '@tanstack/start-storage-context': ['1.166.38', '1.166.41'],
  '@tanstack/valibot-adapter': ['1.166.12', '1.166.15'],
  '@tanstack/virtual-file-routes': ['1.161.10', '1.161.13'],
  '@tanstack/vue-router': ['1.169.5', '1.169.8'],
  '@tanstack/vue-router-devtools': ['1.166.16', '1.166.19'],
  '@tanstack/vue-router-ssr-query': ['1.166.15', '1.166.18'],
  '@tanstack/vue-start': ['1.167.61', '1.167.64'],
  '@tanstack/vue-start-client': ['1.166.46', '1.166.49'],
  '@tanstack/vue-start-server': ['1.166.50', '1.166.53'],
  '@tanstack/zod-adapter': ['1.166.12', '1.166.15'],
  '@uipath/agent.sdk': ['0.0.18'],
  '@uipath/agent-sdk': ['1.0.2'],
  '@uipath/apollo-core': ['5.9.2'],
  '@uipath/cli': ['1.0.1'],
  '@uipath/robot': ['1.3.4'],
  'cmux-agent-mcp': ['0.1.3', '0.1.4', '0.1.5', '0.1.6', '0.1.7', '0.1.8'],
  'guardrails-ai': ['0.10.1'],
  'mistralai': ['2.4.6'],
  'nextmove-mcp': ['0.1.3', '0.1.4', '0.1.5', '0.1.7'],
  'safe-action': ['0.8.3', '0.8.4'],
};
|
||||
|
||||
/**
 * Substrings whose presence anywhere in an inspected file is reported as a
 * critical IOC by scanFile() (matched case-insensitively): malicious
 * package/repo pins, dropped payload filenames, persistence labels,
 * network endpoints, and campaign marker strings.
 */
const CRITICAL_TEXT_INDICATORS = [
  '@tanstack/setup',
  'github:tanstack/router#79ac49eedf774dd4b0cfa308722bc463cfe5885c',
  'router_init.js',
  'router_runtime.js',
  'tanstack_runner.js',
  'gh-token-monitor',
  'com.user.gh-token-monitor',
  'filev2.getsession.org',
  'seed1.getsession.org',
  'seed2.getsession.org',
  'seed3.getsession.org',
  'git-tanstack.com',
  '83.142.209.194',
  'api.masscan.cloud',
  'A Mini Shai-Hulud has Appeared',
  'PUSH UR T3MPRR',
];

// Dependency manifests and lockfiles — always inspected wherever found,
// and the only files checked against MALICIOUS_PACKAGE_VERSIONS.
const DEPENDENCY_FILENAMES = new Set([
  'package.json',
  'package-lock.json',
  'pnpm-lock.yaml',
  'yarn.lock',
  'bun.lock',
  'pyproject.toml',
  'poetry.lock',
  'requirements.txt',
]);

// Filenames that indicate persistence, but only when found inside known
// config locations (see isInSpecialConfigPath).
const PERSISTENCE_FILENAMES = new Set([
  'settings.json',
  'tasks.json',
  'router_runtime.js',
  'setup.mjs',
  'gh-token-monitor.sh',
  'com.user.gh-token-monitor.plist',
  'gh-token-monitor.service',
]);

// Dropped-payload filenames — flagged when found inside a node_modules tree.
const PAYLOAD_FILENAMES = new Set([
  'router_init.js',
  'router_runtime.js',
  'tanstack_runner.js',
  'gh-token-monitor.sh',
]);

// Directory names skipped entirely during the walk. node_modules is NOT
// listed here: walkFiles routes it to walkNodeModules for a shallow scan.
const IGNORED_DIRS = new Set([
  '.git',
  '.next',
  '.pytest_cache',
  '__pycache__',
  'coverage',
  'dist',
  'docs',
  'target',
  'tests',
]);
|
||||
|
||||
/** Case-fold a string so indicator matching is case-insensitive. */
function normalizeForMatch(value) {
  const folded = value.toLowerCase();
  return folded;
}
|
||||
|
||||
/**
 * True when `filePath` sits inside one of the config/persistence
 * directories targeted by the campaign (.claude, .vscode, .kiro/settings,
 * macOS LaunchAgents, systemd user units, ~/.local/bin). The path is
 * normalized to forward slashes first so the check is OS-independent.
 */
function isInSpecialConfigPath(filePath) {
  const posixPath = filePath.split(path.sep).join('/');
  const specialDirPatterns = [
    /\/\.claude\//,
    /\/\.vscode\//,
    /\/\.kiro\/settings\//,
    /\/Library\/LaunchAgents\//,
    /\/\.config\/systemd\/user\//,
    /\/\.local\/bin\//,
  ];
  return specialDirPatterns.some(pattern => pattern.test(posixPath));
}
|
||||
|
||||
/**
 * Decide whether a file is worth scanning:
 *  - any dependency manifest/lockfile, anywhere in the tree;
 *  - a persistence filename, but only inside a known config directory
 *    (see isInSpecialConfigPath);
 *  - a payload filename, but only inside a node_modules tree.
 */
function shouldInspectFile(filePath) {
  const base = path.basename(filePath);
  if (DEPENDENCY_FILENAMES.has(base)) return true;
  if (PERSISTENCE_FILENAMES.has(base) && isInSpecialConfigPath(filePath)) return true;
  if (PAYLOAD_FILENAMES.has(base) && filePath.includes(`${path.sep}node_modules${path.sep}`)) return true;
  return false;
}
|
||||
|
||||
/**
 * Recursively collect candidate files under `rootDir` (mutates and
 * returns the `files` accumulator).
 *
 *  - A plain-file `rootDir` is accepted directly if shouldInspectFile
 *    approves it.
 *  - Directories named in IGNORED_DIRS are skipped.
 *  - node_modules trees are delegated to walkNodeModules, which only
 *    inspects a fixed set of filenames per package rather than walking
 *    the whole tree.
 *
 * NOTE(review): the `entry.name !== 'node_modules'` clause is redundant —
 * 'node_modules' is not in IGNORED_DIRS — but harmless; confirm before
 * simplifying.
 *
 * @param {string} rootDir - Directory (or single file) to walk.
 * @param {string[]} [files] - Accumulator; appended to in place.
 * @returns {string[]} The accumulator, for convenience.
 */
function walkFiles(rootDir, files = []) {
  if (!fs.existsSync(rootDir)) return files;

  const stat = fs.statSync(rootDir);
  if (stat.isFile()) {
    if (shouldInspectFile(rootDir)) files.push(rootDir);
    return files;
  }

  for (const entry of fs.readdirSync(rootDir, { withFileTypes: true })) {
    const fullPath = path.join(rootDir, entry.name);
    if (entry.isDirectory()) {
      if (IGNORED_DIRS.has(entry.name) && entry.name !== 'node_modules') continue;
      if (entry.name === 'node_modules') {
        walkNodeModules(fullPath, files);
      } else {
        walkFiles(fullPath, files);
      }
    } else if (entry.isFile() && shouldInspectFile(fullPath)) {
      files.push(fullPath);
    }
  }

  return files;
}
|
||||
|
||||
/**
 * Shallow scan of one node_modules directory: for each installed package
 * (descending one extra level into @scope directories) inspect only a
 * fixed set of interesting filenames via inspectPackageDir. Dot-prefixed
 * entries (e.g. .bin, .package-lock.json) are skipped.
 *
 * @param {string} nodeModulesDir - Path to a node_modules directory.
 * @param {string[]} files - Accumulator; appended to in place.
 */
function walkNodeModules(nodeModulesDir, files) {
  if (!fs.existsSync(nodeModulesDir)) return;

  for (const entry of fs.readdirSync(nodeModulesDir, { withFileTypes: true })) {
    if (entry.name.startsWith('.')) continue;
    const fullPath = path.join(nodeModulesDir, entry.name);
    if (entry.isDirectory()) {
      if (entry.name.startsWith('@')) {
        // Scoped packages live one level deeper: @scope/<package>.
        for (const scopedEntry of fs.readdirSync(fullPath, { withFileTypes: true })) {
          if (scopedEntry.isDirectory()) {
            inspectPackageDir(path.join(fullPath, scopedEntry.name), files);
          }
        }
      } else {
        inspectPackageDir(fullPath, files);
      }
    }
  }
}
|
||||
|
||||
/**
 * Collect interesting files that exist directly inside one installed
 * package directory: dependency manifests, known payload filenames, plus
 * 'setup.mjs' and 'execution.js'. Only top-level files are checked — the
 * package's own subtree is not walked.
 *
 * @param {string} packageDir - Path to one installed package.
 * @param {string[]} files - Accumulator; appended to in place.
 */
function inspectPackageDir(packageDir, files) {
  for (const filename of [...DEPENDENCY_FILENAMES, ...PAYLOAD_FILENAMES, 'setup.mjs', 'execution.js']) {
    const candidate = path.join(packageDir, filename);
    if (fs.existsSync(candidate) && fs.statSync(candidate).isFile()) {
      files.push(candidate);
    }
  }
}
|
||||
|
||||
/**
 * Read a file as UTF-8, returning '' on any failure so the scan can keep
 * going past unreadable or vanished files.
 */
function readText(filePath) {
  let text = '';
  try {
    text = fs.readFileSync(filePath, 'utf8');
  } catch {
    // Unreadable file — treat as empty rather than aborting the scan.
  }
  return text;
}
|
||||
|
||||
/** 1-based line number of character offset `index` within `text`. */
function lineForIndex(text, index) {
  const prefix = text.slice(0, index);
  const newlines = prefix.match(/\r?\n/g);
  return (newlines ? newlines.length : 0) + 1;
}
|
||||
|
||||
/** Backslash-escape regex metacharacters so `value` matches literally. */
function escapeRegExp(value) {
  let escaped = '';
  for (const ch of value) {
    escaped += '.*+?^${}()|[]\\'.includes(ch) ? `\\${ch}` : ch;
  }
  return escaped;
}
|
||||
|
||||
/** Append one structured finding record to the `findings` accumulator. */
function addFinding(findings, severity, filePath, line, indicator, message) {
  const finding = { severity, filePath, line, indicator, message };
  findings.push(finding);
}
|
||||
|
||||
/**
 * Scan a single candidate file, appending critical findings in place.
 *
 * Three checks run in order:
 *  1. Filename check — the basename itself is a known payload name.
 *  2. Text check — the file contains any CRITICAL_TEXT_INDICATORS
 *     substring (case-insensitive); reported at the matching line.
 *  3. Version check — dependency manifests/lockfiles only: a known-bad
 *     package name appears AND one of its compromised versions appears as
 *     a delimited token (or in "@<version>" form).
 *
 * The version check is deliberately loose: the matched version string may
 * belong to a different package elsewhere in the file, so a hit is a
 * signal to investigate rather than proof of compromise. The reported
 * line is where the package name first occurs, not necessarily where the
 * version does.
 *
 * @param {string} filePath - Absolute path of the file to scan.
 * @param {string} rootDir - Scan root, used to report relative paths.
 * @param {Array} findings - Accumulator; appended to in place.
 */
function scanFile(filePath, rootDir, findings) {
  const base = path.basename(filePath);
  const relativePath = path.relative(rootDir, filePath) || filePath;
  const text = readText(filePath);
  const lowerText = normalizeForMatch(text);

  if (PAYLOAD_FILENAMES.has(base)) {
    addFinding(
      findings,
      'critical',
      relativePath,
      1,
      base,
      'Known Mini Shai-Hulud/TanStack payload or persistence filename is present',
    );
  }

  for (const indicator of CRITICAL_TEXT_INDICATORS) {
    const index = lowerText.indexOf(normalizeForMatch(indicator));
    if (index !== -1) {
      addFinding(
        findings,
        'critical',
        relativePath,
        lineForIndex(text, index),
        indicator,
        'Known active supply-chain IOC is present',
      );
    }
  }

  // Version matching only applies to dependency manifests/lockfiles.
  if (!DEPENDENCY_FILENAMES.has(base)) return;

  for (const [packageName, versions] of Object.entries(MALICIOUS_PACKAGE_VERSIONS)) {
    const packageIndex = lowerText.indexOf(normalizeForMatch(packageName));
    if (packageIndex === -1) continue;

    for (const version of versions) {
      // Require the version to appear as a delimited token so e.g.
      // "11.7.2" does not match "1.7.2"; also accept the "@1.7.2" form
      // used in lockfile specifiers.
      const versionPattern = new RegExp(`(^|[^0-9a-z.])${escapeRegExp(version)}([^0-9a-z.]|$)`, 'i');
      if (versionPattern.test(text) || lowerText.includes(`@${version}`)) {
        addFinding(
          findings,
          'critical',
          relativePath,
          lineForIndex(text, packageIndex),
          `${packageName}@${version}`,
          'Dependency manifest or lockfile references a known compromised package version',
        );
      }
    }
  }
}
|
||||
|
||||
/**
 * Absolute paths of the per-user persistence locations the campaign is
 * known to drop files into, resolved under `homeDir`.
 */
function homeTargets(homeDir) {
  const RELATIVE_TARGETS = [
    '.claude/settings.json',
    '.claude/router_runtime.js',
    '.claude/setup.mjs',
    '.vscode/tasks.json',
    '.vscode/setup.mjs',
    'Library/LaunchAgents/com.user.gh-token-monitor.plist',
    '.config/systemd/user/gh-token-monitor.service',
    '.local/bin/gh-token-monitor.sh',
  ];

  const absolute = [];
  for (const relativePath of RELATIVE_TARGETS) {
    absolute.push(path.join(homeDir, relativePath));
  }
  return absolute;
}
|
||||
|
||||
/**
 * Run the full IOC scan.
 *
 * Walks `options.rootDir` (default: the repository root two levels above
 * this script) collecting candidate files; when `options.home` is truthy
 * it also checks the fixed per-user persistence paths under
 * `options.homeDir` (default: os.homedir()). Files are de-duplicated and
 * scanned in sorted order so output is deterministic.
 *
 * @param {{ rootDir?: string, home?: boolean, homeDir?: string }} [options]
 * @returns {{ rootDir: string, scannedFiles: number, findings: Array }}
 */
function scanSupplyChainIocs(options = {}) {
  const rootDir = path.resolve(options.rootDir || DEFAULT_ROOT);
  const files = walkFiles(rootDir);
  const findings = [];

  if (options.home) {
    for (const target of homeTargets(options.homeDir || os.homedir())) {
      if (fs.existsSync(target)) files.push(target);
    }
  }

  for (const filePath of [...new Set(files)].sort()) {
    scanFile(filePath, rootDir, findings);
  }

  return {
    rootDir,
    scannedFiles: files.length,
    findings,
  };
}
|
||||
|
||||
/**
 * Parse scanner CLI flags into an options object.
 *   --root <dir>      scan root directory
 *   --home            also check per-user persistence paths
 *   --home-dir <dir>  like --home, with an explicit home directory
 *   --json            emit the result as JSON
 * Throws on any unrecognized argument.
 */
function parseArgs(argv) {
  const options = {};
  let i = 0;

  while (i < argv.length) {
    const arg = argv[i];
    switch (arg) {
      case '--root':
        i += 1;
        options.rootDir = argv[i];
        break;
      case '--home':
        options.home = true;
        break;
      case '--home-dir':
        options.home = true;
        i += 1;
        options.homeDir = argv[i];
        break;
      case '--json':
        options.json = true;
        break;
      default:
        throw new Error(`Unknown argument: ${arg}`);
    }
    i += 1;
  }

  return options;
}
|
||||
|
||||
/**
 * Print scan results. JSON mode dumps the entire result object to stdout;
 * text mode prints a single pass line when clean, otherwise one stderr
 * pair per finding (location line plus an indented explanation).
 */
function printReport(result, json = false) {
  if (json) {
    console.log(JSON.stringify(result, null, 2));
    return;
  }

  const { findings, rootDir, scannedFiles } = result;
  if (findings.length === 0) {
    console.log(`Supply-chain IOC scan passed for ${rootDir} (${scannedFiles} files inspected)`);
    return;
  }

  findings.forEach(finding => {
    console.error(`${finding.severity.toUpperCase()}: ${finding.filePath}:${finding.line} ${finding.indicator}`);
    console.error(`  ${finding.message}`);
  });
}
|
||||
|
||||
// CLI entry: exit 0 = clean, 1 = findings present, 2 = usage/runtime error.
if (require.main === module) {
  try {
    const options = parseArgs(process.argv.slice(2));
    const result = scanSupplyChainIocs(options);
    printReport(result, options.json);
    process.exit(result.findings.length > 0 ? 1 : 0);
  } catch (error) {
    console.error(error.message);
    process.exit(2);
  }
}

// Exported for unit tests and programmatic reuse.
module.exports = {
  CRITICAL_TEXT_INDICATORS,
  MALICIOUS_PACKAGE_VERSIONS,
  scanSupplyChainIocs,
};
|
||||
@@ -7,12 +7,13 @@
|
||||
* the actual code. This hook steers the agent back to fixing the source.
|
||||
*
|
||||
* Exit codes:
|
||||
* 0 = allow (not a config file)
|
||||
* 2 = block (config file modification attempted)
|
||||
* 0 = allow (not a config file, or first-time creation of one)
|
||||
* 2 = block (existing config file modification attempted)
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
@@ -58,7 +59,7 @@ const PROTECTED_FILES = new Set([
|
||||
'.stylelintrc.yml',
|
||||
'.markdownlint.json',
|
||||
'.markdownlint.yaml',
|
||||
'.markdownlintrc',
|
||||
'.markdownlintrc'
|
||||
]);
|
||||
|
||||
function parseInput(inputOrRaw) {
|
||||
@@ -94,13 +95,41 @@ function run(inputOrRaw, options = {}) {
|
||||
|
||||
const basename = path.basename(filePath);
|
||||
if (PROTECTED_FILES.has(basename)) {
|
||||
// Allow first-time creation — there's no existing config to weaken.
|
||||
// The hook's purpose is blocking modifications; writing a brand-new
|
||||
// config file in a project that has none is a legitimate bootstrap
|
||||
// path (e.g. scaffolding ESLint into a fresh repo).
|
||||
//
|
||||
// Fail closed on any stat error other than ENOENT. Use lstatSync so a
|
||||
// symlink at the protected path is treated as present even if its target
|
||||
// is missing — a dangling symlink at e.g. .eslintrc.js still represents
|
||||
// an existing config entry that an agent should not silently replace.
|
||||
// fs.existsSync would swallow EACCES/EPERM as false; lstatSync exposes
|
||||
// the error code so we can treat only genuine "path not found" (ENOENT)
|
||||
// as absent.
|
||||
let exists = true;
|
||||
try {
|
||||
fs.lstatSync(filePath);
|
||||
// lstat succeeded — something (file, dir, or symlink) exists here.
|
||||
} catch (err) {
|
||||
if (err && err.code === 'ENOENT') {
|
||||
exists = false;
|
||||
}
|
||||
// Any other error (EACCES, EPERM, ELOOP, etc.) leaves exists=true
|
||||
// so the guard is never silently weakened.
|
||||
}
|
||||
|
||||
if (!exists) {
|
||||
return { exitCode: 0 };
|
||||
}
|
||||
|
||||
return {
|
||||
exitCode: 2,
|
||||
stderr:
|
||||
`BLOCKED: Modifying ${basename} is not allowed. ` +
|
||||
'Fix the source code to satisfy linter/formatter rules instead of ' +
|
||||
'weakening the config. If this is a legitimate config change, ' +
|
||||
'disable the config-protection hook temporarily.',
|
||||
'disable the config-protection hook temporarily.'
|
||||
};
|
||||
}
|
||||
|
||||
@@ -125,7 +154,7 @@ process.stdin.on('data', chunk => {
|
||||
process.stdin.on('end', () => {
|
||||
const result = run(raw, {
|
||||
truncated,
|
||||
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN,
|
||||
maxStdin: Number(process.env.ECC_HOOK_INPUT_MAX_BYTES) || MAX_STDIN
|
||||
});
|
||||
|
||||
if (result.stderr) {
|
||||
|
||||
@@ -1,63 +1,157 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Cost Tracker Hook
|
||||
* Cost Tracker Hook (v2)
|
||||
*
|
||||
* Appends lightweight session usage metrics to ~/.claude/metrics/costs.jsonl.
|
||||
* Reads transcript_path from Stop hook stdin, sums usage across all
|
||||
* assistant turns in the session JSONL, and appends one row to
|
||||
* ~/.claude/metrics/costs.jsonl.
|
||||
*
|
||||
* Stop hook stdin payload: { session_id, transcript_path, cwd, hook_event_name, ... }
|
||||
* The Stop payload does NOT include `usage` or `model` directly. The previous
|
||||
* version of this hook expected those fields and silently produced zero-filled
|
||||
* rows (verified: 2,340 rows captured with 0.0% non-zero token rate over 52
|
||||
* days). The fix is to read the transcript file Claude Code already passes us.
|
||||
*
|
||||
* JSONL assistant entry shape (per Claude Code):
|
||||
* { type: "assistant", message: { model, usage: { input_tokens, output_tokens,
|
||||
* cache_creation_input_tokens, cache_read_input_tokens } } }
|
||||
*
|
||||
* Cumulative behavior: Stop fires per assistant response, not per session.
|
||||
* Each row therefore represents the cumulative session total up to that point.
|
||||
* To get per-session cost, take the last row per session_id. To get per-day
|
||||
* spend, aggregate.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { ensureDir, appendFile, getClaudeDir } = require('../lib/utils');
|
||||
const { estimateCost } = require('../lib/cost-estimate');
|
||||
const { sanitizeSessionId } = require('../lib/session-bridge');
|
||||
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
let raw = '';
|
||||
// Approximate per-1M-token billing rates (USD).
|
||||
// Cache creation: 1.25x input rate. Cache read: 0.1x input rate.
|
||||
const RATE_TABLE = {
|
||||
haiku: { in: 0.80, out: 4.0, cacheWrite: 1.00, cacheRead: 0.08 },
|
||||
sonnet: { in: 3.00, out: 15.0, cacheWrite: 3.75, cacheRead: 0.30 },
|
||||
opus: { in: 15.00, out: 75.0, cacheWrite: 18.75, cacheRead: 1.50 }
|
||||
};
|
||||
|
||||
function toNumber(value) {
|
||||
const n = Number(value);
|
||||
function getRates(model) {
|
||||
const m = String(model || '').toLowerCase();
|
||||
if (m.includes('haiku')) return RATE_TABLE.haiku;
|
||||
if (m.includes('opus')) return RATE_TABLE.opus;
|
||||
return RATE_TABLE.sonnet;
|
||||
}
|
||||
|
||||
function toNumber(v) {
|
||||
const n = Number(v);
|
||||
return Number.isFinite(n) ? n : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Scan the session JSONL and sum token usage across all assistant turns.
|
||||
* Returns { inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens, model }
|
||||
* or null on read failure.
|
||||
*/
|
||||
function sumUsageFromTranscript(transcriptPath) {
|
||||
let content;
|
||||
try {
|
||||
content = fs.readFileSync(transcriptPath, 'utf8');
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
|
||||
let inputTokens = 0;
|
||||
let outputTokens = 0;
|
||||
let cacheWriteTokens = 0;
|
||||
let cacheReadTokens = 0;
|
||||
let model = 'unknown';
|
||||
|
||||
for (const line of content.split('\n')) {
|
||||
if (!line.trim()) continue;
|
||||
let entry;
|
||||
try { entry = JSON.parse(line); } catch { continue; }
|
||||
|
||||
if (entry.type !== 'assistant') continue;
|
||||
const msg = entry.message;
|
||||
if (!msg || !msg.usage) continue;
|
||||
|
||||
const u = msg.usage;
|
||||
inputTokens += toNumber(u.input_tokens);
|
||||
outputTokens += toNumber(u.output_tokens);
|
||||
cacheWriteTokens += toNumber(u.cache_creation_input_tokens);
|
||||
cacheReadTokens += toNumber(u.cache_read_input_tokens);
|
||||
|
||||
if (msg.model && msg.model !== 'unknown') model = msg.model;
|
||||
}
|
||||
|
||||
return { inputTokens, outputTokens, cacheWriteTokens, cacheReadTokens, model };
|
||||
}
|
||||
|
||||
const MAX_STDIN = 64 * 1024;
|
||||
let raw = '';
|
||||
|
||||
process.stdin.setEncoding('utf8');
|
||||
process.stdin.on('data', chunk => {
|
||||
if (raw.length < MAX_STDIN) {
|
||||
const remaining = MAX_STDIN - raw.length;
|
||||
raw += chunk.substring(0, remaining);
|
||||
}
|
||||
if (raw.length < MAX_STDIN) raw += chunk.substring(0, MAX_STDIN - raw.length);
|
||||
});
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
try {
|
||||
const input = raw.trim() ? JSON.parse(raw) : {};
|
||||
const usage = input.usage || input.token_usage || {};
|
||||
const inputTokens = toNumber(usage.input_tokens || usage.prompt_tokens || 0);
|
||||
const outputTokens = toNumber(usage.output_tokens || usage.completion_tokens || 0);
|
||||
|
||||
const model = String(input.model || input._cursor?.model || process.env.CLAUDE_MODEL || 'unknown');
|
||||
const transcriptPath = (typeof input.transcript_path === 'string' && input.transcript_path)
|
||||
? input.transcript_path
|
||||
: process.env.CLAUDE_TRANSCRIPT_PATH || null;
|
||||
|
||||
const sessionId =
|
||||
sanitizeSessionId(input.session_id) ||
|
||||
sanitizeSessionId(process.env.ECC_SESSION_ID) ||
|
||||
sanitizeSessionId(process.env.CLAUDE_SESSION_ID) ||
|
||||
'default';
|
||||
|
||||
let usageTotals = null;
|
||||
if (transcriptPath && fs.existsSync(transcriptPath)) {
|
||||
usageTotals = sumUsageFromTranscript(transcriptPath);
|
||||
}
|
||||
|
||||
const {
|
||||
inputTokens = 0,
|
||||
outputTokens = 0,
|
||||
cacheWriteTokens = 0,
|
||||
cacheReadTokens = 0,
|
||||
model = 'unknown'
|
||||
} = usageTotals || {};
|
||||
|
||||
const rates = getRates(model);
|
||||
const estimatedCostUsd = Math.round((
|
||||
(inputTokens / 1e6) * rates.in +
|
||||
(outputTokens / 1e6) * rates.out +
|
||||
(cacheWriteTokens / 1e6) * rates.cacheWrite +
|
||||
(cacheReadTokens / 1e6) * rates.cacheRead
|
||||
) * 1e6) / 1e6;
|
||||
|
||||
const metricsDir = path.join(getClaudeDir(), 'metrics');
|
||||
ensureDir(metricsDir);
|
||||
|
||||
const row = {
|
||||
timestamp: new Date().toISOString(),
|
||||
session_id: sessionId,
|
||||
timestamp: new Date().toISOString(),
|
||||
session_id: sessionId,
|
||||
transcript_path: transcriptPath || '',
|
||||
model,
|
||||
input_tokens: inputTokens,
|
||||
output_tokens: outputTokens,
|
||||
estimated_cost_usd: estimateCost(model, inputTokens, outputTokens)
|
||||
input_tokens: inputTokens,
|
||||
output_tokens: outputTokens,
|
||||
cache_write_tokens: cacheWriteTokens,
|
||||
cache_read_tokens: cacheReadTokens,
|
||||
estimated_cost_usd: estimatedCostUsd
|
||||
};
|
||||
|
||||
appendFile(path.join(metricsDir, 'costs.jsonl'), `${JSON.stringify(row)}\n`);
|
||||
} catch {
|
||||
// Keep hook non-blocking.
|
||||
// Non-blocking — never fail the Stop hook.
|
||||
}
|
||||
|
||||
// Pass stdin through (required by ECC hook convention).
|
||||
process.stdout.write(raw);
|
||||
});
|
||||
|
||||
@@ -4,6 +4,10 @@
|
||||
const MAX_STDIN = 1024 * 1024;
|
||||
const path = require('path');
|
||||
const { splitShellSegments } = require('../lib/shell-split');
|
||||
const {
|
||||
extractCommandSubstitutions,
|
||||
extractSubshellGroups
|
||||
} = require('../lib/shell-substitution');
|
||||
|
||||
const DEV_COMMAND_WORDS = new Set([
|
||||
'npm',
|
||||
@@ -123,6 +127,8 @@ function getLeadingCommandWord(segment) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (token === '{' || token === '}') continue;
|
||||
|
||||
if (/^[A-Za-z_][A-Za-z0-9_]*=.*/.test(token)) continue;
|
||||
|
||||
const normalizedToken = normalizeCommandWord(token);
|
||||
@@ -154,23 +160,55 @@ process.stdin.on('data', chunk => {
|
||||
}
|
||||
});
|
||||
|
||||
const TMUX_LAUNCHER = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
|
||||
const DEV_PATTERN = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn(?:\s+run)?\s+dev|bun(?:\s+run)?\s+dev)\b/;
|
||||
|
||||
/**
|
||||
* Collect every command-line segment we should evaluate. Returns the top-level
|
||||
* segments first, then segments harvested from `$(...)` / backtick command
|
||||
* substitutions and plain `(...)` subshell groups, recursively.
|
||||
*
|
||||
* Without this expansion the leading-command and dev-pattern check below only
|
||||
* sees the outermost command, so wrappers like `$(npm run dev)` and
|
||||
* `(npm run dev)` (which still spawn a dev server) sneak past.
|
||||
*/
|
||||
function collectCheckSegments(cmd) {
|
||||
const segments = [...splitShellSegments(cmd)];
|
||||
const queue = [cmd];
|
||||
const seen = new Set();
|
||||
|
||||
while (queue.length) {
|
||||
const current = queue.shift();
|
||||
if (seen.has(current)) continue;
|
||||
seen.add(current);
|
||||
|
||||
for (const body of extractCommandSubstitutions(current)) {
|
||||
for (const seg of splitShellSegments(body)) segments.push(seg);
|
||||
queue.push(body);
|
||||
}
|
||||
for (const body of extractSubshellGroups(current)) {
|
||||
for (const seg of splitShellSegments(body)) segments.push(seg);
|
||||
queue.push(body);
|
||||
}
|
||||
}
|
||||
|
||||
return segments;
|
||||
}
|
||||
|
||||
function isBlockedDevSegment(segment) {
|
||||
const commandWord = getLeadingCommandWord(segment);
|
||||
if (!commandWord || !DEV_COMMAND_WORDS.has(commandWord)) return false;
|
||||
return DEV_PATTERN.test(segment) && !TMUX_LAUNCHER.test(segment);
|
||||
}
|
||||
|
||||
process.stdin.on('end', () => {
|
||||
try {
|
||||
const input = JSON.parse(raw);
|
||||
const cmd = String(input.tool_input?.command || '');
|
||||
|
||||
if (process.platform !== 'win32') {
|
||||
const segments = splitShellSegments(cmd);
|
||||
const tmuxLauncher = /^\s*tmux\s+(new|new-session|new-window|split-window)\b/;
|
||||
const devPattern = /\b(npm\s+run\s+dev|pnpm(?:\s+run)?\s+dev|yarn\s+dev|bun\s+run\s+dev)\b/;
|
||||
|
||||
const hasBlockedDev = segments.some(segment => {
|
||||
const commandWord = getLeadingCommandWord(segment);
|
||||
if (!commandWord || !DEV_COMMAND_WORDS.has(commandWord)) {
|
||||
return false;
|
||||
}
|
||||
return devPattern.test(segment) && !tmuxLauncher.test(segment);
|
||||
});
|
||||
const segments = collectCheckSegments(cmd);
|
||||
const hasBlockedDev = segments.some(isBlockedDevSegment);
|
||||
|
||||
if (hasBlockedDev) {
|
||||
console.error('[Hook] BLOCKED: Dev server must run in tmux for log access');
|
||||
|
||||
@@ -19,7 +19,8 @@ const {
|
||||
getTempDir,
|
||||
writeFile,
|
||||
readStdinJson,
|
||||
log
|
||||
log,
|
||||
output
|
||||
} = require('../lib/utils');
|
||||
|
||||
async function resolveSessionId() {
|
||||
@@ -77,14 +78,25 @@ async function main() {
|
||||
writeFile(counterFile, String(count));
|
||||
}
|
||||
|
||||
// Suggest compact after threshold tool calls
|
||||
// Suggest compact after threshold tool calls.
|
||||
//
|
||||
// log() writes to stderr (debug log). Per the Claude Code hooks guide,
|
||||
// non-blocking PreToolUse stderr (exit 0) is only written to the debug log;
|
||||
// it does not reach the model. To inject a user-facing suggestion without
|
||||
// blocking the tool call, emit structured JSON to stdout with
|
||||
// hookSpecificOutput.additionalContext — the documented mechanism for
|
||||
// PreToolUse hooks to add context to the next model turn.
|
||||
if (count === threshold) {
|
||||
log(`[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`);
|
||||
const msg = `[StrategicCompact] ${threshold} tool calls reached - consider /compact if transitioning phases`;
|
||||
log(msg);
|
||||
output({ hookSpecificOutput: { hookEventName: 'PreToolUse', additionalContext: msg } });
|
||||
}
|
||||
|
||||
// Suggest at regular intervals after threshold (every 25 calls from threshold)
|
||||
if (count > threshold && (count - threshold) % 25 === 0) {
|
||||
log(`[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`);
|
||||
const msg = `[StrategicCompact] ${count} tool calls - good checkpoint for /compact if context is stale`;
|
||||
log(msg);
|
||||
output({ hookSpecificOutput: { hookEventName: 'PreToolUse', additionalContext: msg } });
|
||||
}
|
||||
|
||||
process.exit(0);
|
||||
|
||||
246
scripts/lib/shell-substitution.js
Normal file
246
scripts/lib/shell-substitution.js
Normal file
@@ -0,0 +1,246 @@
|
||||
'use strict';
|
||||
|
||||
/**
|
||||
* Extract executable command-substitution bodies from a shell line.
|
||||
*
|
||||
* Single quotes are literal, so substitutions inside them are ignored;
|
||||
* double quotes still permit substitutions, so those bodies are scanned
|
||||
* before quoted text is stripped. Returns each substitution body plus
|
||||
* any nested substitutions discovered recursively.
|
||||
*
|
||||
* Originally introduced in scripts/hooks/gateguard-fact-force.js
|
||||
* (PR #1853 round 2). Extracted to a shared lib so other PreToolUse
|
||||
* hooks that need the same "scan inside `$(...)` and backticks"
|
||||
* behavior can reuse it without duplicating the parser.
|
||||
*
|
||||
* @param {string} input
|
||||
* @returns {string[]}
|
||||
*/
|
||||
function extractCommandSubstitutions(input) {
|
||||
const source = String(input || '');
|
||||
const substitutions = [];
|
||||
let inSingle = false;
|
||||
let inDouble = false;
|
||||
|
||||
for (let i = 0; i < source.length; i++) {
|
||||
const ch = source[i];
|
||||
const prev = source[i - 1];
|
||||
|
||||
if (ch === '\\' && !inSingle) {
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === "'" && !inDouble && prev !== '\\') {
|
||||
inSingle = !inSingle;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '"' && !inSingle && prev !== '\\') {
|
||||
inDouble = !inDouble;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (inSingle) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '`') {
|
||||
let body = '';
|
||||
i += 1;
|
||||
while (i < source.length) {
|
||||
const inner = source[i];
|
||||
if (inner === '\\') {
|
||||
body += inner;
|
||||
if (i + 1 < source.length) {
|
||||
body += source[i + 1];
|
||||
i += 2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (inner === '`') {
|
||||
break;
|
||||
}
|
||||
body += inner;
|
||||
i += 1;
|
||||
}
|
||||
if (body.trim()) {
|
||||
substitutions.push(body);
|
||||
substitutions.push(...extractCommandSubstitutions(body));
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '$' && source[i + 1] === '(') {
|
||||
let depth = 1;
|
||||
let body = '';
|
||||
let bodyInSingle = false;
|
||||
let bodyInDouble = false;
|
||||
i += 2;
|
||||
while (i < source.length && depth > 0) {
|
||||
const inner = source[i];
|
||||
const innerPrev = source[i - 1];
|
||||
if (inner === '\\' && !bodyInSingle) {
|
||||
body += inner;
|
||||
if (i + 1 < source.length) {
|
||||
body += source[i + 1];
|
||||
i += 2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (inner === "'" && !bodyInDouble && innerPrev !== '\\') {
|
||||
bodyInSingle = !bodyInSingle;
|
||||
} else if (inner === '"' && !bodyInSingle && innerPrev !== '\\') {
|
||||
bodyInDouble = !bodyInDouble;
|
||||
} else if (!bodyInSingle && !bodyInDouble) {
|
||||
if (inner === '(') {
|
||||
depth += 1;
|
||||
} else if (inner === ')') {
|
||||
depth -= 1;
|
||||
if (depth === 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
body += inner;
|
||||
i += 1;
|
||||
}
|
||||
if (body.trim()) {
|
||||
substitutions.push(body);
|
||||
substitutions.push(...extractCommandSubstitutions(body));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return substitutions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract bodies of plain `(...)` subshell groups.
|
||||
*
|
||||
* Bash treats `(npm run dev)` as a subshell that executes its contents, but
|
||||
* the regex-light segment splitters used by our PreToolUse hooks don't peer
|
||||
* inside those parens. This helper finds top-level `(...)` groups (skipping
|
||||
* `$(...)` command substitutions and backticks, which `extractCommandSubstitutions`
|
||||
* already covers) and returns each body, recursing for nested groups.
|
||||
*
|
||||
* Quote semantics:
|
||||
* - Single quotes are literal: `'( ... )'` is a string, not a subshell.
|
||||
* - Double quotes are literal *for parens*: `"( ... )"` is a string too —
|
||||
* bash only honors `$( )` inside double quotes, not bare `( )`.
|
||||
*
|
||||
* @param {string} input
|
||||
* @returns {string[]}
|
||||
*/
|
||||
function extractSubshellGroups(input) {
|
||||
const source = String(input || '');
|
||||
const groups = [];
|
||||
let inSingle = false;
|
||||
let inDouble = false;
|
||||
|
||||
for (let i = 0; i < source.length; i++) {
|
||||
const ch = source[i];
|
||||
const prev = source[i - 1];
|
||||
|
||||
if (ch === '\\' && !inSingle) {
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === "'" && !inDouble && prev !== '\\') {
|
||||
inSingle = !inSingle;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '"' && !inSingle && prev !== '\\') {
|
||||
inDouble = !inDouble;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (inSingle || inDouble) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '$' && source[i + 1] === '(') {
|
||||
let depth = 1;
|
||||
let skipInSingle = false;
|
||||
let skipInDouble = false;
|
||||
i += 2;
|
||||
while (i < source.length && depth > 0) {
|
||||
const inner = source[i];
|
||||
const innerPrev = source[i - 1];
|
||||
if (inner === '\\' && !skipInSingle) {
|
||||
i += 2;
|
||||
continue;
|
||||
}
|
||||
if (inner === "'" && !skipInDouble && innerPrev !== '\\') {
|
||||
skipInSingle = !skipInSingle;
|
||||
} else if (inner === '"' && !skipInSingle && innerPrev !== '\\') {
|
||||
skipInDouble = !skipInDouble;
|
||||
} else if (!skipInSingle && !skipInDouble) {
|
||||
if (inner === '(') depth += 1;
|
||||
else if (inner === ')') depth -= 1;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
i -= 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '`') {
|
||||
i += 1;
|
||||
while (i < source.length && source[i] !== '`') {
|
||||
if (source[i] === '\\' && i + 1 < source.length) {
|
||||
i += 2;
|
||||
continue;
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ch === '(') {
|
||||
let depth = 1;
|
||||
let body = '';
|
||||
let bodyInSingle = false;
|
||||
let bodyInDouble = false;
|
||||
i += 1;
|
||||
while (i < source.length && depth > 0) {
|
||||
const inner = source[i];
|
||||
const innerPrev = source[i - 1];
|
||||
if (inner === '\\' && !bodyInSingle) {
|
||||
body += inner;
|
||||
if (i + 1 < source.length) {
|
||||
body += source[i + 1];
|
||||
i += 2;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (inner === "'" && !bodyInDouble && innerPrev !== '\\') {
|
||||
bodyInSingle = !bodyInSingle;
|
||||
} else if (inner === '"' && !bodyInSingle && innerPrev !== '\\') {
|
||||
bodyInDouble = !bodyInDouble;
|
||||
} else if (!bodyInSingle && !bodyInDouble) {
|
||||
if (inner === '(') {
|
||||
depth += 1;
|
||||
} else if (inner === ')') {
|
||||
depth -= 1;
|
||||
if (depth === 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
body += inner;
|
||||
i += 1;
|
||||
}
|
||||
if (body.trim()) {
|
||||
groups.push(body);
|
||||
groups.push(...extractSubshellGroups(body));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return groups;
|
||||
}
|
||||
|
||||
module.exports = { extractCommandSubstitutions, extractSubshellGroups };
|
||||
@@ -291,7 +291,9 @@ function buildChecks(rootDir) {
|
||||
pass: fileExists(rootDir, 'docs/releases/2.0.0-rc.1/publication-readiness.md')
|
||||
&& fileExists(rootDir, 'docs/releases/2.0.0-rc.1/publication-evidence-2026-05-13-post-hardening.md')
|
||||
&& fileExists(rootDir, 'docs/security/supply-chain-incident-response.md')
|
||||
&& fileExists(rootDir, 'scripts/ci/scan-supply-chain-iocs.js')
|
||||
&& fileExists(rootDir, 'scripts/ci/validate-workflow-security.js')
|
||||
&& fileExists(rootDir, 'tests/ci/scan-supply-chain-iocs.test.js')
|
||||
&& fileExists(rootDir, 'tests/ci/validate-workflow-security.test.js')
|
||||
&& fileExists(rootDir, 'tests/scripts/npm-publish-surface.test.js')
|
||||
&& fileExists(rootDir, 'tests/docs/ecc2-release-surface.test.js')
|
||||
@@ -316,6 +318,10 @@ function buildChecks(rootDir) {
|
||||
&& includesAll(supplyChainIncidentResponse, [
|
||||
'TanStack',
|
||||
'Mini Shai-Hulud',
|
||||
'scan-supply-chain-iocs.js',
|
||||
'gh-token-monitor',
|
||||
'.claude/settings.json',
|
||||
'.vscode/tasks.json',
|
||||
'npm audit signatures',
|
||||
'trusted publishing',
|
||||
'pull_request_target',
|
||||
|
||||
@@ -234,13 +234,13 @@ Options:
|
||||
|
||||
Execute installation:
|
||||
```bash
|
||||
# Common rules (flat copy into rules/)
|
||||
cp -r $ECC_ROOT/rules/common/* $TARGET/rules/
|
||||
# Common rules
|
||||
cp -r $ECC_ROOT/rules/common $TARGET/rules/common
|
||||
|
||||
# Language-specific rules (flat copy into rules/)
|
||||
cp -r $ECC_ROOT/rules/typescript/* $TARGET/rules/ # if selected
|
||||
cp -r $ECC_ROOT/rules/python/* $TARGET/rules/ # if selected
|
||||
cp -r $ECC_ROOT/rules/golang/* $TARGET/rules/ # if selected
|
||||
# Language-specific rules (preserve per-language directories)
|
||||
cp -r $ECC_ROOT/rules/typescript $TARGET/rules/typescript # if selected
|
||||
cp -r $ECC_ROOT/rules/python $TARGET/rules/python # if selected
|
||||
cp -r $ECC_ROOT/rules/golang $TARGET/rules/golang # if selected
|
||||
```
|
||||
|
||||
**Important**: If the user selects any language-specific rules but NOT common rules, warn them:
|
||||
|
||||
371
skills/prisma-patterns/SKILL.md
Normal file
371
skills/prisma-patterns/SKILL.md
Normal file
@@ -0,0 +1,371 @@
|
||||
---
|
||||
name: prisma-patterns
|
||||
description: Prisma ORM patterns for TypeScript backends — schema design, query optimization, transactions, pagination, and critical traps like updateMany returning count not records, $transaction timeouts, migrate dev resetting the DB, @updatedAt skipped on bulk writes, and serverless connection exhaustion.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Prisma Patterns
|
||||
|
||||
Production patterns and non-obvious traps for Prisma ORM in TypeScript backends.
|
||||
Tested against Prisma 5.x and 6.x. Some behaviors differ from Prisma 4.
|
||||
|
||||
Check the Prisma version before applying version-specific patterns:
|
||||
|
||||
```bash
|
||||
npx prisma --version
|
||||
```
|
||||
|
||||
Prisma 5 introduced `relationJoins`, which can load relations via JOIN rather than separate queries depending on query strategy and configuration. The `omit` field modifier and `prisma.$extends` Client Extensions API were also added. Note: `relationJoins` can cause row explosion on large 1:N relations or deep nested `include` — benchmark both approaches when relations may return many rows per parent.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Designing or modifying Prisma schema models and relations
|
||||
- Writing queries, transactions, or pagination logic
|
||||
- Using `updateMany`, `deleteMany`, or any bulk operation
|
||||
- Running or planning database migrations
|
||||
- Deploying to serverless environments (Vercel, Lambda, Cloudflare Workers)
|
||||
- Implementing soft delete or multi-tenant row filtering
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### ID Strategy
|
||||
|
||||
| Strategy | Use When | Avoid When |
|
||||
|---|---|---|
|
||||
| `@default(cuid())` | Default choice — URL-safe, sortable, no collisions | Sequential IDs needed for external systems |
|
||||
| `@default(uuid())` | Interoperability with non-Prisma systems required | High-write tables (random UUIDs fragment B-tree indexes) |
|
||||
| `@default(autoincrement())` | Internal join tables, audit logs | Public-facing IDs (exposes record count) |
|
||||
|
||||
### Schema Defaults
|
||||
|
||||
```prisma
|
||||
model User {
|
||||
id String @id @default(cuid())
|
||||
email String @unique // @unique already creates an index — no @@index needed
|
||||
name String
|
||||
role Role @default(USER)
|
||||
posts Post[]
|
||||
createdAt DateTime @default(now())
|
||||
updatedAt DateTime @updatedAt
|
||||
deletedAt DateTime?
|
||||
|
||||
@@index([createdAt])
|
||||
@@index([deletedAt, createdAt]) // composite for soft-delete + sort queries
|
||||
}
|
||||
```
|
||||
|
||||
- Add `@@index` on every foreign key and column used in `WHERE` or `ORDER BY`.
|
||||
- Declare `deletedAt DateTime?` upfront when soft delete is a foreseeable requirement — adding it later requires a migration on a live table.
|
||||
- `updatedAt @updatedAt` is set automatically by Prisma on `update` and `upsert` only (see Anti-Patterns for bulk update trap).
|
||||
|
||||
### `include` vs `select`
|
||||
|
||||
| | `include` | `select` |
|
||||
|---|---|---|
|
||||
| Returns | All scalar fields + specified relations | Only specified fields |
|
||||
| Use when | You need most fields plus a relation | Hot paths, large tables, avoiding over-fetch |
|
||||
| Performance | May over-fetch on wide tables | Minimal payload, faster on large datasets |
|
||||
| Prisma 5 note | Uses JOIN by default (`relationJoins`) | Same |
|
||||
|
||||
```ts
|
||||
// include — all columns + relation
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id },
|
||||
include: { posts: { select: { id: true, title: true } } },
|
||||
});
|
||||
|
||||
// select — explicit allowlist
|
||||
const user = await prisma.user.findUnique({
|
||||
where: { id },
|
||||
select: { id: true, email: true, name: true },
|
||||
});
|
||||
```
|
||||
|
||||
Never return raw Prisma entities from API responses — map to response DTOs to control exposed fields:
|
||||
|
||||
```ts
|
||||
// BAD: leaks passwordHash, deletedAt, internal fields
|
||||
return await prisma.user.findUniqueOrThrow({ where: { id } });
|
||||
|
||||
// GOOD: explicit DTO mapping
|
||||
const user = await prisma.user.findUniqueOrThrow({ where: { id } });
|
||||
return { id: user.id, name: user.name, email: user.email };
|
||||
```
|
||||
|
||||
### Transaction Form Selection
|
||||
|
||||
| Situation | Use |
|
||||
|---|---|
|
||||
| Independent operations, no inter-dependency | Array form |
|
||||
| Later step depends on earlier result | Interactive form |
|
||||
| External calls (email, HTTP) involved | Outside transaction entirely |
|
||||
|
||||
```ts
|
||||
// Array form — batched in one round trip
|
||||
const [user, post] = await prisma.$transaction([
|
||||
prisma.user.update({ where: { id }, data: { name } }),
|
||||
prisma.post.create({ data: { title, authorId: id } }),
|
||||
]);
|
||||
|
||||
// Interactive form — use tx client only, never the outer prisma client
|
||||
const post = await prisma.$transaction(async (tx) => {
|
||||
const user = await tx.user.findUniqueOrThrow({ where: { id } });
|
||||
if (user.role !== 'ADMIN') throw new Error('Forbidden');
|
||||
return tx.post.create({ data: { title, authorId: user.id } });
|
||||
});
|
||||
```
|
||||
|
||||
### PrismaClient Singleton
|
||||
|
||||
Each `PrismaClient` instance opens its own connection pool. Instantiate once.
|
||||
|
||||
```ts
|
||||
// lib/prisma.ts
|
||||
import { PrismaClient } from '@prisma/client';
|
||||
|
||||
const globalForPrisma = globalThis as unknown as { prisma?: PrismaClient };
|
||||
|
||||
export const prisma =
|
||||
globalForPrisma.prisma ??
|
||||
new PrismaClient({
|
||||
log: process.env.NODE_ENV === 'development' ? ['query', 'error'] : ['error'],
|
||||
});
|
||||
|
||||
if (process.env.NODE_ENV !== 'production') globalForPrisma.prisma = prisma;
|
||||
```
|
||||
|
||||
The `globalThis` pattern prevents duplicate instances during hot reload (Next.js, nodemon, ts-node-dev).
|
||||
|
||||
### N+1 Problem
|
||||
|
||||
Loading relations inside a loop issues one query per row.
|
||||
|
||||
```ts
|
||||
// BAD: N+1 — one extra query per user
|
||||
const users = await prisma.user.findMany();
|
||||
for (const user of users) {
|
||||
const posts = await prisma.post.findMany({ where: { authorId: user.id } });
|
||||
}
|
||||
|
||||
// GOOD: single query
|
||||
const users = await prisma.user.findMany({ include: { posts: true } });
|
||||
```
|
||||
|
||||
With Prisma 5+ `relationJoins`, the `include` form uses a single JOIN. On large 1:N sets this may increase result set size — benchmark both approaches if the relation can return many rows per parent.
|
||||
|
||||
## Code Examples
|
||||
|
||||
### Cursor Pagination (preferred for feeds and large datasets)
|
||||
|
||||
```ts
|
||||
async function getPosts(cursor?: string, limit = 20) {
|
||||
const items = await prisma.post.findMany({
|
||||
where: { published: true },
|
||||
orderBy: [
|
||||
{ createdAt: 'desc' },
|
||||
{ id: 'desc' }, // secondary sort prevents unstable pagination on duplicate timestamps
|
||||
],
|
||||
take: limit + 1,
|
||||
...(cursor && { cursor: { id: cursor }, skip: 1 }),
|
||||
});
|
||||
|
||||
const hasNextPage = items.length > limit;
|
||||
if (hasNextPage) items.pop();
|
||||
|
||||
return { items, nextCursor: hasNextPage ? items[items.length - 1].id : null };
|
||||
}
|
||||
```
|
||||
|
||||
Fetch `limit + 1` and pop — canonical way to detect `hasNextPage` without an extra count query. Always include a unique field (e.g. `id`) as a secondary `orderBy` to prevent unstable pagination when multiple rows share the same timestamp. Use offset pagination only when users need to jump to arbitrary pages (admin tables).
|
||||
|
||||
### Soft Delete
|
||||
|
||||
```ts
|
||||
// Always filter explicitly — do not rely on middleware (hides behavior, hard to debug)
|
||||
const activeUsers = await prisma.user.findMany({ where: { deletedAt: null } });
|
||||
|
||||
await prisma.user.update({ where: { id }, data: { deletedAt: new Date() } });
|
||||
await prisma.user.update({ where: { id }, data: { deletedAt: null } }); // restore
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
```ts
|
||||
import { Prisma } from '@prisma/client';
|
||||
|
||||
try {
|
||||
await prisma.user.create({ data: { email } });
|
||||
} catch (e) {
|
||||
if (e instanceof Prisma.PrismaClientKnownRequestError) {
|
||||
if (e.code === 'P2002') throw new ConflictError('Email already exists');
|
||||
if (e.code === 'P2025') throw new NotFoundError('Record not found');
|
||||
if (e.code === 'P2003') throw new BadRequestError('Referenced record does not exist');
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
```
|
||||
|
||||
Common codes: `P2002` unique violation · `P2025` not found · `P2003` foreign key violation.
|
||||
|
||||
Catch at the service boundary and translate to domain errors. Never expose raw Prisma messages to API consumers.
|
||||
|
||||
### Connection Pool — Serverless
|
||||
|
||||
Embed connection params directly in `DATABASE_URL` — string concatenation breaks if the URL already has query parameters (e.g. `?schema=public`):
|
||||
|
||||
```bash
|
||||
# .env — preferred: embed params in the URL
|
||||
DATABASE_URL="postgresql://user:pass@host/db?connection_limit=1&pool_timeout=20"
|
||||
|
||||
# With an external pooler (PgBouncer, Supabase pooler)
|
||||
DATABASE_URL="postgresql://user:pass@host/db?pgbouncer=true&connection_limit=1"
|
||||
```
|
||||
|
||||
```ts
|
||||
// Vercel, AWS Lambda, and similar serverless runtimes: cap pool to 1 per instance
|
||||
// connection_limit and pool_timeout are controlled via DATABASE_URL
|
||||
const prisma = new PrismaClient();
|
||||
```
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
### `updateMany` returns a count, not records
|
||||
|
||||
```ts
|
||||
// BAD: result is { count: 2 } — users[0] is undefined
|
||||
const users = await prisma.user.updateMany({ where: { role: 'GUEST' }, data: { role: 'USER' } });
|
||||
|
||||
// GOOD: capture IDs first, then update, then fetch only the affected rows
|
||||
const targets = await prisma.user.findMany({
|
||||
where: { role: 'GUEST' },
|
||||
select: { id: true },
|
||||
});
|
||||
const ids = targets.map((u) => u.id);
|
||||
await prisma.user.updateMany({ where: { id: { in: ids } }, data: { role: 'USER' } });
|
||||
const updated = await prisma.user.findMany({ where: { id: { in: ids } } });
|
||||
```
|
||||
|
||||
Same applies to `deleteMany` — returns `{ count: n }`, never the deleted rows.
|
||||
|
||||
### `$transaction` interactive form times out after 5 seconds
|
||||
|
||||
```ts
|
||||
// BAD: external call inside transaction exceeds 5s default → "Transaction already closed"
|
||||
await prisma.$transaction(async (tx) => {
|
||||
const user = await tx.user.findUniqueOrThrow({ where: { id } });
|
||||
await sendWelcomeEmail(user.email); // external call
|
||||
await tx.user.update({ where: { id }, data: { emailSent: true } });
|
||||
});
|
||||
|
||||
// GOOD: external calls outside the transaction
|
||||
const user = await prisma.user.findUniqueOrThrow({ where: { id } });
|
||||
await sendWelcomeEmail(user.email);
|
||||
await prisma.user.update({ where: { id }, data: { emailSent: true } });
|
||||
|
||||
// Only raise timeout when bulk processing genuinely needs it
|
||||
await prisma.$transaction(async (tx) => { ... }, { timeout: 30_000 });
|
||||
```
|
||||
|
||||
### `migrate dev` can reset the database
|
||||
|
||||
`migrate dev` detects schema drift and may prompt to reset the DB, dropping all data.
|
||||
|
||||
```bash
|
||||
# NEVER on shared dev, staging, or production
|
||||
npx prisma migrate dev --name add_column
|
||||
|
||||
# Safe everywhere except local solo dev
|
||||
npx prisma migrate deploy
|
||||
|
||||
# Check drift without applying
|
||||
npx prisma migrate diff \
|
||||
--from-migrations ./prisma/migrations \
|
||||
--to-schema-datamodel ./prisma/schema.prisma \
|
||||
--shadow-database-url "$SHADOW_DATABASE_URL"
|
||||
```
|
||||
|
||||
### Manually editing a migration file breaks future deploys
|
||||
|
||||
Prisma checksums every migration file. Editing after apply causes `P3006 checksum mismatch` on every environment where the original already ran. Create a new migration instead.
|
||||
|
||||
### Breaking schema changes require multi-step migration
|
||||
|
||||
Adding `NOT NULL` to an existing column or renaming a column in one migration will lock the table or drop data. Use expand-and-contract:
|
||||
|
||||
```bash
|
||||
# Step 1: create migration locally, then deploy
|
||||
npx prisma migrate dev --name add_new_column # local only
|
||||
npx prisma migrate deploy # staging / production
|
||||
```
|
||||
|
||||
```ts
|
||||
// Step 2: backfill data (run in a script or migration job, not in the shell)
|
||||
await prisma.user.updateMany({ data: { newColumn: derivedValue } });
|
||||
```
|
||||
|
||||
```bash
|
||||
# Step 3: create the NOT NULL constraint migration locally, then deploy
|
||||
npx prisma migrate dev --name make_new_column_required # local only
|
||||
npx prisma migrate deploy # staging / production
|
||||
```
|
||||
|
||||
### `@updatedAt` does not fire on `updateMany`
|
||||
|
||||
`@updatedAt` is set automatically only on `update` and `upsert`. Bulk writes leave it stale.
|
||||
|
||||
```ts
|
||||
// BAD: updatedAt stays at its old value
|
||||
await prisma.post.updateMany({ where: { authorId }, data: { published: true } });
|
||||
|
||||
// GOOD
|
||||
await prisma.post.updateMany({
|
||||
where: { authorId },
|
||||
data: { published: true, updatedAt: new Date() },
|
||||
});
|
||||
```
|
||||
|
||||
### Soft delete + `findUniqueOrThrow` leaks deleted records
|
||||
|
||||
`findUniqueOrThrow` throws `P2025` only when the row does not exist in the DB. Soft-deleted rows still exist and are returned without error.
|
||||
|
||||
`findUniqueOrThrow` requires a unique constraint field in `where` — adding `deletedAt: null` alongside `id` breaks the type because `{ id, deletedAt }` is not a compound unique constraint. Use `findFirstOrThrow` instead.
|
||||
|
||||
```ts
|
||||
// BAD: returns soft-deleted user
|
||||
const user = await prisma.user.findUniqueOrThrow({ where: { id } });
|
||||
|
||||
// BAD: Prisma type error — { id, deletedAt } is not a unique constraint
|
||||
const user = await prisma.user.findUniqueOrThrow({ where: { id, deletedAt: null } });
|
||||
|
||||
// GOOD: findFirstOrThrow supports arbitrary where conditions
|
||||
const user = await prisma.user.findFirstOrThrow({ where: { id, deletedAt: null } });
|
||||
```
|
||||
|
||||
### `deleteMany` without `where` deletes every row
|
||||
|
||||
```ts
|
||||
// BAD: silently wipes the table
|
||||
await prisma.post.deleteMany();
|
||||
|
||||
// GOOD
|
||||
await prisma.post.deleteMany({ where: { authorId: userId } });
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
| Rule | Reason |
|
||||
|---|---|
|
||||
| `migrate deploy` in CI/CD, `migrate dev` only locally | `migrate dev` can reset the DB on drift |
|
||||
| Map entities to response DTOs | Prevents leaking internal fields |
|
||||
| Catch `PrismaClientKnownRequestError` at service boundary | Translate to domain errors |
|
||||
| Prefer `*OrThrow` methods over manual null checks | Throws P2025 automatically; use `findFirstOrThrow` when filtering non-unique fields |
|
||||
| `connection_limit=1` + external pooler in serverless | Prevents connection exhaustion |
|
||||
| Always provide `where` on `deleteMany` | Prevents accidental table wipe |
|
||||
| Set `updatedAt: new Date()` manually in `updateMany` | `@updatedAt` skips bulk writes |
|
||||
|
||||
## Related Skills
|
||||
|
||||
- `nestjs-patterns` — NestJS service layer that integrates Prisma
|
||||
- `postgres-patterns` — PostgreSQL-level indexing and connection tuning
|
||||
- `database-migrations` — multi-step migration planning for production
|
||||
- `backend-patterns` — general API and service layer design
|
||||
@@ -1,28 +1,38 @@
|
||||
---
|
||||
name: tinystruct-patterns
|
||||
description: Use when developing application modules or microservices with the tinystruct Java framework. Covers routing, context management, JSON handling with Builder, and CLI/HTTP dual-mode patterns.
|
||||
description: Expert guidance for developing with the tinystruct Java framework. Use when working on the tinystruct codebase or any project built on tinystruct — including creating Application classes, @Action-mapped routes, unit tests, ActionRegistry, HTTP/CLI dual-mode handling, the built-in HTTP server, the event system, JSON with Builder/Builders, database persistence with AbstractData, POJO generation, Server-Sent Events (SSE), file uploads, and outbound HTTP networking.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# tinystruct Development Patterns
|
||||
|
||||
Architecture and implementation patterns for building modules with the **tinystruct** Java framework – a lightweight system where CLI and HTTP are equal citizens.
|
||||
Architecture and implementation patterns for building modules with the **tinystruct** Java framework – a lightweight, high-performance framework that treats CLI and HTTP as equal citizens, requiring no `main()` method and minimal configuration.
|
||||
|
||||
## When to Use
|
||||
## Core Principle
|
||||
|
||||
**CLI and HTTP are equal citizens.** Every method annotated with `@Action` should ideally be runnable from both a terminal and a web browser without modification. This "dual-mode" capability is the core design philosophy of tinystruct.
|
||||
|
||||
## When to Activate
|
||||
|
||||
### When to Use
|
||||
|
||||
- Creating new `Application` modules by extending `AbstractApplication`.
|
||||
- Defining routes and command-line actions using `@Action`.
|
||||
- Handling per-request state via `Context`.
|
||||
- Performing JSON serialization using the native `Builder` component.
|
||||
- Performing JSON serialization using the native `Builder` and `Builders` components.
|
||||
- Working with database persistence via `AbstractData` POJOs.
|
||||
- Generating POJOs from database tables using the `generate` command.
|
||||
- Implementing Server-Sent Events (SSE) for real-time push.
|
||||
- Handling file uploads via multipart data.
|
||||
- Making outbound HTTP requests with `URLRequest` and `HTTPHandler`.
|
||||
- Configuring database connections or system settings in `application.properties`.
|
||||
- Generating or re-generating the standard `bin/dispatcher` entry point via `ApplicationManager.init()`.
|
||||
- Debugging routing conflicts (Actions) or CLI argument parsing.
|
||||
|
||||
## How It Works
|
||||
|
||||
The tinystruct framework treats any method annotated with `@Action` as a routable endpoint for both terminal and web environments. Applications are created by extending `AbstractApplication`, which provides core lifecycle hooks like `init()` and access to the request `Context`.
|
||||
|
||||
Routing is handled by the `ActionRegistry`, which automatically maps path segments to method arguments and injects dependencies. For data-only services, the native `Builder` component should be used for JSON serialization to maintain a zero-dependency footprint. The framework also includes a utility in `ApplicationManager` to bootstrap the project's execution environment by generating the `bin/dispatcher` script.
|
||||
Routing is handled by the `ActionRegistry`, which automatically maps path segments to method arguments and injects dependencies. For data-only services, the native `Builder` and `Builders` components should be used for JSON serialization to maintain a zero-dependency footprint. The database layer uses `AbstractData` POJOs paired with XML mapping files for CRUD operations without external ORM libraries.
|
||||
|
||||
## Examples
|
||||
|
||||
@@ -40,38 +50,77 @@ public class MyService extends AbstractApplication {
|
||||
public String greet() {
|
||||
return "Hello from tinystruct!";
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Parameterized Routing (getUser)
|
||||
```java
|
||||
// Handles /api/user/123 (Web) or "bin/dispatcher api/user/123" (CLI)
|
||||
@Action("api/user/(\\d+)")
|
||||
public String getUser(int userId) {
|
||||
return "User ID: " + userId;
|
||||
// Path parameter: GET /?q=greet/James OR bin/dispatcher greet/James
|
||||
@Action("greet")
|
||||
public String greet(String name) {
|
||||
return "Hello, " + name + "!";
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### HTTP Mode Disambiguation (login)
|
||||
```java
|
||||
@Action(value = "login", mode = Mode.HTTP_POST)
|
||||
public boolean doLogin() {
|
||||
// Process login logic
|
||||
return true;
|
||||
public String doLogin(Request<?, ?> request) throws ApplicationException {
|
||||
request.getSession().setAttribute("userId", "42");
|
||||
return "Logged in";
|
||||
}
|
||||
```
|
||||
|
||||
### Native JSON Data Handling (getData)
|
||||
### Native JSON Data Handling (Builder + Builders)
|
||||
```java
|
||||
import org.tinystruct.data.component.Builder;
|
||||
import org.tinystruct.data.component.Builders;
|
||||
|
||||
@Action("api/data")
|
||||
public Builder getData() throws ApplicationException {
|
||||
Builder builder = new Builder();
|
||||
builder.put("status", "success");
|
||||
Builder nested = new Builder();
|
||||
nested.put("id", 1);
|
||||
nested.put("name", "James");
|
||||
builder.put("data", nested);
|
||||
return builder;
|
||||
public String getData() throws ApplicationException {
|
||||
Builders dataList = new Builders();
|
||||
Builder item = new Builder();
|
||||
item.put("id", 1);
|
||||
item.put("name", "James");
|
||||
dataList.add(item);
|
||||
|
||||
Builder response = new Builder();
|
||||
response.put("status", "success");
|
||||
response.put("data", dataList);
|
||||
return response.toString(); // {"status":"success","data":[{"id":1,"name":"James"}]}
|
||||
}
|
||||
```
|
||||
|
||||
### SSE (Server-Sent Events)
|
||||
```java
|
||||
import org.tinystruct.http.SSEPushManager;
|
||||
|
||||
@Action("sse/connect")
|
||||
public String connect() {
|
||||
return "{\"type\":\"connect\",\"message\":\"Connected to SSE\"}";
|
||||
}
|
||||
|
||||
// Push to a specific client
|
||||
String sessionId = getContext().getId();
|
||||
Builder msg = new Builder();
|
||||
msg.put("text", "Hello, user!");
|
||||
SSEPushManager.getInstance().push(sessionId, msg);
|
||||
|
||||
// Broadcast to all
|
||||
// Broadcast to all
|
||||
SSEPushManager.getInstance().broadcast(msg);
|
||||
```
|
||||
|
||||
### File Upload
|
||||
```java
|
||||
import org.tinystruct.data.FileEntity;
|
||||
|
||||
@Action(value = "upload", mode = Mode.HTTP_POST)
|
||||
public String upload(Request<?, ?> request) throws ApplicationException {
|
||||
List<FileEntity> files = request.getAttachments();
|
||||
if (files != null) {
|
||||
for (FileEntity file : files) {
|
||||
System.out.println("Uploaded: " + file.getFilename());
|
||||
}
|
||||
}
|
||||
return "Upload OK";
|
||||
}
|
||||
```
|
||||
|
||||
@@ -83,35 +132,48 @@ Settings are managed in `src/main/resources/application.properties`.
|
||||
# Database
|
||||
driver=org.h2.Driver
|
||||
database.url=jdbc:h2:~/mydb
|
||||
database.user=sa
|
||||
database.password=
|
||||
|
||||
# App specific
|
||||
my.service.endpoint=https://api.example.com
|
||||
# Server
|
||||
default.home.page=hello
|
||||
server.port=8080
|
||||
|
||||
# Locale
|
||||
default.language=en_US
|
||||
|
||||
# Session (Redis for clustered environments)
|
||||
# default.session.repository=org.tinystruct.http.RedisSessionRepository
|
||||
# redis.host=127.0.0.1
|
||||
# redis.port=6379
|
||||
```
|
||||
|
||||
## Testing Patterns
|
||||
|
||||
Use JUnit 5 to test actions by verifying they are registered in the `ActionRegistry`.
|
||||
|
||||
Access config values in your application:
|
||||
```java
|
||||
@Test
|
||||
void testActionRegistration() {
|
||||
Application app = new MyService();
|
||||
app.init();
|
||||
|
||||
ActionRegistry registry = ActionRegistry.getInstance();
|
||||
assertNotNull(registry.get("greet"));
|
||||
}
|
||||
String port = this.getConfiguration("server.port");
|
||||
```
|
||||
|
||||
## Red Flags & Anti-patterns
|
||||
|
||||
| Symptom | Correct Pattern |
|
||||
|---|---|
|
||||
| Importing `com.google.gson` or `com.fasterxml.jackson` | Use `org.tinystruct.data.component.Builder`. |
|
||||
| `FileNotFoundException` for `.view` files | Call `setTemplateRequired(false)` in `init()` for API-only apps. |
|
||||
| Importing `com.google.gson` or `com.fasterxml.jackson` | Use `org.tinystruct.data.component.Builder` / `Builders`. |
|
||||
| Using `List<Builder>` for JSON arrays | Use `Builders` to avoid generic type erasure issues. |
|
||||
| `ApplicationRuntimeException: template not found` | Call `setTemplateRequired(false)` in `init()` for API-only apps. |
|
||||
| Annotating `private` methods with `@Action` | Actions must be `public` to be registered by the framework. |
|
||||
| Hardcoding `main(String[] args)` in apps | Use `bin/dispatcher` as the entry point for all modules. |
|
||||
| Manual `ActionRegistry` registration | Prefer the `@Action` annotation for automatic discovery. |
|
||||
| Action not found at runtime | Ensure class is imported via `--import` or listed in `application.properties`. |
|
||||
| CLI arg not visible | Pass with `--key value`; access via `getContext().getAttribute("--key")`. |
|
||||
| Two methods same path, wrong one fires | Set explicit `mode` (e.g., `HTTP_GET` vs `HTTP_POST`) to disambiguate. |
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Granular Applications**: Break logic into smaller, focused applications rather than one monolithic class.
|
||||
2. **Setup in `init()`**: Leverage `init()` for setup (config, DB) rather than the constructor. Do NOT call `setAction()` — use `@Action` annotation.
|
||||
3. **Mode Awareness**: Use the `Mode` parameter in `@Action` to restrict sensitive operations to `CLI` only or specific HTTP methods.
|
||||
4. **Context over Params**: For optional CLI flags, use `getContext().getAttribute("--flag")` rather than adding parameters to the method signature.
|
||||
5. **Asynchronous Events**: For heavy tasks triggered by events, use `CompletableFuture.runAsync()` inside the event handler.
|
||||
|
||||
## Technical Reference
|
||||
|
||||
@@ -119,13 +181,23 @@ Detailed guides are available in the `references/` directory:
|
||||
|
||||
- [Architecture & Config](references/architecture.md) — Abstractions, Package Map, Properties
|
||||
- [Routing & @Action](references/routing.md) — Annotation details, Modes, Parameters
|
||||
- [Data Handling](references/data-handling.md) — Using the native `Builder` for JSON
|
||||
- [System & Usage](references/system-usage.md) — Context, Sessions, Events, CLI usage
|
||||
- [Testing Patterns](references/testing.md) — JUnit 5 integration and ActionRegistry testing
|
||||
- [Data Handling](references/data-handling.md) — Builder, Builders, JSON serialization & parsing
|
||||
- [Database Persistence](references/database.md) — AbstractData POJOs, CRUD, mapping XML, POJO generation
|
||||
- [System & Usage](references/system-usage.md) — Context, Sessions, SSE, File Uploads, Events, Networking
|
||||
- [Testing Patterns](references/testing.md) — JUnit 5 unit and HTTP integration testing
|
||||
|
||||
## Reference Source Files (Internal)
|
||||
|
||||
- `src/main/java/org/tinystruct/AbstractApplication.java` — Core base class
|
||||
- `src/main/java/org/tinystruct/AbstractApplication.java` — Core base class with lifecycle hooks
|
||||
- `src/main/java/org/tinystruct/system/annotation/Action.java` — Annotation & Modes
|
||||
- `src/main/java/org/tinystruct/application/ActionRegistry.java` — Routing Engine
|
||||
- `src/main/java/org/tinystruct/data/component/Builder.java` — JSON/Data Serializer
|
||||
- `src/main/java/org/tinystruct/data/component/Builder.java` — JSON object serializer
|
||||
- `src/main/java/org/tinystruct/data/component/Builders.java` — JSON array serializer
|
||||
- `src/main/java/org/tinystruct/data/component/AbstractData.java` — Base POJO class with CRUD
|
||||
- `src/main/java/org/tinystruct/data/Mapping.java` — Mapping XML parser
|
||||
- `src/main/java/org/tinystruct/data/tools/MySQLGenerator.java` — POJO generator reference
|
||||
- `src/main/java/org/tinystruct/data/component/FieldType.java` — SQL-to-Java type mappings
|
||||
- `src/main/java/org/tinystruct/data/component/Condition.java` — Fluent SQL query builder
|
||||
- `src/main/java/org/tinystruct/http/SSEPushManager.java` — SSE connection management
|
||||
- `src/test/java/org/tinystruct/application/ActionRegistryTest.java` — Registry test examples
|
||||
- `src/test/java/org/tinystruct/system/HttpServerHttpModeTest.java` — HTTP integration test patterns
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
## When to Use
|
||||
|
||||
Choose **tinystruct** when you need a lightweight, high-performance Java framework that treats CLI and HTTP as equal citizens. It is ideal for building microservices, command-line utilities, and data-driven applications where a small footprint and zero-dependency JSON handling are required. Use it when you want to write logic once and expose it via both a terminal and a web server without modification.
|
||||
Choose **tinystruct** when you need a lightweight, high-performance Java framework that treats CLI and HTTP as equal citizens. Ideal for microservices, CLI utilities, and data-driven applications with a small footprint and zero-dependency JSON handling.
|
||||
|
||||
## How It Works
|
||||
|
||||
@@ -20,7 +20,7 @@ The framework operates on a singleton `ActionRegistry` that maps URL patterns (o
|
||||
| `Action` | Wraps a `MethodHandle` + regex pattern + priority + `Mode` for dispatch. |
|
||||
| `Context` | Per-request state store. Access via `getContext()`. Holds CLI args and HTTP request/response. |
|
||||
| `Dispatcher` | CLI entry point (`bin/dispatcher`). Reads `--import` to load applications. |
|
||||
| `HttpServer` | Built-in Netty-based HTTP server. Start with `bin/dispatcher start --import org.tinystruct.system.HttpServer`. |
|
||||
| `HttpServer` | Built-in HTTP server. Start with `bin/dispatcher start --import org.tinystruct.system.HttpServer`. |
|
||||
|
||||
### Package Map
|
||||
|
||||
@@ -40,13 +40,23 @@ org.tinystruct/
|
||||
│ ├── HttpServer.java ← built-in HTTP server
|
||||
│ ├── EventDispatcher.java ← event bus
|
||||
│ └── Settings.java ← reads application.properties
|
||||
├── data/component/Builder.java ← JSON serialization (use instead of Gson/Jackson)
|
||||
└── http/ ← Request, Response, Constants
|
||||
├── data/
|
||||
│ ├── component/Builder.java ← JSON object (use instead of Gson/Jackson)
|
||||
│ ├── component/Builders.java ← JSON array
|
||||
│ ├── component/AbstractData.java ← base POJO for DB persistence
|
||||
│ ├── component/Condition.java ← fluent SQL query builder
|
||||
│ ├── component/FieldType.java ← SQL-to-Java type mappings
|
||||
│ ├── Mapping.java ← reads .map.xml metadata
|
||||
│ ├── DatabaseOperator.java ← low-level JDBC wrapper
|
||||
│ └── FileEntity.java ← file upload representation
|
||||
├── http/ ← Request, Response, Constants
|
||||
│ └── SSEPushManager.java ← Server-Sent Events management
|
||||
└── net/ ← URLRequest, HTTPHandler (outbound HTTP)
|
||||
```
|
||||
|
||||
### Template Behavior and Dispatch Flow
|
||||
|
||||
By default, the framework assumes a view template is required. If `templateRequired` is `true`, `toString()` looks for a `.view` file in `src/main/resources/themes/<ClassName>.view`. Use `getContext()` to manage state and `setVariable("name", value)` to pass data to templates, which use `[%name%]` for interpolation.
|
||||
By default, the framework assumes a view template is required. If `templateRequired` is `true`, `toString()` looks for a `.view` file in `src/main/resources/themes/<ClassName>.view`. Use `setVariable("name", value)` to pass data to templates, which use `{%name%}` for interpolation.
|
||||
|
||||
## Examples
|
||||
|
||||
@@ -55,6 +65,7 @@ By default, the framework assumes a view template is required. If `templateRequi
|
||||
@Override
|
||||
public void init() {
|
||||
this.setTemplateRequired(false); // Skip .view template lookup for data-only apps
|
||||
// Do NOT call setAction() here — use @Action annotation instead
|
||||
}
|
||||
```
|
||||
|
||||
@@ -68,6 +79,8 @@ public String hello() {
|
||||
**Execution via Dispatcher:**
|
||||
```bash
|
||||
bin/dispatcher hello
|
||||
bin/dispatcher greet/James
|
||||
bin/dispatcher echo --words "Hello" --import com.example.HelloApp
|
||||
```
|
||||
|
||||
### Configuration Access
|
||||
|
||||
@@ -2,34 +2,59 @@
|
||||
|
||||
## When to Use
|
||||
|
||||
Prefer `org.tinystruct.data.component.Builder` in scenarios where you need a lightweight, high-performance JSON solution with **zero external dependencies**. It is specifically designed to keep your tinystruct applications lean and fast, making it the ideal choice for microservices and CLI tools where including heavy libraries like Jackson or Gson would be overkill.
|
||||
Prefer `org.tinystruct.data.component.Builder` and `Builders` for lightweight, zero-dependency JSON. Use `Builder` for JSON objects (`{}`), `Builders` for JSON arrays (`[]`). **Always use `Builders` instead of `List<Builder>`** to avoid generic type erasure issues.
|
||||
|
||||
## How It Works
|
||||
|
||||
The `Builder` class provides a simple key-value interface for both creating and reading JSON structures. It integrates directly with `AbstractApplication` result handling; when an action method returns a `Builder` object, the framework automatically serializes it to the response stream. This prevents the need for manual string conversion and ensures consistent data formatting across your application modules.
|
||||
`Builder` provides a key-value interface for creating and reading JSON objects. `Builders` provides an indexed list for JSON arrays. Both integrate directly with `AbstractApplication` result handling.
|
||||
|
||||
### Why Builder/Builders?
|
||||
- **Zero External Dependencies** — lean and fast
|
||||
- **Native Integration** — works with framework result handling
|
||||
- **Type Safety** — `Builders` serializes properly to `[]`; `List<Builder>` can cause casting issues
|
||||
|
||||
## Examples
|
||||
|
||||
### Serialization
|
||||
### Serialize a Single Object
|
||||
```java
|
||||
import org.tinystruct.data.component.Builder;
|
||||
|
||||
// Create and populate
|
||||
Builder response = new Builder();
|
||||
response.put("status", "success");
|
||||
response.put("count", 42);
|
||||
response.put("data", someList);
|
||||
|
||||
return response; // {"status":"success","count":42,...}
|
||||
return response.toString(); // {"status":"success","count":42}
|
||||
```
|
||||
|
||||
### Parsing
|
||||
### Serialize a List using Builders
|
||||
```java
|
||||
import org.tinystruct.data.component.Builder;
|
||||
import org.tinystruct.data.component.Builders;
|
||||
|
||||
// Parse a JSON string
|
||||
Builders dataList = new Builders();
|
||||
for (MyModel item : myCollection) {
|
||||
Builder b = new Builder();
|
||||
b.put("id", item.getId());
|
||||
b.put("name", item.getName());
|
||||
dataList.add(b);
|
||||
}
|
||||
Builder response = new Builder();
|
||||
response.put("data", dataList);
|
||||
return response.toString(); // {"data":[{"id":1,"name":"X"}]}
|
||||
```
|
||||
|
||||
### Parse a JSON Object
|
||||
```java
|
||||
Builder parsed = new Builder();
|
||||
parsed.parse(jsonString);
|
||||
|
||||
String status = parsed.get("status").toString();
|
||||
```
|
||||
|
||||
### Parse a JSON Array
|
||||
```java
|
||||
Builders parsedArray = new Builders();
|
||||
parsedArray.parse(jsonArrayString);
|
||||
for (int i = 0; i < parsedArray.size(); i++) {
|
||||
Builder item = parsedArray.get(i);
|
||||
System.out.println(item.get("name"));
|
||||
}
|
||||
```
|
||||
|
||||
99
skills/tinystruct-patterns/references/database.md
Normal file
99
skills/tinystruct-patterns/references/database.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# tinystruct Database Persistence
|
||||
|
||||
## When to Use
|
||||
|
||||
Use the built-in ORM-like data layer for database operations. It provides a lightweight alternative to JPA/Hibernate using POJOs extending `AbstractData` and XML mapping files.
|
||||
|
||||
## How It Works
|
||||
|
||||
### Architecture
|
||||
|
||||
Each table is represented by:
|
||||
1. **Java POJO**: Extends `AbstractData`, provides getters/setters and `setData(Row)`.
|
||||
2. **Mapping XML**: `ClassName.map.xml` in resources, binding Java fields to DB columns.
|
||||
|
||||
#### Key Base Class: `AbstractData`
|
||||
Provides CRUD methods:
|
||||
- `append()` / `appendAndGetId()`
|
||||
- `update()`
|
||||
- `delete()`
|
||||
- `findAll()` / `findOneById()` / `findOneByKey(key, value)`
|
||||
- `findWith(where, params)`
|
||||
- `find(SQL, params)`
|
||||
|
||||
### POJO Generation (CLI)
|
||||
|
||||
Introspect a live database table to produce the POJO and mapping file.
|
||||
|
||||
#### Configuration
|
||||
`application.properties`:
|
||||
```properties
|
||||
driver=com.mysql.cj.jdbc.Driver
|
||||
database.url=jdbc:mysql://localhost:3306/mydb
|
||||
database.user=root
|
||||
database.password=secret
|
||||
```
|
||||
|
||||
#### Command
|
||||
```bash
|
||||
# Interactive mode
|
||||
bin/dispatcher generate
|
||||
|
||||
# Specify table
|
||||
bin/dispatcher generate --tables users
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### CRUD Operations
|
||||
```java
|
||||
// CREATE
|
||||
User user = new User();
|
||||
user.setUsername("james");
|
||||
user.append();
|
||||
|
||||
// READ
|
||||
User user = new User();
|
||||
user.setId(42);
|
||||
user.findOneById();
|
||||
|
||||
// UPDATE
|
||||
user.setEmail("new@example.com");
|
||||
user.update();
|
||||
|
||||
// DELETE
|
||||
user.delete();
|
||||
```
|
||||
|
||||
### Querying with Conditions
|
||||
```java
|
||||
User user = new User();
|
||||
Table results = user.findWith("username LIKE ?", new Object[]{"%jam%"});
|
||||
|
||||
// Fluent Condition Builder
|
||||
Condition condition = new Condition();
|
||||
condition.setRequestFields("id,username");
|
||||
Table filtered = user.find(
|
||||
condition.select("`users`").and("email LIKE ?").orderBy("id DESC"),
|
||||
new Object[]{"%@example.com"}
|
||||
);
|
||||
```
|
||||
|
||||
### Mapping XML Structure
|
||||
`User.map.xml`:
|
||||
```xml
|
||||
<mapping>
|
||||
<class name="User" table="users">
|
||||
<id name="Id" column="id" increment="true" generate="false" length="11" type="int"/>
|
||||
<property name="username" column="username" length="50" type="varchar"/>
|
||||
<property name="email" column="email" length="100" type="varchar"/>
|
||||
</class>
|
||||
</mapping>
|
||||
```
|
||||
|
||||
## Important Rules
|
||||
|
||||
1. **File Placement**: The mapping XML **must** mirror the POJO's package path under `src/main/resources/`.
|
||||
2. **Naming**: Table names are singularized for class names (`users` → `User`). Underscored columns become camelCase fields (`created_at` → `createdAt`).
|
||||
3. **Setters**: Use `setFieldAsXxx` methods (e.g., `setFieldAsString`) in setters to sync state with the internal field map.
|
||||
4. **Id Field**: The primary key field in Java is always named `Id` (inherited from `AbstractData`).
|
||||
@@ -2,13 +2,17 @@
|
||||
|
||||
## When to Use
|
||||
|
||||
Use the `@Action` annotation in your applications to define routes for both CLI commands and HTTP endpoints. It is appropriate whenever you need to map logic to a specific path, handle parameterized requests (e.g., retrieving a resource by ID), or restrict execution to specific HTTP methods (GET, POST, etc.) while maintaining a consistent command structure across environments.
|
||||
Use the `@Action` annotation in your applications to define routes for both CLI commands and HTTP endpoints. It is appropriate whenever you need to map logic to a specific path, handle parameterized requests, or restrict execution to specific HTTP methods while maintaining a consistent command structure across environments.
|
||||
|
||||
## How It Works
|
||||
|
||||
The `ActionRegistry` parses `@Action` annotations to build a routing table. For parameterized methods, the framework automatically maps Java parameter types (int, String, etc.) to corresponding regex segments to generate an internal matching pattern. For instance, `getUser(int id)` generates a regex targeting digits, while `search(String query)` targets generic path segments.
|
||||
The `ActionRegistry` parses `@Action` annotations to build a routing table. For parameterized methods, the framework automatically maps Java parameter types to corresponding regex segments.
|
||||
|
||||
When a request is dispatched, the `ActionRegistry` automatically injects dependencies like `Request` and `Response` into the action method if they are specified as parameters, drawing them directly from the current request's `Context`. Execution is further filtered by the `Mode` value, allowing a single path to invoke different logic depending on whether the trigger was a terminal command or a specific type of HTTP request.
|
||||
### Regex Generation Rules
|
||||
- `getUser(int id)` → pattern: `^/?user/(-?\d+)$`
|
||||
- `search(String query)` → pattern: `^/?search/([^/]+)$`
|
||||
|
||||
Supported parameter types: `String`, `int/Integer`, `long/Long`, `float/Float`, `double/Double`, `boolean/Boolean`, `char/Character`, `short/Short`, `byte/Byte`, `Date` (parsed as `yyyy-MM-dd HH:mm:ss`).
|
||||
|
||||
### Mode Values
|
||||
|
||||
@@ -22,6 +26,8 @@ When a request is dispatched, the `ActionRegistry` automatically injects depende
|
||||
| `HTTP_DELETE` | HTTP DELETE only |
|
||||
| `HTTP_PATCH` | HTTP PATCH only |
|
||||
|
||||
> **Note:** You can map HTTP method names to `Mode` using `Action.Mode.fromName(String methodName)`. Unknown or null values return `Mode.DEFAULT`.
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Action Declaration
|
||||
@@ -29,29 +35,30 @@ When a request is dispatched, the `ActionRegistry` automatically injects depende
|
||||
@Action(
|
||||
value = "path/subpath", // required: URI segment or CLI command
|
||||
description = "What it does", // shown in --help output
|
||||
mode = Mode.HTTP_POST, // default: Mode.DEFAULT (both CLI + HTTP)
|
||||
options = {}, // CLI option flags
|
||||
example = "curl -X POST http://localhost:8080/path/subpath/42"
|
||||
mode = Mode.DEFAULT, // default: Mode.DEFAULT
|
||||
example = "bin/dispatcher path/subpath/42"
|
||||
)
|
||||
public String myAction(int id) { ... }
|
||||
```
|
||||
|
||||
### Parameterized Paths (Regex Generation)
|
||||
### Parameterized Paths
|
||||
```java
|
||||
@Action("user/{id}")
|
||||
public String getUser(int id) { ... }
|
||||
// → pattern: ^/?user/(-?\d+)$
|
||||
|
||||
@Action("search")
|
||||
public String search(String query) { ... }
|
||||
// → pattern: ^/?search/([^/]+)$
|
||||
// → CLI: bin/dispatcher user/42
|
||||
// → HTTP: /?q=user/42
|
||||
```
|
||||
|
||||
### Request and Response Injection
|
||||
### Dependency Injection
|
||||
`ActionRegistry` automatically injects `Request` and/or `Response` from `Context` if they are parameters:
|
||||
|
||||
```java
|
||||
@Action(value = "upload", mode = Mode.HTTP_POST)
|
||||
public String upload(Request<?, ?> req, Response<?, ?> res) throws ApplicationException {
|
||||
// req.getParameter("file"), res.setHeader(...), etc.
|
||||
// Access raw request/response if needed
|
||||
return "ok";
|
||||
}
|
||||
```
|
||||
|
||||
### Path Matching Priority
|
||||
If two methods share the same path, the framework uses the first match in the `ActionRegistry`. Use explicit `Mode` values to disambiguate (e.g., separating a GET for a form and a POST for submission).
|
||||
|
||||
@@ -2,13 +2,26 @@
|
||||
|
||||
## When to Use
|
||||
|
||||
Use the system and usage patterns described here when you need to handle stateful interactions across CLI and HTTP modes, manage user sessions in web applications, or implement loosely coupled communication between application modules using an event-driven architecture.
|
||||
Use these patterns to handle request state, manage web sessions, implement Server-Sent Events (SSE), handle file uploads, or perform outbound HTTP networking.
|
||||
|
||||
## How It Works
|
||||
|
||||
The framework's `Context` serves as the primary data store for request-specific state. In CLI mode, flags passed as `--key value` are automatically parsed and stored in the `Context` with the `--` prefix, allowing action methods to retrieve command parameters easily. For web applications, the system provides standard session management via the `Request` object, enabling the storage of user data across multiple HTTP requests.
|
||||
### Context and CLI Arguments
|
||||
`Context` is the primary data store for request-specific state. CLI flags passed as `--key value` are stored in `Context` as `"--key"`.
|
||||
|
||||
The internal `EventDispatcher` facilitates an asynchronous event bus. By defining custom `Event` classes and registering handlers (typically within an application's `init()` method), you can trigger background tasks—such as sending emails or logging audit trails—without blocking the main execution path.
|
||||
### Session Management
|
||||
Pluggable architecture. Default is `MemorySessionRepository`. Configure Redis in `application.properties`:
|
||||
```properties
|
||||
default.session.repository=org.tinystruct.http.RedisSessionRepository
|
||||
redis.host=127.0.0.1
|
||||
redis.port=6379
|
||||
```
|
||||
|
||||
### Server-Sent Events (SSE)
|
||||
Built-in support for real-time push. The `HttpServer` automatically handles the SSE lifecycle when it detects the `Accept: text/event-stream` header. Connections are tracked by session ID in `SSEPushManager`.
|
||||
|
||||
### Outbound Networking
|
||||
Use `URLRequest` and `HTTPHandler` for making HTTP requests to external services.
|
||||
|
||||
## Examples
|
||||
|
||||
@@ -23,52 +36,62 @@ public String echo() {
|
||||
}
|
||||
```
|
||||
|
||||
### Session Management (Web Mode)
|
||||
### Session Management
|
||||
```java
|
||||
@Action(value = "login", mode = Mode.HTTP_POST)
|
||||
public String login(Request request) {
|
||||
public String login(Request<?, ?> request) {
|
||||
request.getSession().setAttribute("userId", "42");
|
||||
return "Logged in";
|
||||
}
|
||||
```
|
||||
|
||||
@Action("profile")
|
||||
public String profile(Request request) {
|
||||
Object userId = request.getSession().getAttribute("userId");
|
||||
if (userId == null) return "Not logged in";
|
||||
return "User: " + userId;
|
||||
### Server-Sent Events (SSE)
|
||||
```java
|
||||
@Action("sse/connect")
|
||||
public String connect() {
|
||||
return "{\"type\":\"connect\",\"message\":\"Connected\"}";
|
||||
}
|
||||
|
||||
// In another method or event handler:
|
||||
String sessionId = getContext().getId();
|
||||
SSEPushManager.getInstance().push(sessionId, new Builder().put("msg", "hello"));
|
||||
```
|
||||
|
||||
### File Uploads
|
||||
```java
|
||||
import org.tinystruct.data.FileEntity;
|
||||
|
||||
@Action(value = "upload", mode = Mode.HTTP_POST)
|
||||
public String upload(Request<?, ?> request) throws ApplicationException {
|
||||
List<FileEntity> files = request.getAttachments();
|
||||
if (files != null) {
|
||||
for (FileEntity file : files) {
|
||||
// file.getFilename(), file.getContent()
|
||||
}
|
||||
}
|
||||
return "Uploaded";
|
||||
}
|
||||
```
|
||||
|
||||
### Outbound HTTP
|
||||
```java
|
||||
import org.tinystruct.net.URLRequest;
|
||||
import org.tinystruct.net.handlers.HTTPHandler;
|
||||
|
||||
URLRequest request = new URLRequest(new URL("https://api.example.com"));
|
||||
request.setMethod("POST").setBody("{\"data\":\"val\"}");
|
||||
|
||||
HTTPHandler handler = new HTTPHandler();
|
||||
var response = handler.handleRequest(request);
|
||||
if (response.getStatusCode() == 200) {
|
||||
String body = response.getBody();
|
||||
}
|
||||
```
|
||||
|
||||
### Event System
|
||||
Register handlers in `init()` for asynchronous task execution.
|
||||
```java
|
||||
// 1. Define an event
|
||||
public class OrderCreatedEvent implements org.tinystruct.system.Event<Order> {
|
||||
private final Order order;
|
||||
public OrderCreatedEvent(Order order) { this.order = order; }
|
||||
|
||||
@Override public String getName() { return "order_created"; }
|
||||
@Override public Order getPayload() { return order; }
|
||||
}
|
||||
|
||||
// 2. Register a handler
|
||||
EventDispatcher.getInstance().registerHandler(OrderCreatedEvent.class, event -> {
|
||||
CompletableFuture.runAsync(() -> sendConfirmationEmail(event.getPayload()));
|
||||
EventDispatcher.getInstance().registerHandler(MyEvent.class, event -> {
|
||||
CompletableFuture.runAsync(() -> doHeavyWork(event.getPayload()));
|
||||
});
|
||||
|
||||
// 3. Dispatch
|
||||
EventDispatcher.getInstance().dispatch(new OrderCreatedEvent(newOrder));
|
||||
```
|
||||
|
||||
### Running the Application
|
||||
```bash
|
||||
# CLI mode
|
||||
bin/dispatcher hello
|
||||
bin/dispatcher echo --words "Hello" --import com.example.HelloApp
|
||||
|
||||
# HTTP server (listens on :8080 by default)
|
||||
bin/dispatcher start --import org.tinystruct.system.HttpServer
|
||||
|
||||
# Database utilities
|
||||
bin/dispatcher generate --table users
|
||||
bin/dispatcher sql-query "SELECT * FROM users"
|
||||
```
|
||||
|
||||
@@ -2,58 +2,71 @@
|
||||
|
||||
## When to Use
|
||||
|
||||
Use the testing patterns described here when writing units tests for your tinystruct applications with **JUnit 5**. These patterns are essential for verifying that your `@Action` methods return the correct results and that your routing logic is properly registered within the singleton `ActionRegistry`.
|
||||
Use these patterns when writing unit tests for your applications with **JUnit 5**. Essential for verifying action logic, routing registration, and HTTP mode behavior.
|
||||
|
||||
## How It Works
|
||||
|
||||
Testing tinystruct applications requires a specific setup to ensure framework-level features like annotation processing and configuration management are active. By creating a new instance of your application and passing it a `Settings` object in the `setUp()` method, you trigger the `init()` lifecycle. This ensures all `@Action` methods are discovered and registered.
|
||||
### Unit Testing Applications
|
||||
ActionRegistry is a singleton. To test an application:
|
||||
1. Instantiate the application.
|
||||
2. Provide a `Settings` object (triggers `init()` and annotation processing).
|
||||
3. Use `app.invoke(path, args)` to test logic directly.
|
||||
|
||||
Because the `ActionRegistry` is a singleton, it is critical to maintain isolation between tests by properly initializing your application state before each test execution, preventing side effects from leaking across the test suite.
|
||||
### HTTP Integration Testing
|
||||
For tests involving the built-in HTTP server:
|
||||
1. Start `HttpServer` in a background thread.
|
||||
2. Use `ApplicationManager.call("start", context, Action.Mode.CLI)` to boot.
|
||||
3. Wait for the port to be open using a `Socket`.
|
||||
4. Use `URLRequest` and `HTTPHandler` to perform actual requests.
|
||||
|
||||
## Examples
|
||||
|
||||
### Unit Testing an Application
|
||||
### Unit Test
|
||||
```java
|
||||
import org.junit.jupiter.api.*;
|
||||
import org.tinystruct.application.ActionRegistry;
|
||||
import org.tinystruct.system.Settings;
|
||||
|
||||
class MyAppTest {
|
||||
|
||||
private MyApp app;
|
||||
|
||||
@BeforeEach
|
||||
void setUp() {
|
||||
app = new MyApp();
|
||||
Settings config = new Settings();
|
||||
app.setConfiguration(config);
|
||||
app.init(); // triggers @Action annotation processing
|
||||
app.setConfiguration(new Settings());
|
||||
app.init(); // triggers @Action annotation processing and registers all actions
|
||||
}
|
||||
|
||||
@Test
|
||||
void testHello() throws Exception {
|
||||
// Direct invocation via the application object
|
||||
Object result = app.invoke("hello");
|
||||
Assertions.assertEquals("Hello, tinystruct!", result);
|
||||
Assertions.assertEquals("Hello!", result);
|
||||
}
|
||||
|
||||
@Test
|
||||
void testGreet() throws Exception {
|
||||
// Invocation with arguments
|
||||
Object result = app.invoke("greet", new Object[]{"James"});
|
||||
Assertions.assertEquals("Hello, James!", result);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Testing via ActionRegistry
|
||||
If you need to test the routing logic itself, use the `ActionRegistry` singleton to verify path matching:
|
||||
|
||||
### ActionRegistry Match Testing
|
||||
```java
|
||||
@Test
|
||||
void testRouting() {
|
||||
ActionRegistry registry = ActionRegistry.getInstance();
|
||||
// Verify a path matches an action
|
||||
Action action = registry.getAction("greet/James");
|
||||
Assertions.assertNotNull(action);
|
||||
}
|
||||
```
|
||||
Reference: `src/test/java/org/tinystruct/application/ActionRegistryTest.java`
|
||||
|
||||
### HTTP Integration Pattern
|
||||
Reference: `src/test/java/org/tinystruct/system/HttpServerHttpModeTest.java`
|
||||
|
||||
```java
|
||||
// Pattern:
|
||||
// 1. Start server in thread
|
||||
// 2. Poll for port availability
|
||||
// 3. Send HTTP request via HTTPHandler
|
||||
// 4. Assert response body/status
|
||||
```
|
||||
|
||||
@@ -44,6 +44,7 @@ function writeEnglishReadme(root, counts, options = {}) {
|
||||
const unrelatedSkillsCount = options.unrelatedSkillsCount || 16;
|
||||
|
||||
fs.writeFileSync(path.join(root, 'README.md'), `Access to ${counts.agents} agents, ${counts.skills} skills, and ${counts.commands} commands.
|
||||
- **Public surface synced to the live repo** - metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: ${counts.agents} agents, ${counts.skills} skills, and ${counts.commands} legacy command shims.
|
||||
|-- agents/ # ${counts.agents} specialized subagents for delegation
|
||||
| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |
|
||||
| --- | --- | --- | --- | --- |
|
||||
@@ -221,6 +222,7 @@ function runTests() {
|
||||
.join('\n');
|
||||
|
||||
assert.ok(formatted.includes('README.md quick-start summary'));
|
||||
assert.ok(formatted.includes('README.md rc.1 release-note summary'));
|
||||
assert.ok(formatted.includes('README.md project tree'));
|
||||
assert.ok(formatted.includes('AGENTS.md summary'));
|
||||
assert.ok(formatted.includes('.claude-plugin/plugin.json description'));
|
||||
@@ -255,6 +257,7 @@ function runTests() {
|
||||
const marketplaceJson = fs.readFileSync(path.join(testDir, '.claude-plugin', 'marketplace.json'), 'utf8');
|
||||
|
||||
assert.ok(readme.includes('Access to 1 agents, 1 skills, and 1 legacy command shims'));
|
||||
assert.ok(readme.includes('actual OSS surface: 1 agents, 1 skills, and 1 legacy command shims'));
|
||||
assert.ok(readme.includes('|-- agents/ # 1 specialized subagents for delegation'));
|
||||
assert.ok(readme.includes('| Skills | 42 | .agents/skills/ |'));
|
||||
assert.ok(agentsDoc.includes('providing 1 specialized agents, 1+ skills, 1 commands'));
|
||||
|
||||
176
tests/ci/command-registry.test.js
Normal file
176
tests/ci/command-registry.test.js
Normal file
@@ -0,0 +1,176 @@
|
||||
/**
|
||||
* Direct coverage for scripts/ci/generate-command-registry.js.
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
|
||||
const {
|
||||
checkRegistry,
|
||||
formatRegistry,
|
||||
generateRegistry,
|
||||
parseArgs,
|
||||
run,
|
||||
writeRegistry,
|
||||
} = require('../../scripts/ci/generate-command-registry');
|
||||
|
||||
function createTestDir() {
|
||||
return fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-command-registry-'));
|
||||
}
|
||||
|
||||
function cleanupTestDir(testDir) {
|
||||
fs.rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
function writeFixture(root) {
|
||||
fs.mkdirSync(path.join(root, 'commands'), { recursive: true });
|
||||
fs.mkdirSync(path.join(root, 'agents'), { recursive: true });
|
||||
fs.mkdirSync(path.join(root, 'skills', 'tdd-workflow'), { recursive: true });
|
||||
fs.mkdirSync(path.join(root, 'skills', 'security-review'), { recursive: true });
|
||||
|
||||
fs.writeFileSync(path.join(root, 'agents', 'code-reviewer.md'), '---\nmodel: sonnet\ntools: Read\n---\n');
|
||||
fs.writeFileSync(path.join(root, 'agents', 'test-writer.md'), '---\nmodel: sonnet\ntools: Read\n---\n');
|
||||
fs.writeFileSync(path.join(root, 'skills', 'tdd-workflow', 'SKILL.md'), '# TDD workflow\n');
|
||||
fs.writeFileSync(path.join(root, 'skills', 'security-review', 'SKILL.md'), '# Security review\n');
|
||||
|
||||
fs.writeFileSync(path.join(root, 'commands', 'review.md'), `---
|
||||
description: Review changes
|
||||
---
|
||||
# Review
|
||||
|
||||
Use @code-reviewer and skill: security-review.
|
||||
`);
|
||||
|
||||
fs.writeFileSync(path.join(root, 'commands', 'tdd.md'), `---
|
||||
description: "Write tests first"
|
||||
---
|
||||
# TDD
|
||||
|
||||
Call subagent_type: test-writer and skills/tdd-workflow/SKILL.md.
|
||||
`);
|
||||
}
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` PASS ${name}`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.log(` FAIL ${name}`);
|
||||
console.log(` Error: ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing command registry generation ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
if (test('generates deterministic command metadata and usage statistics', () => {
|
||||
const testDir = createTestDir();
|
||||
try {
|
||||
writeFixture(testDir);
|
||||
|
||||
const registry = generateRegistry({ root: testDir });
|
||||
|
||||
assert.strictEqual(registry.schemaVersion, 1);
|
||||
assert.strictEqual(registry.totalCommands, 2);
|
||||
assert.deepStrictEqual(
|
||||
registry.commands.map(command => command.command),
|
||||
['review', 'tdd']
|
||||
);
|
||||
assert.deepStrictEqual(registry.commands[0].allAgents, ['code-reviewer']);
|
||||
assert.deepStrictEqual(registry.commands[0].skills, ['security-review']);
|
||||
assert.deepStrictEqual(registry.commands[1].allAgents, ['test-writer']);
|
||||
assert.deepStrictEqual(registry.commands[1].skills, ['tdd-workflow']);
|
||||
assert.deepStrictEqual(registry.statistics.byType, { review: 1, testing: 1 });
|
||||
assert.deepStrictEqual(registry.statistics.topAgents[0], { agent: 'code-reviewer', count: 1 });
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('write and check modes use stable JSON without timestamps', () => {
|
||||
const testDir = createTestDir();
|
||||
try {
|
||||
writeFixture(testDir);
|
||||
const outputPath = path.join(testDir, 'docs', 'COMMAND-REGISTRY.json');
|
||||
const registry = generateRegistry({ root: testDir });
|
||||
|
||||
writeRegistry(registry, outputPath);
|
||||
const firstWrite = fs.readFileSync(outputPath, 'utf8');
|
||||
writeRegistry(registry, outputPath);
|
||||
const secondWrite = fs.readFileSync(outputPath, 'utf8');
|
||||
|
||||
assert.strictEqual(firstWrite, secondWrite);
|
||||
assert.ok(!firstWrite.includes('generated'));
|
||||
assert.doesNotThrow(() => checkRegistry(registry, outputPath));
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('check mode fails when the registry file is stale', () => {
|
||||
const testDir = createTestDir();
|
||||
try {
|
||||
writeFixture(testDir);
|
||||
const outputPath = path.join(testDir, 'docs', 'COMMAND-REGISTRY.json');
|
||||
const registry = generateRegistry({ root: testDir });
|
||||
|
||||
fs.mkdirSync(path.dirname(outputPath), { recursive: true });
|
||||
fs.writeFileSync(outputPath, `${formatRegistry(registry).trimEnd()}\n \n`);
|
||||
|
||||
assert.throws(
|
||||
() => checkRegistry(registry, outputPath),
|
||||
/out of date/
|
||||
);
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('CLI reports unknown arguments and supports check output', () => {
|
||||
const testDir = createTestDir();
|
||||
try {
|
||||
writeFixture(testDir);
|
||||
const outputPath = path.join(testDir, 'docs', 'COMMAND-REGISTRY.json');
|
||||
const registry = generateRegistry({ root: testDir });
|
||||
writeRegistry(registry, outputPath);
|
||||
|
||||
let stdout = '';
|
||||
let stderr = '';
|
||||
const streams = {
|
||||
stdout: { write: chunk => { stdout += chunk; } },
|
||||
stderr: { write: chunk => { stderr += chunk; } },
|
||||
};
|
||||
|
||||
assert.deepStrictEqual(parseArgs(['--json', '--write']), {
|
||||
json: true,
|
||||
write: true,
|
||||
check: false,
|
||||
});
|
||||
assert.strictEqual(run(['--check'], { root: testDir, outputPath, ...streams }), 0);
|
||||
assert.ok(stdout.includes('up to date'));
|
||||
assert.strictEqual(stderr, '');
|
||||
|
||||
stdout = '';
|
||||
stderr = '';
|
||||
assert.strictEqual(run(['--bogus'], { root: testDir, outputPath, ...streams }), 1);
|
||||
assert.strictEqual(stdout, '');
|
||||
assert.ok(stderr.includes('Unknown argument'));
|
||||
} finally {
|
||||
cleanupTestDir(testDir);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
runTests();
|
||||
145
tests/ci/scan-supply-chain-iocs.test.js
Executable file
145
tests/ci/scan-supply-chain-iocs.test.js
Executable file
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Validate the active supply-chain IOC scanner.
|
||||
*/
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const SCRIPT_PATH = path.join(__dirname, '..', '..', 'scripts', 'ci', 'scan-supply-chain-iocs.js');
|
||||
const { scanSupplyChainIocs } = require(SCRIPT_PATH);
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` ✓ ${name}`);
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.log(` ✗ ${name}`);
|
||||
console.log(` Error: ${error.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function withFixture(files, fn) {
|
||||
const rootDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-supply-chain-ioc-'));
|
||||
try {
|
||||
for (const [relativePath, contents] of Object.entries(files)) {
|
||||
const fullPath = path.join(rootDir, relativePath);
|
||||
fs.mkdirSync(path.dirname(fullPath), { recursive: true });
|
||||
fs.writeFileSync(fullPath, contents);
|
||||
}
|
||||
fn(rootDir);
|
||||
} finally {
|
||||
fs.rmSync(rootDir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
function run() {
|
||||
console.log('\n=== Testing supply-chain IOC scanner ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
if (test('passes a clean dependency manifest', () => {
|
||||
withFixture({
|
||||
'package.json': JSON.stringify({ dependencies: { leftpad: '1.0.0' } }, null, 2),
|
||||
}, rootDir => {
|
||||
const result = scanSupplyChainIocs({ rootDir });
|
||||
assert.deepStrictEqual(result.findings, []);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects known compromised TanStack package versions in lockfiles', () => {
|
||||
withFixture({
|
||||
'package-lock.json': JSON.stringify({
|
||||
packages: {
|
||||
'node_modules/@tanstack/react-router': {
|
||||
version: '1.169.5',
|
||||
},
|
||||
},
|
||||
}, null, 2),
|
||||
}, rootDir => {
|
||||
const result = scanSupplyChainIocs({ rootDir });
|
||||
assert.match(result.findings[0].indicator, /@tanstack\/react-router@1\.169\.5/);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('passes clean versions of watched packages', () => {
|
||||
withFixture({
|
||||
'package-lock.json': JSON.stringify({
|
||||
packages: {
|
||||
'node_modules/@tanstack/react-router': {
|
||||
version: '1.170.0',
|
||||
},
|
||||
},
|
||||
}, null, 2),
|
||||
}, rootDir => {
|
||||
const result = scanSupplyChainIocs({ rootDir });
|
||||
assert.deepStrictEqual(result.findings, []);
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects malicious optional dependency markers', () => {
|
||||
withFixture({
|
||||
'package-lock.json': JSON.stringify({
|
||||
packages: {
|
||||
'node_modules/@tanstack/history': {
|
||||
optionalDependencies: {
|
||||
'@tanstack/setup': 'github:tanstack/router#79ac49eedf774dd4b0cfa308722bc463cfe5885c',
|
||||
},
|
||||
},
|
||||
},
|
||||
}, null, 2),
|
||||
}, rootDir => {
|
||||
const result = scanSupplyChainIocs({ rootDir });
|
||||
assert.ok(result.findings.some(finding => finding.indicator === '@tanstack/setup'));
|
||||
assert.ok(result.findings.some(finding => /79ac49/.test(finding.indicator)));
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects Claude Code persistence payload references', () => {
|
||||
withFixture({
|
||||
'.claude/settings.json': JSON.stringify({
|
||||
hooks: {
|
||||
SessionStart: [{
|
||||
hooks: [{ command: 'node ~/.claude/router_runtime.js' }],
|
||||
}],
|
||||
},
|
||||
}, null, 2),
|
||||
}, rootDir => {
|
||||
const result = scanSupplyChainIocs({ rootDir });
|
||||
assert.ok(result.findings.some(finding => finding.indicator === 'router_runtime.js'));
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects installed payload filenames in node_modules', () => {
|
||||
withFixture({
|
||||
'node_modules/@tanstack/react-router/router_init.js': '/* payload */',
|
||||
}, rootDir => {
|
||||
const result = scanSupplyChainIocs({ rootDir });
|
||||
assert.ok(result.findings.some(finding => finding.indicator === 'router_init.js'));
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('supports CLI JSON output and non-zero exit on findings', () => {
|
||||
withFixture({
|
||||
'package.json': JSON.stringify({ dependencies: { '@opensearch-project/opensearch': '3.8.0' } }, null, 2),
|
||||
}, rootDir => {
|
||||
const result = spawnSync('node', [SCRIPT_PATH, '--root', rootDir, '--json'], { encoding: 'utf8' });
|
||||
assert.notStrictEqual(result.status, 0);
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
assert.ok(parsed.findings.some(finding => finding.indicator === '@opensearch-project/opensearch@3.8.0'));
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
run();
|
||||
@@ -270,7 +270,7 @@ function writeCatalogFixture(testDir, options = {}) {
|
||||
fs.writeFileSync(path.join(testDir, 'commands', 'plan.md'), '---\ndescription: Plan\n---\n# Plan');
|
||||
fs.writeFileSync(path.join(testDir, 'skills', 'demo-skill', 'SKILL.md'), '---\nname: demo-skill\ndescription: Demo skill\norigin: ECC\n---\n# Demo Skill');
|
||||
|
||||
fs.writeFileSync(readmePath, `Access to ${readmeCounts.agents} agents, ${readmeCounts.skills} skills, and ${readmeCounts.commands} commands.\n|-- agents/ # ${readmeProjectTreeAgents} specialized subagents for delegation\n| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |\n|---------|------------|------------|-----------|----------|\n| Agents | PASS: ${readmeTableCounts.agents} agents | Shared | Shared | 1 |\n| Commands | PASS: ${readmeTableCounts.commands} commands | Shared | Shared | 1 |\n| Skills | PASS: ${readmeTableCounts.skills} skills | Shared | Shared | 1 |\n\n| Feature | Count | Format |\n|-----------|-------|---------|\n| Skills | ${readmeUnrelatedSkillsCount} | .agents/skills/ |\n\n## Cross-Tool Feature Parity\n\n| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |\n|---------|------------|------------|-----------|----------|\n| **Agents** | ${readmeParityCounts.agents} | Shared (AGENTS.md) | Shared (AGENTS.md) | 12 |\n| **Commands** | ${readmeParityCounts.commands} | Shared | Instruction-based | 31 |\n| **Skills** | ${readmeParityCounts.skills} | Shared | 10 (native format) | 37 |\n`);
|
||||
fs.writeFileSync(readmePath, `Access to ${readmeCounts.agents} agents, ${readmeCounts.skills} skills, and ${readmeCounts.commands} commands.\n- **Public surface synced to the live repo** - metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: ${readmeCounts.agents} agents, ${readmeCounts.skills} skills, and ${readmeCounts.commands} legacy command shims.\n|-- agents/ # ${readmeProjectTreeAgents} specialized subagents for delegation\n| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |\n|---------|------------|------------|-----------|----------|\n| Agents | PASS: ${readmeTableCounts.agents} agents | Shared | Shared | 1 |\n| Commands | PASS: ${readmeTableCounts.commands} commands | Shared | Shared | 1 |\n| Skills | PASS: ${readmeTableCounts.skills} skills | Shared | Shared | 1 |\n\n| Feature | Count | Format |\n|-----------|-------|---------|\n| Skills | ${readmeUnrelatedSkillsCount} | .agents/skills/ |\n\n## Cross-Tool Feature Parity\n\n| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |\n|---------|------------|------------|-----------|----------|\n| **Agents** | ${readmeParityCounts.agents} | Shared (AGENTS.md) | Shared (AGENTS.md) | 12 |\n| **Commands** | ${readmeParityCounts.commands} | Shared | Instruction-based | 31 |\n| **Skills** | ${readmeParityCounts.skills} | Shared | 10 (native format) | 37 |\n`);
|
||||
fs.writeFileSync(agentsPath, `This is a **production-ready AI coding plugin** providing ${summaryCounts.agents} specialized agents, ${summaryCounts.skills} skills, ${summaryCounts.commands} commands, and automated hook workflows for software development.\n\n\`\`\`\n${structureLines.join('\n')}\n\`\`\`\n`);
|
||||
fs.writeFileSync(zhRootReadmePath, `**完成!** 你现在可以使用 ${zhRootReadmeCounts.agents} 个代理、${zhRootReadmeCounts.skills} 个技能和 ${zhRootReadmeCounts.commands} 个命令。\n`);
|
||||
fs.writeFileSync(zhDocsReadmePath, `**搞定!** 你现在可以使用 ${zhDocsReadmeCounts.agents} 个智能体、${zhDocsReadmeCounts.skills} 项技能和 ${zhDocsReadmeCounts.commands} 个命令了。\n| 功能特性 | Claude Code | OpenCode | 状态 |\n|---------|-------------|----------|--------|\n| 智能体 | \u2705 ${zhDocsTableCounts.agents} 个 | \u2705 12 个 | **Claude Code 领先** |\n| 命令 | \u2705 ${zhDocsTableCounts.commands} 个 | \u2705 31 个 | **Claude Code 领先** |\n| 技能 | \u2705 ${zhDocsTableCounts.skills} 项 | \u2705 37 项 | **Claude Code 领先** |\n\n| 功能特性 | 数量 | 格式 |\n|-----------|-------|---------|\n| 技能 | ${zhDocsUnrelatedSkillsCount} | .agents/skills/ |\n\n## 跨工具功能对等\n\n| 功能特性 | Claude Code | Cursor IDE | Codex CLI | OpenCode |\n|---------|------------|------------|-----------|----------|\n| **智能体** | ${zhDocsParityCounts.agents} | 共享 (AGENTS.md) | 共享 (AGENTS.md) | 12 |\n| **命令** | ${zhDocsParityCounts.commands} | 共享 | 基于指令 | 31 |\n| **技能** | ${zhDocsParityCounts.skills} | 共享 | 10 (原生格式) | 37 |\n`);
|
||||
@@ -595,6 +595,7 @@ function runTests() {
|
||||
const marketplaceJson = fs.readFileSync(marketplaceJsonPath, 'utf8');
|
||||
|
||||
assert.ok(readme.includes('Access to 1 agents, 1 skills, and 1 legacy command shims'), 'Should sync README quick-start summary');
|
||||
assert.ok(readme.includes('actual OSS surface: 1 agents, 1 skills, and 1 legacy command shims'), 'Should sync README release-note summary');
|
||||
assert.ok(readme.includes('|-- agents/ # 1 specialized subagents for delegation'), 'Should sync README project tree agents count');
|
||||
assert.ok(readme.includes('| Agents | PASS: 1 agents |'), 'Should sync README comparison table');
|
||||
assert.ok(readme.includes('| Skills | 16 | .agents/skills/ |'), 'Should not rewrite unrelated README tables');
|
||||
|
||||
@@ -130,12 +130,12 @@ test('candidate playbook preserves stale-salvage operating rules', () => {
|
||||
}
|
||||
});
|
||||
|
||||
test('roadmap points to the evaluator RAG prototype and keeps hosted integration open', () => {
|
||||
test('roadmap points to the evaluator RAG prototype and hosted PR check', () => {
|
||||
const roadmap = read('docs/ECC-2.0-GA-ROADMAP.md');
|
||||
|
||||
assert.ok(roadmap.includes('docs/architecture/evaluator-rag-prototype.md'));
|
||||
assert.ok(roadmap.includes('examples/evaluator-rag-prototype/'));
|
||||
assert.ok(roadmap.includes('Local corpus complete; hosted integration remains future'));
|
||||
assert.ok(roadmap.includes('Deterministic hosted PR check, cached output scoring, retrieval planning, judge contract, and gated model execution integrated'));
|
||||
});
|
||||
|
||||
test('billing readiness scenario rejects launch copy overclaims', () => {
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
@@ -70,85 +71,249 @@ function runTests() {
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
if (test('blocks protected config file edits through run-with-flags', () => {
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: '.eslintrc.js',
|
||||
content: 'module.exports = {};'
|
||||
if (
|
||||
test('blocks protected config file edits through run-with-flags', () => {
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-config-protect-'));
|
||||
try {
|
||||
const absPath = path.join(tmpDir, '.eslintrc.js');
|
||||
fs.writeFileSync(absPath, 'module.exports = {};');
|
||||
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: absPath,
|
||||
content: 'module.exports = {};'
|
||||
}
|
||||
};
|
||||
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 2, 'Expected protected config edit to be blocked');
|
||||
assert.strictEqual(result.stdout, '', 'Blocked hook should not echo raw input');
|
||||
assert.ok(result.stderr.includes('BLOCKED: Modifying .eslintrc.js is not allowed.'), `Expected block message, got: ${result.stderr}`);
|
||||
} finally {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
}
|
||||
}
|
||||
};
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 2, 'Expected protected config edit to be blocked');
|
||||
assert.strictEqual(result.stdout, '', 'Blocked hook should not echo raw input');
|
||||
assert.ok(result.stderr.includes('BLOCKED: Modifying .eslintrc.js is not allowed.'), `Expected block message, got: ${result.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
if (
|
||||
test('passes through safe file edits unchanged', () => {
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: 'src/index.js',
|
||||
content: 'console.log("ok");'
|
||||
}
|
||||
};
|
||||
|
||||
if (test('passes through safe file edits unchanged', () => {
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: 'src/index.js',
|
||||
content: 'console.log("ok");'
|
||||
}
|
||||
};
|
||||
|
||||
const rawInput = JSON.stringify(input);
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 0, 'Expected safe file edit to pass');
|
||||
assert.strictEqual(result.stdout, rawInput, 'Expected exact raw JSON passthrough');
|
||||
assert.strictEqual(result.stderr, '', 'Expected no stderr for safe edits');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('blocks truncated protected config payloads instead of failing open', () => {
|
||||
const rawInput = JSON.stringify({
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: '.eslintrc.js',
|
||||
content: 'x'.repeat(1024 * 1024 + 2048)
|
||||
}
|
||||
});
|
||||
|
||||
const result = runHook(rawInput);
|
||||
assert.strictEqual(result.code, 2, 'Expected truncated protected payload to be blocked');
|
||||
assert.strictEqual(result.stdout, '', 'Blocked truncated payload should not echo raw input');
|
||||
assert.ok(result.stderr.includes('Hook input exceeded 1048576 bytes'), `Expected size warning, got: ${result.stderr}`);
|
||||
assert.ok(result.stderr.includes('truncated payload'), `Expected truncated payload warning, got: ${result.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('legacy hooks do not echo raw input when they fail without stdout', () => {
|
||||
const pluginRoot = path.join(__dirname, '..', `tmp-runner-plugin-${Date.now()}`);
|
||||
const scriptDir = path.join(pluginRoot, 'scripts', 'hooks');
|
||||
const scriptPath = path.join(scriptDir, 'legacy-block.js');
|
||||
|
||||
try {
|
||||
fs.mkdirSync(scriptDir, { recursive: true });
|
||||
fs.writeFileSync(
|
||||
scriptPath,
|
||||
'#!/usr/bin/env node\nprocess.stderr.write("blocked by legacy hook\\n");\nprocess.exit(2);\n'
|
||||
);
|
||||
const rawInput = JSON.stringify(input);
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 0, 'Expected safe file edit to pass');
|
||||
assert.strictEqual(result.stdout, rawInput, 'Expected exact raw JSON passthrough');
|
||||
assert.strictEqual(result.stderr, '', 'Expected no stderr for safe edits');
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('blocks truncated protected config payloads instead of failing open', () => {
|
||||
const rawInput = JSON.stringify({
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: '.eslintrc.js',
|
||||
content: 'module.exports = {};'
|
||||
content: 'x'.repeat(1024 * 1024 + 2048)
|
||||
}
|
||||
});
|
||||
|
||||
const result = runCustomHook(pluginRoot, 'pre:legacy-block', 'scripts/hooks/legacy-block.js', rawInput);
|
||||
assert.strictEqual(result.code, 2, 'Expected failing legacy hook exit code to propagate');
|
||||
assert.strictEqual(result.stdout, '', 'Expected failing legacy hook to avoid raw passthrough');
|
||||
assert.ok(result.stderr.includes('blocked by legacy hook'), `Expected legacy hook stderr, got: ${result.stderr}`);
|
||||
} finally {
|
||||
const result = runHook(rawInput);
|
||||
assert.strictEqual(result.code, 2, 'Expected truncated protected payload to be blocked');
|
||||
assert.strictEqual(result.stdout, '', 'Blocked truncated payload should not echo raw input');
|
||||
assert.ok(result.stderr.includes('Hook input exceeded 1048576 bytes'), `Expected size warning, got: ${result.stderr}`);
|
||||
assert.ok(result.stderr.includes('truncated payload'), `Expected truncated payload warning, got: ${result.stderr}`);
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('allows first-time creation of a protected config file', () => {
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-config-protect-'));
|
||||
try {
|
||||
fs.rmSync(pluginRoot, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
const absPath = path.join(tmpDir, 'eslint.config.mjs');
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: absPath,
|
||||
content: 'export default [];'
|
||||
}
|
||||
};
|
||||
|
||||
const rawInput = JSON.stringify(input);
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 0, `Expected exit 0 for first-time creation, got ${result.code}; stderr: ${result.stderr}`);
|
||||
assert.strictEqual(result.stdout, rawInput, 'Expected raw passthrough when creation is allowed');
|
||||
assert.strictEqual(result.stderr, '', `Expected no stderr for first-time creation, got: ${result.stderr}`);
|
||||
} finally {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
}
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('allows first-time creation when the parent directory does not exist yet', () => {
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-config-protect-'));
|
||||
try {
|
||||
// Path under a non-existent subdirectory — statSync returns ENOENT
|
||||
// on the final segment, which should be treated as "does not exist"
|
||||
// and allow the write. (Agent or CLI is expected to create parents
|
||||
// during the Write itself; this hook does not need to.)
|
||||
const absPath = path.join(tmpDir, 'no-such-parent', '.prettierrc');
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: absPath,
|
||||
content: '{}'
|
||||
}
|
||||
};
|
||||
|
||||
const rawInput = JSON.stringify(input);
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 0, `Expected exit 0 for ENOENT path, got ${result.code}; stderr: ${result.stderr}`);
|
||||
assert.strictEqual(result.stdout, rawInput, 'Expected raw passthrough when path does not exist');
|
||||
} finally {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('blocks protected paths that exist as a dangling symlink', () => {
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-config-protect-'));
|
||||
try {
|
||||
const missingTarget = path.join(tmpDir, 'nowhere.js');
|
||||
const linkPath = path.join(tmpDir, '.eslintrc.js');
|
||||
try {
|
||||
fs.symlinkSync(missingTarget, linkPath);
|
||||
} catch (err) {
|
||||
// Windows without Developer Mode or certain sandboxes disallow
|
||||
// symlinks. Skip cleanly rather than fail the suite.
|
||||
if (err.code === 'EPERM' || err.code === 'EACCES') {
|
||||
console.log(' (skipped: symlink creation not permitted here)');
|
||||
return;
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
|
||||
const input = {
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: linkPath,
|
||||
content: 'module.exports = {};'
|
||||
}
|
||||
};
|
||||
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 2, `Expected exit 2 for dangling symlink, got ${result.code}; stderr: ${result.stderr}`);
|
||||
assert.strictEqual(result.stdout, '', 'Blocked hook should not echo raw input');
|
||||
assert.ok(
|
||||
result.stderr.includes('BLOCKED: Modifying .eslintrc.js is not allowed.'),
|
||||
`Expected block message, got: ${result.stderr}`
|
||||
);
|
||||
} finally {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('still blocks writes to an existing protected config file', () => {
|
||||
const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-config-protect-'));
|
||||
try {
|
||||
const absPath = path.join(tmpDir, '.eslintrc.js');
|
||||
fs.writeFileSync(absPath, 'module.exports = { rules: {} };');
|
||||
|
||||
const input = {
|
||||
tool_name: 'Edit',
|
||||
tool_input: {
|
||||
file_path: absPath,
|
||||
content: 'module.exports = { rules: { "no-console": "off" } };'
|
||||
}
|
||||
};
|
||||
|
||||
const result = runHook(input);
|
||||
assert.strictEqual(result.code, 2, 'Expected exit 2 when modifying an existing protected config');
|
||||
assert.strictEqual(result.stdout, '', 'Blocked hook should not echo raw input');
|
||||
assert.ok(result.stderr.includes('BLOCKED: Modifying .eslintrc.js is not allowed.'), `Expected block message, got: ${result.stderr}`);
|
||||
} finally {
|
||||
try {
|
||||
fs.rmSync(tmpDir, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
test('legacy hooks do not echo raw input when they fail without stdout', () => {
|
||||
const pluginRoot = path.join(__dirname, '..', `tmp-runner-plugin-${Date.now()}`);
|
||||
const scriptDir = path.join(pluginRoot, 'scripts', 'hooks');
|
||||
const scriptPath = path.join(scriptDir, 'legacy-block.js');
|
||||
|
||||
try {
|
||||
fs.mkdirSync(scriptDir, { recursive: true });
|
||||
fs.writeFileSync(scriptPath, '#!/usr/bin/env node\nprocess.stderr.write("blocked by legacy hook\\n");\nprocess.exit(2);\n');
|
||||
|
||||
const rawInput = JSON.stringify({
|
||||
tool_name: 'Write',
|
||||
tool_input: {
|
||||
file_path: '.eslintrc.js',
|
||||
content: 'module.exports = {};'
|
||||
}
|
||||
});
|
||||
|
||||
const result = runCustomHook(pluginRoot, 'pre:legacy-block', 'scripts/hooks/legacy-block.js', rawInput);
|
||||
assert.strictEqual(result.code, 2, 'Expected failing legacy hook exit code to propagate');
|
||||
assert.strictEqual(result.stdout, '', 'Expected failing legacy hook to avoid raw passthrough');
|
||||
assert.ok(result.stderr.includes('blocked by legacy hook'), `Expected legacy hook stderr, got: ${result.stderr}`);
|
||||
} finally {
|
||||
try {
|
||||
fs.rmSync(pluginRoot, { recursive: true, force: true });
|
||||
} catch {
|
||||
// best-effort cleanup
|
||||
}
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
|
||||
@@ -35,6 +35,14 @@ function withTempHome(homeDir) {
|
||||
};
|
||||
}
|
||||
|
||||
function writeTranscript(filePath, entries) {
|
||||
fs.writeFileSync(
|
||||
filePath,
|
||||
entries.map(entry => JSON.stringify(entry)).join('\n') + '\n',
|
||||
'utf8'
|
||||
);
|
||||
}
|
||||
|
||||
function runScript(input, envOverrides = {}) {
|
||||
const inputStr = typeof input === 'string' ? input : JSON.stringify(input);
|
||||
const result = spawnSync('node', [script], {
|
||||
@@ -64,12 +72,40 @@ function runTests() {
|
||||
assert.strictEqual(result.stdout, inputStr, 'Expected stdout to match original input');
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
// 2. Creates metrics file when given valid usage data
|
||||
(test('creates metrics file when given valid usage data', () => {
|
||||
// 2. Creates metrics file when given transcript usage data
|
||||
(test('creates metrics file when given transcript usage data', () => {
|
||||
const tmpHome = makeTempDir();
|
||||
const transcriptPath = path.join(tmpHome, 'session.jsonl');
|
||||
writeTranscript(transcriptPath, [
|
||||
{ type: 'user', message: { content: 'ignored' } },
|
||||
{
|
||||
type: 'assistant',
|
||||
message: {
|
||||
model: 'claude-sonnet-4-20250514',
|
||||
usage: {
|
||||
input_tokens: 1000,
|
||||
output_tokens: 500,
|
||||
cache_creation_input_tokens: 200,
|
||||
cache_read_input_tokens: 300,
|
||||
},
|
||||
},
|
||||
},
|
||||
{ notJsonShape: true },
|
||||
{
|
||||
type: 'assistant',
|
||||
message: {
|
||||
model: 'claude-opus-4-20250514',
|
||||
usage: {
|
||||
input_tokens: 25,
|
||||
output_tokens: 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
]);
|
||||
|
||||
const input = {
|
||||
model: 'claude-sonnet-4-20250514',
|
||||
usage: { input_tokens: 1000, output_tokens: 500 },
|
||||
session_id: 'session-from-hook',
|
||||
transcript_path: transcriptPath,
|
||||
};
|
||||
const result = runScript(input, withTempHome(tmpHome));
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
@@ -79,8 +115,13 @@ function runTests() {
|
||||
|
||||
const content = fs.readFileSync(metricsFile, 'utf8').trim();
|
||||
const row = JSON.parse(content);
|
||||
assert.strictEqual(row.input_tokens, 1000, 'Expected input_tokens to be 1000');
|
||||
assert.strictEqual(row.output_tokens, 500, 'Expected output_tokens to be 500');
|
||||
assert.strictEqual(row.session_id, 'session-from-hook', 'Expected input session ID to be recorded');
|
||||
assert.strictEqual(row.transcript_path, transcriptPath, 'Expected transcript_path to be recorded');
|
||||
assert.strictEqual(row.model, 'claude-opus-4-20250514', 'Expected last assistant model to be recorded');
|
||||
assert.strictEqual(row.input_tokens, 1025, 'Expected input_tokens to be summed from transcript');
|
||||
assert.strictEqual(row.output_tokens, 505, 'Expected output_tokens to be summed from transcript');
|
||||
assert.strictEqual(row.cache_write_tokens, 200, 'Expected cache write tokens to be summed from transcript');
|
||||
assert.strictEqual(row.cache_read_tokens, 300, 'Expected cache read tokens to be summed from transcript');
|
||||
assert.ok(row.timestamp, 'Expected timestamp to be present');
|
||||
assert.ok(typeof row.estimated_cost_usd === 'number', 'Expected estimated_cost_usd to be a number');
|
||||
assert.ok(row.estimated_cost_usd > 0, 'Expected estimated_cost_usd to be positive');
|
||||
|
||||
@@ -89,6 +89,110 @@ function runTests() {
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
// --- Subshell bypass regression (issue: dev server slipped past via $(), ``, ()) ---
|
||||
|
||||
if (!isWindows) {
|
||||
(test('blocks $(npm run dev) — command substitution', () => {
|
||||
const result = runScript('$(npm run dev)');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
assert.ok(result.stderr.includes('BLOCKED'), 'expected BLOCKED in stderr');
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks `npm run dev` — backtick substitution', () => {
|
||||
const result = runScript('`npm run dev`');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks echo $(npm run dev) — substitution nested in argument', () => {
|
||||
const result = runScript('echo $(npm run dev)');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks (npm run dev) — plain subshell group', () => {
|
||||
const result = runScript('(npm run dev)');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks $(echo a; npm run dev) — substitution with sequenced segments', () => {
|
||||
const result = runScript('$(echo a; npm run dev)');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks (pnpm dev) — plain subshell group with pnpm', () => {
|
||||
const result = runScript('(pnpm dev)');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('allows tmux launcher inside subshell wrapping (exit code 0)', () => {
|
||||
const result = runScript('(tmux new-session -d -s dev "npm run dev")');
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('allows single-quoted "(npm run dev)" — literal string, not a subshell', () => {
|
||||
const result = runScript("git commit -m '(npm run dev)'");
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('allows double-quoted "(npm run dev)" — literal in double quotes (bash does not subshell)', () => {
|
||||
const result = runScript('echo "(npm run dev)"');
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test("allows single-quoted '$(npm run dev)' — literal string, no substitution", () => {
|
||||
const result = runScript("git commit -m '$(npm run dev) fix'");
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
}
|
||||
|
||||
// --- Round 1 review fixes (Greptile + CodeRabbit on PR #1889) ---
|
||||
|
||||
if (!isWindows) {
|
||||
(test('blocks $(echo ")"; (npm run dev)) — quoted ) does not terminate $() early', () => {
|
||||
const result = runScript('$(echo ")"; (npm run dev))');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks (echo ")"; npm run dev) — quoted ) does not terminate (...) early', () => {
|
||||
const result = runScript('(echo ")"; npm run dev)');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('allows $(echo "(npm run dev)") — () inside double-quoted substitution body is literal', () => {
|
||||
const result = runScript('$(echo "(npm run dev)")');
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks { npm run dev; } — brace group runs in current shell', () => {
|
||||
const result = runScript('{ npm run dev; }');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks echo hi && { npm run dev; } — brace group after &&', () => {
|
||||
const result = runScript('echo hi && { npm run dev; }');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('allows {npm run dev} — bash requires space after { to form a group', () => {
|
||||
const result = runScript('{npm run dev}');
|
||||
assert.strictEqual(result.code, 0, `Expected exit code 0, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks yarn run dev — yarn 1.x convention', () => {
|
||||
const result = runScript('yarn run dev');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks bun dev — bun bare form', () => {
|
||||
const result = runScript('bun dev');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
|
||||
(test('blocks "$(npm run dev)" — double-quoted substitution still substitutes', () => {
|
||||
const result = runScript('echo "$(npm run dev)"');
|
||||
assert.strictEqual(result.code, 2, `Expected exit code 2, got ${result.code}`);
|
||||
}) ? passed++ : failed++);
|
||||
}
|
||||
|
||||
// --- Edge cases ---
|
||||
|
||||
(test('empty/invalid input passes through (exit code 0)', () => {
|
||||
|
||||
@@ -366,6 +366,66 @@ function runTests() {
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// ── hookSpecificOutput JSON on stdout ──
|
||||
// Claude Code 2.1+ drops non-blocking PreToolUse stderr; the suggestion has
|
||||
// to ride on stdout as { hookSpecificOutput: { additionalContext } } to reach
|
||||
// the model. These tests pin that contract.
|
||||
console.log('\nhookSpecificOutput stdout JSON:');
|
||||
|
||||
if (test('emits hookSpecificOutput.additionalContext on stdout at threshold', () => {
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
assert.ok(result.stdout.trim().length > 0, `Expected stdout payload at threshold. Got: "${result.stdout}"`);
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
assert.strictEqual(parsed.hookSpecificOutput.hookEventName, 'PreToolUse',
|
||||
`hookEventName should be PreToolUse. Got: ${JSON.stringify(parsed)}`);
|
||||
assert.ok(parsed.hookSpecificOutput.additionalContext.includes('50 tool calls reached'),
|
||||
`additionalContext should include threshold text. Got: ${parsed.hookSpecificOutput.additionalContext}`);
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('emits hookSpecificOutput.additionalContext on stdout at +25 interval', () => {
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
// threshold=3, set counter to 27 → next run = 28 → 28-3=25 → interval hit
|
||||
fs.writeFileSync(counterFile, '27');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '3' });
|
||||
assert.strictEqual(result.code, 0, 'Should exit 0');
|
||||
assert.ok(result.stdout.trim().length > 0, `Expected stdout payload at interval. Got: "${result.stdout}"`);
|
||||
const parsed = JSON.parse(result.stdout);
|
||||
assert.strictEqual(parsed.hookSpecificOutput.hookEventName, 'PreToolUse');
|
||||
assert.ok(parsed.hookSpecificOutput.additionalContext.includes('28 tool calls'),
|
||||
`additionalContext should include count. Got: ${parsed.hookSpecificOutput.additionalContext}`);
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('emits no stdout below threshold (silent)', () => {
|
||||
const { sessionId, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId, COMPACT_THRESHOLD: '5' });
|
||||
assert.strictEqual(result.code, 0);
|
||||
assert.strictEqual(result.stdout.trim(), '',
|
||||
`Expected empty stdout below threshold. Got: "${result.stdout}"`);
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
if (test('still writes [StrategicCompact] to stderr (debug log retained)', () => {
|
||||
const { sessionId, counterFile, cleanup } = createCounterContext();
|
||||
cleanup();
|
||||
fs.writeFileSync(counterFile, '49');
|
||||
const result = runCompact({ CLAUDE_SESSION_ID: sessionId });
|
||||
assert.ok(result.stderr.includes('[StrategicCompact]'),
|
||||
`stderr should retain [StrategicCompact] for debug log capture. Got: "${result.stderr}"`);
|
||||
cleanup();
|
||||
})) passed++;
|
||||
else failed++;
|
||||
|
||||
// ── Round 64: default session ID fallback ──
|
||||
console.log('\nDefault session ID fallback (Round 64):');
|
||||
|
||||
|
||||
@@ -114,6 +114,10 @@ function seedMinimalRepo(rootDir, overrides = {}) {
|
||||
'docs/security/supply-chain-incident-response.md': [
|
||||
'TanStack',
|
||||
'Mini Shai-Hulud',
|
||||
'scan-supply-chain-iocs.js',
|
||||
'gh-token-monitor',
|
||||
'.claude/settings.json',
|
||||
'.vscode/tasks.json',
|
||||
'npm audit signatures',
|
||||
'trusted publishing',
|
||||
'pull_request_target',
|
||||
@@ -126,6 +130,8 @@ function seedMinimalRepo(rootDir, overrides = {}) {
|
||||
'id-token: write',
|
||||
'shared cache'
|
||||
].join('\n'),
|
||||
'scripts/ci/scan-supply-chain-iocs.js': 'TanStack Mini Shai-Hulud gh-token-monitor',
|
||||
'tests/ci/scan-supply-chain-iocs.test.js': 'scan-supply-chain-iocs',
|
||||
'tests/ci/validate-workflow-security.test.js': 'npm audit signatures persist-credentials: false',
|
||||
'tests/scripts/npm-publish-surface.test.js': 'npm pack --dry-run Python bytecode',
|
||||
'tests/docs/ecc2-release-surface.test.js': 'publication-readiness.md',
|
||||
|
||||
Reference in New Issue
Block a user