mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-05-15 21:33:04 +08:00
Compare commits
27 Commits
fix/pre-ba
...
5881554a1c
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5881554a1c | ||
|
|
d26d66fd3b | ||
|
|
0c61710c43 | ||
|
|
d49f0329a9 | ||
|
|
95ce9eaaeb | ||
|
|
06f9eca8e2 | ||
|
|
affbd33485 | ||
|
|
9627c201c7 | ||
|
|
1188aeafc4 | ||
|
|
17aafc4506 | ||
|
|
0dcde13384 | ||
|
|
3fadc37802 | ||
|
|
2006d2ee77 | ||
|
|
149fae7008 | ||
|
|
a7a56fa2a2 | ||
|
|
84ac76fa2b | ||
|
|
69b8ec4e0b | ||
|
|
4b67c3cac6 | ||
|
|
c3ea7a1e5e | ||
|
|
468c755abd | ||
|
|
fc96be4924 | ||
|
|
7ca48f376f | ||
|
|
8c7e6611e0 | ||
|
|
b5bdd9352f | ||
|
|
ae02b26cf9 | ||
|
|
cc89c40751 | ||
|
|
880c487c0f |
@@ -1,336 +0,0 @@
|
||||
---
|
||||
name: claude-api
|
||||
description: Anthropic Claude API patterns for Python and TypeScript. Covers Messages API, streaming, tool use, vision, extended thinking, batches, prompt caching, and Claude Agent SDK. Use when building applications with the Claude API or Anthropic SDKs.
|
||||
---
|
||||
|
||||
# Claude API
|
||||
|
||||
Build applications with the Anthropic Claude API and SDKs.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building applications that call the Claude API
|
||||
- Code imports `anthropic` (Python) or `@anthropic-ai/sdk` (TypeScript)
|
||||
- User asks about Claude API patterns, tool use, streaming, or vision
|
||||
- Implementing agent workflows with Claude Agent SDK
|
||||
- Optimizing API costs, token usage, or latency
|
||||
|
||||
## Model Selection
|
||||
|
||||
| Model | ID | Best For |
|
||||
|-------|-----|----------|
|
||||
| Opus 4.6 | `claude-opus-4-6` | Complex reasoning, architecture, research |
|
||||
| Sonnet 4.6 | `claude-sonnet-4-6` | Balanced coding, most development tasks |
|
||||
| Haiku 4.5 | `claude-haiku-4-5-20251001` | Fast responses, high-volume, cost-sensitive |
|
||||
|
||||
Default to Sonnet 4.6 unless the task requires deep reasoning (Opus) or speed/cost optimization (Haiku).
|
||||
|
||||
## Python SDK
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
pip install anthropic
|
||||
```
|
||||
|
||||
### Basic Message
|
||||
|
||||
```python
|
||||
import anthropic
|
||||
|
||||
client = anthropic.Anthropic() # reads ANTHROPIC_API_KEY from env
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
messages=[
|
||||
{"role": "user", "content": "Explain async/await in Python"}
|
||||
]
|
||||
)
|
||||
print(message.content[0].text)
|
||||
```
|
||||
|
||||
### Streaming
|
||||
|
||||
```python
|
||||
with client.messages.stream(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
messages=[{"role": "user", "content": "Write a haiku about coding"}]
|
||||
) as stream:
|
||||
for text in stream.text_stream:
|
||||
print(text, end="", flush=True)
|
||||
```
|
||||
|
||||
### System Prompt
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
system="You are a senior Python developer. Be concise.",
|
||||
messages=[{"role": "user", "content": "Review this function"}]
|
||||
)
|
||||
```
|
||||
|
||||
## TypeScript SDK
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
npm install @anthropic-ai/sdk
|
||||
```
|
||||
|
||||
### Basic Message
|
||||
|
||||
```typescript
|
||||
import Anthropic from "@anthropic-ai/sdk";
|
||||
|
||||
const client = new Anthropic(); // reads ANTHROPIC_API_KEY from env
|
||||
|
||||
const message = await client.messages.create({
|
||||
model: "claude-sonnet-4-6",
|
||||
max_tokens: 1024,
|
||||
messages: [
|
||||
{ role: "user", content: "Explain async/await in TypeScript" }
|
||||
],
|
||||
});
|
||||
console.log(message.content[0].text);
|
||||
```
|
||||
|
||||
### Streaming
|
||||
|
||||
```typescript
|
||||
const stream = client.messages.stream({
|
||||
model: "claude-sonnet-4-6",
|
||||
max_tokens: 1024,
|
||||
messages: [{ role: "user", content: "Write a haiku" }],
|
||||
});
|
||||
|
||||
for await (const event of stream) {
|
||||
if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
|
||||
process.stdout.write(event.delta.text);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Tool Use
|
||||
|
||||
Define tools and let Claude call them:
|
||||
|
||||
```python
|
||||
tools = [
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Get current weather for a location",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {"type": "string", "description": "City name"},
|
||||
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
tools=tools,
|
||||
messages=[{"role": "user", "content": "What's the weather in SF?"}]
|
||||
)
|
||||
|
||||
# Handle tool use response
|
||||
for block in message.content:
|
||||
if block.type == "tool_use":
|
||||
# Execute the tool with block.input
|
||||
result = get_weather(**block.input)
|
||||
# Send result back
|
||||
follow_up = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
tools=tools,
|
||||
messages=[
|
||||
{"role": "user", "content": "What's the weather in SF?"},
|
||||
{"role": "assistant", "content": message.content},
|
||||
{"role": "user", "content": [
|
||||
{"type": "tool_result", "tool_use_id": block.id, "content": str(result)}
|
||||
]}
|
||||
]
|
||||
)
|
||||
```
|
||||
|
||||
## Vision
|
||||
|
||||
Send images for analysis:
|
||||
|
||||
```python
|
||||
import base64
|
||||
|
||||
with open("diagram.png", "rb") as f:
|
||||
image_data = base64.standard_b64encode(f.read()).decode("utf-8")
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": image_data}},
|
||||
{"type": "text", "text": "Describe this diagram"}
|
||||
]
|
||||
}]
|
||||
)
|
||||
```
|
||||
|
||||
## Extended Thinking
|
||||
|
||||
For complex reasoning tasks:
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=16000,
|
||||
thinking={
|
||||
"type": "enabled",
|
||||
"budget_tokens": 10000
|
||||
},
|
||||
messages=[{"role": "user", "content": "Solve this math problem step by step..."}]
|
||||
)
|
||||
|
||||
for block in message.content:
|
||||
if block.type == "thinking":
|
||||
print(f"Thinking: {block.thinking}")
|
||||
elif block.type == "text":
|
||||
print(f"Answer: {block.text}")
|
||||
```
|
||||
|
||||
## Prompt Caching
|
||||
|
||||
Cache large system prompts or context to reduce costs:
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=1024,
|
||||
system=[
|
||||
{"type": "text", "text": large_system_prompt, "cache_control": {"type": "ephemeral"}}
|
||||
],
|
||||
messages=[{"role": "user", "content": "Question about the cached context"}]
|
||||
)
|
||||
# Check cache usage
|
||||
print(f"Cache read: {message.usage.cache_read_input_tokens}")
|
||||
print(f"Cache creation: {message.usage.cache_creation_input_tokens}")
|
||||
```
|
||||
|
||||
## Batches API
|
||||
|
||||
Process large volumes asynchronously at 50% cost reduction:
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
batch = client.messages.batches.create(
|
||||
requests=[
|
||||
{
|
||||
"custom_id": f"request-{i}",
|
||||
"params": {
|
||||
"model": "claude-sonnet-4-6",
|
||||
"max_tokens": 1024,
|
||||
"messages": [{"role": "user", "content": prompt}]
|
||||
}
|
||||
}
|
||||
for i, prompt in enumerate(prompts)
|
||||
]
|
||||
)
|
||||
|
||||
# Poll for completion
|
||||
while True:
|
||||
status = client.messages.batches.retrieve(batch.id)
|
||||
if status.processing_status == "ended":
|
||||
break
|
||||
time.sleep(30)
|
||||
|
||||
# Get results
|
||||
for result in client.messages.batches.results(batch.id):
|
||||
print(result.result.message.content[0].text)
|
||||
```
|
||||
|
||||
## Claude Agent SDK
|
||||
|
||||
Build multi-step agents:
|
||||
|
||||
```python
|
||||
# Note: Agent SDK API surface may change — check official docs
|
||||
import anthropic
|
||||
|
||||
# Define tools as functions
|
||||
tools = [{
|
||||
"name": "search_codebase",
|
||||
"description": "Search the codebase for relevant code",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {"query": {"type": "string"}},
|
||||
"required": ["query"]
|
||||
}
|
||||
}]
|
||||
|
||||
# Run an agentic loop with tool use
|
||||
client = anthropic.Anthropic()
|
||||
messages = [{"role": "user", "content": "Review the auth module for security issues"}]
|
||||
|
||||
while True:
|
||||
response = client.messages.create(
|
||||
model="claude-sonnet-4-6",
|
||||
max_tokens=4096,
|
||||
tools=tools,
|
||||
messages=messages,
|
||||
)
|
||||
if response.stop_reason == "end_turn":
|
||||
break
|
||||
# Handle tool calls and continue the loop
|
||||
messages.append({"role": "assistant", "content": response.content})
|
||||
# ... execute tools and append tool_result messages
|
||||
```
|
||||
|
||||
## Cost Optimization
|
||||
|
||||
| Strategy | Savings | When to Use |
|
||||
|----------|---------|-------------|
|
||||
| Prompt caching | Up to 90% on cached tokens | Repeated system prompts or context |
|
||||
| Batches API | 50% | Non-time-sensitive bulk processing |
|
||||
| Haiku instead of Sonnet | ~75% | Simple tasks, classification, extraction |
|
||||
| Shorter max_tokens | Variable | When you know output will be short |
|
||||
| Streaming | None (same cost) | Better UX, same price |
|
||||
|
||||
## Error Handling
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
from anthropic import APIError, RateLimitError, APIConnectionError
|
||||
|
||||
try:
|
||||
message = client.messages.create(...)
|
||||
except RateLimitError:
|
||||
# Back off and retry
|
||||
time.sleep(60)
|
||||
except APIConnectionError:
|
||||
# Network issue, retry with backoff
|
||||
pass
|
||||
except APIError as e:
|
||||
print(f"API error {e.status_code}: {e.message}")
|
||||
```
|
||||
|
||||
## Environment Setup
|
||||
|
||||
```bash
|
||||
# Required
|
||||
export ANTHROPIC_API_KEY="your-api-key-here"
|
||||
|
||||
# Optional: set default model
|
||||
export ANTHROPIC_MODEL="claude-sonnet-4-6"
|
||||
```
|
||||
|
||||
Never hardcode API keys. Always use environment variables.
|
||||
@@ -1,7 +0,0 @@
|
||||
interface:
|
||||
display_name: "Claude API"
|
||||
short_description: "Claude API patterns for Python and TypeScript"
|
||||
brand_color: "#D97706"
|
||||
default_prompt: "Use $claude-api to build with Claude API and Anthropic SDK patterns."
|
||||
policy:
|
||||
allow_implicit_invocation: true
|
||||
@@ -1,144 +0,0 @@
|
||||
---
|
||||
name: frontend-design
|
||||
description: Create distinctive, production-grade frontend interfaces with high design quality. Use when the user asks to build web components, pages, or applications and the visual direction matters as much as the code quality.
|
||||
---
|
||||
|
||||
# Frontend Design
|
||||
|
||||
Use this when the task is not just "make it work" but "make it look designed."
|
||||
|
||||
This skill is for product pages, dashboards, app shells, components, or visual systems that need a clear point of view instead of generic AI-looking UI.
|
||||
|
||||
## When To Use
|
||||
|
||||
- building a landing page, dashboard, or app surface from scratch
|
||||
- upgrading a bland interface into something intentional and memorable
|
||||
- translating a product concept into a concrete visual direction
|
||||
- implementing a frontend where typography, composition, and motion matter
|
||||
|
||||
## Core Principle
|
||||
|
||||
Pick a direction and commit to it.
|
||||
|
||||
Safe-average UI is usually worse than a strong, coherent aesthetic with a few bold choices.
|
||||
|
||||
## Design Workflow
|
||||
|
||||
### 1. Frame the interface first
|
||||
|
||||
Before coding, settle:
|
||||
|
||||
- purpose
|
||||
- audience
|
||||
- emotional tone
|
||||
- visual direction
|
||||
- one thing the user should remember
|
||||
|
||||
Possible directions:
|
||||
|
||||
- brutally minimal
|
||||
- editorial
|
||||
- industrial
|
||||
- luxury
|
||||
- playful
|
||||
- geometric
|
||||
- retro-futurist
|
||||
- soft and organic
|
||||
- maximalist
|
||||
|
||||
Do not mix directions casually. Choose one and execute it cleanly.
|
||||
|
||||
### 2. Build the visual system
|
||||
|
||||
Define:
|
||||
|
||||
- type hierarchy
|
||||
- color variables
|
||||
- spacing rhythm
|
||||
- layout logic
|
||||
- motion rules
|
||||
- surface / border / shadow treatment
|
||||
|
||||
Use CSS variables or the project's token system so the interface stays coherent as it grows.
|
||||
|
||||
### 3. Compose with intention
|
||||
|
||||
Prefer:
|
||||
|
||||
- asymmetry when it sharpens hierarchy
|
||||
- overlap when it creates depth
|
||||
- strong whitespace when it clarifies focus
|
||||
- dense layouts only when the product benefits from density
|
||||
|
||||
Avoid defaulting to a symmetrical card grid unless it is clearly the right fit.
|
||||
|
||||
### 4. Make motion meaningful
|
||||
|
||||
Use animation to:
|
||||
|
||||
- reveal hierarchy
|
||||
- stage information
|
||||
- reinforce user action
|
||||
- create one or two memorable moments
|
||||
|
||||
Do not scatter generic micro-interactions everywhere. One well-directed load sequence is usually stronger than twenty random hover effects.
|
||||
|
||||
## Strong Defaults
|
||||
|
||||
### Typography
|
||||
|
||||
- pick fonts with character
|
||||
- pair a distinctive display face with a readable body face when appropriate
|
||||
- avoid generic defaults when the page is design-led
|
||||
|
||||
### Color
|
||||
|
||||
- commit to a clear palette
|
||||
- one dominant field with selective accents usually works better than evenly weighted rainbow palettes
|
||||
- avoid cliché purple-gradient-on-white unless the product genuinely calls for it
|
||||
|
||||
### Background
|
||||
|
||||
Use atmosphere:
|
||||
|
||||
- gradients
|
||||
- meshes
|
||||
- textures
|
||||
- subtle noise
|
||||
- patterns
|
||||
- layered transparency
|
||||
|
||||
Flat empty backgrounds are rarely the best answer for a product-facing page.
|
||||
|
||||
### Layout
|
||||
|
||||
- break the grid when the composition benefits from it
|
||||
- use diagonals, offsets, and grouping intentionally
|
||||
- keep reading flow obvious even when the layout is unconventional
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
Never default to:
|
||||
|
||||
- interchangeable SaaS hero sections
|
||||
- generic card piles with no hierarchy
|
||||
- random accent colors without a system
|
||||
- placeholder-feeling typography
|
||||
- motion that exists only because animation was easy to add
|
||||
|
||||
## Execution Rules
|
||||
|
||||
- preserve the established design system when working inside an existing product
|
||||
- match technical complexity to the visual idea
|
||||
- keep accessibility and responsiveness intact
|
||||
- frontends should feel deliberate on desktop and mobile
|
||||
|
||||
## Quality Gate
|
||||
|
||||
Before delivering:
|
||||
|
||||
- the interface has a clear visual point of view
|
||||
- typography and spacing feel intentional
|
||||
- color and motion support the product instead of decorating it randomly
|
||||
- the result does not read like generic AI UI
|
||||
- the implementation is production-grade, not just visually interesting
|
||||
@@ -1,7 +0,0 @@
|
||||
interface:
|
||||
display_name: "Frontend Design"
|
||||
short_description: "Production-grade frontend interface design"
|
||||
brand_color: "#0EA5E9"
|
||||
default_prompt: "Use $frontend-design to build a distinctive production-grade interface."
|
||||
policy:
|
||||
allow_implicit_invocation: true
|
||||
@@ -132,6 +132,26 @@ The test `plugin.json does NOT have explicit hooks declaration` in `tests/hooks/
|
||||
|
||||
---
|
||||
|
||||
## The `mcpServers` Field: Keep the Empty Opt-Out
|
||||
|
||||
ECC keeps `.mcp.json` at the repository root for Codex plugin installs and manual MCP setup.
|
||||
Claude Code also auto-discovers plugin-root `.mcp.json` files by convention, which would bundle the same MCP servers into Claude plugin installs.
|
||||
|
||||
Keep this field in `.claude-plugin/plugin.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {}
|
||||
}
|
||||
```
|
||||
|
||||
This explicit empty object prevents Claude plugin installs from auto-loading ECC's root MCP definitions.
|
||||
Without the opt-out, strict OpenAI-compatible gateways can reject plugin MCP tool names such as `mcp__plugin_everything-claude-code_github__create_pull_request_review` because they exceed 64 characters.
|
||||
|
||||
Users who want the bundled MCP servers should configure them manually from `.mcp.json` or `mcp-configs/mcp-servers.json`.
|
||||
|
||||
---
|
||||
|
||||
## Known Anti-Patterns
|
||||
|
||||
These look correct but are rejected:
|
||||
@@ -142,6 +162,7 @@ These look correct but are rejected:
|
||||
* Relying on inferred paths
|
||||
* Assuming marketplace behavior matches local validation
|
||||
* **Adding `"hooks": "./hooks/hooks.json"`** - auto-loaded by convention, causes duplicate error
|
||||
* Removing `"mcpServers": {}` - re-enables root `.mcp.json` auto-discovery for Claude plugin installs and can produce overlong MCP tool names
|
||||
|
||||
Avoid cleverness. Be explicit.
|
||||
|
||||
@@ -170,7 +191,8 @@ Before submitting changes that touch `plugin.json`:
|
||||
1. Ensure all component fields are arrays
|
||||
2. Include a `version`
|
||||
3. Do NOT add `agents` or `hooks` fields (both are auto-loaded by convention)
|
||||
4. Run:
|
||||
4. Preserve `"mcpServers": {}` unless you are intentionally changing Claude plugin MCP bundling behavior
|
||||
5. Run:
|
||||
|
||||
```bash
|
||||
claude plugin validate .claude-plugin/plugin.json
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"source": "./",
|
||||
"description": "The most comprehensive Claude Code plugin — 48 agents, 184 skills, 79 legacy command shims, selective install profiles, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
|
||||
"description": "The most comprehensive Claude Code plugin — 48 agents, 182 skills, 68 legacy command shims, selective install profiles, and production-ready hooks for TDD, security scanning, code review, and continuous learning",
|
||||
"version": "2.0.0-rc.1",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "everything-claude-code",
|
||||
"version": "2.0.0-rc.1",
|
||||
"description": "Battle-tested Claude Code plugin for engineering teams — 48 agents, 184 skills, 79 legacy command shims, production-ready hooks, and selective install workflows evolved through continuous real-world use",
|
||||
"description": "Battle-tested Claude Code plugin for engineering teams — 48 agents, 182 skills, 68 legacy command shims, production-ready hooks, and selective install workflows evolved through continuous real-world use",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"url": "https://x.com/affaanmustafa"
|
||||
@@ -22,6 +22,7 @@
|
||||
"automation",
|
||||
"best-practices"
|
||||
],
|
||||
"mcpServers": {},
|
||||
"skills": ["./skills/"],
|
||||
"commands": ["./commands/"]
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ This directory contains the **Codex plugin manifest** for Everything Claude Code
|
||||
|
||||
## What This Provides
|
||||
|
||||
- **156 skills** from `./skills/` — reusable Codex workflows for TDD, security,
|
||||
- **182 skills** from `./skills/` — reusable Codex workflows for TDD, security,
|
||||
code review, architecture, and more
|
||||
- **6 MCP servers** — GitHub, Context7, Exa, Memory, Playwright, Sequential Thinking
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "ecc",
|
||||
"version": "2.0.0-rc.1",
|
||||
"description": "Battle-tested Codex workflows — 156 shared ECC skills, production-ready MCP configs, and selective-install-aligned conventions for TDD, security scanning, code review, and autonomous development.",
|
||||
"description": "Battle-tested Codex workflows — 182 shared ECC skills, production-ready MCP configs, and selective-install-aligned conventions for TDD, security scanning, code review, and autonomous development.",
|
||||
"author": {
|
||||
"name": "Affaan Mustafa",
|
||||
"email": "me@affaanmustafa.com",
|
||||
@@ -15,7 +15,7 @@
|
||||
"mcpServers": "./.mcp.json",
|
||||
"interface": {
|
||||
"displayName": "Everything Claude Code",
|
||||
"shortDescription": "156 battle-tested ECC skills plus MCP configs for TDD, security, code review, and autonomous development.",
|
||||
"shortDescription": "182 battle-tested ECC skills plus MCP configs for TDD, security, code review, and autonomous development.",
|
||||
"longDescription": "Everything Claude Code (ECC) is a community-maintained collection of Codex-ready skills and MCP configs evolved over 10+ months of intensive daily use. It covers TDD workflows, security scanning, code review, architecture decisions, operator workflows, and more — all in one installable plugin.",
|
||||
"developerName": "Affaan Mustafa",
|
||||
"category": "Productivity",
|
||||
|
||||
@@ -43,6 +43,14 @@ export const ECCHooksPlugin: ECCHooksPluginFn = async ({
|
||||
return path.join(worktreePath, p)
|
||||
}
|
||||
|
||||
function hasProjectFile(relativePath: string): boolean {
|
||||
try {
|
||||
return fs.existsSync(resolvePath(relativePath))
|
||||
} catch {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
const pendingToolChanges = new Map<string, { path: string; type: "added" | "modified" }>()
|
||||
let writeCounter = 0
|
||||
|
||||
@@ -275,13 +283,8 @@ export const ECCHooksPlugin: ECCHooksPluginFn = async ({
|
||||
log("info", `[ECC] Session started - profile=${currentProfile}`)
|
||||
|
||||
// Check for project-specific context files
|
||||
try {
|
||||
const hasClaudeMd = await $`test -f ${worktree}/CLAUDE.md && echo "yes"`.text()
|
||||
if (hasClaudeMd.trim() === "yes") {
|
||||
log("info", "[ECC] Found CLAUDE.md - loading project context")
|
||||
}
|
||||
} catch {
|
||||
// No CLAUDE.md found
|
||||
if (hasProjectFile("CLAUDE.md")) {
|
||||
log("info", "[ECC] Found CLAUDE.md - loading project context")
|
||||
}
|
||||
},
|
||||
|
||||
@@ -400,7 +403,7 @@ export const ECCHooksPlugin: ECCHooksPluginFn = async ({
|
||||
ECC_PLUGIN: "true",
|
||||
ECC_HOOK_PROFILE: currentProfile,
|
||||
ECC_DISABLED_HOOKS: process.env.ECC_DISABLED_HOOKS || "",
|
||||
PROJECT_ROOT: worktree || directory,
|
||||
PROJECT_ROOT: worktreePath,
|
||||
}
|
||||
|
||||
// Detect package manager
|
||||
@@ -411,12 +414,9 @@ export const ECCHooksPlugin: ECCHooksPluginFn = async ({
|
||||
"package-lock.json": "npm",
|
||||
}
|
||||
for (const [lockfile, pm] of Object.entries(lockfiles)) {
|
||||
try {
|
||||
await $`test -f ${worktree}/${lockfile}`
|
||||
if (hasProjectFile(lockfile)) {
|
||||
env.PACKAGE_MANAGER = pm
|
||||
break
|
||||
} catch {
|
||||
// Not found, try next
|
||||
}
|
||||
}
|
||||
|
||||
@@ -430,11 +430,8 @@ export const ECCHooksPlugin: ECCHooksPluginFn = async ({
|
||||
}
|
||||
const detected: string[] = []
|
||||
for (const [file, lang] of Object.entries(langDetectors)) {
|
||||
try {
|
||||
await $`test -f ${worktree}/${file}`
|
||||
if (hasProjectFile(file)) {
|
||||
detected.push(lang)
|
||||
} catch {
|
||||
// Not found
|
||||
}
|
||||
}
|
||||
if (detected.length > 0) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — Agent Instructions
|
||||
|
||||
This is a **production-ready AI coding plugin** providing 48 specialized agents, 184 skills, 79 commands, and automated hook workflows for software development.
|
||||
This is a **production-ready AI coding plugin** providing 48 specialized agents, 182 skills, 68 commands, and automated hook workflows for software development.
|
||||
|
||||
**Version:** 2.0.0-rc.1
|
||||
|
||||
@@ -146,8 +146,8 @@ Troubleshoot failures: check test isolation → verify mocks → fix implementat
|
||||
|
||||
```
|
||||
agents/ — 48 specialized subagents
|
||||
skills/ — 184 workflow skills and domain knowledge
|
||||
commands/ — 79 slash commands
|
||||
skills/ — 182 workflow skills and domain knowledge
|
||||
commands/ — 68 slash commands
|
||||
hooks/ — Trigger-based automations
|
||||
rules/ — Always-follow guidelines (common + per-language)
|
||||
scripts/ — Cross-platform Node.js utilities
|
||||
|
||||
95
README.md
95
README.md
@@ -89,7 +89,7 @@ This repo is the raw code only. The guides explain everything.
|
||||
### v2.0.0-rc.1 — Surface Refresh, Operator Workflows, and ECC 2.0 Alpha (Apr 2026)
|
||||
|
||||
- **Dashboard GUI** — New Tkinter-based desktop application (`ecc_dashboard.py` or `npm run dashboard`) with dark/light theme toggle, font customization, and project logo in header and taskbar.
|
||||
- **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 38 agents, 156 skills, and 72 legacy command shims.
|
||||
- **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 48 agents, 182 skills, and 68 legacy command shims.
|
||||
- **Operator and outbound workflow expansion** — `brand-voice`, `social-graph-ranker`, `connections-optimizer`, `customer-billing-ops`, `ecc-tools-cost-audit`, `google-workspace-ops`, `project-flow-ops`, and `workspace-surface-audit` round out the operator lane.
|
||||
- **Media and launch tooling** — `manim-video`, `remotion-video-creation`, and upgraded social publishing surfaces make technical explainers and launch content part of the same system.
|
||||
- **Framework and product surface growth** — `nestjs-patterns`, richer Codex/OpenCode install surfaces, and expanded cross-harness packaging keep the repo usable beyond Claude Code alone.
|
||||
@@ -179,6 +179,34 @@ Most Claude Code users should use exactly one install path:
|
||||
|
||||
If you already layered multiple installs and things look duplicated, skip straight to [Reset / Uninstall ECC](#reset--uninstall-ecc).
|
||||
|
||||
### Low-context / no-hooks path
|
||||
|
||||
If hooks feel too global or you only want ECC's rules, agents, commands, and core workflow skills, skip the plugin and use the minimal manual profile:
|
||||
|
||||
```bash
|
||||
./install.sh --profile minimal --target claude
|
||||
```
|
||||
|
||||
```powershell
|
||||
.\install.ps1 --profile minimal --target claude
|
||||
# or
|
||||
npx ecc-install --profile minimal --target claude
|
||||
```
|
||||
|
||||
This profile intentionally excludes `hooks-runtime`.
|
||||
|
||||
If you want the normal core profile but need hooks off, use:
|
||||
|
||||
```bash
|
||||
./install.sh --profile core --without baseline:hooks --target claude
|
||||
```
|
||||
|
||||
Add hooks later only if you want runtime enforcement:
|
||||
|
||||
```bash
|
||||
./install.sh --target claude --modules hooks-runtime
|
||||
```
|
||||
|
||||
### Step 1: Install the Plugin (Recommended)
|
||||
|
||||
> NOTE: The plugin is convenient, but the OSS installer below is still the most reliable path if your Claude Code build has trouble resolving self-hosted marketplace entries.
|
||||
@@ -312,7 +340,7 @@ If you stacked methods, clean up in this order:
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
**That's it!** You now have access to 48 agents, 184 skills, and 79 legacy command shims.
|
||||
**That's it!** You now have access to 48 agents, 182 skills, and 68 legacy command shims.
|
||||
|
||||
### Dashboard GUI
|
||||
|
||||
@@ -499,17 +527,15 @@ everything-claude-code/
|
||||
| |-- autonomous-loops/ # Autonomous loop patterns: sequential pipelines, PR loops, DAG orchestration (NEW)
|
||||
| |-- plankton-code-quality/ # Write-time code quality enforcement with Plankton hooks (NEW)
|
||||
|
|
||||
|-- commands/ # Legacy slash-entry shims; prefer skills/
|
||||
| |-- tdd.md # /tdd - Test-driven development
|
||||
|-- commands/ # Maintained slash-entry compatibility; prefer skills/
|
||||
| |-- plan.md # /plan - Implementation planning
|
||||
| |-- e2e.md # /e2e - E2E test generation
|
||||
| |-- code-review.md # /code-review - Quality review
|
||||
| |-- build-fix.md # /build-fix - Fix build errors
|
||||
| |-- refactor-clean.md # /refactor-clean - Dead code removal
|
||||
| |-- quality-gate.md # /quality-gate - Verification gate
|
||||
| |-- learn.md # /learn - Extract patterns mid-session (Longform Guide)
|
||||
| |-- learn-eval.md # /learn-eval - Extract, evaluate, and save patterns (NEW)
|
||||
| |-- checkpoint.md # /checkpoint - Save verification state (Longform Guide)
|
||||
| |-- verify.md # /verify - Run verification loop (Longform Guide)
|
||||
| |-- setup-pm.md # /setup-pm - Configure package manager
|
||||
| |-- go-review.md # /go-review - Go code review (NEW)
|
||||
| |-- go-test.md # /go-test - Go TDD workflow (NEW)
|
||||
@@ -526,13 +552,17 @@ everything-claude-code/
|
||||
| |-- multi-backend.md # /multi-backend - Backend multi-service orchestration (NEW)
|
||||
| |-- multi-frontend.md # /multi-frontend - Frontend multi-service orchestration (NEW)
|
||||
| |-- multi-workflow.md # /multi-workflow - General multi-service workflows (NEW)
|
||||
| |-- orchestrate.md # /orchestrate - Multi-agent coordination
|
||||
| |-- sessions.md # /sessions - Session history management
|
||||
| |-- eval.md # /eval - Evaluate against criteria
|
||||
| |-- test-coverage.md # /test-coverage - Test coverage analysis
|
||||
| |-- update-docs.md # /update-docs - Update documentation
|
||||
| |-- update-codemaps.md # /update-codemaps - Update codemaps
|
||||
| |-- python-review.md # /python-review - Python code review (NEW)
|
||||
|-- legacy-command-shims/ # Opt-in archive for retired shims such as /tdd and /eval
|
||||
| |-- tdd.md # /tdd - Prefer the tdd-workflow skill
|
||||
| |-- e2e.md # /e2e - Prefer the e2e-testing skill
|
||||
| |-- eval.md # /eval - Prefer the eval-harness skill
|
||||
| |-- verify.md # /verify - Prefer the verification-loop skill
|
||||
| |-- orchestrate.md # /orchestrate - Prefer dmux-workflows or multi-workflow
|
||||
|
|
||||
|-- rules/ # Always-follow guidelines (copy to ~/.claude/rules/)
|
||||
| |-- README.md # Structure overview and installation guide
|
||||
@@ -795,9 +825,12 @@ cp -r everything-claude-code/skills/search-first ~/.claude/skills/
|
||||
# cp -r everything-claude-code/skills/$s ~/.claude/skills/
|
||||
# done
|
||||
|
||||
# Optional: keep legacy slash-command compatibility during migration
|
||||
# Optional: keep maintained slash-command compatibility during migration
|
||||
mkdir -p ~/.claude/commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
# Retired shims live in legacy-command-shims/commands/.
|
||||
# Copy individual files from there only if you still need old names such as /tdd.
|
||||
```
|
||||
|
||||
#### Install hooks
|
||||
@@ -824,6 +857,8 @@ Windows note: the Claude config directory is `%USERPROFILE%\\.claude`, not `~/cl
|
||||
|
||||
#### Configure MCPs
|
||||
|
||||
Claude plugin installs intentionally do not auto-enable ECC's bundled MCP server definitions. This avoids overlong plugin MCP tool names on strict third-party gateways while keeping manual MCP setup available.
|
||||
|
||||
Copy desired MCP server definitions from `mcp-configs/mcp-servers.json` into your official Claude Code config in `~/.claude/settings.json`, or into a project-scoped `.mcp.json` if you want repo-local MCP access.
|
||||
|
||||
If you already run your own copies of ECC-bundled MCPs, set:
|
||||
@@ -857,7 +892,7 @@ You are a senior code reviewer...
|
||||
|
||||
### Skills
|
||||
|
||||
Skills are the primary workflow surface. They can be invoked directly, suggested automatically, and reused by agents. ECC still ships `commands/` during migration, but new workflow development should land in `skills/` first.
|
||||
Skills are the primary workflow surface. They can be invoked directly, suggested automatically, and reused by agents. ECC still ships maintained `commands/` during migration, while retired short-name shims live under `legacy-command-shims/` for explicit opt-in only. New workflow development should land in `skills/` first.
|
||||
|
||||
```markdown
|
||||
# TDD Workflow
|
||||
@@ -903,16 +938,16 @@ See [`rules/README.md`](rules/README.md) for installation and structure details.
|
||||
|
||||
## Which Agent Should I Use?
|
||||
|
||||
Not sure where to start? Use this quick reference. Skills are the canonical workflow surface; slash entries below are the compatibility form most users already know.
|
||||
Not sure where to start? Use this quick reference. Skills are the canonical workflow surface; maintained slash entries stay available for command-first workflows.
|
||||
|
||||
| I want to... | Use this command | Agent used |
|
||||
| I want to... | Use this surface | Agent used |
|
||||
|--------------|-----------------|------------|
|
||||
| Plan a new feature | `/ecc:plan "Add auth"` | planner |
|
||||
| Design system architecture | `/ecc:plan` + architect agent | architect |
|
||||
| Write code with tests first | `/tdd` | tdd-guide |
|
||||
| Write code with tests first | `tdd-workflow` skill | tdd-guide |
|
||||
| Review code I just wrote | `/code-review` | code-reviewer |
|
||||
| Fix a failing build | `/build-fix` | build-error-resolver |
|
||||
| Run end-to-end tests | `/e2e` | e2e-runner |
|
||||
| Run end-to-end tests | `e2e-testing` skill | e2e-runner |
|
||||
| Find security vulnerabilities | `/security-scan` | security-reviewer |
|
||||
| Remove dead code | `/refactor-clean` | refactor-cleaner |
|
||||
| Update documentation | `/update-docs` | doc-updater |
|
||||
@@ -923,19 +958,19 @@ Not sure where to start? Use this quick reference. Skills are the canonical work
|
||||
|
||||
### Common Workflows
|
||||
|
||||
Slash forms below are shown because they are still the fastest familiar entrypoint. Under the hood, ECC is shifting these workflows toward skills-first definitions.
|
||||
Slash forms below are shown where they remain part of the maintained command surface. Retired short-name shims such as `/tdd` and `/eval` live in `legacy-command-shims/` for explicit opt-in only.
|
||||
|
||||
**Starting a new feature:**
|
||||
```
|
||||
/ecc:plan "Add user authentication with OAuth"
|
||||
→ planner creates implementation blueprint
|
||||
/tdd → tdd-guide enforces write-tests-first
|
||||
tdd-workflow skill → tdd-guide enforces write-tests-first
|
||||
/code-review → code-reviewer checks your work
|
||||
```
|
||||
|
||||
**Fixing a bug:**
|
||||
```
|
||||
/tdd → tdd-guide: write a failing test that reproduces it
|
||||
tdd-workflow skill → tdd-guide: write a failing test that reproduces it
|
||||
→ implement the fix, verify test passes
|
||||
/code-review → code-reviewer: catch regressions
|
||||
```
|
||||
@@ -943,7 +978,7 @@ Slash forms below are shown because they are still the fastest familiar entrypoi
|
||||
**Preparing for production:**
|
||||
```
|
||||
/security-scan → security-reviewer: OWASP Top 10 audit
|
||||
/e2e → e2e-runner: critical user flow tests
|
||||
e2e-testing skill → e2e-runner: critical user flow tests
|
||||
/test-coverage → verify 80%+ coverage
|
||||
```
|
||||
|
||||
@@ -1193,7 +1228,7 @@ Codex macOS app:
|
||||
|-----------|-------|---------|
|
||||
| Config | 1 | `.codex/config.toml` — top-level approvals/sandbox/web_search, MCP servers, notifications, profiles |
|
||||
| AGENTS.md | 2 | Root (universal) + `.codex/AGENTS.md` (Codex-specific supplement) |
|
||||
| Skills | 30 | `.agents/skills/` — SKILL.md + agents/openai.yaml per skill |
|
||||
| Skills | 32 | `.agents/skills/` — SKILL.md + agents/openai.yaml per skill |
|
||||
| MCP Servers | 6 | GitHub, Context7, Exa, Memory, Playwright, Sequential Thinking (7 with Supabase via `--update-mcp` sync) |
|
||||
| Profiles | 2 | `strict` (read-only sandbox) and `yolo` (full auto-approve) |
|
||||
| Agent Roles | 3 | `.codex/agents/` — explorer, reviewer, docs-researcher |
|
||||
@@ -1202,14 +1237,17 @@ Codex macOS app:
|
||||
|
||||
Skills at `.agents/skills/` are auto-loaded by Codex:
|
||||
|
||||
Canonical Anthropic skills such as `claude-api`, `frontend-design`, and `skill-creator` are intentionally not re-bundled here. Install those from [`anthropics/skills`](https://github.com/anthropics/skills) when you want the official versions.
|
||||
|
||||
| Skill | Description |
|
||||
|-------|-------------|
|
||||
| agent-introspection-debugging | Debug agent behavior, routing, and prompt boundaries |
|
||||
| agent-sort | Sort agent catalogs and assignment surfaces |
|
||||
| api-design | REST API design patterns |
|
||||
| article-writing | Long-form writing from notes and voice references |
|
||||
| backend-patterns | API design, database, caching |
|
||||
| brand-voice | Source-derived writing style profiles from real content |
|
||||
| bun-runtime | Bun as runtime, package manager, bundler, and test runner |
|
||||
| claude-api | Anthropic Claude API patterns for Python and TypeScript |
|
||||
| coding-standards | Universal coding standards |
|
||||
| content-engine | Platform-native social content and repurposing |
|
||||
| crosspost | Multi-platform content distribution across X, LinkedIn, Threads |
|
||||
@@ -1228,6 +1266,7 @@ Skills at `.agents/skills/` are auto-loaded by Codex:
|
||||
| market-research | Source-attributed market and competitor research |
|
||||
| mcp-server-patterns | Build MCP servers with Node/TypeScript SDK |
|
||||
| nextjs-turbopack | Next.js 16+ and Turbopack incremental bundling |
|
||||
| product-capability | Translate product goals into scoped capability maps |
|
||||
| security-review | Comprehensive security checklist |
|
||||
| strategic-compact | Context management |
|
||||
| tdd-workflow | Test-driven development with 80%+ coverage |
|
||||
@@ -1279,8 +1318,8 @@ The configuration is automatically detected from `.opencode/opencode.json`.
|
||||
| Feature | Claude Code | OpenCode | Status |
|
||||
|---------|-------------|----------|--------|
|
||||
| Agents | PASS: 48 agents | PASS: 12 agents | **Claude Code leads** |
|
||||
| Commands | PASS: 79 commands | PASS: 31 commands | **Claude Code leads** |
|
||||
| Skills | PASS: 184 skills | PASS: 37 skills | **Claude Code leads** |
|
||||
| Commands | PASS: 68 commands | PASS: 31 commands | **Claude Code leads** |
|
||||
| Skills | PASS: 182 skills | PASS: 37 skills | **Claude Code leads** |
|
||||
| Hooks | PASS: 8 event types | PASS: 11 events | **OpenCode has more!** |
|
||||
| Rules | PASS: 29 rules | PASS: 13 instructions | **Claude Code leads** |
|
||||
| MCP Servers | PASS: 14 servers | PASS: Full | **Full parity** |
|
||||
@@ -1300,21 +1339,17 @@ OpenCode's plugin system is MORE sophisticated than Claude Code with 20+ event t
|
||||
|
||||
**Additional OpenCode events**: `file.edited`, `file.watcher.updated`, `message.updated`, `lsp.client.diagnostics`, `tui.toast.show`, and more.
|
||||
|
||||
### Available Slash Entry Shims (31+)
|
||||
### Maintained Slash Entries
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `/plan` | Create implementation plan |
|
||||
| `/tdd` | Enforce TDD workflow |
|
||||
| `/code-review` | Review code changes |
|
||||
| `/build-fix` | Fix build errors |
|
||||
| `/e2e` | Generate E2E tests |
|
||||
| `/refactor-clean` | Remove dead code |
|
||||
| `/orchestrate` | Multi-agent workflow |
|
||||
| `/learn` | Extract patterns from session |
|
||||
| `/checkpoint` | Save verification state |
|
||||
| `/verify` | Run verification loop |
|
||||
| `/eval` | Evaluate against criteria |
|
||||
| `/quality-gate` | Run the maintained verification gate |
|
||||
| `/update-docs` | Update documentation |
|
||||
| `/update-codemaps` | Update codemaps |
|
||||
| `/test-coverage` | Analyze coverage |
|
||||
@@ -1388,8 +1423,8 @@ ECC is the **first plugin to maximize every major AI coding tool**. Here's how e
|
||||
| Feature | Claude Code | Cursor IDE | Codex CLI | OpenCode |
|
||||
|---------|------------|------------|-----------|----------|
|
||||
| **Agents** | 48 | Shared (AGENTS.md) | Shared (AGENTS.md) | 12 |
|
||||
| **Commands** | 79 | Shared | Instruction-based | 31 |
|
||||
| **Skills** | 184 | Shared | 10 (native format) | 37 |
|
||||
| **Commands** | 68 | Shared | Instruction-based | 31 |
|
||||
| **Skills** | 182 | Shared | 10 (native format) | 37 |
|
||||
| **Hook Events** | 8 types | 15 types | None yet | 11 types |
|
||||
| **Hook Scripts** | 20+ scripts | 16 scripts (DRY adapter) | N/A | Plugin hooks |
|
||||
| **Rules** | 34 (common + lang) | 34 (YAML frontmatter) | Instruction-based | 13 instructions |
|
||||
|
||||
@@ -160,7 +160,7 @@ Copy-Item -Recurse rules/typescript "$HOME/.claude/rules/"
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
**完成!** 你现在可以使用 48 个代理、184 个技能和 79 个命令。
|
||||
**完成!** 你现在可以使用 48 个代理、182 个技能和 68 个命令。
|
||||
|
||||
### multi-* 命令需要额外配置
|
||||
|
||||
@@ -330,17 +330,15 @@ everything-claude-code/
|
||||
| |-- autonomous-loops/ # 自主循环模式:顺序流水线、PR 循环、DAG 编排(新增)
|
||||
| |-- plankton-code-quality/ # 基于 Plankton 钩子的实时代码质量管控(新增)
|
||||
|
|
||||
|-- commands/ # 传统斜杠命令兼容层;优先使用 skills/
|
||||
| |-- tdd.md # /tdd - 测试驱动开发
|
||||
|-- commands/ # 维护中的斜杠命令兼容层;优先使用 skills/
|
||||
| |-- plan.md # /plan - 实现规划
|
||||
| |-- e2e.md # /e2e - 生成端到端测试
|
||||
| |-- code-review.md # /code-review - 代码质量审查
|
||||
| |-- build-fix.md # /build-fix - 修复构建错误
|
||||
| |-- quality-gate.md # /quality-gate - 验证门禁
|
||||
| |-- refactor-clean.md # /refactor-clean - 清理无效代码
|
||||
| |-- learn.md # /learn - 会话中提取模式(长文本指南)
|
||||
| |-- learn-eval.md # /learn-eval - 提取、评估并保存模式(新增)
|
||||
| |-- checkpoint.md # /checkpoint - 保存验证状态(长文本指南)
|
||||
| |-- verify.md # /verify - 运行验证循环(长文本指南)
|
||||
| |-- setup-pm.md # /setup-pm - 配置包管理器
|
||||
| |-- go-review.md # /go-review - Go 代码审查(新增)
|
||||
| |-- go-test.md # /go-test - Go TDD 工作流(新增)
|
||||
@@ -357,13 +355,17 @@ everything-claude-code/
|
||||
| |-- multi-backend.md # /multi-backend - 后端多服务编排(新增)
|
||||
| |-- multi-frontend.md # /multi-frontend - 前端多服务编排(新增)
|
||||
| |-- multi-workflow.md # /multi-workflow - 通用多服务工作流(新增)
|
||||
| |-- orchestrate.md # /orchestrate - 多智能体协同调度
|
||||
| |-- sessions.md # /sessions - 会话历史管理
|
||||
| |-- eval.md # /eval - 按标准评估
|
||||
| |-- test-coverage.md # /test-coverage - 测试覆盖率分析
|
||||
| |-- update-docs.md # /update-docs - 更新文档
|
||||
| |-- update-codemaps.md # /update-codemaps - 更新代码映射
|
||||
| |-- python-review.md # /python-review - Python 代码审查(新增)
|
||||
|-- legacy-command-shims/ # 已退役短命令的按需归档,例如 /tdd 和 /eval
|
||||
| |-- tdd.md # /tdd - 优先使用 tdd-workflow 技能
|
||||
| |-- e2e.md # /e2e - 优先使用 e2e-testing 技能
|
||||
| |-- eval.md # /eval - 优先使用 eval-harness 技能
|
||||
| |-- verify.md # /verify - 优先使用 verification-loop 技能
|
||||
| |-- orchestrate.md # /orchestrate - 优先使用 dmux-workflows 或 multi-workflow
|
||||
|
|
||||
|-- rules/ # 必须遵守的规范(复制到 ~/.claude/rules/)
|
||||
| |-- README.md # 结构概览与安装指南
|
||||
@@ -618,9 +620,12 @@ cp -r everything-claude-code/skills/search-first ~/.claude/skills/
|
||||
# cp -r everything-claude-code/skills/$s ~/.claude/skills/
|
||||
# done
|
||||
|
||||
# 可选:迁移期间保留传统斜杠命令兼容
|
||||
# 可选:迁移期间保留维护中的斜杠命令兼容
|
||||
mkdir -p ~/.claude/commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
# 已退役短命令位于 legacy-command-shims/commands/。
|
||||
# 仅在仍需要 /tdd 等旧名称时,单独复制对应文件。
|
||||
```
|
||||
|
||||
#### 将钩子配置添加到 settings.json
|
||||
|
||||
14
agent.yaml
14
agent.yaml
@@ -28,7 +28,6 @@ skills:
|
||||
- canary-watch
|
||||
- carrier-relationship-management
|
||||
- ck
|
||||
- claude-api
|
||||
- claude-devfleet
|
||||
- click-path-audit
|
||||
- clickhouse-io
|
||||
@@ -144,20 +143,14 @@ skills:
|
||||
- visa-doc-translate
|
||||
- x-api
|
||||
commands:
|
||||
- agent-sort
|
||||
- aside
|
||||
- auto-update
|
||||
- build-fix
|
||||
- checkpoint
|
||||
- claw
|
||||
- code-review
|
||||
- context-budget
|
||||
- cpp-build
|
||||
- cpp-review
|
||||
- cpp-test
|
||||
- devfleet
|
||||
- docs
|
||||
- e2e
|
||||
- eval
|
||||
- evolve
|
||||
- feature-dev
|
||||
- flutter-build
|
||||
@@ -191,12 +184,10 @@ commands:
|
||||
- multi-frontend
|
||||
- multi-plan
|
||||
- multi-workflow
|
||||
- orchestrate
|
||||
- plan
|
||||
- pm2
|
||||
- projects
|
||||
- promote
|
||||
- prompt-optimize
|
||||
- prp-commit
|
||||
- prp-implement
|
||||
- prp-plan
|
||||
@@ -208,7 +199,6 @@ commands:
|
||||
- refactor-clean
|
||||
- resume-session
|
||||
- review-pr
|
||||
- rules-distill
|
||||
- rust-build
|
||||
- rust-review
|
||||
- rust-test
|
||||
@@ -218,11 +208,9 @@ commands:
|
||||
- setup-pm
|
||||
- skill-create
|
||||
- skill-health
|
||||
- tdd
|
||||
- test-coverage
|
||||
- update-codemaps
|
||||
- update-docs
|
||||
- verify
|
||||
tags:
|
||||
- agent-harness
|
||||
- developer-tools
|
||||
|
||||
28
commands/auto-update.md
Normal file
28
commands/auto-update.md
Normal file
@@ -0,0 +1,28 @@
|
||||
---
|
||||
description: Pull the latest ECC repo changes and reinstall the current managed targets.
|
||||
disable-model-invocation: true
|
||||
---
|
||||
|
||||
# Auto Update
|
||||
|
||||
Update ECC from its upstream repo and regenerate the current context's managed install using the original install-state request.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Preview the update without mutating anything
|
||||
ECC_ROOT="${CLAUDE_PLUGIN_ROOT:-$(node -e "var r=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var s of [['ecc'],['ecc@ecc'],['marketplace','ecc'],['everything-claude-code'],['everything-claude-code@everything-claude-code'],['marketplace','everything-claude-code']]){var l=p.join(d,'plugins',...s);if(f.existsSync(p.join(l,q)))return l}try{for(var g of ['ecc','everything-claude-code']){var b=p.join(d,'plugins','cache',g);for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}}catch(x){}return d})();console.log(r)")}"
|
||||
node "$ECC_ROOT/scripts/auto-update.js" --dry-run
|
||||
|
||||
# Update only Cursor-managed files in the current project
|
||||
node "$ECC_ROOT/scripts/auto-update.js" --target cursor
|
||||
|
||||
# Override the ECC repo root explicitly
|
||||
node "$ECC_ROOT/scripts/auto-update.js" --repo-root /path/to/everything-claude-code
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- This command uses the recorded install-state request and reruns `install-apply.js` after pulling the latest repo changes.
|
||||
- Reinstall is intentional: it handles upstream renames and deletions that `repair.js` cannot safely reconstruct from stale operations alone.
|
||||
- Use `--dry-run` first if you want to see the reconstructed reinstall plan before mutating anything.
|
||||
@@ -165,7 +165,7 @@ The agent will stop and report if:
|
||||
|
||||
- `/cpp-test` - Run tests after build succeeds
|
||||
- `/cpp-review` - Review code quality
|
||||
- `/verify` - Full verification loop
|
||||
- `verification-loop` skill - Full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -243,7 +243,7 @@ genhtml coverage.info --output-directory coverage_html
|
||||
|
||||
- `/cpp-build` - Fix build errors
|
||||
- `/cpp-review` - Review code after implementation
|
||||
- `/verify` - Run full verification loop
|
||||
- `verification-loop` skill - Run full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -156,7 +156,7 @@ The agent will stop and report if:
|
||||
|
||||
- `/flutter-test` — Run tests after build succeeds
|
||||
- `/flutter-review` — Review code quality
|
||||
- `/verify` — Full verification loop
|
||||
- `verification-loop` skill — Full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -134,7 +134,7 @@ Test Status: PASS ✓
|
||||
|
||||
- `/flutter-build` — Fix build errors before running tests
|
||||
- `/flutter-review` — Review code after tests pass
|
||||
- `/tdd` — Test-driven development workflow
|
||||
- `tdd-workflow` skill — Test-driven development workflow
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -175,7 +175,7 @@ The agent will stop and report if:
|
||||
|
||||
- `/go-test` - Run tests after build succeeds
|
||||
- `/go-review` - Review code quality
|
||||
- `/verify` - Full verification loop
|
||||
- `verification-loop` skill - Full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -260,7 +260,7 @@ go test -race -cover ./...
|
||||
|
||||
- `/go-build` - Fix build errors
|
||||
- `/go-review` - Review code after implementation
|
||||
- `/verify` - Run full verification loop
|
||||
- `verification-loop` skill - Run full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -55,7 +55,7 @@ Dependencies:
|
||||
|
||||
Recommended Next Steps:
|
||||
- /plan to create implementation plan
|
||||
- /tdd to implement with tests first
|
||||
- `tdd-workflow` skill to implement with tests first
|
||||
```
|
||||
|
||||
### `/jira comment <TICKET-KEY>`
|
||||
@@ -95,7 +95,7 @@ If credentials are missing, stop and direct the user to set them up.
|
||||
|
||||
After analyzing a ticket:
|
||||
- Use `/plan` to create an implementation plan from the requirements
|
||||
- Use `/tdd` to implement with test-driven development
|
||||
- Use the `tdd-workflow` skill to implement with test-driven development
|
||||
- Use `/code-review` after implementation
|
||||
- Use `/jira comment` to post progress back to the ticket
|
||||
- Use `/jira transition` to move the ticket when work is complete
|
||||
|
||||
@@ -166,7 +166,7 @@ The agent will stop and report if:
|
||||
|
||||
- `/kotlin-test` - Run tests after build succeeds
|
||||
- `/kotlin-review` - Review code quality
|
||||
- `/verify` - Full verification loop
|
||||
- `verification-loop` skill - Full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -304,7 +304,7 @@ open build/reports/kover/html/index.html
|
||||
|
||||
- `/kotlin-build` - Fix build errors
|
||||
- `/kotlin-review` - Review code after implementation
|
||||
- `/verify` - Run full verification loop
|
||||
- `verification-loop` skill - Run full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -4,7 +4,9 @@ description: Restate requirements, assess risks, and create step-by-step impleme
|
||||
|
||||
# Plan Command
|
||||
|
||||
This command invokes the **planner** agent to create a comprehensive implementation plan before writing any code.
|
||||
This command creates a comprehensive implementation plan before writing any code.
|
||||
|
||||
Run inline by default. Do not call the Task tool or any subagent by default. This keeps `/plan` usable from plugin installs that ship commands without agent files.
|
||||
|
||||
## What This Command Does
|
||||
|
||||
@@ -24,7 +26,7 @@ Use `/plan` when:
|
||||
|
||||
## How It Works
|
||||
|
||||
The planner agent will:
|
||||
The assistant will:
|
||||
|
||||
1. **Analyze the request** and restate requirements in clear terms
|
||||
2. **Break down into phases** with specific, actionable steps
|
||||
@@ -38,7 +40,7 @@ The planner agent will:
|
||||
```
|
||||
User: /plan I need to add real-time notifications when markets resolve
|
||||
|
||||
Agent (planner):
|
||||
Assistant:
|
||||
# Implementation Plan: Real-Time Market Resolution Notifications
|
||||
|
||||
## Requirements Restatement
|
||||
@@ -93,7 +95,7 @@ Agent (planner):
|
||||
|
||||
## Important Notes
|
||||
|
||||
**CRITICAL**: The planner agent will **NOT** write any code until you explicitly confirm the plan with "yes" or "proceed" or similar affirmative response.
|
||||
**CRITICAL**: This command will **NOT** write any code until you explicitly confirm the plan with "yes" or "proceed" or similar affirmative response.
|
||||
|
||||
If you want changes, respond with:
|
||||
- "modify: [your changes]"
|
||||
@@ -103,15 +105,17 @@ If you want changes, respond with:
|
||||
## Integration with Other Commands
|
||||
|
||||
After planning:
|
||||
- Use `/tdd` to implement with test-driven development
|
||||
- Use the `tdd-workflow` skill to implement with test-driven development
|
||||
- Use `/build-fix` if build errors occur
|
||||
- Use `/code-review` to review completed implementation
|
||||
|
||||
> **Need deeper planning?** Use `/prp-plan` for artifact-producing planning with PRD integration, codebase analysis, and pattern extraction. Use `/prp-implement` to execute those plans with rigorous validation loops.
|
||||
|
||||
## Related Agents
|
||||
## Optional Planner Agent
|
||||
|
||||
This command invokes the `planner` agent provided by ECC.
|
||||
ECC also provides a `planner` agent for manual installs that include agent files. Use it only when the local runtime already exposes that subagent and the user explicitly asks you to delegate planning.
|
||||
|
||||
If the `planner` subagent is unavailable, continue planning inline instead of surfacing an "Agent type 'planner' not found" error.
|
||||
|
||||
For manual installs, the source file lives at:
|
||||
`agents/planner.md`
|
||||
|
||||
@@ -171,7 +171,7 @@ Run: `black app/routes/user.py app/services/auth.py`
|
||||
|
||||
## Integration with Other Commands
|
||||
|
||||
- Use `/tdd` first to ensure tests pass
|
||||
- Use the `tdd-workflow` skill first to ensure tests pass
|
||||
- Use `/code-review` for non-Python specific concerns
|
||||
- Use `/python-review` before committing
|
||||
- Use `/build-fix` if static analysis tools fail
|
||||
|
||||
@@ -179,7 +179,7 @@ The agent will stop and report if:
|
||||
|
||||
- `/rust-test` - Run tests after build succeeds
|
||||
- `/rust-review` - Review code quality
|
||||
- `/verify` - Full verification loop
|
||||
- `verification-loop` skill - Full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -300,7 +300,7 @@ cargo test --no-fail-fast
|
||||
|
||||
- `/rust-build` - Fix build errors
|
||||
- `/rust-review` - Review code after implementation
|
||||
- `/verify` - Run full verification loop
|
||||
- `verification-loop` skill - Run full verification loop
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -110,7 +110,7 @@ This document captures architect-level improvements for the Everything Claude Co
|
||||
|
||||
### 5.1 Hook Runtime Consistency
|
||||
|
||||
**Issue:** Most hooks invoke Node scripts via `run-with-flags.js`; one path uses `run-with-flags-shell.sh` + `observe.sh`. The mixed runtime is documented but could be simplified over time.
|
||||
**Issue:** Hooks should keep a consistent Node-mode dispatch surface. Continuous-learning observation now dispatches through `run-with-flags.js` and `observe-runner.js`, which delegates to the existing `observe.sh` implementation without exposing a shell-mode hook entry.
|
||||
|
||||
**Recommendation:**
|
||||
|
||||
|
||||
@@ -83,14 +83,27 @@ These stay local and should be configured per operator:
|
||||
## Suggested Bring-Up Order
|
||||
|
||||
0. Run `ecc migrate audit --source ~/.hermes` first to inventory the legacy workspace and see which parts already map onto ECC2.
|
||||
0.5. Generate and review artifacts with `ecc migrate plan` / `ecc migrate scaffold`, scaffold reusable legacy skills with `ecc migrate import-skills --output-dir migration-artifacts/skills`, scaffold legacy tool translation templates with `ecc migrate import-tools --output-dir migration-artifacts/tools`, scaffold legacy bridge plugins with `ecc migrate import-plugins --output-dir migration-artifacts/plugins`, preview recurring jobs with `ecc migrate import-schedules --dry-run`, preview gateway dispatch with `ecc migrate import-remote --dry-run`, preview safe env/service context with `ecc migrate import-env --dry-run`, then import sanitized workspace memory with `ecc migrate import-memory`.
|
||||
1. Install ECC and verify the baseline harness setup.
|
||||
0.5. Plan and scaffold migration artifacts before importing anything:
|
||||
- generate reviewable plans with `ecc migrate plan` and `ecc migrate scaffold`
|
||||
- scaffold reusable legacy skills with `ecc migrate import-skills --output-dir migration-artifacts/skills`
|
||||
- scaffold tool translation templates with `ecc migrate import-tools --output-dir migration-artifacts/tools`
|
||||
- scaffold bridge plugin templates with `ecc migrate import-plugins --output-dir migration-artifacts/plugins`
|
||||
- preview recurring jobs with `ecc migrate import-schedules --dry-run`
|
||||
- preview gateway dispatch with `ecc migrate import-remote --dry-run`
|
||||
- preview safe env/service context with `ecc migrate import-env --dry-run`
|
||||
- import sanitized workspace memory with `ecc migrate import-memory`
|
||||
1. Install ECC and verify the baseline harness setup with `node tests/run-all.js`; the expected result is a zero-failure test summary.
|
||||
2. Install Hermes and point it at ECC-imported skills.
|
||||
3. Register the MCP servers you actually use every day.
|
||||
4. Authenticate Google Drive first, then GitHub, then distribution channels.
|
||||
5. Start with a small cron surface: readiness check, content accountability, inbox triage, revenue monitor.
|
||||
6. Only then add heavier personal workflows like health, relationship graphing, or outbound sequencing.
|
||||
|
||||
## Related Docs
|
||||
|
||||
- [Hermes/OpenClaw migration guide](HERMES-OPENCLAW-MIGRATION.md)
|
||||
- [Cross-harness architecture](architecture/cross-harness.md)
|
||||
|
||||
## Why Hermes x ECC
|
||||
|
||||
This stack is useful when you want:
|
||||
|
||||
@@ -78,6 +78,25 @@ Do not ship:
|
||||
- private datasets
|
||||
- local-only automation packs that have not been reviewed
|
||||
|
||||
## Worked Example
|
||||
|
||||
Use `skills/hermes-imports/SKILL.md` as the same skill source across harnesses.
|
||||
|
||||
The workflow is:
|
||||
|
||||
1. Author the durable behavior once in `skills/hermes-imports/SKILL.md`.
|
||||
2. Keep secrets, local paths, and raw operator memory out of the skill.
|
||||
3. Let each harness adapt how the skill is loaded.
|
||||
4. Test the source skill and the harness-facing metadata separately.
|
||||
|
||||
Claude Code gets the skill through the Claude plugin surface and can enforce related hooks natively.
|
||||
|
||||
Codex reads the repo instructions, `.codex-plugin/plugin.json`, and the MCP reference config. The same skill source still describes the workflow, but hook parity is instruction-backed unless Codex adds a native hook surface.
|
||||
|
||||
OpenCode gets the skill through the OpenCode package/plugin surface. Event handling can reuse ECC hook logic through the adapter layer, while the skill text stays unchanged.
|
||||
|
||||
If a change requires editing three harness copies of the same workflow, the shared source is in the wrong place. Put the workflow back in `skills/`, then adapt only loading, event shape, or command routing at the harness edge.
|
||||
|
||||
## Today vs Later
|
||||
|
||||
Supported today:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Sessionsコマンド
|
||||
|
||||
Claude Codeセッション履歴を管理 - `~/.claude/sessions/` に保存されたセッションのリスト表示、読み込み、エイリアス設定、編集を行います。
|
||||
Claude Codeセッション履歴を管理 - `~/.claude/session-data/` に保存されたセッションのリスト表示、読み込み、エイリアス設定、編集を行います。旧 `~/.claude/sessions/` のファイルも後方互換のために読み取ります。
|
||||
|
||||
## 使用方法
|
||||
|
||||
@@ -81,7 +81,7 @@ const size = sm.getSessionSize(session.sessionPath);
|
||||
const aliases = aa.getAliasesForSession(session.filename);
|
||||
|
||||
console.log('Session: ' + session.filename);
|
||||
console.log('Path: ~/.claude/sessions/' + session.filename);
|
||||
console.log('Path: ' + session.sessionPath);
|
||||
console.log('');
|
||||
console.log('Statistics:');
|
||||
console.log(' Lines: ' + stats.lineCount);
|
||||
@@ -299,7 +299,7 @@ $ARGUMENTS:
|
||||
|
||||
## 備考
|
||||
|
||||
- セッションは `~/.claude/sessions/` にMarkdownファイルとして保存されます
|
||||
- セッションは `~/.claude/session-data/` にMarkdownファイルとして保存され、旧 `~/.claude/sessions/` のファイルも引き続き読み取られます
|
||||
- エイリアスは `~/.claude/session-aliases.json` に保存されます
|
||||
- セッションIDは短縮できます(通常、最初の4〜8文字で一意になります)
|
||||
- 頻繁に参照するセッションにはエイリアスを使用してください
|
||||
|
||||
@@ -58,7 +58,7 @@ claude plugin install typescript-lsp@claude-plugins-official
|
||||
|
||||
**ワークフロー:**
|
||||
- `commit-commands` - Gitワークフロー
|
||||
- `frontend-design` - UIパターン
|
||||
- `frontend-patterns` - UIパターン
|
||||
- `feature-dev` - 機能開発
|
||||
|
||||
---
|
||||
|
||||
@@ -10,8 +10,8 @@
|
||||
|
||||
## Release Surface
|
||||
|
||||
- confirm package and plugin version policy for `2.0.0-rc.1` (drafted in manifest bump prep)
|
||||
- confirm whether `ecc2/Cargo.toml` moves from `0.1.0` to `2.0.0-rc.1`
|
||||
- verify package, plugin, marketplace, OpenCode, and agent metadata stays at `2.0.0-rc.1`
|
||||
- verify `ecc2/Cargo.toml` stays at `0.1.0` for rc.1; `ecc2/` remains an alpha control-plane scaffold
|
||||
- update release metadata in one dedicated release-version PR
|
||||
- run the root test suite
|
||||
- run `cd ecc2 && cargo test`
|
||||
|
||||
61
docs/releases/2.0.0-rc.1/quickstart.md
Normal file
61
docs/releases/2.0.0-rc.1/quickstart.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# ECC v2.0.0-rc.1 Quickstart
|
||||
|
||||
This path is for a new contributor who wants to verify the release surface before touching feature work.
|
||||
|
||||
## Clone
|
||||
|
||||
```bash
|
||||
git clone https://github.com/affaan-m/everything-claude-code.git
|
||||
cd everything-claude-code
|
||||
```
|
||||
|
||||
Start from a clean checkout. Do not copy private operator state, raw workspace exports, tokens, or local Hermes files into the repo.
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
npm ci
|
||||
```
|
||||
|
||||
This installs the Node-based validation and packaging toolchain used by the public release surface.
|
||||
|
||||
## Verify
|
||||
|
||||
```bash
|
||||
node tests/run-all.js
|
||||
```
|
||||
|
||||
Expected result: every test passes with zero failures. For release-specific drift, run the focused check:
|
||||
|
||||
```bash
|
||||
node tests/docs/ecc2-release-surface.test.js
|
||||
```
|
||||
|
||||
## First Skill
|
||||
|
||||
Read `skills/hermes-imports/SKILL.md` first.
|
||||
|
||||
It shows the intended ECC 2.0 pattern:
|
||||
|
||||
- take a repeated operator workflow
|
||||
- remove credentials, private paths, raw workspace exports, and personal memory
|
||||
- keep the durable workflow shape
|
||||
- publish the sanitized result as a reusable `SKILL.md`
|
||||
|
||||
Do not start by importing a private Hermes workflow wholesale. Start by distilling one reusable skill.
|
||||
|
||||
## Switch Harness
|
||||
|
||||
Use the same skill source across harnesses:
|
||||
|
||||
- Claude Code consumes ECC through the Claude plugin and native hooks.
|
||||
- Codex consumes ECC through `AGENTS.md`, `.codex-plugin/plugin.json`, and MCP reference config.
|
||||
- OpenCode consumes ECC through the OpenCode package/plugin surface.
|
||||
|
||||
The portable unit is still `skills/*/SKILL.md`. Harness-specific files should load or adapt that source, not redefine the workflow.
|
||||
|
||||
## Next Docs
|
||||
|
||||
- [Hermes setup](../../HERMES-SETUP.md)
|
||||
- [Cross-harness architecture](../../architecture/cross-harness.md)
|
||||
- [Release notes](release-notes.md)
|
||||
@@ -26,7 +26,7 @@ The system now has a clearer shape:
|
||||
- cross-harness install surfaces for Claude Code, Codex, OpenCode, Cursor, and related tools
|
||||
- Hermes as an optional operator shell for chat, cron, handoffs, and daily work routing
|
||||
|
||||
## Preview Boundaries
|
||||
## Release Candidate Boundaries
|
||||
|
||||
This is a release candidate, not the final GA claim.
|
||||
|
||||
@@ -47,8 +47,9 @@ What stays local:
|
||||
|
||||
## Upgrade Motion
|
||||
|
||||
1. Read the [Hermes setup guide](../../HERMES-SETUP.md).
|
||||
2. Review the [cross-harness architecture](../../architecture/cross-harness.md).
|
||||
3. Start with one workflow lane: engineering, research, content, or outreach.
|
||||
4. Import only sanitized operator patterns into ECC skills.
|
||||
5. Treat `ecc2/` as an alpha control plane until release packaging and installer behavior are finalized.
|
||||
1. Follow the [rc.1 quickstart](quickstart.md).
|
||||
2. Read the [Hermes setup guide](../../HERMES-SETUP.md).
|
||||
3. Review the [cross-harness architecture](../../architecture/cross-harness.md).
|
||||
4. Start with one workflow lane: engineering, research, content, or outreach.
|
||||
5. Import only sanitized operator patterns into ECC skills.
|
||||
6. Treat `ecc2/` as an alpha control plane until release packaging and installer behavior are finalized.
|
||||
|
||||
@@ -1,5 +1,45 @@
|
||||
# Değişiklik Günlüğü
|
||||
|
||||
## 2.0.0-rc.1 - 2026-04-28
|
||||
|
||||
### Öne Çıkanlar
|
||||
|
||||
- Hermes operatör hikayesi için genel ECC 2.0 sürüm adayı yüzeyi eklendi.
|
||||
- ECC, Claude Code, Codex, Cursor, OpenCode ve Gemini genelinde yeniden kullanılabilir cross-harness altyapı olarak belgelendi.
|
||||
- Özel operatör state'i yayımlamak yerine sanitize edilmiş Hermes import becerisi eklendi.
|
||||
|
||||
### Sürüm Yüzeyi
|
||||
|
||||
- Paket, plugin, marketplace, OpenCode, ajan ve README metadataları `2.0.0-rc.1` olarak güncellendi.
|
||||
- Sürüm notları, sosyal taslaklar, launch checklist, handoff notları ve demo prompt'ları `docs/releases/2.0.0-rc.1/` altında toplandı.
|
||||
- ECC/Hermes sınırı için `docs/architecture/cross-harness.md` ve regresyon kapsamı eklendi.
|
||||
- `ecc2/` sürümlemesi bağımsız tutuldu; release engineering aksi karar vermedikçe alpha control-plane scaffold olarak kalır.
|
||||
|
||||
### Notlar
|
||||
|
||||
- Bu bir sürüm adayıdır; tam ECC 2.0 control-plane yol haritası için GA iddiası değildir.
|
||||
- Ön sürüm npm yayımları, release engineering aksi karar vermedikçe `next` dist-tag kullanmalıdır.
|
||||
|
||||
## 1.10.0 - 2026-04-05
|
||||
|
||||
### Öne Çıkanlar
|
||||
|
||||
- Genel repo yüzeyi birkaç haftalık OSS büyümesi ve backlog merge'lerinden sonra canlı repo ile senkronize edildi.
|
||||
- Operatör iş akışı hattı voice, graph-ranking, billing, workspace ve outbound becerileriyle genişletildi.
|
||||
- Medya üretim hattı Manim ve Remotion odaklı launch araçlarıyla genişletildi.
|
||||
- ECC 2.0 alpha control-plane binary artık `ecc2/` üzerinden yerelde build ediliyor ve ilk kullanılabilir CLI/TUI yüzeyini sunuyor.
|
||||
|
||||
### Sürüm Yüzeyi
|
||||
|
||||
- Plugin, marketplace, Codex, OpenCode ve ajan metadataları `1.10.0` olarak güncellendi.
|
||||
- Yayınlanan sayımlar canlı OSS yüzeyine eşitlendi: 38 ajan, 156 beceri, 72 komut.
|
||||
- Üst seviye install dokümanları ve marketplace açıklamaları mevcut repo durumuyla eşitlendi.
|
||||
|
||||
### Notlar
|
||||
|
||||
- Claude plugin'i platform seviyesindeki rules dağıtım kısıtlarıyla sınırlı kalır; selective install / OSS yolu hâlâ en güvenilir tam kurulum yoludur.
|
||||
- Bu sürüm bir repo-yüzeyi düzeltmesi ve ekosistem senkronizasyonudur; tam ECC 2.0 yol haritasının tamamlandığı iddiası değildir.
|
||||
|
||||
## 1.9.0 - 2026-03-20
|
||||
|
||||
### Öne Çıkanlar
|
||||
|
||||
@@ -4,7 +4,7 @@ description: Claude Code session geçmişini, aliasları ve session metadata'sı
|
||||
|
||||
# Sessions Komutu
|
||||
|
||||
Claude Code session geçmişini yönet - `~/.claude/sessions/` dizininde saklanan session'ları listele, yükle, alias ata ve düzenle.
|
||||
Claude Code session geçmişini yönet - `~/.claude/session-data/` dizininde saklanan session'ları listele, yükle, alias ata ve düzenle; eski `~/.claude/sessions/` dosyalarını da geriye dönük uyumluluk için okuyun.
|
||||
|
||||
## Kullanım
|
||||
|
||||
@@ -89,7 +89,7 @@ const size = sm.getSessionSize(session.sessionPath);
|
||||
const aliases = aa.getAliasesForSession(session.filename);
|
||||
|
||||
console.log('Session: ' + session.filename);
|
||||
console.log('Path: ~/.claude/sessions/' + session.filename);
|
||||
console.log('Path: ' + session.sessionPath);
|
||||
console.log('');
|
||||
console.log('Statistics:');
|
||||
console.log(' Lines: ' + stats.lineCount);
|
||||
@@ -287,7 +287,7 @@ $ARGUMENTS:
|
||||
|
||||
## Notlar
|
||||
|
||||
- Session'lar `~/.claude/sessions/` dizininde markdown dosyaları olarak saklanır
|
||||
- Session'lar `~/.claude/session-data/` dizininde markdown dosyaları olarak saklanır; eski `~/.claude/sessions/` dosyaları da okunmaya devam eder
|
||||
- Aliaslar `~/.claude/session-aliases.json` dosyasında saklanır
|
||||
- Session ID'leri kısaltılabilir (ilk 4-8 karakter genellikle yeterince benzersizdir)
|
||||
- Sık referans verilen session'lar için aliasları kullanın
|
||||
|
||||
@@ -292,7 +292,7 @@ Bu da geçerli bir seçimdir ve Claude Code ile iyi çalışır. LSP işlevselli
|
||||
|
||||
```markdown
|
||||
ralph-wiggum@claude-code-plugins # Loop otomasyonu
|
||||
frontend-design@claude-code-plugins # UI/UX desenleri
|
||||
frontend-patterns@claude-code-plugins # UI/UX desenleri
|
||||
commit-commands@claude-code-plugins # Git iş akışı
|
||||
security-guidance@claude-code-plugins # Güvenlik kontrolleri
|
||||
pr-review-toolkit@claude-code-plugins # PR otomasyonu
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Everything Claude Code (ECC) — 智能体指令
|
||||
|
||||
这是一个**生产就绪的 AI 编码插件**,提供 48 个专业代理、184 项技能、79 条命令以及自动化钩子工作流,用于软件开发。
|
||||
这是一个**生产就绪的 AI 编码插件**,提供 48 个专业代理、182 项技能、68 条命令以及自动化钩子工作流,用于软件开发。
|
||||
|
||||
**版本:** 2.0.0-rc.1
|
||||
|
||||
@@ -147,8 +147,8 @@
|
||||
|
||||
```
|
||||
agents/ — 48 个专业子代理
|
||||
skills/ — 184 个工作流技能和领域知识
|
||||
commands/ — 79 个斜杠命令
|
||||
skills/ — 182 个工作流技能和领域知识
|
||||
commands/ — 68 个斜杠命令
|
||||
hooks/ — 基于触发的自动化
|
||||
rules/ — 始终遵循的指导方针(通用 + 每种语言)
|
||||
scripts/ — 跨平台 Node.js 实用工具
|
||||
|
||||
@@ -1,5 +1,45 @@
|
||||
# 更新日志
|
||||
|
||||
## 2.0.0-rc.1 - 2026-04-28
|
||||
|
||||
### 亮点
|
||||
|
||||
* 为 Hermes 操作员叙事新增公开的 ECC 2.0 release candidate 表面。
|
||||
* 将 ECC 明确记录为跨 Claude Code、Codex、Cursor、OpenCode 和 Gemini 的可复用 cross-harness 基础层。
|
||||
* 新增经过清理的 Hermes import 技能表面,而不是发布私有操作员状态。
|
||||
|
||||
### 发布表面
|
||||
|
||||
* 将 package、plugin、marketplace、OpenCode、agent 和 README 元数据更新为 `2.0.0-rc.1`。
|
||||
* 在 `docs/releases/2.0.0-rc.1/` 下集中发布说明、社交草稿、发布清单、交接说明和演示提示词。
|
||||
* 新增 `docs/architecture/cross-harness.md`,并补充 ECC/Hermes 边界的回归覆盖。
|
||||
* `ecc2/` 版本保持独立;除非 release engineering 另有决定,它仍是 alpha control-plane scaffold。
|
||||
|
||||
### 备注
|
||||
|
||||
* 这是 release candidate,不是完整 ECC 2.0 control-plane 路线图的 GA 声明。
|
||||
* 预发布 npm 发布应使用 `next` dist-tag,除非 release engineering 明确选择其他策略。
|
||||
|
||||
## 1.10.0 - 2026-04-05
|
||||
|
||||
### 亮点
|
||||
|
||||
* 在数周 OSS 增长和 backlog 合并后,公开发布表面已同步到当前仓库状态。
|
||||
* 操作员工作流扩展了 voice、graph-ranking、billing、workspace 和 outbound 技能。
|
||||
* 媒体生成工作流扩展了 Manim 和 Remotion 优先的发布工具。
|
||||
* ECC 2.0 alpha control-plane binary 现在可从 `ecc2/` 本地构建,并提供首个可用的 CLI/TUI 表面。
|
||||
|
||||
### 发布表面
|
||||
|
||||
* 将 plugin、marketplace、Codex、OpenCode 和 agent 元数据更新为 `1.10.0`。
|
||||
* 将公开计数同步到当前 OSS 表面:38 个代理、156 个技能、72 个命令。
|
||||
* 刷新顶层安装文档和 marketplace 描述,使其匹配当前仓库状态。
|
||||
|
||||
### 备注
|
||||
|
||||
* Claude plugin 仍受平台级 rules 分发限制影响;selective install / OSS 路径仍是最可靠的完整安装方式。
|
||||
* 这是仓库表面校正和生态同步版本,不表示完整 ECC 2.0 路线图已经完成。
|
||||
|
||||
## 1.9.0 - 2026-03-20
|
||||
|
||||
### 亮点
|
||||
|
||||
@@ -215,7 +215,7 @@ Copy-Item -Recurse rules/typescript "$HOME/.claude/rules/"
|
||||
/plugin list everything-claude-code@everything-claude-code
|
||||
```
|
||||
|
||||
**搞定!** 你现在可以使用 48 个智能体、184 项技能和 79 个命令了。
|
||||
**搞定!** 你现在可以使用 48 个智能体、182 项技能和 68 个命令了。
|
||||
|
||||
***
|
||||
|
||||
@@ -371,17 +371,15 @@ everything-claude-code/
|
||||
| |-- autonomous-loops/ # 自主循环模式:顺序流水线、PR 循环与 DAG 编排(新增)
|
||||
| |-- plankton-code-quality/ # 使用 Plankton hooks 的编写期代码质量控制(新增)
|
||||
|
|
||||
|-- commands/ # 快速执行的斜杠命令
|
||||
| |-- tdd.md # /tdd - 测试驱动开发
|
||||
|-- commands/ # 维护中的斜杠命令兼容层;优先使用 skills/
|
||||
| |-- plan.md # /plan - 实现规划
|
||||
| |-- e2e.md # /e2e - 端到端测试生成
|
||||
| |-- code-review.md # /code-review - 质量审查
|
||||
| |-- build-fix.md # /build-fix - 修复构建错误
|
||||
| |-- refactor-clean.md # /refactor-clean - 无用代码清理
|
||||
| |-- quality-gate.md # /quality-gate - 验证门禁
|
||||
| |-- learn.md # /learn - 会话中提取模式(长文指南)
|
||||
| |-- learn-eval.md # /learn-eval - 提取、评估并保存模式(新增)
|
||||
| |-- checkpoint.md # /checkpoint - 保存验证状态(长文指南)
|
||||
| |-- verify.md # /verify - 运行验证循环(长文指南)
|
||||
| |-- setup-pm.md # /setup-pm - 配置包管理器
|
||||
| |-- go-review.md # /go-review - Go 代码审查(新增)
|
||||
| |-- go-test.md # /go-test - Go TDD 工作流(新增)
|
||||
@@ -397,13 +395,17 @@ everything-claude-code/
|
||||
| |-- multi-backend.md # /multi-backend - 后端多服务编排(新增)
|
||||
| |-- multi-frontend.md # /multi-frontend - 前端多服务编排(新增)
|
||||
| |-- multi-workflow.md # /multi-workflow - 通用多服务工作流(新增)
|
||||
| |-- orchestrate.md # /orchestrate - 多代理协调
|
||||
| |-- sessions.md # /sessions - 会话历史管理
|
||||
| |-- eval.md # /eval - 按标准评估
|
||||
| |-- test-coverage.md # /test-coverage - 测试覆盖率分析
|
||||
| |-- update-docs.md # /update-docs - 更新文档
|
||||
| |-- update-codemaps.md # /update-codemaps - 更新代码映射
|
||||
| |-- python-review.md # /python-review - Python 代码审查(新增)
|
||||
|-- legacy-command-shims/ # 已退役短命令的按需归档,例如 /tdd 和 /eval
|
||||
| |-- tdd.md # /tdd - 优先使用 tdd-workflow 技能
|
||||
| |-- e2e.md # /e2e - 优先使用 e2e-testing 技能
|
||||
| |-- eval.md # /eval - 优先使用 eval-harness 技能
|
||||
| |-- verify.md # /verify - 优先使用 verification-loop 技能
|
||||
| |-- orchestrate.md # /orchestrate - 优先使用 dmux-workflows 或 multi-workflow
|
||||
|
|
||||
|-- rules/ # 必须遵循的规则(复制到 ~/.claude/rules/)
|
||||
| |-- README.md # 结构说明与安装指南
|
||||
@@ -654,9 +656,12 @@ cp -r everything-claude-code/rules/python/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/golang/* ~/.claude/rules/
|
||||
cp -r everything-claude-code/rules/php/* ~/.claude/rules/
|
||||
|
||||
# Copy commands
|
||||
# Copy maintained commands
|
||||
cp everything-claude-code/commands/*.md ~/.claude/commands/
|
||||
|
||||
# Retired shims live in legacy-command-shims/commands/.
|
||||
# Copy individual files from there only if you still need old names such as /tdd.
|
||||
|
||||
# Copy skills (core vs niche)
|
||||
# Recommended (new users): core/general skills only
|
||||
cp -r everything-claude-code/.agents/skills/* ~/.claude/skills/
|
||||
@@ -746,16 +751,16 @@ rules/
|
||||
|
||||
## 我应该使用哪个代理?
|
||||
|
||||
不确定从哪里开始?使用这个快速参考:
|
||||
不确定从哪里开始?使用这个快速参考。技能是规范工作流表面,维护中的斜杠命令保留给偏命令式工作流。
|
||||
|
||||
| 我想要... | 使用此命令 | 使用的智能体 |
|
||||
| 我想要... | 使用此表面 | 使用的智能体 |
|
||||
|--------------|-----------------|------------|
|
||||
| 规划新功能 | `/ecc:plan "Add auth"` | planner |
|
||||
| 设计系统架构 | `/ecc:plan` + architect agent | architect |
|
||||
| 先写测试再写代码 | `/tdd` | tdd-guide |
|
||||
| 先写测试再写代码 | `tdd-workflow` 技能 | tdd-guide |
|
||||
| 评审我刚写的代码 | `/code-review` | code-reviewer |
|
||||
| 修复失败的构建 | `/build-fix` | build-error-resolver |
|
||||
| 运行端到端测试 | `/e2e` | e2e-runner |
|
||||
| 运行端到端测试 | `e2e-testing` 技能 | e2e-runner |
|
||||
| 查找安全漏洞 | `/security-scan` | security-reviewer |
|
||||
| 移除死代码 | `/refactor-clean` | refactor-cleaner |
|
||||
| 更新文档 | `/update-docs` | doc-updater |
|
||||
@@ -771,14 +776,14 @@ rules/
|
||||
```
|
||||
/ecc:plan "使用 OAuth 添加用户身份验证"
|
||||
→ 规划器创建实现蓝图
|
||||
/tdd → tdd-guide 强制执行先写测试
|
||||
tdd-workflow 技能 → tdd-guide 强制执行先写测试
|
||||
/code-review → 代码审查员检查你的工作
|
||||
```
|
||||
|
||||
**修复错误:**
|
||||
|
||||
```
|
||||
/tdd → tdd-guide:编写一个能复现问题的失败测试
|
||||
tdd-workflow 技能 → tdd-guide:编写一个能复现问题的失败测试
|
||||
→ 实现修复,验证测试通过
|
||||
/code-review → code-reviewer:捕捉回归问题
|
||||
```
|
||||
@@ -787,7 +792,7 @@ rules/
|
||||
|
||||
```
|
||||
/security-scan → security-reviewer: OWASP Top 10 审计
|
||||
/e2e → e2e-runner: 关键用户流程测试
|
||||
e2e-testing 技能 → e2e-runner: 关键用户流程测试
|
||||
/test-coverage → verify 80%+ 覆盖率
|
||||
```
|
||||
|
||||
@@ -1029,7 +1034,7 @@ Codex macOS 应用:
|
||||
|-----------|-------|---------|
|
||||
| 配置 | 1 | `.codex/config.toml` —— 顶级 approvals/sandbox/web\_search, MCP 服务器,通知,配置文件 |
|
||||
| AGENTS.md | 2 | 根目录(通用)+ `.codex/AGENTS.md`(Codex 特定补充) |
|
||||
| 技能 | 16 | `.agents/skills/` —— SKILL.md + agents/openai.yaml 每个技能 |
|
||||
| 技能 | 32 | `.agents/skills/` —— SKILL.md + agents/openai.yaml 每个技能 |
|
||||
| MCP 服务器 | 4 | GitHub, Context7, Memory, Sequential Thinking(基于命令) |
|
||||
| 配置文件 | 2 | `strict`(只读沙箱)和 `yolo`(完全自动批准) |
|
||||
| 代理角色 | 3 | `.codex/agents/` —— explorer, reviewer, docs-researcher |
|
||||
@@ -1038,24 +1043,42 @@ Codex macOS 应用:
|
||||
|
||||
位于 `.agents/skills/` 的技能会被 Codex 自动加载:
|
||||
|
||||
`claude-api`、`frontend-design` 和 `skill-creator` 等 Anthropic 官方技能不会在此重复打包。需要这些官方版本时,请从 [`anthropics/skills`](https://github.com/anthropics/skills) 安装。
|
||||
|
||||
| 技能 | 描述 |
|
||||
|-------|-------------|
|
||||
| tdd-workflow | 测试驱动开发,覆盖率 80%+ |
|
||||
| security-review | 全面的安全检查清单 |
|
||||
| coding-standards | 通用编码标准 |
|
||||
| frontend-patterns | React/Next.js 模式 |
|
||||
| frontend-slides | HTML 演示文稿、PPTX 转换、视觉风格探索 |
|
||||
| agent-introspection-debugging | 调试智能体行为、路由和提示边界 |
|
||||
| agent-sort | 整理智能体目录和分配表面 |
|
||||
| api-design | REST API 设计模式 |
|
||||
| article-writing | 根据笔记和语音参考进行长文写作 |
|
||||
| content-engine | 平台原生的社交内容和再利用 |
|
||||
| market-research | 带来源归属的市场和竞争对手研究 |
|
||||
| investor-materials | 幻灯片、备忘录、模型和一页纸文档 |
|
||||
| investor-outreach | 个性化外联、跟进和介绍摘要 |
|
||||
| backend-patterns | API 设计、数据库、缓存 |
|
||||
| brand-voice | 从真实内容中提取来源驱动的写作风格 |
|
||||
| bun-runtime | Bun 运行时、包管理器、打包器和测试运行器 |
|
||||
| coding-standards | 通用编码标准 |
|
||||
| content-engine | 平台原生的社交内容和再利用 |
|
||||
| crosspost | X、LinkedIn、Threads 等多平台内容分发 |
|
||||
| deep-research | 多源研究、综合和来源归属 |
|
||||
| dmux-workflows | 使用 tmux pane manager 进行多智能体编排 |
|
||||
| documentation-lookup | 通过 Context7 MCP 获取最新库和框架文档 |
|
||||
| e2e-testing | Playwright 端到端测试 |
|
||||
| eval-harness | 评估驱动的开发 |
|
||||
| everything-claude-code | ECC 项目的开发约定和模式 |
|
||||
| exa-search | 通过 Exa MCP 进行网络、代码和公司研究 |
|
||||
| fal-ai-media | 图像、视频和音频的统一媒体生成 |
|
||||
| frontend-patterns | React/Next.js 模式 |
|
||||
| frontend-slides | HTML 演示文稿、PPTX 转换、视觉风格探索 |
|
||||
| investor-materials | 幻灯片、备忘录、模型和一页纸文档 |
|
||||
| investor-outreach | 个性化外联、跟进和介绍摘要 |
|
||||
| market-research | 带来源归属的市场和竞争对手研究 |
|
||||
| mcp-server-patterns | 使用 Node/TypeScript SDK 构建 MCP 服务器 |
|
||||
| nextjs-turbopack | Next.js 16+ 和 Turbopack 增量打包 |
|
||||
| product-capability | 将产品目标转化为有范围的能力图 |
|
||||
| security-review | 全面的安全检查清单 |
|
||||
| strategic-compact | 上下文管理 |
|
||||
| api-design | REST API 设计模式 |
|
||||
| tdd-workflow | 测试驱动开发,覆盖率 80%+ |
|
||||
| verification-loop | 构建、测试、代码检查、类型检查、安全 |
|
||||
| video-editing | 使用 FFmpeg 和 Remotion 的 AI 辅助视频编辑工作流 |
|
||||
| x-api | X/Twitter 发帖和分析 API 集成 |
|
||||
|
||||
### 关键限制
|
||||
|
||||
@@ -1101,8 +1124,8 @@ opencode
|
||||
| 功能特性 | Claude Code | OpenCode | 状态 |
|
||||
|---------|-------------|----------|--------|
|
||||
| 智能体 | PASS: 48 个 | PASS: 12 个 | **Claude Code 领先** |
|
||||
| 命令 | PASS: 79 个 | PASS: 31 个 | **Claude Code 领先** |
|
||||
| 技能 | PASS: 184 项 | PASS: 37 项 | **Claude Code 领先** |
|
||||
| 命令 | PASS: 68 个 | PASS: 31 个 | **Claude Code 领先** |
|
||||
| 技能 | PASS: 182 项 | PASS: 37 项 | **Claude Code 领先** |
|
||||
| 钩子 | PASS: 8 种事件类型 | PASS: 11 种事件 | **OpenCode 更多!** |
|
||||
| 规则 | PASS: 29 条 | PASS: 13 条指令 | **Claude Code 领先** |
|
||||
| MCP 服务器 | PASS: 14 个 | PASS: 完整 | **完全对等** |
|
||||
@@ -1122,21 +1145,17 @@ OpenCode 的插件系统比 Claude Code 更复杂,有 20 多种事件类型:
|
||||
|
||||
**额外的 OpenCode 事件**:`file.edited`、`file.watcher.updated`、`message.updated`、`lsp.client.diagnostics`、`tui.toast.show` 等等。
|
||||
|
||||
### 可用命令(31+)
|
||||
### 维护中的斜杠命令
|
||||
|
||||
| 命令 | 描述 |
|
||||
|---------|-------------|
|
||||
| `/plan` | 创建实施计划 |
|
||||
| `/tdd` | 强制执行 TDD 工作流 |
|
||||
| `/code-review` | 审查代码变更 |
|
||||
| `/build-fix` | 修复构建错误 |
|
||||
| `/e2e` | 生成端到端测试 |
|
||||
| `/refactor-clean` | 移除死代码 |
|
||||
| `/orchestrate` | 多智能体工作流 |
|
||||
| `/learn` | 从会话中提取模式 |
|
||||
| `/checkpoint` | 保存验证状态 |
|
||||
| `/verify` | 运行验证循环 |
|
||||
| `/eval` | 根据标准进行评估 |
|
||||
| `/quality-gate` | 运行维护中的验证门禁 |
|
||||
| `/update-docs` | 更新文档 |
|
||||
| `/update-codemaps` | 更新代码地图 |
|
||||
| `/test-coverage` | 分析覆盖率 |
|
||||
@@ -1213,8 +1232,8 @@ ECC 是**第一个最大化利用每个主要 AI 编码工具的插件**。以
|
||||
| 功能特性 | Claude Code | Cursor IDE | Codex CLI | OpenCode |
|
||||
|---------|------------|------------|-----------|----------|
|
||||
| **智能体** | 48 | 共享 (AGENTS.md) | 共享 (AGENTS.md) | 12 |
|
||||
| **命令** | 79 | 共享 | 基于指令 | 31 |
|
||||
| **技能** | 184 | 共享 | 10 (原生格式) | 37 |
|
||||
| **命令** | 68 | 共享 | 基于指令 | 31 |
|
||||
| **技能** | 182 | 共享 | 10 (原生格式) | 37 |
|
||||
| **钩子事件** | 8 种类型 | 15 种类型 | 暂无 | 11 种类型 |
|
||||
| **钩子脚本** | 20+ 个脚本 | 16 个脚本 (DRY 适配器) | N/A | 插件钩子 |
|
||||
| **规则** | 34 (通用 + 语言) | 34 (YAML 前页) | 基于指令 | 13 条指令 |
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
description: 从 ~/.claude/sessions/ 加载最新的会话文件,并从上次会话结束的地方恢复工作,保留完整上下文。
|
||||
description: 从 ~/.claude/session-data/ 加载最新的会话文件,并从上次会话结束的地方恢复工作,保留完整上下文。
|
||||
---
|
||||
|
||||
# 恢复会话命令
|
||||
@@ -17,10 +17,10 @@ description: 从 ~/.claude/sessions/ 加载最新的会话文件,并从上次
|
||||
## 用法
|
||||
|
||||
```
|
||||
/resume-session # 加载 ~/.claude/sessions/ 目录下最新的文件
|
||||
/resume-session # 加载 ~/.claude/session-data/ 目录下最新的文件
|
||||
/resume-session 2024-01-15 # 加载该日期最新的会话
|
||||
/resume-session ~/.claude/sessions/2024-01-15-session.tmp # 加载特定的旧格式文件
|
||||
/resume-session ~/.claude/sessions/2024-01-15-abc123de-session.tmp # 加载当前短ID格式的会话文件
|
||||
/resume-session ~/.claude/session-data/2024-01-15-abc123de-session.tmp # 加载当前短ID格式的会话文件
|
||||
```
|
||||
|
||||
## 流程
|
||||
@@ -29,18 +29,18 @@ description: 从 ~/.claude/sessions/ 加载最新的会话文件,并从上次
|
||||
|
||||
如果未提供参数:
|
||||
|
||||
1. 检查 `~/.claude/sessions/`
|
||||
1. 检查 `~/.claude/session-data/`
|
||||
2. 选择最近修改的 `*-session.tmp` 文件
|
||||
3. 如果文件夹不存在或没有匹配的文件,告知用户:
|
||||
```
|
||||
在 ~/.claude/sessions/ 中未找到会话文件。
|
||||
在 ~/.claude/session-data/ 中未找到会话文件。
|
||||
请在会话结束时运行 /save-session 来创建一个。
|
||||
```
|
||||
然后停止。
|
||||
|
||||
如果提供了参数:
|
||||
|
||||
* 如果看起来像日期 (`YYYY-MM-DD`),则在 `~/.claude/sessions/` 中搜索匹配
|
||||
* 如果看起来像日期 (`YYYY-MM-DD`),则先在 `~/.claude/session-data/` 中搜索,再回退到旧的 `~/.claude/sessions/`,匹配
|
||||
`YYYY-MM-DD-session.tmp`(旧格式)或 `YYYY-MM-DD-<shortid>-session.tmp`(当前格式)的文件,
|
||||
并加载该日期最近修改的版本
|
||||
* 如果看起来像文件路径,则直接读取该文件
|
||||
@@ -114,7 +114,7 @@ PASS: 已完成:[数量] 项已确认
|
||||
## 示例输出
|
||||
|
||||
```
|
||||
SESSION LOADED: /Users/you/.claude/sessions/2024-01-15-abc123de-session.tmp
|
||||
SESSION LOADED: /Users/you/.claude/session-data/2024-01-15-abc123de-session.tmp
|
||||
════════════════════════════════════════════════
|
||||
|
||||
项目:my-app — JWT 认证
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
description: 将当前会话状态保存到 ~/.claude/sessions/ 目录下带日期的文件中,以便在未来的会话中恢复完整上下文并继续工作。
|
||||
description: 将当前会话状态保存到 ~/.claude/session-data/ 目录下带日期的文件中,以便在未来的会话中恢复完整上下文并继续工作。
|
||||
---
|
||||
|
||||
# 保存会话命令
|
||||
@@ -29,12 +29,12 @@ description: 将当前会话状态保存到 ~/.claude/sessions/ 目录下带日
|
||||
在用户的 Claude 主目录中创建规范的会话文件夹:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.claude/sessions
|
||||
mkdir -p ~/.claude/session-data
|
||||
```
|
||||
|
||||
### 步骤 3:写入会话文件
|
||||
|
||||
创建 `~/.claude/sessions/YYYY-MM-DD-<short-id>-session.tmp`,使用今天的实际日期和一个满足 `session-manager.js` 中 `SESSION_FILENAME_REGEX` 强制规则的短 ID:
|
||||
创建 `~/.claude/session-data/YYYY-MM-DD-<short-id>-session.tmp`,使用今天的实际日期和一个满足 `session-manager.js` 中 `SESSION_FILENAME_REGEX` 强制规则的短 ID:
|
||||
|
||||
* 允许的字符:小写 `a-z`,数字 `0-9`,连字符 `-`
|
||||
* 最小长度:8 个字符
|
||||
@@ -248,5 +248,5 @@ mkdir -p ~/.claude/sessions
|
||||
* “什么没有成功”部分是最关键的——没有它,未来的会话将盲目地重试失败的方法
|
||||
* 如果用户要求中途保存会话(而不仅仅是在结束时),则保存目前已知的内容,并清楚地标记进行中的项目
|
||||
* 该文件旨在通过 `/resume-session` 在下次会话开始时由 Claude 读取
|
||||
* 使用规范的全局会话存储:`~/.claude/sessions/`
|
||||
* 使用规范的全局会话存储:`~/.claude/session-data/`
|
||||
* 对于任何新的会话文件,首选短 ID 文件名形式(`YYYY-MM-DD-<short-id>-session.tmp`)
|
||||
|
||||
@@ -4,7 +4,7 @@ description: 管理Claude Code会话历史、别名和会话元数据。
|
||||
|
||||
# Sessions 命令
|
||||
|
||||
管理 Claude Code 会话历史 - 列出、加载、设置别名和编辑存储在 `~/.claude/sessions/` 中的会话。
|
||||
管理 Claude Code 会话历史 - 列出、加载、设置别名和编辑存储在 `~/.claude/session-data/` 中的会话,同时兼容读取旧的 `~/.claude/sessions/` 文件。
|
||||
|
||||
## 用法
|
||||
|
||||
@@ -91,7 +91,7 @@ const size = sm.getSessionSize(session.sessionPath);
|
||||
const aliases = aa.getAliasesForSession(session.filename);
|
||||
|
||||
console.log('Session: ' + session.filename);
|
||||
console.log('Path: ~/.claude/sessions/' + session.filename);
|
||||
console.log('Path: ' + session.sessionPath);
|
||||
console.log('');
|
||||
console.log('Statistics:');
|
||||
console.log(' Lines: ' + stats.lineCount);
|
||||
@@ -334,7 +334,7 @@ $ARGUMENTS:
|
||||
|
||||
## 备注
|
||||
|
||||
* 会话以 Markdown 文件形式存储在 `~/.claude/sessions/`
|
||||
* 会话以 Markdown 文件形式存储在 `~/.claude/session-data/`,并继续兼容读取旧的 `~/.claude/sessions/`
|
||||
* 别名存储在 `~/.claude/session-aliases.json`
|
||||
* 会话 ID 可以缩短(通常前 4-8 个字符就足够唯一)
|
||||
* 为经常引用的会话使用别名
|
||||
|
||||
@@ -61,7 +61,7 @@ claude plugin install typescript-lsp@claude-plugins-official
|
||||
**工作流:**
|
||||
|
||||
* `commit-commands` - Git 工作流
|
||||
* `frontend-design` - UI 模式
|
||||
* `frontend-patterns` - UI 模式
|
||||
* `feature-dev` - 功能开发
|
||||
|
||||
***
|
||||
|
||||
@@ -1,337 +0,0 @@
|
||||
---
|
||||
name: claude-api
|
||||
description: Anthropic Claude API 的 Python 和 TypeScript 使用模式。涵盖 Messages API、流式处理、工具使用、视觉功能、扩展思维、批量处理、提示缓存和 Claude Agent SDK。适用于使用 Claude API 或 Anthropic SDK 构建应用程序的场景。
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Claude API
|
||||
|
||||
使用 Anthropic Claude API 和 SDK 构建应用程序。
|
||||
|
||||
## 何时激活
|
||||
|
||||
* 构建调用 Claude API 的应用程序
|
||||
* 代码导入 `anthropic` (Python) 或 `@anthropic-ai/sdk` (TypeScript)
|
||||
* 用户询问 Claude API 模式、工具使用、流式传输或视觉功能
|
||||
* 使用 Claude Agent SDK 实现智能体工作流
|
||||
* 优化 API 成本、令牌使用或延迟
|
||||
|
||||
## 模型选择
|
||||
|
||||
| 模型 | ID | 最适合 |
|
||||
|-------|-----|----------|
|
||||
| Opus 4.1 | `claude-opus-4-1` | 复杂推理、架构设计、研究 |
|
||||
| Sonnet 4 | `claude-sonnet-4-0` | 平衡的编码任务,大多数开发工作 |
|
||||
| Haiku 3.5 | `claude-3-5-haiku-latest` | 快速响应、高吞吐量、成本敏感型 |
|
||||
|
||||
默认使用 Sonnet 4,除非任务需要深度推理(Opus)或速度/成本优化(Haiku)。对于生产环境,优先使用固定的快照 ID 而非别名。
|
||||
|
||||
## Python SDK
|
||||
|
||||
### 安装
|
||||
|
||||
```bash
|
||||
pip install anthropic
|
||||
```
|
||||
|
||||
### 基本消息
|
||||
|
||||
```python
|
||||
import anthropic
|
||||
|
||||
client = anthropic.Anthropic() # reads ANTHROPIC_API_KEY from env
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
messages=[
|
||||
{"role": "user", "content": "Explain async/await in Python"}
|
||||
]
|
||||
)
|
||||
print(message.content[0].text)
|
||||
```
|
||||
|
||||
### 流式传输
|
||||
|
||||
```python
|
||||
with client.messages.stream(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
messages=[{"role": "user", "content": "Write a haiku about coding"}]
|
||||
) as stream:
|
||||
for text in stream.text_stream:
|
||||
print(text, end="", flush=True)
|
||||
```
|
||||
|
||||
### 系统提示词
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
system="You are a senior Python developer. Be concise.",
|
||||
messages=[{"role": "user", "content": "Review this function"}]
|
||||
)
|
||||
```
|
||||
|
||||
## TypeScript SDK
|
||||
|
||||
### 安装
|
||||
|
||||
```bash
|
||||
npm install @anthropic-ai/sdk
|
||||
```
|
||||
|
||||
### 基本消息
|
||||
|
||||
```typescript
|
||||
import Anthropic from "@anthropic-ai/sdk";
|
||||
|
||||
const client = new Anthropic(); // reads ANTHROPIC_API_KEY from env
|
||||
|
||||
const message = await client.messages.create({
|
||||
model: "claude-sonnet-4-0",
|
||||
max_tokens: 1024,
|
||||
messages: [
|
||||
{ role: "user", content: "Explain async/await in TypeScript" }
|
||||
],
|
||||
});
|
||||
console.log(message.content[0].text);
|
||||
```
|
||||
|
||||
### 流式传输
|
||||
|
||||
```typescript
|
||||
const stream = client.messages.stream({
|
||||
model: "claude-sonnet-4-0",
|
||||
max_tokens: 1024,
|
||||
messages: [{ role: "user", content: "Write a haiku" }],
|
||||
});
|
||||
|
||||
for await (const event of stream) {
|
||||
if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
|
||||
process.stdout.write(event.delta.text);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 工具使用
|
||||
|
||||
定义工具并让 Claude 调用它们:
|
||||
|
||||
```python
|
||||
tools = [
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Get current weather for a location",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {"type": "string", "description": "City name"},
|
||||
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
tools=tools,
|
||||
messages=[{"role": "user", "content": "What's the weather in SF?"}]
|
||||
)
|
||||
|
||||
# Handle tool use response
|
||||
for block in message.content:
|
||||
if block.type == "tool_use":
|
||||
# Execute the tool with block.input
|
||||
result = get_weather(**block.input)
|
||||
# Send result back
|
||||
follow_up = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
tools=tools,
|
||||
messages=[
|
||||
{"role": "user", "content": "What's the weather in SF?"},
|
||||
{"role": "assistant", "content": message.content},
|
||||
{"role": "user", "content": [
|
||||
{"type": "tool_result", "tool_use_id": block.id, "content": str(result)}
|
||||
]}
|
||||
]
|
||||
)
|
||||
```
|
||||
|
||||
## 视觉功能
|
||||
|
||||
发送图像进行分析:
|
||||
|
||||
```python
|
||||
import base64
|
||||
|
||||
with open("diagram.png", "rb") as f:
|
||||
image_data = base64.standard_b64encode(f.read()).decode("utf-8")
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": image_data}},
|
||||
{"type": "text", "text": "Describe this diagram"}
|
||||
]
|
||||
}]
|
||||
)
|
||||
```
|
||||
|
||||
## 扩展思考
|
||||
|
||||
针对复杂推理任务:
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=16000,
|
||||
thinking={
|
||||
"type": "enabled",
|
||||
"budget_tokens": 10000
|
||||
},
|
||||
messages=[{"role": "user", "content": "Solve this math problem step by step..."}]
|
||||
)
|
||||
|
||||
for block in message.content:
|
||||
if block.type == "thinking":
|
||||
print(f"Thinking: {block.thinking}")
|
||||
elif block.type == "text":
|
||||
print(f"Answer: {block.text}")
|
||||
```
|
||||
|
||||
## 提示词缓存
|
||||
|
||||
缓存大型系统提示词或上下文以降低成本:
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
system=[
|
||||
{"type": "text", "text": large_system_prompt, "cache_control": {"type": "ephemeral"}}
|
||||
],
|
||||
messages=[{"role": "user", "content": "Question about the cached context"}]
|
||||
)
|
||||
# Check cache usage
|
||||
print(f"Cache read: {message.usage.cache_read_input_tokens}")
|
||||
print(f"Cache creation: {message.usage.cache_creation_input_tokens}")
|
||||
```
|
||||
|
||||
## 批量 API
|
||||
|
||||
异步处理大量请求,成本降低 50%:
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
batch = client.messages.batches.create(
|
||||
requests=[
|
||||
{
|
||||
"custom_id": f"request-{i}",
|
||||
"params": {
|
||||
"model": "claude-sonnet-4-0",
|
||||
"max_tokens": 1024,
|
||||
"messages": [{"role": "user", "content": prompt}]
|
||||
}
|
||||
}
|
||||
for i, prompt in enumerate(prompts)
|
||||
]
|
||||
)
|
||||
|
||||
# Poll for completion
|
||||
while True:
|
||||
status = client.messages.batches.retrieve(batch.id)
|
||||
if status.processing_status == "ended":
|
||||
break
|
||||
time.sleep(30)
|
||||
|
||||
# Get results
|
||||
for result in client.messages.batches.results(batch.id):
|
||||
print(result.result.message.content[0].text)
|
||||
```
|
||||
|
||||
## Claude Agent SDK
|
||||
|
||||
构建多步骤智能体:
|
||||
|
||||
```python
|
||||
# Note: Agent SDK API surface may change — check official docs
|
||||
import anthropic
|
||||
|
||||
# Define tools as functions
|
||||
tools = [{
|
||||
"name": "search_codebase",
|
||||
"description": "Search the codebase for relevant code",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {"query": {"type": "string"}},
|
||||
"required": ["query"]
|
||||
}
|
||||
}]
|
||||
|
||||
# Run an agentic loop with tool use
|
||||
client = anthropic.Anthropic()
|
||||
messages = [{"role": "user", "content": "Review the auth module for security issues"}]
|
||||
|
||||
while True:
|
||||
response = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=4096,
|
||||
tools=tools,
|
||||
messages=messages,
|
||||
)
|
||||
if response.stop_reason == "end_turn":
|
||||
break
|
||||
# Handle tool calls and continue the loop
|
||||
messages.append({"role": "assistant", "content": response.content})
|
||||
# ... execute tools and append tool_result messages
|
||||
```
|
||||
|
||||
## 成本优化
|
||||
|
||||
| 策略 | 节省幅度 | 使用时机 |
|
||||
|----------|---------|-------------|
|
||||
| 提示词缓存 | 缓存令牌成本降低高达 90% | 重复的系统提示词或上下文 |
|
||||
| 批量 API | 50% | 非时间敏感的批量处理 |
|
||||
| 使用 Haiku 而非 Sonnet | ~75% | 简单任务、分类、提取 |
|
||||
| 缩短 max\_tokens | 可变 | 已知输出较短时 |
|
||||
| 流式传输 | 无(成本相同) | 更好的用户体验,价格相同 |
|
||||
|
||||
## 错误处理
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
from anthropic import APIError, RateLimitError, APIConnectionError
|
||||
|
||||
try:
|
||||
message = client.messages.create(...)
|
||||
except RateLimitError:
|
||||
# Back off and retry
|
||||
time.sleep(60)
|
||||
except APIConnectionError:
|
||||
# Network issue, retry with backoff
|
||||
pass
|
||||
except APIError as e:
|
||||
print(f"API error {e.status_code}: {e.message}")
|
||||
```
|
||||
|
||||
## 环境设置
|
||||
|
||||
```bash
|
||||
# Required
|
||||
export ANTHROPIC_API_KEY="your-api-key-here"
|
||||
|
||||
# Optional: set default model
|
||||
export ANTHROPIC_MODEL="claude-sonnet-4-0"
|
||||
```
|
||||
|
||||
切勿硬编码 API 密钥。始终使用环境变量。
|
||||
@@ -162,13 +162,14 @@ mkdir -p $TARGET/skills $TARGET/rules
|
||||
| `investor-materials` | 宣传文稿、一页简介、投资者备忘录和财务模型 |
|
||||
| `investor-outreach` | 个性化的投资者冷邮件、熟人介绍和后续跟进 |
|
||||
|
||||
**类别:研究与API(3项技能)**
|
||||
**类别:研究与API(2项技能)**
|
||||
|
||||
| 技能 | 描述 |
|
||||
|-------|-------------|
|
||||
| `deep-research` | 使用 firecrawl 和 exa MCP 进行多源深度研究,并生成带引用的报告 |
|
||||
| `exa-search` | 通过 Exa MCP 进行网络、代码、公司和人员的神经搜索 |
|
||||
| `claude-api` | Anthropic Claude API 模式:消息、流式处理、工具使用、视觉、批处理、Agent SDK |
|
||||
|
||||
`claude-api` 是 Anthropic 官方技能;需要时请从 [`anthropics/skills`](https://github.com/anthropics/skills) 安装官方版本,而不是通过 ECC 重复打包。
|
||||
|
||||
**类别:社交与内容分发(2项技能)**
|
||||
|
||||
|
||||
@@ -292,7 +292,7 @@ mgrep --web "Next.js 15 app router changes" # Web search
|
||||
|
||||
```markdown
|
||||
ralph-wiggum@claude-code-plugins # 循环自动化
|
||||
frontend-design@claude-code-plugins # UI/UX 模式
|
||||
frontend-patterns@claude-code-plugins # UI/UX 模式
|
||||
commit-commands@claude-code-plugins # Git 工作流
|
||||
security-guidance@claude-code-plugins # 安全检查
|
||||
pr-review-toolkit@claude-code-plugins # PR 自动化
|
||||
|
||||
@@ -228,7 +228,7 @@ Async hooks run in the background. They cannot block tool execution.
|
||||
|
||||
## Cross-Platform Notes
|
||||
|
||||
Hook logic is implemented in Node.js scripts for cross-platform behavior on Windows, macOS, and Linux. A small number of shell wrappers are retained for continuous-learning observer hooks; those wrappers are profile-gated and have Windows-safe fallback behavior.
|
||||
Hook logic is implemented in Node.js scripts for cross-platform behavior on Windows, macOS, and Linux. The continuous-learning observer is exposed as a Node-mode hook and delegates to its existing `observe.sh` implementation through a profile-gated runner with Windows-safe fallback behavior.
|
||||
|
||||
## Related
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"const p=require('path');const r=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var s of [[\\\"ecc\\\"],[\\\"ecc@ecc\\\"],[\\\"marketplace\\\",\\\"ecc\\\"],[\\\"everything-claude-code\\\"],[\\\"everything-claude-code@everything-claude-code\\\"],[\\\"marketplace\\\",\\\"everything-claude-code\\\"]]){var l=p.join(d,'plugins',...s);if(f.existsSync(p.join(l,q)))return l}try{for(var g of [\\\"ecc\\\",\\\"everything-claude-code\\\"]){var b=p.join(d,'plugins','cache',g);for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}}catch(x){}return d})();const s=p.join(r,'scripts/hooks/plugin-hook-bootstrap.js');process.env.CLAUDE_PLUGIN_ROOT=r;process.argv.splice(1,0,s);require(s)\" shell scripts/hooks/run-with-flags-shell.sh pre:observe skills/continuous-learning-v2/hooks/observe.sh standard,strict",
|
||||
"command": "node -e \"const p=require('path');const r=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var s of [[\\\"ecc\\\"],[\\\"ecc@ecc\\\"],[\\\"marketplace\\\",\\\"ecc\\\"],[\\\"everything-claude-code\\\"],[\\\"everything-claude-code@everything-claude-code\\\"],[\\\"marketplace\\\",\\\"everything-claude-code\\\"]]){var l=p.join(d,'plugins',...s);if(f.existsSync(p.join(l,q)))return l}try{for(var g of [\\\"ecc\\\",\\\"everything-claude-code\\\"]){var b=p.join(d,'plugins','cache',g);for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}}catch(x){}return d})();const s=p.join(r,'scripts/hooks/plugin-hook-bootstrap.js');process.env.CLAUDE_PLUGIN_ROOT=r;process.argv.splice(1,0,s);require(s)\" node scripts/hooks/run-with-flags.js pre:observe scripts/hooks/observe-runner.js standard,strict",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
@@ -212,7 +212,7 @@
|
||||
"hooks": [
|
||||
{
|
||||
"type": "command",
|
||||
"command": "node -e \"const p=require('path');const r=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var s of [[\\\"ecc\\\"],[\\\"ecc@ecc\\\"],[\\\"marketplace\\\",\\\"ecc\\\"],[\\\"everything-claude-code\\\"],[\\\"everything-claude-code@everything-claude-code\\\"],[\\\"marketplace\\\",\\\"everything-claude-code\\\"]]){var l=p.join(d,'plugins',...s);if(f.existsSync(p.join(l,q)))return l}try{for(var g of [\\\"ecc\\\",\\\"everything-claude-code\\\"]){var b=p.join(d,'plugins','cache',g);for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}}catch(x){}return d})();const s=p.join(r,'scripts/hooks/plugin-hook-bootstrap.js');process.env.CLAUDE_PLUGIN_ROOT=r;process.argv.splice(1,0,s);require(s)\" shell scripts/hooks/run-with-flags-shell.sh post:observe skills/continuous-learning-v2/hooks/observe.sh standard,strict",
|
||||
"command": "node -e \"const p=require('path');const r=(()=>{var e=process.env.CLAUDE_PLUGIN_ROOT;if(e&&e.trim())return e.trim();var p=require('path'),f=require('fs'),h=require('os').homedir(),d=p.join(h,'.claude'),q=p.join('scripts','lib','utils.js');if(f.existsSync(p.join(d,q)))return d;for(var s of [[\\\"ecc\\\"],[\\\"ecc@ecc\\\"],[\\\"marketplace\\\",\\\"ecc\\\"],[\\\"everything-claude-code\\\"],[\\\"everything-claude-code@everything-claude-code\\\"],[\\\"marketplace\\\",\\\"everything-claude-code\\\"]]){var l=p.join(d,'plugins',...s);if(f.existsSync(p.join(l,q)))return l}try{for(var g of [\\\"ecc\\\",\\\"everything-claude-code\\\"]){var b=p.join(d,'plugins','cache',g);for(var o of f.readdirSync(b,{withFileTypes:true})){if(!o.isDirectory())continue;for(var v of f.readdirSync(p.join(b,o.name),{withFileTypes:true})){if(!v.isDirectory())continue;var c=p.join(b,o.name,v.name);if(f.existsSync(p.join(c,q)))return c}}}}catch(x){}return d})();const s=p.join(r,'scripts/hooks/plugin-hook-bootstrap.js');process.env.CLAUDE_PLUGIN_ROOT=r;process.argv.splice(1,0,s);require(s)\" node scripts/hooks/run-with-flags.js post:observe scripts/hooks/observe-runner.js standard,strict",
|
||||
"async": true,
|
||||
"timeout": 10
|
||||
}
|
||||
|
||||
7
legacy-command-shims/README.md
Normal file
7
legacy-command-shims/README.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# Legacy Command Shims
|
||||
|
||||
These slash-entry shims are no longer loaded by the default plugin command surface.
|
||||
|
||||
They remain here for users who still need short-term migration compatibility with old muscle-memory commands such as `/tdd`, `/eval`, or `/verify`.
|
||||
|
||||
Prefer the canonical skills or maintained commands referenced inside each shim. If you need one of these shims locally, copy the individual Markdown file into your project-level or user-level Claude commands directory instead of enabling the full archive by default.
|
||||
@@ -90,6 +90,7 @@
|
||||
".gemini",
|
||||
".opencode",
|
||||
"mcp-configs",
|
||||
"scripts/auto-update.js",
|
||||
"scripts/setup-package-manager.js"
|
||||
],
|
||||
"targets": [
|
||||
@@ -124,7 +125,6 @@
|
||||
"skills/django-tdd",
|
||||
"skills/django-verification",
|
||||
"skills/dotnet-patterns",
|
||||
"skills/frontend-design",
|
||||
"skills/frontend-patterns",
|
||||
"skills/frontend-slides",
|
||||
"skills/golang-patterns",
|
||||
@@ -272,7 +272,6 @@
|
||||
"kind": "skills",
|
||||
"description": "Research and API integration skills for deep investigations and model integrations.",
|
||||
"paths": [
|
||||
"skills/claude-api",
|
||||
"skills/deep-research",
|
||||
"skills/exa-search",
|
||||
"skills/research-ops"
|
||||
@@ -416,7 +415,6 @@
|
||||
"description": "Worktree/tmux orchestration runtime and workflow docs.",
|
||||
"paths": [
|
||||
"commands/multi-workflow.md",
|
||||
"commands/orchestrate.md",
|
||||
"commands/sessions.md",
|
||||
"scripts/lib/orchestration-session.js",
|
||||
"scripts/lib/tmux-worktree-orchestrator.js",
|
||||
|
||||
@@ -1,6 +1,16 @@
|
||||
{
|
||||
"version": 1,
|
||||
"profiles": {
|
||||
"minimal": {
|
||||
"description": "Low-context Claude Code setup with rules, agents, commands, platform configs, and quality workflow support, but no hook runtime.",
|
||||
"modules": [
|
||||
"rules-core",
|
||||
"agents-core",
|
||||
"commands-core",
|
||||
"platform-configs",
|
||||
"workflow-quality"
|
||||
]
|
||||
},
|
||||
"core": {
|
||||
"description": "Minimal harness baseline with commands, hooks, platform configs, and quality workflow support.",
|
||||
"modules": [
|
||||
|
||||
@@ -62,6 +62,7 @@
|
||||
"rules/",
|
||||
"schemas/",
|
||||
"scripts/catalog.js",
|
||||
"scripts/auto-update.js",
|
||||
"scripts/claw.js",
|
||||
"scripts/codex/merge-codex-config.js",
|
||||
"scripts/codex/merge-mcp-config.js",
|
||||
@@ -100,7 +101,6 @@
|
||||
"skills/blueprint/",
|
||||
"skills/brand-voice/",
|
||||
"skills/carrier-relationship-management/",
|
||||
"skills/claude-api/",
|
||||
"skills/claude-devfleet/",
|
||||
"skills/clickhouse-io/",
|
||||
"skills/code-tour/",
|
||||
@@ -146,7 +146,6 @@
|
||||
"skills/fal-ai-media/",
|
||||
"skills/finance-billing-ops/",
|
||||
"skills/foundation-models-on-device/",
|
||||
"skills/frontend-design/",
|
||||
"skills/frontend-patterns/",
|
||||
"skills/frontend-slides/",
|
||||
"skills/github-ops/",
|
||||
|
||||
@@ -57,7 +57,7 @@ claude plugin install typescript-lsp@claude-plugins-official
|
||||
|
||||
**Workflow:**
|
||||
- `commit-commands` - Git workflow
|
||||
- `frontend-design` - UI patterns
|
||||
- `frontend-patterns` - UI patterns
|
||||
- `feature-dev` - Feature development
|
||||
|
||||
---
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"required": ["name"],
|
||||
"properties": {
|
||||
"name": { "type": "string" },
|
||||
"version": { "type": "string", "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+$" },
|
||||
"version": { "type": "string", "pattern": "^[0-9]+\\.[0-9]+\\.[0-9]+(?:-[0-9A-Za-z.-]+)?$" },
|
||||
"description": { "type": "string" },
|
||||
"author": {
|
||||
"oneOf": [
|
||||
@@ -31,6 +31,22 @@
|
||||
"type": "array",
|
||||
"items": { "type": "string" }
|
||||
},
|
||||
"commands": {
|
||||
"type": "array",
|
||||
"items": { "type": "string" }
|
||||
},
|
||||
"mcpServers": {
|
||||
"oneOf": [
|
||||
{ "type": "string" },
|
||||
{
|
||||
"type": "array",
|
||||
"items": { "type": "string" }
|
||||
},
|
||||
{
|
||||
"type": "object"
|
||||
}
|
||||
]
|
||||
},
|
||||
"features": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
|
||||
361
scripts/auto-update.js
Normal file
361
scripts/auto-update.js
Normal file
@@ -0,0 +1,361 @@
|
||||
#!/usr/bin/env node
|
||||
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const { discoverInstalledStates } = require('./lib/install-lifecycle');
|
||||
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
|
||||
|
||||
function showHelp(exitCode = 0) {
|
||||
console.log(`
|
||||
Usage: node scripts/auto-update.js [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--repo-root <path>] [--dry-run] [--json]
|
||||
|
||||
Pull the latest ECC repo changes and reinstall the current context's managed targets
|
||||
using the original install-state request.
|
||||
`);
|
||||
process.exit(exitCode);
|
||||
}
|
||||
|
||||
function parseArgs(argv) {
|
||||
const args = argv.slice(2);
|
||||
const parsed = {
|
||||
targets: [],
|
||||
repoRoot: null,
|
||||
dryRun: false,
|
||||
json: false,
|
||||
help: false,
|
||||
};
|
||||
|
||||
for (let index = 0; index < args.length; index += 1) {
|
||||
const arg = args[index];
|
||||
|
||||
if (arg === '--target') {
|
||||
parsed.targets.push(args[index + 1] || null);
|
||||
index += 1;
|
||||
} else if (arg === '--repo-root') {
|
||||
parsed.repoRoot = args[index + 1] || null;
|
||||
index += 1;
|
||||
} else if (arg === '--dry-run') {
|
||||
parsed.dryRun = true;
|
||||
} else if (arg === '--json') {
|
||||
parsed.json = true;
|
||||
} else if (arg === '--help' || arg === '-h') {
|
||||
parsed.help = true;
|
||||
} else {
|
||||
throw new Error(`Unknown argument: ${arg}`);
|
||||
}
|
||||
}
|
||||
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function deriveRepoRootFromState(state) {
|
||||
const operations = Array.isArray(state && state.operations) ? state.operations : [];
|
||||
|
||||
for (const operation of operations) {
|
||||
if (typeof operation.sourcePath !== 'string' || !operation.sourcePath.trim()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (typeof operation.sourceRelativePath !== 'string' || !operation.sourceRelativePath.trim()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
const relativeParts = operation.sourceRelativePath
|
||||
.split(/[\\/]+/)
|
||||
.filter(Boolean);
|
||||
|
||||
if (relativeParts.length === 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
let repoRoot = path.resolve(operation.sourcePath);
|
||||
for (let index = 0; index < relativeParts.length; index += 1) {
|
||||
repoRoot = path.dirname(repoRoot);
|
||||
}
|
||||
|
||||
return repoRoot;
|
||||
}
|
||||
|
||||
throw new Error('Unable to infer ECC repo root from install-state operations');
|
||||
}
|
||||
|
||||
function buildInstallApplyArgs(record) {
|
||||
const state = record.state;
|
||||
const target = state.target.target || record.adapter.target;
|
||||
const request = state.request || {};
|
||||
const args = [];
|
||||
|
||||
if (target) {
|
||||
args.push('--target', target);
|
||||
}
|
||||
|
||||
if (request.profile) {
|
||||
args.push('--profile', request.profile);
|
||||
}
|
||||
|
||||
if (Array.isArray(request.modules) && request.modules.length > 0) {
|
||||
args.push('--modules', request.modules.join(','));
|
||||
}
|
||||
|
||||
for (const componentId of Array.isArray(request.includeComponents) ? request.includeComponents : []) {
|
||||
args.push('--with', componentId);
|
||||
}
|
||||
|
||||
for (const componentId of Array.isArray(request.excludeComponents) ? request.excludeComponents : []) {
|
||||
args.push('--without', componentId);
|
||||
}
|
||||
|
||||
for (const language of Array.isArray(request.legacyLanguages) ? request.legacyLanguages : []) {
|
||||
args.push(language);
|
||||
}
|
||||
|
||||
return args;
|
||||
}
|
||||
|
||||
function determineInstallCwd(record, repoRoot) {
|
||||
if (record.adapter.kind === 'project') {
|
||||
return path.dirname(record.state.target.root);
|
||||
}
|
||||
|
||||
return repoRoot;
|
||||
}
|
||||
|
||||
function validateRepoRoot(repoRoot) {
|
||||
const normalized = path.resolve(repoRoot);
|
||||
const packageJsonPath = path.join(normalized, 'package.json');
|
||||
const installApplyPath = path.join(normalized, 'scripts', 'install-apply.js');
|
||||
|
||||
if (!fs.existsSync(packageJsonPath)) {
|
||||
throw new Error(`Invalid ECC repo root: missing package.json at ${packageJsonPath}`);
|
||||
}
|
||||
|
||||
if (!fs.existsSync(installApplyPath)) {
|
||||
throw new Error(`Invalid ECC repo root: missing install script at ${installApplyPath}`);
|
||||
}
|
||||
|
||||
return normalized;
|
||||
}
|
||||
|
||||
function runExternalCommand(command, args, options = {}) {
|
||||
const result = spawnSync(command, args, {
|
||||
cwd: options.cwd,
|
||||
env: options.env || process.env,
|
||||
encoding: 'utf8',
|
||||
maxBuffer: 10 * 1024 * 1024,
|
||||
});
|
||||
|
||||
if (result.error) {
|
||||
throw result.error;
|
||||
}
|
||||
|
||||
if (typeof result.status === 'number' && result.status !== 0) {
|
||||
const errorOutput = (result.stderr || result.stdout || '').trim();
|
||||
throw new Error(`${command} ${args.join(' ')} failed${errorOutput ? `: ${errorOutput}` : ''}`);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
function runAutoUpdate(options = {}, dependencies = {}) {
|
||||
const discover = dependencies.discoverInstalledStates || discoverInstalledStates;
|
||||
const execute = dependencies.runExternalCommand || runExternalCommand;
|
||||
const homeDir = options.homeDir || process.env.HOME || os.homedir();
|
||||
const projectRoot = options.projectRoot || process.cwd();
|
||||
const requestedRepoRoot = options.repoRoot ? validateRepoRoot(options.repoRoot) : null;
|
||||
const records = discover({
|
||||
homeDir,
|
||||
projectRoot,
|
||||
targets: options.targets,
|
||||
}).filter(record => record.exists);
|
||||
|
||||
const results = [];
|
||||
if (records.length === 0) {
|
||||
return {
|
||||
dryRun: Boolean(options.dryRun),
|
||||
repoRoot: requestedRepoRoot,
|
||||
results,
|
||||
summary: {
|
||||
checkedCount: 0,
|
||||
updatedCount: 0,
|
||||
errorCount: 0,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
const validRecords = [];
|
||||
const inferredRepoRoots = [];
|
||||
for (const record of records) {
|
||||
if (record.error || !record.state) {
|
||||
results.push({
|
||||
adapter: record.adapter,
|
||||
installStatePath: record.installStatePath,
|
||||
status: 'error',
|
||||
error: record.error || 'No valid install-state available',
|
||||
});
|
||||
continue;
|
||||
}
|
||||
|
||||
const recordRepoRoot = requestedRepoRoot || validateRepoRoot(deriveRepoRootFromState(record.state));
|
||||
inferredRepoRoots.push(recordRepoRoot);
|
||||
validRecords.push({
|
||||
record,
|
||||
repoRoot: recordRepoRoot,
|
||||
});
|
||||
}
|
||||
|
||||
if (!requestedRepoRoot) {
|
||||
const uniqueRepoRoots = [...new Set(inferredRepoRoots)];
|
||||
if (uniqueRepoRoots.length > 1) {
|
||||
throw new Error(`Multiple ECC repo roots detected: ${uniqueRepoRoots.join(', ')}`);
|
||||
}
|
||||
}
|
||||
|
||||
const repoRoot = requestedRepoRoot || inferredRepoRoots[0] || null;
|
||||
if (!repoRoot) {
|
||||
return {
|
||||
dryRun: Boolean(options.dryRun),
|
||||
repoRoot,
|
||||
results,
|
||||
summary: {
|
||||
checkedCount: results.length,
|
||||
updatedCount: 0,
|
||||
errorCount: results.length,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
const env = {
|
||||
...process.env,
|
||||
HOME: homeDir,
|
||||
USERPROFILE: homeDir,
|
||||
};
|
||||
|
||||
if (!options.dryRun) {
|
||||
execute('git', ['fetch', '--all', '--prune'], { cwd: repoRoot, env });
|
||||
execute('git', ['pull', '--ff-only'], { cwd: repoRoot, env });
|
||||
}
|
||||
|
||||
for (const entry of validRecords) {
|
||||
const installArgs = buildInstallApplyArgs(entry.record);
|
||||
const args = [
|
||||
path.join(repoRoot, 'scripts', 'install-apply.js'),
|
||||
...installArgs,
|
||||
'--json',
|
||||
];
|
||||
|
||||
if (options.dryRun) {
|
||||
args.push('--dry-run');
|
||||
}
|
||||
|
||||
try {
|
||||
const commandResult = execute(process.execPath, args, {
|
||||
cwd: determineInstallCwd(entry.record, repoRoot),
|
||||
env,
|
||||
});
|
||||
|
||||
let payload = null;
|
||||
if (commandResult.stdout && commandResult.stdout.trim()) {
|
||||
payload = JSON.parse(commandResult.stdout);
|
||||
}
|
||||
|
||||
results.push({
|
||||
adapter: entry.record.adapter,
|
||||
installStatePath: entry.record.installStatePath,
|
||||
repoRoot,
|
||||
cwd: determineInstallCwd(entry.record, repoRoot),
|
||||
installArgs,
|
||||
status: options.dryRun ? 'planned' : 'updated',
|
||||
payload,
|
||||
});
|
||||
} catch (error) {
|
||||
results.push({
|
||||
adapter: entry.record.adapter,
|
||||
installStatePath: entry.record.installStatePath,
|
||||
repoRoot,
|
||||
installArgs,
|
||||
status: 'error',
|
||||
error: error.message,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
dryRun: Boolean(options.dryRun),
|
||||
repoRoot,
|
||||
results,
|
||||
summary: {
|
||||
checkedCount: results.length,
|
||||
updatedCount: results.filter(result => result.status === 'updated' || result.status === 'planned').length,
|
||||
errorCount: results.filter(result => result.status === 'error').length,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function printHuman(result) {
|
||||
if (result.results.length === 0) {
|
||||
console.log('No ECC install-state files found for the current home/project context.');
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`${result.dryRun ? 'Auto-update dry run' : 'Auto-update summary'}:\n`);
|
||||
if (result.repoRoot) {
|
||||
console.log(`Repo root: ${result.repoRoot}\n`);
|
||||
}
|
||||
|
||||
for (const entry of result.results) {
|
||||
console.log(`- ${entry.adapter.id}`);
|
||||
console.log(` Status: ${entry.status.toUpperCase()}`);
|
||||
console.log(` Install-state: ${entry.installStatePath}`);
|
||||
if (entry.error) {
|
||||
console.log(` Error: ${entry.error}`);
|
||||
continue;
|
||||
}
|
||||
|
||||
console.log(` Reinstall args: ${entry.installArgs.join(' ') || '(none)'}`);
|
||||
}
|
||||
|
||||
console.log(`\nSummary: checked=${result.summary.checkedCount}, ${result.dryRun ? 'planned' : 'updated'}=${result.summary.updatedCount}, errors=${result.summary.errorCount}`);
|
||||
}
|
||||
|
||||
function main() {
|
||||
try {
|
||||
const options = parseArgs(process.argv);
|
||||
if (options.help) {
|
||||
showHelp(0);
|
||||
}
|
||||
|
||||
const result = runAutoUpdate({
|
||||
homeDir: process.env.HOME || os.homedir(),
|
||||
projectRoot: process.cwd(),
|
||||
targets: options.targets,
|
||||
repoRoot: options.repoRoot,
|
||||
dryRun: options.dryRun,
|
||||
});
|
||||
|
||||
if (options.json) {
|
||||
console.log(JSON.stringify(result, null, 2));
|
||||
} else {
|
||||
printHuman(result);
|
||||
}
|
||||
|
||||
process.exitCode = result.summary.errorCount > 0 ? 1 : 0;
|
||||
} catch (error) {
|
||||
console.error(`Error: ${error.message}`);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
main();
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
parseArgs,
|
||||
deriveRepoRootFromState,
|
||||
buildInstallApplyArgs,
|
||||
determineInstallCwd,
|
||||
runAutoUpdate,
|
||||
};
|
||||
@@ -33,6 +33,10 @@ const COMMANDS = {
|
||||
script: 'repair.js',
|
||||
description: 'Restore drifted or missing ECC-managed files',
|
||||
},
|
||||
'auto-update': {
|
||||
script: 'auto-update.js',
|
||||
description: 'Pull latest ECC changes and reinstall the current managed targets',
|
||||
},
|
||||
status: {
|
||||
script: 'status.js',
|
||||
description: 'Query the ECC SQLite state store status summary',
|
||||
@@ -58,6 +62,7 @@ const PRIMARY_COMMANDS = [
|
||||
'list-installed',
|
||||
'doctor',
|
||||
'repair',
|
||||
'auto-update',
|
||||
'status',
|
||||
'sessions',
|
||||
'session-inspect',
|
||||
@@ -90,6 +95,7 @@ Examples:
|
||||
ecc list-installed --json
|
||||
ecc doctor --target cursor
|
||||
ecc repair --dry-run
|
||||
ecc auto-update --dry-run
|
||||
ecc status --json
|
||||
ecc sessions
|
||||
ecc sessions session-active --json
|
||||
|
||||
@@ -392,11 +392,11 @@ function getRepoChecks(rootDir) {
|
||||
id: 'eval-commands',
|
||||
category: 'Eval Coverage',
|
||||
points: 4,
|
||||
scopes: ['repo', 'commands'],
|
||||
path: 'commands/eval.md',
|
||||
description: 'Eval and verification commands exist',
|
||||
pass: fileExists(rootDir, 'commands/eval.md') && fileExists(rootDir, 'commands/verify.md') && fileExists(rootDir, 'commands/checkpoint.md'),
|
||||
fix: 'Add eval/checkpoint/verify commands to standardize verification loops.',
|
||||
scopes: ['repo', 'commands', 'skills'],
|
||||
path: 'commands/checkpoint.md',
|
||||
description: 'Checkpoint command and eval/verification skills exist',
|
||||
pass: fileExists(rootDir, 'commands/checkpoint.md') && fileExists(rootDir, 'skills/eval-harness/SKILL.md') && fileExists(rootDir, 'skills/verification-loop/SKILL.md'),
|
||||
fix: 'Add checkpoint command plus eval-harness and verification-loop skills to standardize verification loops.',
|
||||
},
|
||||
{
|
||||
id: 'eval-tests-presence',
|
||||
|
||||
@@ -35,6 +35,212 @@ const GIT_COMMANDS_WITH_NO_VERIFY = [
|
||||
*/
|
||||
const VALID_BEFORE_GIT = ' \t\n\r;&|$`(<{!"\']/.~\\';
|
||||
|
||||
const GIT_CONFIG_KEY_PREFIX = 'core.hooksPath=';
|
||||
|
||||
const COMMIT_OPTIONS_WITH_VALUE = new Set([
|
||||
'-m',
|
||||
'--message',
|
||||
'-F',
|
||||
'--file',
|
||||
'-C',
|
||||
'--reuse-message',
|
||||
'-c',
|
||||
'--reedit-message',
|
||||
'--author',
|
||||
'--date',
|
||||
'--template',
|
||||
'--fixup',
|
||||
'--squash',
|
||||
'--pathspec-from-file',
|
||||
]);
|
||||
|
||||
const COMMIT_OPTIONS_WITH_INLINE_VALUE = [
|
||||
'--message=',
|
||||
'--file=',
|
||||
'--reuse-message=',
|
||||
'--reedit-message=',
|
||||
'--author=',
|
||||
'--date=',
|
||||
'--template=',
|
||||
'--fixup=',
|
||||
'--squash=',
|
||||
'--pathspec-from-file=',
|
||||
];
|
||||
|
||||
const COMMIT_SHORT_OPTIONS_WITH_VALUE = new Set(['m', 'F', 'C', 'c']);
|
||||
|
||||
function tokenizeShellWords(input, start = 0, end = input.length) {
|
||||
const tokens = [];
|
||||
let value = '';
|
||||
let tokenStart = null;
|
||||
let quote = null;
|
||||
let escaped = false;
|
||||
|
||||
function beginToken(index) {
|
||||
if (tokenStart === null) {
|
||||
tokenStart = index;
|
||||
}
|
||||
}
|
||||
|
||||
function pushToken(index) {
|
||||
if (tokenStart === null) {
|
||||
return;
|
||||
}
|
||||
|
||||
tokens.push({
|
||||
value,
|
||||
start: tokenStart,
|
||||
end: index,
|
||||
});
|
||||
value = '';
|
||||
tokenStart = null;
|
||||
}
|
||||
|
||||
for (let i = start; i < end; i++) {
|
||||
const char = input.charAt(i);
|
||||
|
||||
if (escaped) {
|
||||
beginToken(i - 1);
|
||||
value += char;
|
||||
escaped = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (quote) {
|
||||
if (char === quote) {
|
||||
quote = null;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (quote === '"' && char === '\\') {
|
||||
beginToken(i);
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
beginToken(i);
|
||||
value += char;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"' || char === "'") {
|
||||
beginToken(i);
|
||||
quote = char;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '\\') {
|
||||
beginToken(i);
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (/\s/.test(char)) {
|
||||
pushToken(i);
|
||||
continue;
|
||||
}
|
||||
|
||||
beginToken(i);
|
||||
value += char;
|
||||
}
|
||||
|
||||
if (escaped) {
|
||||
value += '\\';
|
||||
}
|
||||
pushToken(end);
|
||||
|
||||
return tokens;
|
||||
}
|
||||
|
||||
function findCommandSegmentEnd(input, start) {
|
||||
let quote = null;
|
||||
let escaped = false;
|
||||
|
||||
for (let i = start; i < input.length; i++) {
|
||||
const char = input.charAt(i);
|
||||
|
||||
if (escaped) {
|
||||
escaped = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (quote) {
|
||||
if (quote === '"' && char === '\\') {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
if (char === quote) {
|
||||
quote = null;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '"' || char === "'") {
|
||||
quote = char;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === '\\') {
|
||||
escaped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (char === ';' || char === '|' || char === '&' || char === '\n') {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
|
||||
return input.length;
|
||||
}
|
||||
|
||||
function commitOptionConsumesNextValue(value) {
|
||||
if (isCommitNoVerifyShortFlag(value)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (COMMIT_OPTIONS_WITH_VALUE.has(value)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const shortValueOption = getCommitShortValueOption(value);
|
||||
return Boolean(shortValueOption && shortValueOption.consumesNextValue);
|
||||
}
|
||||
|
||||
function commitOptionContainsInlineValue(value) {
|
||||
if (isCommitNoVerifyShortFlag(value)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (COMMIT_OPTIONS_WITH_INLINE_VALUE.some(prefix => value.startsWith(prefix))) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const shortValueOption = getCommitShortValueOption(value);
|
||||
return Boolean(shortValueOption && shortValueOption.containsInlineValue);
|
||||
}
|
||||
|
||||
function getCommitShortValueOption(value) {
|
||||
if (!value.startsWith('-') || value.startsWith('--') || value === '-') {
|
||||
return null;
|
||||
}
|
||||
|
||||
const options = value.slice(1);
|
||||
for (let i = 0; i < options.length; i++) {
|
||||
if (COMMIT_SHORT_OPTIONS_WITH_VALUE.has(options.charAt(i))) {
|
||||
return {
|
||||
consumesNextValue: i === options.length - 1,
|
||||
containsInlineValue: i < options.length - 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function isCommitNoVerifyShortFlag(value) {
|
||||
return value === '-n' || /^-n[a-zA-Z]/.test(value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if a position in the input is inside a shell comment.
|
||||
*/
|
||||
@@ -79,8 +285,7 @@ function findGit(input, start) {
|
||||
* Returns { command, offset } where offset is the position right after the
|
||||
* subcommand keyword, so callers can scope flag checks to only that portion.
|
||||
*/
|
||||
function detectGitCommand(input) {
|
||||
let start = 0;
|
||||
function detectGitCommand(input, start = 0) {
|
||||
while (start < input.length) {
|
||||
const git = findGit(input, start);
|
||||
if (!git) return null;
|
||||
@@ -141,7 +346,13 @@ function detectGitCommand(input) {
|
||||
}
|
||||
|
||||
if (bestCmd) {
|
||||
return { command: bestCmd, offset: bestIdx + bestCmd.length };
|
||||
return {
|
||||
command: bestCmd,
|
||||
offset: bestIdx + bestCmd.length,
|
||||
gitStart: git.idx,
|
||||
gitEnd: git.idx + git.len,
|
||||
commandStart: bestIdx,
|
||||
};
|
||||
}
|
||||
|
||||
start = git.idx + git.len;
|
||||
@@ -156,12 +367,39 @@ function detectGitCommand(input) {
|
||||
* earlier commands in a chain are not falsely matched.
|
||||
*/
|
||||
function hasNoVerifyFlag(input, command, offset) {
|
||||
const region = input.slice(offset);
|
||||
if (/--no-verify\b/.test(region)) return true;
|
||||
const segmentEnd = findCommandSegmentEnd(input, offset);
|
||||
const tokens = tokenizeShellWords(input, offset, segmentEnd);
|
||||
let skipNext = false;
|
||||
|
||||
// For commit, -n is shorthand for --no-verify
|
||||
if (command === 'commit') {
|
||||
if (/\s-n(?:\s|$)/.test(region) || /\s-n[a-zA-Z]/.test(region)) return true;
|
||||
for (const token of tokens) {
|
||||
const value = token.value;
|
||||
|
||||
if (skipNext) {
|
||||
skipNext = false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (value === '--') {
|
||||
break;
|
||||
}
|
||||
|
||||
if (command === 'commit') {
|
||||
if (commitOptionConsumesNextValue(value)) {
|
||||
skipNext = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (commitOptionContainsInlineValue(value)) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (value === '--no-verify') return true;
|
||||
|
||||
// For commit, -n is shorthand for --no-verify.
|
||||
if (command === 'commit' && isCommitNoVerifyShortFlag(value)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
@@ -170,31 +408,56 @@ function hasNoVerifyFlag(input, command, offset) {
|
||||
/**
|
||||
* Check if the input contains a -c core.hooksPath= override.
|
||||
*/
|
||||
function hasHooksPathOverride(input) {
|
||||
return /-c\s+["']?core\.hooksPath\s*=/.test(input);
|
||||
function hasHooksPathOverride(input, detected) {
|
||||
const tokens = tokenizeShellWords(input, detected.gitEnd, detected.commandStart);
|
||||
|
||||
for (let i = 0; i < tokens.length; i++) {
|
||||
const value = tokens[i].value;
|
||||
|
||||
if (value === '-c') {
|
||||
const next = tokens[i + 1] && tokens[i + 1].value;
|
||||
if (typeof next === 'string' && next.startsWith(GIT_CONFIG_KEY_PREFIX)) {
|
||||
return true;
|
||||
}
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (value.startsWith(`-c${GIT_CONFIG_KEY_PREFIX}`)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check a command string for git hook bypass attempts.
|
||||
*/
|
||||
function checkCommand(input) {
|
||||
const detected = detectGitCommand(input);
|
||||
if (!detected) return { blocked: false };
|
||||
let start = 0;
|
||||
|
||||
const { command: gitCommand, offset } = detected;
|
||||
while (start < input.length) {
|
||||
const detected = detectGitCommand(input, start);
|
||||
if (!detected) return { blocked: false };
|
||||
|
||||
if (hasNoVerifyFlag(input, gitCommand, offset)) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `BLOCKED: --no-verify flag is not allowed with git ${gitCommand}. Git hooks must not be bypassed.`,
|
||||
};
|
||||
}
|
||||
const { command: gitCommand, offset } = detected;
|
||||
|
||||
if (hasHooksPathOverride(input)) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `BLOCKED: Overriding core.hooksPath is not allowed with git ${gitCommand}. Git hooks must not be bypassed.`,
|
||||
};
|
||||
if (hasHooksPathOverride(input, detected)) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `BLOCKED: Overriding core.hooksPath is not allowed with git ${gitCommand}. Git hooks must not be bypassed.`,
|
||||
};
|
||||
}
|
||||
|
||||
if (hasNoVerifyFlag(input, gitCommand, offset)) {
|
||||
return {
|
||||
blocked: true,
|
||||
reason: `BLOCKED: --no-verify flag is not allowed with git ${gitCommand}. Git hooks must not be bypassed.`,
|
||||
};
|
||||
}
|
||||
|
||||
start = findCommandSegmentEnd(input, offset) + 1;
|
||||
}
|
||||
|
||||
return { blocked: false };
|
||||
|
||||
@@ -39,7 +39,7 @@ const MAX_CHECKED_ENTRIES = 500;
|
||||
const MAX_SESSION_KEYS = 50;
|
||||
const ROUTINE_BASH_SESSION_KEY = '__bash_session__';
|
||||
|
||||
const DESTRUCTIVE_BASH = /\b(rm\s+-rf|git\s+reset\s+--hard|git\s+checkout\s+--|git\s+clean\s+-f|drop\s+table|delete\s+from|truncate|git\s+push\s+--force|dd\s+if=)\b/i;
|
||||
const DESTRUCTIVE_BASH = /\b(rm\s+-rf|git\s+reset\s+--hard|git\s+checkout\s+--|git\s+clean\s+-f|drop\s+table|delete\s+from|truncate|git\s+push\s+--force(?!-with-lease)|git\s+commit\s+--amend|dd\s+if=)\b/i;
|
||||
|
||||
// --- State management (per-session, atomic writes, bounded) ---
|
||||
|
||||
@@ -129,12 +129,33 @@ function saveState(state) {
|
||||
const stateFile = getStateFile();
|
||||
let tmpFile = null;
|
||||
try {
|
||||
state.last_active = Date.now();
|
||||
state.checked = pruneCheckedEntries(state.checked);
|
||||
fs.mkdirSync(STATE_DIR, { recursive: true });
|
||||
|
||||
let mergedChecked = Array.isArray(state.checked) ? state.checked : [];
|
||||
let mergedLastActive = typeof state.last_active === 'number' ? state.last_active : 0;
|
||||
|
||||
try {
|
||||
if (fs.existsSync(stateFile)) {
|
||||
const diskState = JSON.parse(fs.readFileSync(stateFile, 'utf8'));
|
||||
if (Array.isArray(diskState.checked)) {
|
||||
mergedChecked = Array.from(new Set([...diskState.checked, ...mergedChecked]));
|
||||
}
|
||||
if (typeof diskState.last_active === 'number') {
|
||||
mergedLastActive = Math.max(mergedLastActive, diskState.last_active);
|
||||
}
|
||||
}
|
||||
} catch (_) {
|
||||
/* ignore malformed or transient disk state */
|
||||
}
|
||||
|
||||
const finalState = {
|
||||
checked: pruneCheckedEntries(mergedChecked),
|
||||
last_active: Math.max(mergedLastActive, Date.now())
|
||||
};
|
||||
|
||||
// Atomic write: temp file + rename prevents partial reads
|
||||
tmpFile = stateFile + '.tmp.' + process.pid;
|
||||
fs.writeFileSync(tmpFile, JSON.stringify(state, null, 2), 'utf8');
|
||||
tmpFile = `${stateFile}.tmp.${process.pid}.${crypto.randomBytes(4).toString('hex')}`;
|
||||
fs.writeFileSync(tmpFile, JSON.stringify(finalState, null, 2), 'utf8');
|
||||
try {
|
||||
fs.renameSync(tmpFile, stateFile);
|
||||
} catch (error) {
|
||||
@@ -149,6 +170,7 @@ function saveState(state) {
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
tmpFile = null;
|
||||
} catch (_) {
|
||||
if (tmpFile) {
|
||||
try {
|
||||
@@ -183,7 +205,8 @@ function isChecked(key) {
|
||||
const files = fs.readdirSync(STATE_DIR);
|
||||
const now = Date.now();
|
||||
for (const f of files) {
|
||||
if (!f.startsWith('state-') || !f.endsWith('.json')) continue;
|
||||
const isStateFile = f.startsWith('state-') && (f.endsWith('.json') || f.includes('.json.tmp.'));
|
||||
if (!isStateFile) continue;
|
||||
const fp = path.join(STATE_DIR, f);
|
||||
try {
|
||||
const stat = fs.statSync(fp);
|
||||
|
||||
196
scripts/hooks/observe-runner.js
Normal file
196
scripts/hooks/observe-runner.js
Normal file
@@ -0,0 +1,196 @@
|
||||
#!/usr/bin/env node
|
||||
'use strict';
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const OBSERVE_RELATIVE_PATH = path.join('skills', 'continuous-learning-v2', 'hooks', 'observe.sh');
|
||||
const DEFAULT_TIMEOUT_MS = 9000;
|
||||
|
||||
function getPluginRoot(options = {}) {
|
||||
if (options.pluginRoot && String(options.pluginRoot).trim()) {
|
||||
return String(options.pluginRoot).trim();
|
||||
}
|
||||
if (process.env.CLAUDE_PLUGIN_ROOT && process.env.CLAUDE_PLUGIN_ROOT.trim()) {
|
||||
return process.env.CLAUDE_PLUGIN_ROOT.trim();
|
||||
}
|
||||
if (process.env.ECC_PLUGIN_ROOT && process.env.ECC_PLUGIN_ROOT.trim()) {
|
||||
return process.env.ECC_PLUGIN_ROOT.trim();
|
||||
}
|
||||
return path.resolve(__dirname, '..', '..');
|
||||
}
|
||||
|
||||
function resolveTarget(rootDir, relPath) {
|
||||
const resolvedRoot = path.resolve(rootDir);
|
||||
const resolvedTarget = path.resolve(rootDir, relPath);
|
||||
if (
|
||||
resolvedTarget !== resolvedRoot &&
|
||||
!resolvedTarget.startsWith(resolvedRoot + path.sep)
|
||||
) {
|
||||
throw new Error(`Path traversal rejected: ${relPath}`);
|
||||
}
|
||||
return resolvedTarget;
|
||||
}
|
||||
|
||||
function toShellPath(filePath) {
|
||||
const normalized = String(filePath || '');
|
||||
if (process.platform !== 'win32') {
|
||||
return normalized;
|
||||
}
|
||||
|
||||
return normalized
|
||||
.replace(/^([A-Za-z]):[\\/]/, (_, driveLetter) => `/${driveLetter.toLowerCase()}/`)
|
||||
.replace(/\\/g, '/');
|
||||
}
|
||||
|
||||
function findShellBinary() {
|
||||
const candidates = [];
|
||||
if (process.env.BASH && process.env.BASH.trim()) {
|
||||
candidates.push(process.env.BASH.trim());
|
||||
}
|
||||
|
||||
if (process.platform === 'win32') {
|
||||
candidates.push('bash.exe', 'bash', 'sh');
|
||||
} else {
|
||||
candidates.push('bash', 'sh');
|
||||
}
|
||||
|
||||
for (const candidate of candidates) {
|
||||
const probe = spawnSync(candidate, ['-c', ':'], {
|
||||
stdio: 'ignore',
|
||||
windowsHide: true
|
||||
});
|
||||
if (!probe.error) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function getPhaseFromHookId(hookId) {
|
||||
const prefix = String(hookId || process.env.ECC_HOOK_ID || '').split(':')[0];
|
||||
return prefix === 'pre' || prefix === 'post' ? prefix : null;
|
||||
}
|
||||
|
||||
function getTimeoutMs() {
|
||||
const parsed = Number.parseInt(process.env.ECC_OBSERVE_RUNNER_TIMEOUT_MS || '', 10);
|
||||
return Number.isFinite(parsed) && parsed > 0 ? parsed : DEFAULT_TIMEOUT_MS;
|
||||
}
|
||||
|
||||
function combineStderr(stderr, message) {
|
||||
const prefix = typeof stderr === 'string' && stderr.length > 0
|
||||
? stderr.endsWith('\n') ? stderr : `${stderr}\n`
|
||||
: '';
|
||||
return `${prefix}${message}\n`;
|
||||
}
|
||||
|
||||
function run(raw, options = {}) {
|
||||
const input = typeof raw === 'string' ? raw : String(raw ?? '');
|
||||
const phase = getPhaseFromHookId(options.hookId);
|
||||
if (!phase) {
|
||||
return {
|
||||
stderr: '[Hook] observe runner received an unsupported hook id; skipping observation',
|
||||
exitCode: 0
|
||||
};
|
||||
}
|
||||
|
||||
const pluginRoot = getPluginRoot(options);
|
||||
let observePath;
|
||||
try {
|
||||
observePath = resolveTarget(pluginRoot, OBSERVE_RELATIVE_PATH);
|
||||
} catch (error) {
|
||||
return {
|
||||
stderr: `[Hook] observe runner path resolution failed: ${error.message}`,
|
||||
exitCode: 0
|
||||
};
|
||||
}
|
||||
|
||||
if (!fs.existsSync(observePath)) {
|
||||
return {
|
||||
stderr: `[Hook] observe script not found: ${observePath}`,
|
||||
exitCode: 0
|
||||
};
|
||||
}
|
||||
|
||||
const shell = findShellBinary();
|
||||
if (!shell) {
|
||||
return {
|
||||
stderr: '[Hook] shell runtime unavailable; skipping continuous-learning observation',
|
||||
exitCode: 0
|
||||
};
|
||||
}
|
||||
|
||||
const result = spawnSync(shell, [toShellPath(observePath), phase], {
|
||||
input,
|
||||
encoding: 'utf8',
|
||||
env: {
|
||||
...process.env,
|
||||
CLAUDE_PLUGIN_ROOT: pluginRoot,
|
||||
ECC_PLUGIN_ROOT: pluginRoot
|
||||
},
|
||||
cwd: process.cwd(),
|
||||
timeout: getTimeoutMs(),
|
||||
windowsHide: true
|
||||
});
|
||||
|
||||
const output = {
|
||||
exitCode: Number.isInteger(result.status) ? result.status : 0
|
||||
};
|
||||
|
||||
if (typeof result.stdout === 'string' && result.stdout.length > 0) {
|
||||
output.stdout = result.stdout;
|
||||
}
|
||||
if (typeof result.stderr === 'string' && result.stderr.length > 0) {
|
||||
output.stderr = result.stderr;
|
||||
}
|
||||
|
||||
if (result.error || result.signal || result.status === null) {
|
||||
const reason = result.error
|
||||
? result.error.message
|
||||
: result.signal
|
||||
? `terminated by signal ${result.signal}`
|
||||
: 'missing exit status';
|
||||
output.stderr = combineStderr(output.stderr, `[Hook] observe runner failed: ${reason}`);
|
||||
output.exitCode = 0;
|
||||
}
|
||||
|
||||
return output;
|
||||
}
|
||||
|
||||
function emitHookResult(raw, output) {
|
||||
if (output && typeof output === 'object') {
|
||||
if (output.stderr) {
|
||||
process.stderr.write(String(output.stderr).endsWith('\n') ? String(output.stderr) : `${output.stderr}\n`);
|
||||
}
|
||||
if (Object.prototype.hasOwnProperty.call(output, 'stdout')) {
|
||||
process.stdout.write(String(output.stdout ?? ''));
|
||||
} else if (!Number.isInteger(output.exitCode) || output.exitCode === 0) {
|
||||
process.stdout.write(raw);
|
||||
}
|
||||
return Number.isInteger(output.exitCode) ? output.exitCode : 0;
|
||||
}
|
||||
|
||||
process.stdout.write(raw);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (require.main === module) {
|
||||
let raw = '';
|
||||
try {
|
||||
raw = fs.readFileSync(0, 'utf8');
|
||||
} catch (_error) {
|
||||
raw = '';
|
||||
}
|
||||
const output = run(raw, { hookId: process.argv[2] || process.env.ECC_HOOK_ID });
|
||||
process.exit(emitHookResult(raw, output));
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
OBSERVE_RELATIVE_PATH,
|
||||
findShellBinary,
|
||||
getPhaseFromHookId,
|
||||
run,
|
||||
toShellPath
|
||||
};
|
||||
@@ -188,6 +188,54 @@ function validateCommitMessage(command) {
|
||||
return { message, issues };
|
||||
}
|
||||
|
||||
function getPathEnv() {
|
||||
const pathKey = Object.keys(process.env).find(key => key.toLowerCase() === 'path') || 'PATH';
|
||||
return process.env[pathKey] || '';
|
||||
}
|
||||
|
||||
function isPathLike(command) {
|
||||
return command.includes(path.sep) || (process.platform === 'win32' && /[\\/]/.test(command));
|
||||
}
|
||||
|
||||
function getExecutableCandidates(command) {
|
||||
if (process.platform !== 'win32' || path.extname(command)) {
|
||||
return [command];
|
||||
}
|
||||
|
||||
const pathExt = process.env.PATHEXT || '.COM;.EXE;.BAT;.CMD';
|
||||
return [command, ...pathExt.split(';').filter(Boolean).map(ext => `${command}${ext.toLowerCase()}`)];
|
||||
}
|
||||
|
||||
function resolveCommand(command) {
|
||||
if (isPathLike(command)) {
|
||||
return getExecutableCandidates(command).find(candidate => fs.existsSync(candidate)) || null;
|
||||
}
|
||||
|
||||
for (const dir of getPathEnv().split(path.delimiter).filter(Boolean)) {
|
||||
for (const candidate of getExecutableCandidates(path.join(dir, command))) {
|
||||
if (fs.existsSync(candidate)) {
|
||||
return candidate;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function runLinterCommand(command, args) {
|
||||
const useShell = process.platform === 'win32' && /\.(?:cmd|bat)$/i.test(command);
|
||||
return spawnSync(command, args, {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000,
|
||||
shell: useShell
|
||||
});
|
||||
}
|
||||
|
||||
function commandOutput(result) {
|
||||
return result.stdout || result.stderr || result.error?.message || '';
|
||||
}
|
||||
|
||||
/**
|
||||
* Run linter on staged files
|
||||
* @param {string[]} files
|
||||
@@ -209,14 +257,10 @@ function runLinter(files) {
|
||||
const eslintBin = process.platform === 'win32' ? 'eslint.cmd' : 'eslint';
|
||||
const eslintPath = path.join(process.cwd(), 'node_modules', '.bin', eslintBin);
|
||||
if (fs.existsSync(eslintPath)) {
|
||||
const result = spawnSync(eslintPath, ['--format', 'compact', ...jsFiles], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
});
|
||||
const result = runLinterCommand(eslintPath, ['--format', 'compact', ...jsFiles]);
|
||||
results.eslint = {
|
||||
success: result.status === 0,
|
||||
output: result.stdout || result.stderr
|
||||
output: commandOutput(result)
|
||||
};
|
||||
}
|
||||
}
|
||||
@@ -224,17 +268,14 @@ function runLinter(files) {
|
||||
// Run Pylint if available
|
||||
if (pyFiles.length > 0) {
|
||||
try {
|
||||
const result = spawnSync('pylint', ['--output-format=text', ...pyFiles], {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
});
|
||||
if (result.error && result.error.code === 'ENOENT') {
|
||||
const pylintPath = resolveCommand('pylint');
|
||||
if (!pylintPath) {
|
||||
results.pylint = null;
|
||||
} else {
|
||||
const result = runLinterCommand(pylintPath, ['--output-format=text', ...pyFiles]);
|
||||
results.pylint = {
|
||||
success: result.status === 0,
|
||||
output: result.stdout || result.stderr
|
||||
output: commandOutput(result)
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
@@ -245,17 +286,14 @@ function runLinter(files) {
|
||||
// Run golint if available
|
||||
if (goFiles.length > 0) {
|
||||
try {
|
||||
const result = spawnSync('golint', goFiles, {
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 30000
|
||||
});
|
||||
if (result.error && result.error.code === 'ENOENT') {
|
||||
const golintPath = resolveCommand('golint');
|
||||
if (!golintPath) {
|
||||
results.golint = null;
|
||||
} else {
|
||||
const result = runLinterCommand(golintPath, goFiles);
|
||||
results.golint = {
|
||||
success: !result.stdout || result.stdout.trim() === '',
|
||||
output: result.stdout
|
||||
output: commandOutput(result)
|
||||
};
|
||||
}
|
||||
} catch {
|
||||
|
||||
@@ -137,7 +137,13 @@ async function main() {
|
||||
|
||||
if (hookModule && typeof hookModule.run === 'function') {
|
||||
try {
|
||||
const output = hookModule.run(raw, { truncated, maxStdin: MAX_STDIN });
|
||||
const output = hookModule.run(raw, {
|
||||
hookId,
|
||||
pluginRoot,
|
||||
scriptPath,
|
||||
truncated,
|
||||
maxStdin: MAX_STDIN
|
||||
});
|
||||
process.exit(emitHookResult(raw, output));
|
||||
} catch (runErr) {
|
||||
process.stderr.write(`[Hook] run() error for ${hookId}: ${runErr.message}\n`);
|
||||
@@ -152,6 +158,9 @@ async function main() {
|
||||
encoding: 'utf8',
|
||||
env: {
|
||||
...process.env,
|
||||
CLAUDE_PLUGIN_ROOT: pluginRoot,
|
||||
ECC_PLUGIN_ROOT: pluginRoot,
|
||||
ECC_HOOK_ID: hookId,
|
||||
ECC_HOOK_INPUT_TRUNCATED: truncated ? '1' : '0',
|
||||
ECC_HOOK_INPUT_MAX_BYTES: String(MAX_STDIN)
|
||||
},
|
||||
|
||||
@@ -30,6 +30,8 @@ const fs = require('fs');
|
||||
|
||||
const INSTINCT_CONFIDENCE_THRESHOLD = 0.7;
|
||||
const MAX_INJECTED_INSTINCTS = 6;
|
||||
const MAX_INJECTED_LEARNED_SKILLS = 6;
|
||||
const MAX_LEARNED_SKILL_SUMMARY_CHARS = 220;
|
||||
const DEFAULT_SESSION_RETENTION_DAYS = 30;
|
||||
|
||||
/**
|
||||
@@ -347,6 +349,119 @@ function summarizeActiveInstincts(observerContext) {
|
||||
return `Active instincts:\n${lines.join('\n')}`;
|
||||
}
|
||||
|
||||
function stripMarkdownInline(value) {
|
||||
return String(value || '')
|
||||
.replace(/`([^`]+)`/g, '$1')
|
||||
.replace(/\*\*([^*]+)\*\*/g, '$1')
|
||||
.replace(/\*([^*]+)\*/g, '$1')
|
||||
.replace(/\[([^\]]+)\]\([^)]+\)/g, '$1')
|
||||
.trim();
|
||||
}
|
||||
|
||||
function collapseWhitespace(value) {
|
||||
return String(value || '').replace(/\s+/g, ' ').trim();
|
||||
}
|
||||
|
||||
function truncateSummary(value, maxLength = MAX_LEARNED_SKILL_SUMMARY_CHARS) {
|
||||
const normalized = collapseWhitespace(stripMarkdownInline(value));
|
||||
if (normalized.length <= maxLength) {
|
||||
return normalized;
|
||||
}
|
||||
return `${normalized.slice(0, Math.max(0, maxLength - 3)).trimEnd()}...`;
|
||||
}
|
||||
|
||||
function extractMarkdownHeading(content) {
|
||||
const match = String(content || '').match(/^#\s+(.+)$/m);
|
||||
return match ? stripMarkdownInline(match[1]) : '';
|
||||
}
|
||||
|
||||
function extractSection(content, headingPattern) {
|
||||
const source = String(content || '');
|
||||
const match = source.match(new RegExp(`^##\\s+${headingPattern}\\s*\\n+([\\s\\S]+?)(?:\\n##\\s+|$)`, 'im'));
|
||||
return match ? match[1].trim() : '';
|
||||
}
|
||||
|
||||
function extractFirstParagraph(content) {
|
||||
const withoutHeading = String(content || '').replace(/^#\s+.+$/m, '').trim();
|
||||
return withoutHeading
|
||||
.split(/\n\s*\n/)
|
||||
.map(paragraph => paragraph.trim())
|
||||
.find(Boolean) || '';
|
||||
}
|
||||
|
||||
function summarizeLearnedSkillFile(filePath, learnedRoot) {
|
||||
const content = readFile(filePath);
|
||||
if (!content) return null;
|
||||
|
||||
const isDirectorySkill = path.basename(filePath).toLowerCase() === 'skill.md';
|
||||
const slug = isDirectorySkill
|
||||
? path.basename(path.dirname(filePath))
|
||||
: path.basename(filePath, path.extname(filePath));
|
||||
const title = extractMarkdownHeading(content) || slug;
|
||||
const summary = truncateSummary(
|
||||
extractSection(content, 'When to Use')
|
||||
|| extractSection(content, 'Trigger')
|
||||
|| extractSection(content, 'Problem')
|
||||
|| extractFirstParagraph(content)
|
||||
|| title
|
||||
);
|
||||
|
||||
if (!summary) return null;
|
||||
|
||||
let mtime = 0;
|
||||
try {
|
||||
mtime = fs.statSync(filePath).mtimeMs;
|
||||
} catch {
|
||||
// Keep unreadable/deleted files out of recency priority without failing the hook.
|
||||
}
|
||||
|
||||
const relativePath = path.relative(learnedRoot, filePath);
|
||||
return {
|
||||
slug,
|
||||
title: truncateSummary(title, 80),
|
||||
summary,
|
||||
relativePath,
|
||||
mtime,
|
||||
};
|
||||
}
|
||||
|
||||
function collectLearnedSkillFiles(learnedDir) {
|
||||
const flatMarkdownFiles = findFiles(learnedDir, '*.md');
|
||||
const directorySkillFiles = findFiles(learnedDir, 'SKILL.md', { recursive: true });
|
||||
const byPath = new Map();
|
||||
|
||||
for (const match of [...flatMarkdownFiles, ...directorySkillFiles]) {
|
||||
byPath.set(match.path, match);
|
||||
}
|
||||
|
||||
return Array.from(byPath.values())
|
||||
.sort((left, right) => right.mtime - left.mtime || left.path.localeCompare(right.path));
|
||||
}
|
||||
|
||||
function summarizeLearnedSkills(learnedDir, learnedSkillFiles = collectLearnedSkillFiles(learnedDir)) {
|
||||
const summaries = learnedSkillFiles
|
||||
.map(match => summarizeLearnedSkillFile(match.path, learnedDir))
|
||||
.filter(Boolean)
|
||||
.slice(0, MAX_INJECTED_LEARNED_SKILLS);
|
||||
|
||||
if (summaries.length === 0) {
|
||||
return '';
|
||||
}
|
||||
|
||||
log(`[SessionStart] Injecting ${summaries.length} learned skill(s) into session context`);
|
||||
|
||||
const lines = summaries.map(skill => {
|
||||
const titleSuffix = skill.title && skill.title !== skill.slug ? ` (${skill.title})` : '';
|
||||
return `- ${skill.slug}${titleSuffix}: ${skill.summary}`;
|
||||
});
|
||||
|
||||
return [
|
||||
'Available learned skills:',
|
||||
'Reference only; apply a learned skill only when it is relevant to the current user request.',
|
||||
...lines,
|
||||
].join('\n');
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const sessionsDir = getSessionsDir();
|
||||
const sessionSearchDirs = getSessionSearchDirs();
|
||||
@@ -428,12 +543,17 @@ async function main() {
|
||||
}
|
||||
|
||||
// Check for learned skills
|
||||
const learnedSkills = findFiles(learnedDir, '*.md');
|
||||
const learnedSkills = collectLearnedSkillFiles(learnedDir);
|
||||
|
||||
if (learnedSkills.length > 0) {
|
||||
log(`[SessionStart] ${learnedSkills.length} learned skill(s) available in ${learnedDir}`);
|
||||
}
|
||||
|
||||
const learnedSkillSummary = summarizeLearnedSkills(learnedDir, learnedSkills);
|
||||
if (learnedSkillSummary) {
|
||||
additionalContextParts.push(learnedSkillSummary);
|
||||
}
|
||||
|
||||
// Check for available session aliases
|
||||
const aliases = listAliases({ limit: 5 });
|
||||
|
||||
|
||||
@@ -133,6 +133,12 @@ module.exports = createInstallTargetAdapter({
|
||||
destinationPath: path.join(targetRoot, 'mcp.json'),
|
||||
});
|
||||
|
||||
if (sourceRelativePath === 'AGENTS.md') {
|
||||
// Cursor treats nested AGENTS.md files as directory context; do not
|
||||
// install ECC's root project identity into a host project's .cursor/.
|
||||
return [];
|
||||
}
|
||||
|
||||
if (sourceRelativePath === 'rules') {
|
||||
return takeUniqueOperations(createFlatRuleOperations({
|
||||
moduleId: module.id,
|
||||
|
||||
@@ -141,6 +141,6 @@ Switch(
|
||||
## Related Skills
|
||||
|
||||
- `frontend-patterns`
|
||||
- `frontend-design`
|
||||
- `design-system`
|
||||
- `liquid-glass-design`
|
||||
- `swiftui-patterns`
|
||||
|
||||
@@ -1,337 +0,0 @@
|
||||
---
|
||||
name: claude-api
|
||||
description: Anthropic Claude API patterns for Python and TypeScript. Covers Messages API, streaming, tool use, vision, extended thinking, batches, prompt caching, and Claude Agent SDK. Use when building applications with the Claude API or Anthropic SDKs.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Claude API
|
||||
|
||||
Build applications with the Anthropic Claude API and SDKs.
|
||||
|
||||
## When to Activate
|
||||
|
||||
- Building applications that call the Claude API
|
||||
- Code imports `anthropic` (Python) or `@anthropic-ai/sdk` (TypeScript)
|
||||
- User asks about Claude API patterns, tool use, streaming, or vision
|
||||
- Implementing agent workflows with Claude Agent SDK
|
||||
- Optimizing API costs, token usage, or latency
|
||||
|
||||
## Model Selection
|
||||
|
||||
| Model | ID | Best For |
|
||||
|-------|-----|----------|
|
||||
| Opus 4.1 | `claude-opus-4-1` | Complex reasoning, architecture, research |
|
||||
| Sonnet 4 | `claude-sonnet-4-0` | Balanced coding, most development tasks |
|
||||
| Haiku 3.5 | `claude-3-5-haiku-latest` | Fast responses, high-volume, cost-sensitive |
|
||||
|
||||
Default to Sonnet 4 unless the task requires deep reasoning (Opus) or speed/cost optimization (Haiku). For production, prefer pinned snapshot IDs over aliases.
|
||||
|
||||
## Python SDK
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
pip install anthropic
|
||||
```
|
||||
|
||||
### Basic Message
|
||||
|
||||
```python
|
||||
import anthropic
|
||||
|
||||
client = anthropic.Anthropic() # reads ANTHROPIC_API_KEY from env
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
messages=[
|
||||
{"role": "user", "content": "Explain async/await in Python"}
|
||||
]
|
||||
)
|
||||
print(message.content[0].text)
|
||||
```
|
||||
|
||||
### Streaming
|
||||
|
||||
```python
|
||||
with client.messages.stream(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
messages=[{"role": "user", "content": "Write a haiku about coding"}]
|
||||
) as stream:
|
||||
for text in stream.text_stream:
|
||||
print(text, end="", flush=True)
|
||||
```
|
||||
|
||||
### System Prompt
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
system="You are a senior Python developer. Be concise.",
|
||||
messages=[{"role": "user", "content": "Review this function"}]
|
||||
)
|
||||
```
|
||||
|
||||
## TypeScript SDK
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
npm install @anthropic-ai/sdk
|
||||
```
|
||||
|
||||
### Basic Message
|
||||
|
||||
```typescript
|
||||
import Anthropic from "@anthropic-ai/sdk";
|
||||
|
||||
const client = new Anthropic(); // reads ANTHROPIC_API_KEY from env
|
||||
|
||||
const message = await client.messages.create({
|
||||
model: "claude-sonnet-4-0",
|
||||
max_tokens: 1024,
|
||||
messages: [
|
||||
{ role: "user", content: "Explain async/await in TypeScript" }
|
||||
],
|
||||
});
|
||||
console.log(message.content[0].text);
|
||||
```
|
||||
|
||||
### Streaming
|
||||
|
||||
```typescript
|
||||
const stream = client.messages.stream({
|
||||
model: "claude-sonnet-4-0",
|
||||
max_tokens: 1024,
|
||||
messages: [{ role: "user", content: "Write a haiku" }],
|
||||
});
|
||||
|
||||
for await (const event of stream) {
|
||||
if (event.type === "content_block_delta" && event.delta.type === "text_delta") {
|
||||
process.stdout.write(event.delta.text);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Tool Use
|
||||
|
||||
Define tools and let Claude call them:
|
||||
|
||||
```python
|
||||
tools = [
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Get current weather for a location",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {"type": "string", "description": "City name"},
|
||||
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
tools=tools,
|
||||
messages=[{"role": "user", "content": "What's the weather in SF?"}]
|
||||
)
|
||||
|
||||
# Handle tool use response
|
||||
for block in message.content:
|
||||
if block.type == "tool_use":
|
||||
# Execute the tool with block.input
|
||||
result = get_weather(**block.input)
|
||||
# Send result back
|
||||
follow_up = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
tools=tools,
|
||||
messages=[
|
||||
{"role": "user", "content": "What's the weather in SF?"},
|
||||
{"role": "assistant", "content": message.content},
|
||||
{"role": "user", "content": [
|
||||
{"type": "tool_result", "tool_use_id": block.id, "content": str(result)}
|
||||
]}
|
||||
]
|
||||
)
|
||||
```
|
||||
|
||||
## Vision
|
||||
|
||||
Send images for analysis:
|
||||
|
||||
```python
|
||||
import base64
|
||||
|
||||
with open("diagram.png", "rb") as f:
|
||||
image_data = base64.standard_b64encode(f.read()).decode("utf-8")
|
||||
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
messages=[{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": image_data}},
|
||||
{"type": "text", "text": "Describe this diagram"}
|
||||
]
|
||||
}]
|
||||
)
|
||||
```
|
||||
|
||||
## Extended Thinking
|
||||
|
||||
For complex reasoning tasks:
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=16000,
|
||||
thinking={
|
||||
"type": "enabled",
|
||||
"budget_tokens": 10000
|
||||
},
|
||||
messages=[{"role": "user", "content": "Solve this math problem step by step..."}]
|
||||
)
|
||||
|
||||
for block in message.content:
|
||||
if block.type == "thinking":
|
||||
print(f"Thinking: {block.thinking}")
|
||||
elif block.type == "text":
|
||||
print(f"Answer: {block.text}")
|
||||
```
|
||||
|
||||
## Prompt Caching
|
||||
|
||||
Cache large system prompts or context to reduce costs:
|
||||
|
||||
```python
|
||||
message = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=1024,
|
||||
system=[
|
||||
{"type": "text", "text": large_system_prompt, "cache_control": {"type": "ephemeral"}}
|
||||
],
|
||||
messages=[{"role": "user", "content": "Question about the cached context"}]
|
||||
)
|
||||
# Check cache usage
|
||||
print(f"Cache read: {message.usage.cache_read_input_tokens}")
|
||||
print(f"Cache creation: {message.usage.cache_creation_input_tokens}")
|
||||
```
|
||||
|
||||
## Batches API
|
||||
|
||||
Process large volumes asynchronously at 50% cost reduction:
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
batch = client.messages.batches.create(
|
||||
requests=[
|
||||
{
|
||||
"custom_id": f"request-{i}",
|
||||
"params": {
|
||||
"model": "claude-sonnet-4-0",
|
||||
"max_tokens": 1024,
|
||||
"messages": [{"role": "user", "content": prompt}]
|
||||
}
|
||||
}
|
||||
for i, prompt in enumerate(prompts)
|
||||
]
|
||||
)
|
||||
|
||||
# Poll for completion
|
||||
while True:
|
||||
status = client.messages.batches.retrieve(batch.id)
|
||||
if status.processing_status == "ended":
|
||||
break
|
||||
time.sleep(30)
|
||||
|
||||
# Get results
|
||||
for result in client.messages.batches.results(batch.id):
|
||||
print(result.result.message.content[0].text)
|
||||
```
|
||||
|
||||
## Claude Agent SDK
|
||||
|
||||
Build multi-step agents:
|
||||
|
||||
```python
|
||||
# Note: Agent SDK API surface may change — check official docs
|
||||
import anthropic
|
||||
|
||||
# Define tools as functions
|
||||
tools = [{
|
||||
"name": "search_codebase",
|
||||
"description": "Search the codebase for relevant code",
|
||||
"input_schema": {
|
||||
"type": "object",
|
||||
"properties": {"query": {"type": "string"}},
|
||||
"required": ["query"]
|
||||
}
|
||||
}]
|
||||
|
||||
# Run an agentic loop with tool use
|
||||
client = anthropic.Anthropic()
|
||||
messages = [{"role": "user", "content": "Review the auth module for security issues"}]
|
||||
|
||||
while True:
|
||||
response = client.messages.create(
|
||||
model="claude-sonnet-4-0",
|
||||
max_tokens=4096,
|
||||
tools=tools,
|
||||
messages=messages,
|
||||
)
|
||||
if response.stop_reason == "end_turn":
|
||||
break
|
||||
# Handle tool calls and continue the loop
|
||||
messages.append({"role": "assistant", "content": response.content})
|
||||
# ... execute tools and append tool_result messages
|
||||
```
|
||||
|
||||
## Cost Optimization
|
||||
|
||||
| Strategy | Savings | When to Use |
|
||||
|----------|---------|-------------|
|
||||
| Prompt caching | Up to 90% on cached tokens | Repeated system prompts or context |
|
||||
| Batches API | 50% | Non-time-sensitive bulk processing |
|
||||
| Haiku instead of Sonnet | ~75% | Simple tasks, classification, extraction |
|
||||
| Shorter max_tokens | Variable | When you know output will be short |
|
||||
| Streaming | None (same cost) | Better UX, same price |
|
||||
|
||||
## Error Handling
|
||||
|
||||
```python
|
||||
import time
|
||||
|
||||
from anthropic import APIError, RateLimitError, APIConnectionError
|
||||
|
||||
try:
|
||||
message = client.messages.create(...)
|
||||
except RateLimitError:
|
||||
# Back off and retry
|
||||
time.sleep(60)
|
||||
except APIConnectionError:
|
||||
# Network issue, retry with backoff
|
||||
pass
|
||||
except APIError as e:
|
||||
print(f"API error {e.status_code}: {e.message}")
|
||||
```
|
||||
|
||||
## Environment Setup
|
||||
|
||||
```bash
|
||||
# Required
|
||||
export ANTHROPIC_API_KEY="your-api-key-here"
|
||||
|
||||
# Optional: set default model
|
||||
export ANTHROPIC_MODEL="claude-sonnet-4-0"
|
||||
```
|
||||
|
||||
Never hardcode API keys. Always use environment variables.
|
||||
@@ -158,13 +158,14 @@ For each selected category, print the full list of skills below and ask the user
|
||||
| `investor-materials` | Pitch decks, one-pagers, investor memos, and financial models |
|
||||
| `investor-outreach` | Personalized investor cold emails, warm intros, and follow-ups |
|
||||
|
||||
**Category: Research & APIs (3 skills)**
|
||||
**Category: Research & APIs (2 skills)**
|
||||
|
||||
| Skill | Description |
|
||||
|-------|-------------|
|
||||
| `deep-research` | Multi-source deep research using firecrawl and exa MCPs with cited reports |
|
||||
| `exa-search` | Neural search via Exa MCP for web, code, company, and people research |
|
||||
| `claude-api` | Anthropic Claude API patterns: Messages, streaming, tool use, vision, batches, Agent SDK |
|
||||
|
||||
`claude-api` is an Anthropic canonical skill. Install it from [`anthropics/skills`](https://github.com/anthropics/skills) when you want the official Claude API workflow instead of an ECC-bundled copy.
|
||||
|
||||
**Category: Social & Content Distribution (2 skills)**
|
||||
|
||||
|
||||
@@ -1,145 +0,0 @@
|
||||
---
|
||||
name: frontend-design
|
||||
description: Create distinctive, production-grade frontend interfaces with high design quality. Use when the user asks to build web components, pages, or applications and the visual direction matters as much as the code quality.
|
||||
origin: ECC
|
||||
---
|
||||
|
||||
# Frontend Design
|
||||
|
||||
Use this when the task is not just "make it work" but "make it look designed."
|
||||
|
||||
This skill is for product pages, dashboards, app shells, components, or visual systems that need a clear point of view instead of generic AI-looking UI.
|
||||
|
||||
## When To Use
|
||||
|
||||
- building a landing page, dashboard, or app surface from scratch
|
||||
- upgrading a bland interface into something intentional and memorable
|
||||
- translating a product concept into a concrete visual direction
|
||||
- implementing a frontend where typography, composition, and motion matter
|
||||
|
||||
## Core Principle
|
||||
|
||||
Pick a direction and commit to it.
|
||||
|
||||
Safe-average UI is usually worse than a strong, coherent aesthetic with a few bold choices.
|
||||
|
||||
## Design Workflow
|
||||
|
||||
### 1. Frame the interface first
|
||||
|
||||
Before coding, settle:
|
||||
|
||||
- purpose
|
||||
- audience
|
||||
- emotional tone
|
||||
- visual direction
|
||||
- one thing the user should remember
|
||||
|
||||
Possible directions:
|
||||
|
||||
- brutally minimal
|
||||
- editorial
|
||||
- industrial
|
||||
- luxury
|
||||
- playful
|
||||
- geometric
|
||||
- retro-futurist
|
||||
- soft and organic
|
||||
- maximalist
|
||||
|
||||
Do not mix directions casually. Choose one and execute it cleanly.
|
||||
|
||||
### 2. Build the visual system
|
||||
|
||||
Define:
|
||||
|
||||
- type hierarchy
|
||||
- color variables
|
||||
- spacing rhythm
|
||||
- layout logic
|
||||
- motion rules
|
||||
- surface / border / shadow treatment
|
||||
|
||||
Use CSS variables or the project's token system so the interface stays coherent as it grows.
|
||||
|
||||
### 3. Compose with intention
|
||||
|
||||
Prefer:
|
||||
|
||||
- asymmetry when it sharpens hierarchy
|
||||
- overlap when it creates depth
|
||||
- strong whitespace when it clarifies focus
|
||||
- dense layouts only when the product benefits from density
|
||||
|
||||
Avoid defaulting to a symmetrical card grid unless it is clearly the right fit.
|
||||
|
||||
### 4. Make motion meaningful
|
||||
|
||||
Use animation to:
|
||||
|
||||
- reveal hierarchy
|
||||
- stage information
|
||||
- reinforce user action
|
||||
- create one or two memorable moments
|
||||
|
||||
Do not scatter generic micro-interactions everywhere. One well-directed load sequence is usually stronger than twenty random hover effects.
|
||||
|
||||
## Strong Defaults
|
||||
|
||||
### Typography
|
||||
|
||||
- pick fonts with character
|
||||
- pair a distinctive display face with a readable body face when appropriate
|
||||
- avoid generic defaults when the page is design-led
|
||||
|
||||
### Color
|
||||
|
||||
- commit to a clear palette
|
||||
- one dominant field with selective accents usually works better than evenly weighted rainbow palettes
|
||||
- avoid cliché purple-gradient-on-white unless the product genuinely calls for it
|
||||
|
||||
### Background
|
||||
|
||||
Use atmosphere:
|
||||
|
||||
- gradients
|
||||
- meshes
|
||||
- textures
|
||||
- subtle noise
|
||||
- patterns
|
||||
- layered transparency
|
||||
|
||||
Flat empty backgrounds are rarely the best answer for a product-facing page.
|
||||
|
||||
### Layout
|
||||
|
||||
- break the grid when the composition benefits from it
|
||||
- use diagonals, offsets, and grouping intentionally
|
||||
- keep reading flow obvious even when the layout is unconventional
|
||||
|
||||
## Anti-Patterns
|
||||
|
||||
Never default to:
|
||||
|
||||
- interchangeable SaaS hero sections
|
||||
- generic card piles with no hierarchy
|
||||
- random accent colors without a system
|
||||
- placeholder-feeling typography
|
||||
- motion that exists only because animation was easy to add
|
||||
|
||||
## Execution Rules
|
||||
|
||||
- preserve the established design system when working inside an existing product
|
||||
- match technical complexity to the visual idea
|
||||
- keep accessibility and responsiveness intact
|
||||
- frontends should feel deliberate on desktop and mobile
|
||||
|
||||
## Quality Gate
|
||||
|
||||
Before delivering:
|
||||
|
||||
- the interface has a clear visual point of view
|
||||
- typography and spacing feel intentional
|
||||
- color and motion support the product instead of decorating it randomly
|
||||
- the result does not read like generic AI UI
|
||||
- the implementation is production-grade, not just visually interesting
|
||||
@@ -10,6 +10,29 @@ const path = require('path');
|
||||
const REPO_ROOT = path.join(__dirname, '..', '..');
|
||||
const AGENT_YAML_PATH = path.join(REPO_ROOT, 'agent.yaml');
|
||||
const COMMANDS_DIR = path.join(REPO_ROOT, 'commands');
|
||||
const SKILLS_DIR = path.join(REPO_ROOT, 'skills');
|
||||
const CODEX_SKILLS_DIR = path.join(REPO_ROOT, '.agents', 'skills');
|
||||
const LEGACY_COMMANDS_DIR = path.join(REPO_ROOT, 'legacy-command-shims', 'commands');
|
||||
|
||||
const RETIRED_LEGACY_SHIMS = [
|
||||
'agent-sort',
|
||||
'claw',
|
||||
'context-budget',
|
||||
'devfleet',
|
||||
'docs',
|
||||
'e2e',
|
||||
'eval',
|
||||
'orchestrate',
|
||||
'prompt-optimize',
|
||||
'rules-distill',
|
||||
'tdd',
|
||||
'verify',
|
||||
];
|
||||
|
||||
const CANONICAL_ANTHROPIC_SKILLS = [
|
||||
'claude-api',
|
||||
'frontend-design',
|
||||
];
|
||||
|
||||
function extractTopLevelList(yamlSource, key) {
|
||||
const lines = yamlSource.replace(/^\uFEFF/, '').split(/\r?\n/);
|
||||
@@ -70,6 +93,35 @@ function run() {
|
||||
assert.deepStrictEqual(declaredCommands, actualCommands);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('retired legacy slash-entry shims are not in the default commands export', () => {
|
||||
const defaultShimCommands = RETIRED_LEGACY_SHIMS
|
||||
.filter(command => actualCommands.includes(command));
|
||||
|
||||
assert.deepStrictEqual(defaultShimCommands, []);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('retired legacy slash-entry shims remain available from the opt-in archive', () => {
|
||||
const archivedCommands = fs.readdirSync(LEGACY_COMMANDS_DIR)
|
||||
.filter(file => file.endsWith('.md'))
|
||||
.map(file => path.basename(file, '.md'))
|
||||
.sort();
|
||||
|
||||
assert.deepStrictEqual(archivedCommands, RETIRED_LEGACY_SHIMS);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('canonical Anthropic skills are not re-bundled in active ECC skill surfaces', () => {
|
||||
for (const skillName of CANONICAL_ANTHROPIC_SKILLS) {
|
||||
assert.ok(
|
||||
!fs.existsSync(path.join(SKILLS_DIR, skillName, 'SKILL.md')),
|
||||
`${skillName} should be installed from anthropics/skills, not ECC skills/`
|
||||
);
|
||||
assert.ok(
|
||||
!fs.existsSync(path.join(CODEX_SKILLS_DIR, skillName, 'SKILL.md')),
|
||||
`${skillName} should be installed from anthropics/skills, not ECC .agents/skills/`
|
||||
);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
|
||||
|
||||
76
tests/commands/plan-command.test.js
Normal file
76
tests/commands/plan-command.test.js
Normal file
@@ -0,0 +1,76 @@
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
const repoRoot = path.resolve(__dirname, '..', '..');
|
||||
const planCommandPath = path.join(repoRoot, 'commands', 'plan.md');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` ✓ ${name}`);
|
||||
passed++;
|
||||
} catch (error) {
|
||||
console.log(` ✗ ${name}`);
|
||||
console.log(` Error: ${error.message}`);
|
||||
failed++;
|
||||
}
|
||||
}
|
||||
|
||||
function readPlanCommand() {
|
||||
return fs.readFileSync(planCommandPath, 'utf8');
|
||||
}
|
||||
|
||||
console.log('\n=== Testing /plan command prompt ===\n');
|
||||
|
||||
test('/plan runs inline by default without requiring planner agent installation', () => {
|
||||
const source = readPlanCommand();
|
||||
|
||||
assert.ok(
|
||||
source.includes('Do not call the Task tool or any subagent by default'),
|
||||
'Expected /plan to avoid default subagent delegation',
|
||||
);
|
||||
assert.ok(
|
||||
source.includes('If the `planner` subagent is unavailable'),
|
||||
'Expected /plan to define a planner-unavailable fallback',
|
||||
);
|
||||
assert.ok(
|
||||
!source.includes('This command invokes the **planner** agent'),
|
||||
'Expected /plan not to claim unconditional planner invocation',
|
||||
);
|
||||
assert.ok(
|
||||
!source.includes('The planner agent will:'),
|
||||
'Expected /plan to describe inline behavior, not mandatory agent behavior',
|
||||
);
|
||||
assert.ok(
|
||||
!source.includes('Agent (planner):'),
|
||||
'Expected /plan examples not to imply the planner agent is required',
|
||||
);
|
||||
});
|
||||
|
||||
test('/plan preserves the explicit confirmation gate before code edits', () => {
|
||||
const source = readPlanCommand();
|
||||
|
||||
assert.ok(
|
||||
source.includes('WAIT for user CONFIRM before touching any code'),
|
||||
'Expected frontmatter to preserve the no-code-before-confirmation rule',
|
||||
);
|
||||
assert.ok(
|
||||
source.includes('WAITING FOR CONFIRMATION'),
|
||||
'Expected example output to preserve the confirmation handoff',
|
||||
);
|
||||
assert.ok(
|
||||
source.includes('will **NOT** write any code until you explicitly confirm'),
|
||||
'Expected important notes to preserve the confirmation contract',
|
||||
);
|
||||
});
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
@@ -49,6 +49,7 @@ const expectedReleaseFiles = [
|
||||
'launch-checklist.md',
|
||||
'telegram-handoff.md',
|
||||
'demo-prompts.md',
|
||||
'quickstart.md',
|
||||
];
|
||||
|
||||
test('release candidate directory includes the public launch pack', () => {
|
||||
@@ -106,12 +107,65 @@ test('Hermes setup uses release-candidate wording for the rc.1 surface', () => {
|
||||
assert.ok(!source.includes('Public Preview Scope'));
|
||||
});
|
||||
|
||||
test('Hermes setup cross-links adjacent migration and architecture docs', () => {
|
||||
const source = read('docs/HERMES-SETUP.md');
|
||||
assert.ok(source.includes('HERMES-OPENCLAW-MIGRATION.md'));
|
||||
assert.ok(source.includes('architecture/cross-harness.md'));
|
||||
assert.ok(source.includes('Plan and scaffold migration artifacts'));
|
||||
assert.ok(!source.includes('0.5. Generate and review artifacts with `ecc migrate plan` /'));
|
||||
});
|
||||
|
||||
test('release docs preserve the ECC/Hermes boundary', () => {
|
||||
const releaseNotes = read('docs/releases/2.0.0-rc.1/release-notes.md');
|
||||
assert.ok(releaseNotes.includes('ECC is the reusable substrate'));
|
||||
assert.ok(releaseNotes.includes('Hermes as the operator shell'));
|
||||
});
|
||||
|
||||
test('release notes route new contributors through the rc.1 quickstart', () => {
|
||||
const releaseNotes = read('docs/releases/2.0.0-rc.1/release-notes.md');
|
||||
assert.ok(releaseNotes.includes('[rc.1 quickstart](quickstart.md)'));
|
||||
});
|
||||
|
||||
test('rc.1 quickstart gives a clone-to-cross-harness path', () => {
|
||||
const quickstart = read('docs/releases/2.0.0-rc.1/quickstart.md');
|
||||
for (const heading of ['Clone', 'Install', 'Verify', 'First Skill', 'Switch Harness']) {
|
||||
assert.ok(quickstart.includes(`## ${heading}`), `Missing ${heading} section`);
|
||||
}
|
||||
assert.ok(quickstart.includes('node tests/run-all.js'));
|
||||
assert.ok(quickstart.includes('skills/hermes-imports/SKILL.md'));
|
||||
});
|
||||
|
||||
test('cross-harness doc includes a worked skill portability example', () => {
|
||||
const source = read('docs/architecture/cross-harness.md');
|
||||
assert.ok(source.includes('## Worked Example'));
|
||||
assert.ok(source.includes('same skill source'));
|
||||
for (const harness of ['Claude Code', 'Codex', 'OpenCode']) {
|
||||
assert.ok(source.includes(harness), `Expected worked example to mention ${harness}`);
|
||||
}
|
||||
});
|
||||
|
||||
test('release docs use release-candidate wording consistently', () => {
|
||||
const releaseNotes = read('docs/releases/2.0.0-rc.1/release-notes.md');
|
||||
assert.ok(releaseNotes.includes('## Release Candidate Boundaries'));
|
||||
assert.ok(!releaseNotes.includes('## Preview Boundaries'));
|
||||
});
|
||||
|
||||
test('launch checklist records the ecc2 alpha version policy', () => {
|
||||
const cargoToml = read('ecc2/Cargo.toml');
|
||||
const launchChecklist = read('docs/releases/2.0.0-rc.1/launch-checklist.md');
|
||||
assert.ok(cargoToml.includes('version = "0.1.0"'));
|
||||
assert.ok(launchChecklist.includes('`ecc2/Cargo.toml` stays at `0.1.0`'));
|
||||
assert.ok(!launchChecklist.includes('confirm whether `ecc2/Cargo.toml` moves'));
|
||||
});
|
||||
|
||||
test('localized changelogs include rc.1 and 1.10.0 release entries', () => {
|
||||
for (const relativePath of ['docs/tr/CHANGELOG.md', 'docs/zh-CN/CHANGELOG.md']) {
|
||||
const source = read(relativePath);
|
||||
assert.ok(source.includes('## 2.0.0-rc.1 - 2026-04-28'), `${relativePath} missing rc.1 entry`);
|
||||
assert.ok(source.includes('## 1.10.0 - 2026-04-05'), `${relativePath} missing 1.10.0 entry`);
|
||||
}
|
||||
});
|
||||
|
||||
if (failed > 0) {
|
||||
console.log(`\nFailed: ${failed}`);
|
||||
process.exit(1);
|
||||
|
||||
@@ -72,6 +72,12 @@ if (test('blocks core.hooksPath override', () => {
|
||||
assert.ok(r.stderr.includes('core.hooksPath'), `stderr should mention core.hooksPath: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('blocks quoted core.hooksPath override argument', () => {
|
||||
const r = runHook({ tool_input: { command: 'git -c "core.hooksPath=/dev/null" commit -m "msg"' } });
|
||||
assert.strictEqual(r.code, 2, `expected exit 2, got ${r.code}`);
|
||||
assert.ok(r.stderr.includes('core.hooksPath'), `stderr should mention core.hooksPath: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Chained command false positive prevention (Comment 2) ---
|
||||
|
||||
if (test('does not false-positive on -n belonging to git log in a chain', () => {
|
||||
@@ -84,11 +90,58 @@ if (test('does not false-positive on --no-verify in a prior non-git command', ()
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows --no-verify discussed in a double-quoted commit message', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -m "fix: --no-verify edge case"' } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows --no-verify discussed in a single-quoted commit message', () => {
|
||||
const r = runHook({ tool_input: { command: "git commit -m 'fix: --no-verify edge case'" } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows -n discussed in a quoted commit message', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -m "Fixed -n bug in module"' } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows --no-verify after combined -am message option', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -am "--no-verify"' } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows -n after combined -am message option', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -am "-n"' } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows core.hooksPath discussed in a quoted commit message', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -m "doc: explain core.hooksPath= setting"' } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('allows git bypass phrase discussed in a quoted commit message', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -m "doc: explain git push --no-verify risk"' } });
|
||||
assert.strictEqual(r.code, 0, `expected exit 0, got ${r.code}: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('still blocks --no-verify on the git commit part of a chain', () => {
|
||||
const r = runHook({ tool_input: { command: 'git log -n 5 && git commit --no-verify -m "msg"' } });
|
||||
assert.strictEqual(r.code, 2, `expected exit 2, got ${r.code}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('still blocks a real quoted --no-verify flag', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit "--no-verify" -m "msg"' } });
|
||||
assert.strictEqual(r.code, 2, `expected exit 2, got ${r.code}`);
|
||||
assert.ok(r.stderr.includes('BLOCKED'), `stderr should contain BLOCKED: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('still blocks bypass flags in later chained git commands', () => {
|
||||
const r = runHook({ tool_input: { command: 'git commit -m "msg" && git push --no-verify' } });
|
||||
assert.strictEqual(r.code, 2, `expected exit 2, got ${r.code}`);
|
||||
assert.ok(r.stderr.includes('git push'), `stderr should mention git push: ${r.stderr}`);
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Subcommand detection (Comment 4) ---
|
||||
|
||||
if (test('does not misclassify "commit" as subcommand when it is an argument to push', () => {
|
||||
|
||||
194
tests/hooks/continuous-learning-observe-runner.test.js
Normal file
194
tests/hooks/continuous-learning-observe-runner.test.js
Normal file
@@ -0,0 +1,194 @@
|
||||
/**
|
||||
* Tests for continuous-learning-v2 observe hook dispatch.
|
||||
*
|
||||
* Run with: node tests/hooks/continuous-learning-observe-runner.test.js
|
||||
*/
|
||||
|
||||
'use strict';
|
||||
|
||||
const assert = require('assert');
|
||||
const fs = require('fs');
|
||||
const os = require('os');
|
||||
const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const repoRoot = path.resolve(__dirname, '..', '..');
|
||||
const hooksJsonPath = path.join(repoRoot, 'hooks', 'hooks.json');
|
||||
const runWithFlagsPath = path.join(repoRoot, 'scripts', 'hooks', 'run-with-flags.js');
|
||||
const observeRunner = require(path.join(repoRoot, 'scripts', 'hooks', 'observe-runner.js'));
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
console.log(` \u2713 ${name}`);
|
||||
return true;
|
||||
} catch (err) {
|
||||
console.log(` \u2717 ${name}`);
|
||||
console.log(` Error: ${err.message}`);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
function loadHook(id) {
|
||||
const hookGroups = JSON.parse(fs.readFileSync(hooksJsonPath, 'utf8')).hooks;
|
||||
const hooks = Object.values(hookGroups).flat();
|
||||
const hook = hooks.find(candidate => candidate.id === id);
|
||||
assert.ok(hook, `Expected ${id} in hooks/hooks.json`);
|
||||
assert.ok(Array.isArray(hook.hooks), `Expected ${id} to define hook commands`);
|
||||
assert.strictEqual(hook.hooks.length, 1, `Expected ${id} to have one command`);
|
||||
return hook.hooks[0].command;
|
||||
}
|
||||
|
||||
function withTempPluginRoot(fn) {
|
||||
const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-observe-runner-'));
|
||||
try {
|
||||
fs.mkdirSync(path.join(tempRoot, 'scripts', 'hooks'), { recursive: true });
|
||||
fs.mkdirSync(path.join(tempRoot, 'scripts', 'lib'), { recursive: true });
|
||||
fs.copyFileSync(
|
||||
path.join(repoRoot, 'scripts', 'lib', 'hook-flags.js'),
|
||||
path.join(tempRoot, 'scripts', 'lib', 'hook-flags.js')
|
||||
);
|
||||
return fn(tempRoot);
|
||||
} finally {
|
||||
fs.rmSync(tempRoot, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
|
||||
function withEnv(vars, fn) {
|
||||
const saved = {};
|
||||
for (const [key, value] of Object.entries(vars)) {
|
||||
saved[key] = process.env[key];
|
||||
if (value === undefined) {
|
||||
delete process.env[key];
|
||||
} else {
|
||||
process.env[key] = value;
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
return fn();
|
||||
} finally {
|
||||
for (const [key, value] of Object.entries(saved)) {
|
||||
if (value === undefined) {
|
||||
delete process.env[key];
|
||||
} else {
|
||||
process.env[key] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function writeFakeObserveScript(tempRoot) {
|
||||
const scriptPath = path.join(tempRoot, 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh');
|
||||
fs.mkdirSync(path.dirname(scriptPath), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
scriptPath,
|
||||
[
|
||||
'#!/usr/bin/env bash',
|
||||
'input="$(cat)"',
|
||||
'printf "phase=%s input=%s root=%s" "$1" "$input" "${CLAUDE_PLUGIN_ROOT:-}"',
|
||||
''
|
||||
].join('\n'),
|
||||
'utf8'
|
||||
);
|
||||
fs.chmodSync(scriptPath, 0o755);
|
||||
}
|
||||
|
||||
function runWithFlags(tempRoot, hookId, relScriptPath, stdin) {
|
||||
return spawnSync(process.execPath, [runWithFlagsPath, hookId, relScriptPath, 'standard,strict'], {
|
||||
input: stdin,
|
||||
encoding: 'utf8',
|
||||
env: {
|
||||
...process.env,
|
||||
CLAUDE_PLUGIN_ROOT: tempRoot,
|
||||
ECC_HOOK_PROFILE: 'standard'
|
||||
},
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 10000
|
||||
});
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing continuous-learning observe hook dispatch ===\n');
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
if (test('observe hooks use node-mode runner instead of shell-mode dispatch', () => {
|
||||
for (const hookId of ['pre:observe:continuous-learning', 'post:observe:continuous-learning']) {
|
||||
const command = loadHook(hookId);
|
||||
const phase = hookId.startsWith('pre:') ? 'pre:observe' : 'post:observe';
|
||||
|
||||
assert.ok(command.includes(`node scripts/hooks/run-with-flags.js ${phase} scripts/hooks/observe-runner.js standard,strict`));
|
||||
assert.ok(!command.includes('shell scripts/hooks/run-with-flags-shell.sh'), `${hookId} should not use shell-mode bootstrap`);
|
||||
assert.ok(!command.includes('skills/continuous-learning-v2/hooks/observe.sh'), `${hookId} should not call observe.sh directly from hooks.json`);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('run-with-flags passes hookId to direct run exports', () => {
|
||||
withTempPluginRoot(tempRoot => {
|
||||
const scriptPath = path.join(tempRoot, 'scripts', 'hooks', 'capture-hook-id.js');
|
||||
fs.writeFileSync(
|
||||
scriptPath,
|
||||
[
|
||||
"'use strict';",
|
||||
'module.exports.run = function run(raw, options) {',
|
||||
' return { stdout: JSON.stringify({ raw, hookId: options.hookId, truncated: options.truncated }) };',
|
||||
'};',
|
||||
''
|
||||
].join('\n'),
|
||||
'utf8'
|
||||
);
|
||||
|
||||
const result = runWithFlags(tempRoot, 'post:observe', 'scripts/hooks/capture-hook-id.js', '{"ok":true}');
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
const payload = JSON.parse(result.stdout);
|
||||
assert.deepStrictEqual(payload, { raw: '{"ok":true}', hookId: 'post:observe', truncated: false });
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('observe-runner derives the observe phase from the hook id', () => {
|
||||
assert.strictEqual(observeRunner.getPhaseFromHookId('pre:observe'), 'pre');
|
||||
assert.strictEqual(observeRunner.getPhaseFromHookId('post:observe'), 'post');
|
||||
assert.strictEqual(observeRunner.getPhaseFromHookId('pre:observe:continuous-learning'), 'pre');
|
||||
assert.strictEqual(observeRunner.getPhaseFromHookId('unknown'), null);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('observe-runner invokes observe.sh with phase, stdin, and plugin root', () => {
|
||||
withTempPluginRoot(tempRoot => {
|
||||
writeFakeObserveScript(tempRoot);
|
||||
const env = fs.existsSync('/bin/sh') ? { BASH: '/bin/sh' } : {};
|
||||
withEnv(env, () => {
|
||||
const output = observeRunner.run('payload', {
|
||||
hookId: 'pre:observe',
|
||||
pluginRoot: tempRoot
|
||||
});
|
||||
|
||||
assert.strictEqual(output.exitCode, 0, output.stderr);
|
||||
assert.strictEqual(output.stdout, `phase=pre input=payload root=${tempRoot}`);
|
||||
});
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('observe-runner fails open when no shell runtime is available', () => {
|
||||
withTempPluginRoot(tempRoot => {
|
||||
writeFakeObserveScript(tempRoot);
|
||||
withEnv({ BASH: '', PATH: '' }, () => {
|
||||
const output = observeRunner.run('payload', {
|
||||
hookId: 'post:observe',
|
||||
pluginRoot: tempRoot
|
||||
});
|
||||
|
||||
assert.strictEqual(output.exitCode, 0);
|
||||
assert.ok(!Object.prototype.hasOwnProperty.call(output, 'stdout'), 'disabled observe should preserve stdin via runner passthrough');
|
||||
assert.ok(output.stderr.includes('shell runtime unavailable'));
|
||||
});
|
||||
});
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nPassed: ${passed}`);
|
||||
console.log(`Failed: ${failed}`);
|
||||
process.exit(failed > 0 ? 1 : 0);
|
||||
}
|
||||
|
||||
runTests();
|
||||
@@ -8,6 +8,7 @@ const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const runner = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'run-with-flags.js');
|
||||
const hookScript = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'gateguard-fact-force.js');
|
||||
const externalStateDir = process.env.GATEGUARD_STATE_DIR;
|
||||
const tmpRoot = process.env.TMPDIR || process.env.TEMP || process.env.TMP || '/tmp';
|
||||
const baseStateDir = externalStateDir || tmpRoot;
|
||||
@@ -120,6 +121,16 @@ function parseOutput(stdout) {
|
||||
}
|
||||
}
|
||||
|
||||
function loadDirectHook(env = {}) {
|
||||
delete require.cache[require.resolve(hookScript)];
|
||||
Object.assign(process.env, {
|
||||
GATEGUARD_STATE_DIR: stateDir,
|
||||
CLAUDE_SESSION_ID: TEST_SESSION_ID,
|
||||
...env
|
||||
});
|
||||
return require(hookScript);
|
||||
}
|
||||
|
||||
function runTests() {
|
||||
console.log('\n=== Testing gateguard-fact-force ===\n');
|
||||
|
||||
@@ -212,6 +223,62 @@ function runTests() {
|
||||
|
||||
// --- Test 5: denies first routine Bash, allows second ---
|
||||
clearState();
|
||||
if (test('allows safe git push --force-with-lease without destructive gate', () => {
|
||||
writeState({
|
||||
checked: ['__bash_session__'],
|
||||
last_active: Date.now()
|
||||
});
|
||||
|
||||
const input = {
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command: 'git push --force-with-lease origin feature-branch' }
|
||||
};
|
||||
const result = runBashHook(input);
|
||||
assert.strictEqual(result.code, 0, 'exit code should be 0');
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, 'should produce valid JSON output');
|
||||
if (output.hookSpecificOutput) {
|
||||
assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
|
||||
'safe lease-protected force push should not be denied');
|
||||
} else {
|
||||
assert.strictEqual(output.tool_name, 'Bash', 'pass-through should preserve input');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 6: gates amend as destructive Bash ---
|
||||
clearState();
|
||||
if (test('denies git commit --amend as destructive Bash', () => {
|
||||
const input = {
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command: 'git commit --amend --no-edit' }
|
||||
};
|
||||
const result = runBashHook(input);
|
||||
assert.strictEqual(result.code, 0, 'exit code should be 0');
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, 'should produce JSON output');
|
||||
assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
|
||||
assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('Destructive'));
|
||||
assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('rollback'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 7: still gates plain force push as destructive Bash ---
|
||||
clearState();
|
||||
if (test('denies plain git push --force as destructive Bash', () => {
|
||||
const input = {
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command: 'git push --force origin feature-branch' }
|
||||
};
|
||||
const result = runBashHook(input);
|
||||
assert.strictEqual(result.code, 0, 'exit code should be 0');
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, 'should produce JSON output');
|
||||
assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
|
||||
assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('Destructive'));
|
||||
assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('rollback'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 8: denies first routine Bash, allows second ---
|
||||
clearState();
|
||||
if (test('denies first routine Bash, allows second', () => {
|
||||
const input = {
|
||||
tool_name: 'Bash',
|
||||
@@ -562,6 +629,237 @@ function runTests() {
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 20: malformed JSON passes through unchanged ---
|
||||
clearState();
|
||||
if (test('passes malformed JSON input through unchanged', () => {
|
||||
const rawInput = '{ not valid json';
|
||||
const result = runHook(rawInput);
|
||||
|
||||
assert.strictEqual(result.code, 0, 'exit code should be 0');
|
||||
assert.strictEqual(result.stdout, rawInput, 'malformed JSON should pass through unchanged');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 21: read-only git allowlist covers supported subcommands ---
|
||||
clearState();
|
||||
if (test('allows read-only git introspection subcommands without first-bash gating', () => {
|
||||
const commands = [
|
||||
'git status --porcelain --branch',
|
||||
'git diff',
|
||||
'git diff --name-only',
|
||||
'git log --oneline --max-count=1',
|
||||
'git show HEAD:README.md',
|
||||
'git branch --show-current',
|
||||
'git rev-parse --abbrev-ref HEAD',
|
||||
];
|
||||
|
||||
for (const command of commands) {
|
||||
const result = runBashHook({
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command }
|
||||
});
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, `should produce JSON output for ${command}`);
|
||||
if (output.hookSpecificOutput) {
|
||||
assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
|
||||
`${command} should not be denied`);
|
||||
} else {
|
||||
assert.strictEqual(output.tool_name, 'Bash', `${command} should pass through`);
|
||||
}
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 22: unsupported git commands still flow through routine Bash gate ---
|
||||
clearState();
|
||||
if (test('gates non-allowlisted git commands as routine Bash', () => {
|
||||
const result = runBashHook({
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command: 'git remote -v' }
|
||||
});
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, 'should produce JSON output');
|
||||
assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
|
||||
assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('current user request'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 23: module-load pruning removes old state files only ---
|
||||
clearState();
|
||||
if (test('prunes stale state files while keeping fresh state files', () => {
|
||||
const staleFile = path.join(stateDir, 'state-stale-session.json');
|
||||
const freshFile = path.join(stateDir, 'state-fresh-session.json');
|
||||
fs.writeFileSync(staleFile, JSON.stringify({ checked: [], last_active: Date.now() }), 'utf8');
|
||||
fs.writeFileSync(freshFile, JSON.stringify({ checked: [], last_active: Date.now() }), 'utf8');
|
||||
|
||||
const staleTime = new Date(Date.now() - (61 * 60 * 1000));
|
||||
fs.utimesSync(staleFile, staleTime, staleTime);
|
||||
|
||||
const result = runHook({
|
||||
tool_name: 'Read',
|
||||
tool_input: { file_path: '/src/app.js' }
|
||||
});
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, 'should produce valid JSON output');
|
||||
|
||||
assert.ok(!fs.existsSync(staleFile), 'stale state file should be pruned at module load');
|
||||
assert.ok(fs.existsSync(freshFile), 'fresh state file should not be pruned');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 24: transcript path fallback provides a stable session key ---
|
||||
clearState();
|
||||
if (test('uses transcript_path fallback when session ids are absent', () => {
|
||||
const input = {
|
||||
transcript_path: path.join(stateDir, 'session.jsonl'),
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command: 'pwd' }
|
||||
};
|
||||
|
||||
const first = runBashHook(input, {
|
||||
CLAUDE_SESSION_ID: '',
|
||||
ECC_SESSION_ID: '',
|
||||
CLAUDE_TRANSCRIPT_PATH: '',
|
||||
});
|
||||
const firstOutput = parseOutput(first.stdout);
|
||||
assert.strictEqual(firstOutput.hookSpecificOutput.permissionDecision, 'deny');
|
||||
|
||||
const stateFiles = fs.readdirSync(stateDir).filter(entry => entry.startsWith('state-') && entry.endsWith('.json'));
|
||||
assert.strictEqual(stateFiles.length, 1, 'transcript path should produce one state file');
|
||||
assert.ok(/state-tx-[a-f0-9]{24}\.json$/.test(stateFiles[0]), 'transcript path should hash to a tx-* key');
|
||||
|
||||
const second = runBashHook(input, {
|
||||
CLAUDE_SESSION_ID: '',
|
||||
ECC_SESSION_ID: '',
|
||||
CLAUDE_TRANSCRIPT_PATH: '',
|
||||
});
|
||||
const secondOutput = parseOutput(second.stdout);
|
||||
if (secondOutput.hookSpecificOutput) {
|
||||
assert.notStrictEqual(secondOutput.hookSpecificOutput.permissionDecision, 'deny',
|
||||
'retry should be allowed when transcript_path is stable');
|
||||
} else {
|
||||
assert.strictEqual(secondOutput.tool_name, 'Bash');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 25: project directory fallback provides a stable session key ---
|
||||
clearState();
|
||||
if (test('uses project directory fallback when no session or transcript id exists', () => {
|
||||
const input = {
|
||||
tool_name: 'Bash',
|
||||
tool_input: { command: 'pwd' }
|
||||
};
|
||||
const fallbackEnv = {
|
||||
CLAUDE_SESSION_ID: '',
|
||||
ECC_SESSION_ID: '',
|
||||
CLAUDE_TRANSCRIPT_PATH: '',
|
||||
CLAUDE_PROJECT_DIR: path.join(stateDir, 'project-root'),
|
||||
};
|
||||
|
||||
const first = runBashHook(input, fallbackEnv);
|
||||
const firstOutput = parseOutput(first.stdout);
|
||||
assert.strictEqual(firstOutput.hookSpecificOutput.permissionDecision, 'deny');
|
||||
|
||||
const stateFiles = fs.readdirSync(stateDir).filter(entry => entry.startsWith('state-') && entry.endsWith('.json'));
|
||||
assert.strictEqual(stateFiles.length, 1, 'project fallback should produce one state file');
|
||||
assert.ok(/state-proj-[a-f0-9]{24}\.json$/.test(stateFiles[0]), 'project fallback should hash to a proj-* key');
|
||||
|
||||
const second = runBashHook(input, fallbackEnv);
|
||||
const secondOutput = parseOutput(second.stdout);
|
||||
if (secondOutput.hookSpecificOutput) {
|
||||
assert.notStrictEqual(secondOutput.hookSpecificOutput.permissionDecision, 'deny',
|
||||
'retry should be allowed when project fallback is stable');
|
||||
} else {
|
||||
assert.strictEqual(secondOutput.tool_name, 'Bash');
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 26: direct run() accepts object input and default fields ---
|
||||
clearState();
|
||||
if (test('direct run handles object input and missing optional fields', () => {
|
||||
const hook = loadDirectHook();
|
||||
|
||||
const readInput = { tool_name: 'Read', tool_input: { file_path: '/src/app.js' } };
|
||||
assert.strictEqual(hook.run(readInput), readInput, 'object input should pass through unchanged');
|
||||
|
||||
const editWithoutInput = { tool_name: 'Edit' };
|
||||
assert.strictEqual(hook.run(editWithoutInput), editWithoutInput, 'missing tool_input should allow Edit');
|
||||
|
||||
const multiWithoutEdits = { tool_name: 'MultiEdit', tool_input: {} };
|
||||
assert.strictEqual(hook.run(multiWithoutEdits), multiWithoutEdits, 'missing edits array should allow MultiEdit');
|
||||
|
||||
const bashWithoutCommand = { tool_name: 'Bash', tool_input: {} };
|
||||
const bashResult = hook.run(bashWithoutCommand);
|
||||
const bashOutput = JSON.parse(bashResult.stdout);
|
||||
assert.strictEqual(bashOutput.hookSpecificOutput.permissionDecision, 'deny',
|
||||
'missing Bash command should still use routine Bash gate');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 27: bidi controls are stripped from file paths ---
|
||||
clearState();
|
||||
if (test('sanitizes bidi override characters in gated file paths', () => {
|
||||
const bidiOverride = String.fromCharCode(0x202e);
|
||||
const input = {
|
||||
tool_name: 'Edit',
|
||||
tool_input: { file_path: `/src/${bidiOverride}evil.js`, old_string: 'a', new_string: 'b' }
|
||||
};
|
||||
|
||||
const result = runHook(input);
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.ok(output, 'should produce JSON output');
|
||||
const reason = output.hookSpecificOutput.permissionDecisionReason;
|
||||
assert.ok(!reason.includes(bidiOverride), 'bidi override must not appear in denial reason');
|
||||
assert.ok(reason.includes('evil.js'), 'sanitized path should retain visible filename text');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 28: saveState preserves concurrent disk updates ---
|
||||
clearState();
|
||||
if (test('merges state written by another process during save', () => {
|
||||
const hook = loadDirectHook();
|
||||
const originalMkdirSync = fs.mkdirSync;
|
||||
let injected = false;
|
||||
|
||||
fs.mkdirSync = function patchedMkdirSync(target) {
|
||||
const result = originalMkdirSync.apply(fs, arguments);
|
||||
if (!injected && path.resolve(String(target)) === path.resolve(stateDir)) {
|
||||
injected = true;
|
||||
fs.writeFileSync(stateFile, JSON.stringify({
|
||||
checked: ['/src/concurrent.js'],
|
||||
last_active: Date.now()
|
||||
}), 'utf8');
|
||||
}
|
||||
return result;
|
||||
};
|
||||
|
||||
try {
|
||||
const result = hook.run({
|
||||
tool_name: 'Edit',
|
||||
tool_input: { file_path: '/src/new-edit.js', old_string: 'a', new_string: 'b' }
|
||||
});
|
||||
const output = parseOutput(result.stdout);
|
||||
assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny', 'first edit should still be gated');
|
||||
} finally {
|
||||
fs.mkdirSync = originalMkdirSync;
|
||||
}
|
||||
|
||||
const persisted = JSON.parse(fs.readFileSync(stateFile, 'utf8'));
|
||||
assert.ok(persisted.checked.includes('/src/concurrent.js'), 'concurrent disk entry should be preserved');
|
||||
assert.ok(persisted.checked.includes('/src/new-edit.js'), 'new in-memory entry should be persisted');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// --- Test 29: stale temp files from interrupted writes are pruned ---
|
||||
clearState();
|
||||
if (test('prunes stale state temp files at module load', () => {
|
||||
fs.mkdirSync(stateDir, { recursive: true });
|
||||
const staleTmp = path.join(stateDir, `${path.basename(stateFile)}.tmp.1234.abcd`);
|
||||
const freshState = path.join(stateDir, 'state-fresh-session.json');
|
||||
fs.writeFileSync(staleTmp, '{}', 'utf8');
|
||||
fs.writeFileSync(freshState, '{}', 'utf8');
|
||||
const staleTime = new Date(Date.now() - (61 * 60 * 1000));
|
||||
fs.utimesSync(staleTmp, staleTime, staleTime);
|
||||
|
||||
loadDirectHook();
|
||||
|
||||
assert.ok(!fs.existsSync(staleTmp), 'stale temp state file should be pruned');
|
||||
assert.ok(fs.existsSync(freshState), 'fresh state file should remain');
|
||||
})) passed++; else failed++;
|
||||
|
||||
// Cleanup only the temp directory created by this test file.
|
||||
try {
|
||||
if (fs.existsSync(stateDir)) {
|
||||
|
||||
@@ -545,6 +545,65 @@ async function runTests() {
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
if (
|
||||
await asyncTest('injects learned skills into session-start additional context', async () => {
|
||||
const isoHome = path.join(os.tmpdir(), `ecc-skills-context-${Date.now()}`);
|
||||
const learnedDir = path.join(isoHome, '.claude', 'skills', 'learned');
|
||||
fs.mkdirSync(learnedDir, { recursive: true });
|
||||
fs.mkdirSync(getCanonicalSessionsDir(isoHome), { recursive: true });
|
||||
|
||||
fs.writeFileSync(
|
||||
path.join(learnedDir, 'testing-patterns.md'),
|
||||
[
|
||||
'# Testing Patterns',
|
||||
'',
|
||||
'## When to Use',
|
||||
'Use for recurring flaky integration tests that need deterministic setup checks.',
|
||||
'',
|
||||
'## Solution',
|
||||
'Verify service readiness before running the test body.',
|
||||
].join('\n'),
|
||||
);
|
||||
fs.mkdirSync(path.join(learnedDir, 'debugging-pattern'), { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(learnedDir, 'debugging-pattern', 'SKILL.md'),
|
||||
[
|
||||
'# Debugging Pattern',
|
||||
'',
|
||||
'## Trigger',
|
||||
'Use when a CLI tool silently exits without a result payload.',
|
||||
].join('\n'),
|
||||
);
|
||||
|
||||
try {
|
||||
const result = await runScript(path.join(scriptsDir, 'session-start.js'), '', {
|
||||
HOME: isoHome,
|
||||
USERPROFILE: isoHome
|
||||
});
|
||||
assert.strictEqual(result.code, 0);
|
||||
const additionalContext = getSessionStartAdditionalContext(result.stdout);
|
||||
assert.ok(
|
||||
additionalContext.includes('Available learned skills'),
|
||||
`Should inject learned skills into additionalContext, got: ${additionalContext}`
|
||||
);
|
||||
assert.ok(additionalContext.includes('testing-patterns'), 'Should include the learned skill slug');
|
||||
assert.ok(
|
||||
additionalContext.includes('Use for recurring flaky integration tests'),
|
||||
'Should include the learned skill trigger text'
|
||||
);
|
||||
assert.ok(additionalContext.includes('debugging-pattern'), 'Should include directory-style learned skills');
|
||||
assert.ok(
|
||||
additionalContext.includes('CLI tool silently exits'),
|
||||
'Should summarize directory-style learned skill trigger text'
|
||||
);
|
||||
} finally {
|
||||
fs.rmSync(isoHome, { recursive: true, force: true });
|
||||
}
|
||||
})
|
||||
)
|
||||
passed++;
|
||||
else failed++;
|
||||
|
||||
// check-console-log.js tests
|
||||
console.log('\ncheck-console-log.js:');
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ const path = require('path');
|
||||
const { spawnSync } = require('child_process');
|
||||
|
||||
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'insaits-security-monitor.py');
|
||||
const MONITOR_TIMEOUT_MS = 30000;
|
||||
|
||||
function createTempDir() {
|
||||
return fs.mkdtempSync(path.join(os.tmpdir(), 'insaits-monitor-'));
|
||||
@@ -93,12 +94,16 @@ function runMonitor(options = {}) {
|
||||
encoding: 'utf8',
|
||||
env,
|
||||
cwd: tempDir,
|
||||
timeout: 10000,
|
||||
timeout: MONITOR_TIMEOUT_MS,
|
||||
});
|
||||
result.tempDir = tempDir;
|
||||
return result;
|
||||
}
|
||||
|
||||
function statusError(result) {
|
||||
return result.stderr || result.error?.message || `status ${result.status}`;
|
||||
}
|
||||
|
||||
function test(name, fn) {
|
||||
try {
|
||||
fn();
|
||||
@@ -123,7 +128,7 @@ function runTests() {
|
||||
env: { FAKE_INSAITS_MODE: 'clean' },
|
||||
});
|
||||
try {
|
||||
assert.strictEqual(result.status, 0, result.stderr);
|
||||
assert.strictEqual(result.status, 0, statusError(result));
|
||||
assert.strictEqual(result.stdout, '');
|
||||
|
||||
const [audit] = readAudit(result.tempDir);
|
||||
@@ -143,7 +148,7 @@ function runTests() {
|
||||
env: { FAKE_INSAITS_MODE: 'critical' },
|
||||
});
|
||||
try {
|
||||
assert.strictEqual(result.status, 2, result.stderr);
|
||||
assert.strictEqual(result.status, 2, statusError(result));
|
||||
assert.ok(result.stdout.includes('SECRET'));
|
||||
assert.ok(result.stdout.includes('token-like string detected'));
|
||||
|
||||
@@ -161,7 +166,7 @@ function runTests() {
|
||||
env: { FAKE_INSAITS_MODE: 'medium' },
|
||||
});
|
||||
try {
|
||||
assert.strictEqual(result.status, 0);
|
||||
assert.strictEqual(result.status, 0, statusError(result));
|
||||
assert.strictEqual(result.stdout, '');
|
||||
assert.ok(result.stderr.includes('PROMPT_INJECTION'));
|
||||
|
||||
@@ -179,7 +184,7 @@ function runTests() {
|
||||
env: { FAKE_INSAITS_MODE: 'error', INSAITS_FAIL_MODE: '' },
|
||||
});
|
||||
try {
|
||||
assert.strictEqual(result.status, 0);
|
||||
assert.strictEqual(result.status, 0, statusError(result));
|
||||
assert.strictEqual(result.stdout, '');
|
||||
assert.ok(result.stderr.includes('SDK error'));
|
||||
} finally {
|
||||
@@ -193,7 +198,7 @@ function runTests() {
|
||||
env: { FAKE_INSAITS_MODE: 'error', INSAITS_FAIL_MODE: 'closed' },
|
||||
});
|
||||
try {
|
||||
assert.strictEqual(result.status, 2);
|
||||
assert.strictEqual(result.status, 2, statusError(result));
|
||||
assert.ok(result.stdout.includes('InsAIts SDK error (RuntimeError)'));
|
||||
assert.ok(result.stdout.includes('blocking execution'));
|
||||
} finally {
|
||||
|
||||
@@ -54,6 +54,18 @@ function readState(statePath) {
|
||||
return JSON.parse(fs.readFileSync(statePath, 'utf8'));
|
||||
}
|
||||
|
||||
function readOptionalFile(filePath) {
|
||||
return fs.existsSync(filePath) ? fs.readFileSync(filePath, 'utf8') : '<missing>';
|
||||
}
|
||||
|
||||
function hookFailureDetails(result, statePath) {
|
||||
return [
|
||||
`exit=${result.code}`,
|
||||
`stderr=${result.stderr.trim() || '<empty>'}`,
|
||||
`state=${readOptionalFile(statePath)}`
|
||||
].join('; ');
|
||||
}
|
||||
|
||||
function createCommandConfig(scriptPath) {
|
||||
return {
|
||||
command: process.execPath,
|
||||
@@ -132,7 +144,15 @@ function waitForHttpReady(urlString, timeoutMs = 5000) {
|
||||
const attempt = () => {
|
||||
const req = client.request(urlString, { method: 'GET' }, res => {
|
||||
res.resume();
|
||||
resolve();
|
||||
res.once('end', resolve);
|
||||
res.once('error', error => {
|
||||
if (Date.now() >= deadline) {
|
||||
reject(new Error(`Timed out waiting for ${urlString}: ${error.message}`));
|
||||
return;
|
||||
}
|
||||
|
||||
setTimeout(attempt, 25);
|
||||
});
|
||||
});
|
||||
|
||||
req.setTimeout(250, () => {
|
||||
@@ -835,26 +855,30 @@ async function runTests() {
|
||||
|
||||
writeConfig(configPath, {
|
||||
mcpServers: {
|
||||
github: {
|
||||
http400: {
|
||||
type: 'http',
|
||||
url: `http://127.0.0.1:${port}/mcp`
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
const input = { tool_name: 'mcp__github__search_repositories', tool_input: {} };
|
||||
const input = { tool_name: 'mcp__http400__search_repositories', tool_input: {} };
|
||||
const result = runHook(input, {
|
||||
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
|
||||
ECC_MCP_CONFIG_PATH: configPath,
|
||||
ECC_MCP_HEALTH_STATE_PATH: statePath,
|
||||
ECC_MCP_HEALTH_TIMEOUT_MS: '500'
|
||||
ECC_MCP_HEALTH_TIMEOUT_MS: '2000'
|
||||
});
|
||||
|
||||
assert.strictEqual(result.code, 0, `Expected HTTP 400 probe to be treated as healthy, got ${result.code}`);
|
||||
assert.strictEqual(
|
||||
result.code,
|
||||
0,
|
||||
`Expected HTTP 400 probe to be treated as healthy: ${hookFailureDetails(result, statePath)}`
|
||||
);
|
||||
assert.strictEqual(result.stdout.trim(), JSON.stringify(input), 'Expected original JSON on stdout');
|
||||
|
||||
const state = readState(statePath);
|
||||
assert.strictEqual(state.servers.github.status, 'healthy', 'Expected HTTP MCP server to be marked healthy');
|
||||
assert.strictEqual(state.servers.http400.status, 'healthy', 'Expected HTTP MCP server to be marked healthy');
|
||||
} finally {
|
||||
serverProcess.kill('SIGTERM');
|
||||
cleanupTempDir(tempDir);
|
||||
@@ -910,10 +934,14 @@ async function runTests() {
|
||||
CLAUDE_HOOK_EVENT_NAME: 'PreToolUse',
|
||||
ECC_MCP_CONFIG_PATH: configPath,
|
||||
ECC_MCP_HEALTH_STATE_PATH: statePath,
|
||||
ECC_MCP_HEALTH_TIMEOUT_MS: '500'
|
||||
ECC_MCP_HEALTH_TIMEOUT_MS: '2000'
|
||||
});
|
||||
|
||||
assert.strictEqual(result.code, 0, `Expected HTTP 401 probe to be treated as healthy, got ${result.code}`);
|
||||
assert.strictEqual(
|
||||
result.code,
|
||||
0,
|
||||
`Expected HTTP 401 probe to be treated as healthy: ${hookFailureDetails(result, statePath)}`
|
||||
);
|
||||
assert.strictEqual(result.stdout.trim(), JSON.stringify(input), 'Expected original JSON on stdout');
|
||||
|
||||
const state = readState(statePath);
|
||||
|
||||
@@ -274,11 +274,12 @@ if (test('stdin entry point truncates oversized input and preserves pass-through
|
||||
filler: 'x'.repeat(1024 * 1024 + 1024)
|
||||
}
|
||||
});
|
||||
const result = spawnSync('node', [path.join(__dirname, '..', '..', 'scripts', 'hooks', 'pre-bash-commit-quality.js')], {
|
||||
const result = spawnSync(process.execPath, [path.join(__dirname, '..', '..', 'scripts', 'hooks', 'pre-bash-commit-quality.js')], {
|
||||
input: oversized,
|
||||
encoding: 'utf8',
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
timeout: 10000
|
||||
timeout: 10000,
|
||||
maxBuffer: 2 * 1024 * 1024
|
||||
});
|
||||
|
||||
assert.strictEqual(result.status, 0);
|
||||
|
||||
@@ -79,6 +79,7 @@ function runTests() {
|
||||
|
||||
if (test('lists install profiles from the real project', () => {
|
||||
const profiles = listInstallProfiles();
|
||||
assert.ok(profiles.some(profile => profile.id === 'minimal'), 'Should include minimal profile');
|
||||
assert.ok(profiles.some(profile => profile.id === 'core'), 'Should include core profile');
|
||||
assert.ok(profiles.some(profile => profile.id === 'full'), 'Should include full profile');
|
||||
})) passed++; else failed++;
|
||||
@@ -212,6 +213,22 @@ function runTests() {
|
||||
assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.agent'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('resolves minimal profile without the hook runtime', () => {
|
||||
const plan = resolveInstallPlan({
|
||||
profileId: 'minimal',
|
||||
target: 'claude',
|
||||
projectRoot: '/workspace/app',
|
||||
});
|
||||
|
||||
assert.deepStrictEqual(
|
||||
plan.selectedModuleIds,
|
||||
['rules-core', 'agents-core', 'commands-core', 'platform-configs', 'workflow-quality']
|
||||
);
|
||||
assert.ok(!plan.selectedModuleIds.includes('hooks-runtime'),
|
||||
'minimal profile should not install hooks-runtime');
|
||||
assert.ok(plan.operations.length > 0, 'Should include install operations');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('resolves explicit modules with dependency expansion', () => {
|
||||
const plan = resolveInstallPlan({ moduleIds: ['security'] });
|
||||
assert.ok(plan.selectedModuleIds.includes('security'), 'Should include requested module');
|
||||
|
||||
@@ -172,6 +172,36 @@ function runTests() {
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('does not install root AGENTS.md into Cursor nested context', () => {
|
||||
const repoRoot = path.join(__dirname, '..', '..');
|
||||
const projectRoot = '/workspace/app';
|
||||
|
||||
const plan = planInstallTargetScaffold({
|
||||
target: 'cursor',
|
||||
repoRoot,
|
||||
projectRoot,
|
||||
modules: [
|
||||
{
|
||||
id: 'agents-core',
|
||||
paths: ['.agents', 'agents', 'AGENTS.md'],
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
assert.ok(
|
||||
!plan.operations.some(operation => (
|
||||
normalizedRelativePath(operation.sourceRelativePath) === 'AGENTS.md'
|
||||
)),
|
||||
'Cursor installs should not copy ECC root AGENTS.md into host project context'
|
||||
);
|
||||
assert.ok(
|
||||
!plan.operations.some(operation => (
|
||||
operation.destinationPath === path.join(projectRoot, '.cursor', 'AGENTS.md')
|
||||
)),
|
||||
'Cursor installs should not create .cursor/AGENTS.md'
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('plans cursor platform rule files as .mdc and excludes rule README docs', () => {
|
||||
const repoRoot = path.join(__dirname, '..', '..');
|
||||
const projectRoot = '/workspace/app';
|
||||
|
||||
@@ -59,7 +59,7 @@ test('parseWorkerTask extracts objective and seeded overlays', () => {
|
||||
'',
|
||||
'## Seeded Local Overlays',
|
||||
'- `scripts/orchestrate-worktrees.js`',
|
||||
'- `commands/orchestrate.md`',
|
||||
'- `commands/multi-workflow.md`',
|
||||
'',
|
||||
'## Objective',
|
||||
'Verify seeded files and summarize status.'
|
||||
@@ -67,7 +67,7 @@ test('parseWorkerTask extracts objective and seeded overlays', () => {
|
||||
|
||||
assert.deepStrictEqual(task.seedPaths, [
|
||||
'scripts/orchestrate-worktrees.js',
|
||||
'commands/orchestrate.md'
|
||||
'commands/multi-workflow.md'
|
||||
]);
|
||||
assert.strictEqual(task.objective, 'Verify seeded files and summarize status.');
|
||||
});
|
||||
|
||||
@@ -442,6 +442,126 @@ function runTests() {
|
||||
assert.ok(result.text.includes('No failure patterns detected'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('chart helpers handle zero widths, truncation, and invalid buckets', () => {
|
||||
assert.strictEqual(dashboard.horizontalBar(10, 0, 4), '\u2591\u2591\u2591\u2591');
|
||||
assert.strictEqual(dashboard.horizontalBar(10, 10, 0), '');
|
||||
|
||||
const boxed = dashboard.panelBox('LongTitleForTinyPanel', ['abcdefghijklmnopqrstuvwxyz'], 10);
|
||||
assert.ok(boxed.includes('abcdefgh'), 'long content should be truncated to inner width');
|
||||
|
||||
const defaultBox = dashboard.panelBox('Default Width', ['ok']);
|
||||
assert.ok(defaultBox.split('\n')[0].length >= 60, 'omitted width should use default panel width');
|
||||
|
||||
const buckets = dashboard.bucketByDay([
|
||||
{ skill_id: 'alpha', outcome: 'success', recorded_at: 'not-a-date' },
|
||||
{ skill_id: 'alpha', outcome: 'success', recorded_at: now },
|
||||
], Date.parse(now), 1);
|
||||
assert.strictEqual(buckets[0].runs, 1, 'invalid dates should be ignored');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('success rate panel handles no skills, missing records, and trend directions', () => {
|
||||
const empty = dashboard.renderSuccessRatePanel([], [], { now });
|
||||
assert.ok(empty.text.includes('No skill execution data available'));
|
||||
assert.deepStrictEqual(empty.data.skills, []);
|
||||
|
||||
const orphan = dashboard.renderSuccessRatePanel([], [{ skill_id: 'orphan' }], { now });
|
||||
assert.strictEqual(orphan.data.skills.length, 1);
|
||||
assert.strictEqual(orphan.data.skills[0].current_7d, null);
|
||||
assert.ok(orphan.text.includes('n/a'));
|
||||
|
||||
const declining = dashboard.renderSuccessRatePanel([
|
||||
{ skill_id: 'gamma', outcome: 'failure', recorded_at: '2026-03-15T08:00:00.000Z' },
|
||||
{ skill_id: 'gamma', outcome: 'success', recorded_at: '2026-02-28T08:00:00.000Z' },
|
||||
{ skill_id: 'gamma', outcome: 'success', recorded_at: '2026-02-27T08:00:00.000Z' },
|
||||
], [{ skill_id: 'gamma' }], { now });
|
||||
assert.strictEqual(declining.data.skills[0].trend, '\u2198');
|
||||
|
||||
const flat = dashboard.renderSuccessRatePanel([
|
||||
{ skill_id: 'delta', outcome: 'success', recorded_at: '2026-03-15T08:00:00.000Z' },
|
||||
{ skill_id: 'delta', outcome: 'success', recorded_at: '2026-02-28T08:00:00.000Z' },
|
||||
], [{ skill_id: 'delta' }], { now });
|
||||
assert.strictEqual(flat.data.skills[0].trend, '\u2192');
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('failure cluster panel labels unknown single-skill failures', () => {
|
||||
const result = dashboard.renderFailureClusterPanel([
|
||||
{ skill_id: 'alpha', outcome: 'failure', failure_reason: '' },
|
||||
]);
|
||||
|
||||
assert.strictEqual(result.data.clusters.length, 1);
|
||||
assert.strictEqual(result.data.clusters[0].pattern, 'unknown');
|
||||
assert.strictEqual(result.data.clusters[0].percentage, 100);
|
||||
assert.ok(result.text.includes('(1 skill)'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('amendment panel handles missing dirs and pending proposal defaults', () => {
|
||||
const proposalSkillDir = createSkill(skillsRoot, 'proposal-defaults', '# Proposal Defaults\n');
|
||||
const proposalLog = path.join(proposalSkillDir, '.evolution', 'amendments.jsonl');
|
||||
appendFile(proposalLog, JSON.stringify({ event: 'proposal' }) + '\n');
|
||||
appendFile(proposalLog, JSON.stringify({ event: 'proposal', status: 'applied' }) + '\n');
|
||||
|
||||
const skillsById = new Map();
|
||||
skillsById.set('missing-dir', { skill_id: 'missing-dir' });
|
||||
skillsById.set('proposal-defaults', { skill_id: 'proposal-defaults', skill_dir: proposalSkillDir });
|
||||
|
||||
const result = dashboard.renderAmendmentPanel(skillsById, { width: 80 });
|
||||
assert.strictEqual(result.data.total, 1);
|
||||
assert.strictEqual(result.data.amendments[0].event, 'proposal');
|
||||
assert.strictEqual(result.data.amendments[0].status, 'pending');
|
||||
assert.strictEqual(result.data.amendments[0].created_at, null);
|
||||
assert.ok(result.text.includes('1 amendment pending review'));
|
||||
assert.ok(result.text.includes(' -'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('version timeline skips missing dirs and empty histories', () => {
|
||||
const emptyVersionDir = createSkill(skillsRoot, 'empty-version-history', '# Empty Version History\n');
|
||||
const skillsById = new Map();
|
||||
skillsById.set('missing-dir', { skill_id: 'missing-dir' });
|
||||
skillsById.set('empty-version-history', { skill_id: 'empty-version-history', skill_dir: emptyVersionDir });
|
||||
|
||||
const result = dashboard.renderVersionTimelinePanel(skillsById);
|
||||
assert.deepStrictEqual(result.data.skills, []);
|
||||
assert.ok(result.text.includes('No version history available'));
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('version timeline renders fallback date and reason values', () => {
|
||||
const originalListVersions = versioning.listVersions;
|
||||
const originalGetEvolutionLog = versioning.getEvolutionLog;
|
||||
versioning.listVersions = () => [
|
||||
{ version: 9, created_at: null },
|
||||
];
|
||||
versioning.getEvolutionLog = () => [
|
||||
{ version: 9, reason: '' },
|
||||
];
|
||||
|
||||
try {
|
||||
const skillsById = new Map();
|
||||
skillsById.set('fallback-version', { skill_id: 'fallback-version', skill_dir: skillsRoot });
|
||||
const result = dashboard.renderVersionTimelinePanel(skillsById);
|
||||
|
||||
assert.strictEqual(result.data.skills.length, 1);
|
||||
assert.strictEqual(result.data.skills[0].versions[0].reason, null);
|
||||
assert.ok(result.text.includes('v9'));
|
||||
assert.ok(result.text.includes(' - '));
|
||||
} finally {
|
||||
versioning.listVersions = originalListVersions;
|
||||
versioning.getEvolutionLog = originalGetEvolutionLog;
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('renderDashboard rejects invalid timestamps', () => {
|
||||
assert.throws(() => {
|
||||
dashboard.renderDashboard({
|
||||
skillsRoot,
|
||||
learnedRoot,
|
||||
importedRoot,
|
||||
homeDir,
|
||||
runsFilePath: runsFile,
|
||||
now: 'not-a-timestamp',
|
||||
});
|
||||
}, /Invalid now timestamp/);
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
|
||||
} finally {
|
||||
cleanupTempDir(repoRoot);
|
||||
|
||||
@@ -319,6 +319,126 @@ function runTests() {
|
||||
assert.strictEqual(record.duration_ms, 0);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('normalizes aliases, defaults, and nullable telemetry fields', () => {
|
||||
const aliasRecord = tracker.normalizeExecutionRecord({
|
||||
skillId: 'alias-skill',
|
||||
skillVersion: 'v2',
|
||||
taskAttempted: 'Alias task',
|
||||
outcome: 'failure',
|
||||
failureReason: 'Needs more examples',
|
||||
tokensUsed: null,
|
||||
userFeedback: 'rejected',
|
||||
}, {
|
||||
now: '2026-03-15T12:30:00.000Z',
|
||||
});
|
||||
|
||||
assert.deepStrictEqual(aliasRecord, {
|
||||
skill_id: 'alias-skill',
|
||||
skill_version: 'v2',
|
||||
task_description: 'Alias task',
|
||||
outcome: 'failure',
|
||||
failure_reason: 'Needs more examples',
|
||||
tokens_used: null,
|
||||
duration_ms: null,
|
||||
user_feedback: 'rejected',
|
||||
recorded_at: '2026-03-15T12:30:00.000Z',
|
||||
});
|
||||
|
||||
const legacyTaskRecord = tracker.normalizeExecutionRecord({
|
||||
skill_id: 'legacy-skill',
|
||||
skill_version: 'v1',
|
||||
task_attempted: 'Legacy attempted task',
|
||||
outcome: 'partial',
|
||||
durationMs: null,
|
||||
recorded_at: '2026-03-15T12:31:00.000Z',
|
||||
});
|
||||
|
||||
assert.strictEqual(legacyTaskRecord.task_description, 'Legacy attempted task');
|
||||
assert.strictEqual(legacyTaskRecord.failure_reason, null);
|
||||
assert.strictEqual(legacyTaskRecord.tokens_used, null);
|
||||
assert.strictEqual(legacyTaskRecord.duration_ms, null);
|
||||
assert.strictEqual(legacyTaskRecord.user_feedback, null);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('rejects invalid execution payloads and numeric telemetry', () => {
|
||||
const valid = {
|
||||
skill_id: 'alpha',
|
||||
skill_version: 'v1',
|
||||
task_description: 'Run task',
|
||||
outcome: 'success',
|
||||
user_feedback: 'accepted',
|
||||
recorded_at: '2026-03-15T12:32:00.000Z',
|
||||
};
|
||||
|
||||
const cases = [
|
||||
[null, /payload must be an object/],
|
||||
[[], /payload must be an object/],
|
||||
[{ ...valid, skill_id: ' ' }, /skill_id is required/],
|
||||
[{ ...valid, skill_version: '' }, /skill_version is required/],
|
||||
[{ ...valid, task_description: '\t' }, /task_description is required/],
|
||||
[{ ...valid, outcome: 'skipped' }, /outcome must be one of/],
|
||||
[{ ...valid, user_feedback: 'ignored' }, /user_feedback must be/],
|
||||
[{ ...valid, recorded_at: 'not-a-date' }, /recorded_at must be/],
|
||||
[{ ...valid, tokens_used: 'lots' }, /tokens_used must be a number/],
|
||||
[{ ...valid, duration_ms: Number.POSITIVE_INFINITY }, /duration_ms must be a number/],
|
||||
];
|
||||
|
||||
for (const [payload, expectedError] of cases) {
|
||||
assert.throws(() => tracker.normalizeExecutionRecord(payload), expectedError);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('uses state-store adapters for recording and reading when available', () => {
|
||||
let capturedRecord = null;
|
||||
const stateStore = {
|
||||
recordSkillExecution(record) {
|
||||
capturedRecord = record;
|
||||
return { id: 'run-1' };
|
||||
},
|
||||
listSkillExecutionRecords() {
|
||||
return [{ skill_id: 'stored-skill', outcome: 'success' }];
|
||||
},
|
||||
};
|
||||
|
||||
const result = tracker.recordSkillExecution({
|
||||
skill_id: 'stored-skill',
|
||||
skill_version: 'v4',
|
||||
task_description: 'Persist in formal store',
|
||||
outcome: 'success',
|
||||
recorded_at: '2026-03-15T12:33:00.000Z',
|
||||
}, {
|
||||
stateStore,
|
||||
});
|
||||
|
||||
assert.strictEqual(result.storage, 'state-store');
|
||||
assert.deepStrictEqual(result.result, { id: 'run-1' });
|
||||
assert.strictEqual(capturedRecord.skill_id, 'stored-skill');
|
||||
assert.deepStrictEqual(
|
||||
tracker.readSkillExecutionRecords({ stateStore }),
|
||||
[{ skill_id: 'stored-skill', outcome: 'success' }]
|
||||
);
|
||||
})) passed++; else failed++;
|
||||
|
||||
if (test('resolves default run paths and treats missing JSONL storage as empty', () => {
|
||||
const pathHome = createTempDir('skill-evolution-path-home-');
|
||||
try {
|
||||
const defaultPath = tracker.getRunsFilePath({ homeDir: pathHome });
|
||||
assert.strictEqual(
|
||||
defaultPath,
|
||||
path.join(pathHome, '.claude', 'state', 'skill-runs.jsonl')
|
||||
);
|
||||
assert.deepStrictEqual(
|
||||
tracker.readSkillExecutionRecords({ homeDir: pathHome }),
|
||||
[]
|
||||
);
|
||||
|
||||
const relativePath = path.join('.', 'relative-skill-runs.jsonl');
|
||||
assert.strictEqual(tracker.getRunsFilePath({ runsFilePath: relativePath }), path.resolve(relativePath));
|
||||
} finally {
|
||||
cleanupTempDir(pathHome);
|
||||
}
|
||||
})) passed++; else failed++;
|
||||
|
||||
console.log('\nHealth:');
|
||||
|
||||
if (test('computes per-skill health metrics and flags declining skills', () => {
|
||||
|
||||
@@ -354,6 +354,120 @@ async function runTests() {
|
||||
}
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('builds an empty status snapshot with null rates and missing install health', async () => {
|
||||
const testDir = createTempDir('ecc-state-empty-');
|
||||
const dbPath = path.join(testDir, 'state.db');
|
||||
|
||||
try {
|
||||
const store = await createStateStore({ dbPath });
|
||||
const status = store.getStatus({ activeLimit: 1, recentSkillRunLimit: 1, pendingLimit: 1 });
|
||||
const missingDetail = store.getSessionDetail('missing-session');
|
||||
store.close();
|
||||
|
||||
assert.strictEqual(missingDetail, null);
|
||||
assert.strictEqual(status.activeSessions.activeCount, 0);
|
||||
assert.deepStrictEqual(status.activeSessions.sessions, []);
|
||||
assert.strictEqual(status.skillRuns.summary.totalCount, 0);
|
||||
assert.strictEqual(status.skillRuns.summary.knownCount, 0);
|
||||
assert.strictEqual(status.skillRuns.summary.successRate, null);
|
||||
assert.strictEqual(status.skillRuns.summary.failureRate, null);
|
||||
assert.strictEqual(status.installHealth.status, 'missing');
|
||||
assert.strictEqual(status.installHealth.totalCount, 0);
|
||||
assert.deepStrictEqual(status.installHealth.installations, []);
|
||||
assert.strictEqual(status.governance.pendingCount, 0);
|
||||
assert.deepStrictEqual(status.governance.events, []);
|
||||
} finally {
|
||||
cleanupTempDir(testDir);
|
||||
}
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('normalizes default optional fields and reports warning install health', async () => {
|
||||
const testDir = createTempDir('ecc-state-defaults-');
|
||||
const dbPath = path.join(testDir, 'state.db');
|
||||
|
||||
try {
|
||||
const store = await createStateStore({ dbPath });
|
||||
const session = store.upsertSession({
|
||||
id: 'session-defaults',
|
||||
adapterId: 'manual',
|
||||
harness: 'codex',
|
||||
state: 'running',
|
||||
});
|
||||
|
||||
store.insertSkillRun({
|
||||
id: 'skill-run-defaults',
|
||||
skillId: 'planner',
|
||||
skillVersion: '1.0.0',
|
||||
sessionId: 'session-defaults',
|
||||
taskDescription: 'Exercise defaults',
|
||||
outcome: 'passed',
|
||||
});
|
||||
|
||||
const version = store.upsertSkillVersion({
|
||||
skillId: 'planner',
|
||||
version: '1.0.0',
|
||||
contentHash: 'hash-defaults',
|
||||
});
|
||||
|
||||
store.insertDecision({
|
||||
id: 'decision-defaults',
|
||||
sessionId: 'session-defaults',
|
||||
title: 'Use defaults',
|
||||
rationale: 'Optional decision fields should normalize',
|
||||
status: 'active',
|
||||
});
|
||||
|
||||
const installState = store.upsertInstallState({
|
||||
targetId: 'claude-project',
|
||||
targetRoot: path.join(testDir, '.claude'),
|
||||
});
|
||||
|
||||
store.insertGovernanceEvent({
|
||||
id: 'gov-defaults',
|
||||
eventType: 'manual-review',
|
||||
});
|
||||
|
||||
const detail = store.getSessionDetail('session-defaults');
|
||||
const status = store.getStatus();
|
||||
store.close();
|
||||
|
||||
assert.strictEqual(session.repoRoot, null);
|
||||
assert.strictEqual(session.startedAt, null);
|
||||
assert.strictEqual(session.endedAt, null);
|
||||
assert.deepStrictEqual(session.snapshot, {});
|
||||
assert.strictEqual(session.workerCount, 0);
|
||||
|
||||
assert.strictEqual(version.amendmentReason, null);
|
||||
assert.strictEqual(version.promotedAt, null);
|
||||
assert.strictEqual(version.rolledBackAt, null);
|
||||
|
||||
assert.deepStrictEqual(detail.workers, []);
|
||||
assert.strictEqual(detail.skillRuns[0].failureReason, null);
|
||||
assert.strictEqual(detail.skillRuns[0].tokensUsed, null);
|
||||
assert.strictEqual(detail.skillRuns[0].durationMs, null);
|
||||
assert.strictEqual(detail.skillRuns[0].userFeedback, null);
|
||||
assert.deepStrictEqual(detail.decisions[0].alternatives, []);
|
||||
assert.strictEqual(detail.decisions[0].supersedes, null);
|
||||
|
||||
assert.strictEqual(installState.profile, null);
|
||||
assert.deepStrictEqual(installState.modules, []);
|
||||
assert.deepStrictEqual(installState.operations, []);
|
||||
assert.strictEqual(installState.sourceVersion, null);
|
||||
|
||||
assert.strictEqual(status.activeSessions.activeCount, 1);
|
||||
assert.strictEqual(status.skillRuns.summary.successRate, 100);
|
||||
assert.strictEqual(status.installHealth.status, 'warning');
|
||||
assert.strictEqual(status.installHealth.warningCount, 1);
|
||||
assert.strictEqual(status.installHealth.installations[0].status, 'warning');
|
||||
assert.strictEqual(status.governance.pendingCount, 1);
|
||||
assert.strictEqual(status.governance.events[0].payload, null);
|
||||
assert.strictEqual(status.governance.events[0].sessionId, null);
|
||||
assert.strictEqual(status.governance.events[0].resolution, null);
|
||||
} finally {
|
||||
cleanupTempDir(testDir);
|
||||
}
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('validates entity payloads before writing to the database', async () => {
|
||||
const testDir = createTempDir('ecc-state-db-');
|
||||
const dbPath = path.join(testDir, 'state.db');
|
||||
@@ -404,6 +518,40 @@ async function runTests() {
|
||||
}
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('rejects invalid limits and unserializable JSON payloads', async () => {
|
||||
const testDir = createTempDir('ecc-state-errors-');
|
||||
const dbPath = path.join(testDir, 'state.db');
|
||||
|
||||
try {
|
||||
const store = await createStateStore({ dbPath });
|
||||
const circularSnapshot = {};
|
||||
circularSnapshot.self = circularSnapshot;
|
||||
|
||||
assert.throws(
|
||||
() => store.listRecentSessions({ limit: 0 }),
|
||||
/Invalid limit: 0/
|
||||
);
|
||||
assert.throws(
|
||||
() => store.getStatus({ activeLimit: 'many' }),
|
||||
/Invalid limit: many/
|
||||
);
|
||||
assert.throws(
|
||||
() => store.upsertSession({
|
||||
id: 'session-circular',
|
||||
adapterId: 'manual',
|
||||
harness: 'codex',
|
||||
state: 'active',
|
||||
snapshot: circularSnapshot,
|
||||
}),
|
||||
/Failed to serialize session\.snapshot/
|
||||
);
|
||||
|
||||
store.close();
|
||||
} finally {
|
||||
cleanupTempDir(testDir);
|
||||
}
|
||||
})) passed += 1; else failed += 1;
|
||||
|
||||
if (await test('status CLI supports human-readable and --json output', async () => {
|
||||
const testDir = createTempDir('ecc-state-cli-');
|
||||
const dbPath = path.join(testDir, 'state.db');
|
||||
|
||||
@@ -299,7 +299,7 @@ test('executePlan rolls back partial setup when orchestration fails mid-run', ()
|
||||
workerName: 'Docs',
|
||||
workerSlug: 'docs',
|
||||
worktreePath: '/tmp/ecc-rollback-docs',
|
||||
seedPaths: ['commands/orchestrate.md'],
|
||||
seedPaths: ['commands/multi-workflow.md'],
|
||||
gitArgs: ['worktree', 'add', '-b', 'orchestrator-rollback-test-docs', '/tmp/ecc-rollback-docs', 'HEAD'],
|
||||
launchCommand: 'echo run'
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user