From 2349e2173110a074c3d46fef244ca09776d5e33c Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Thu, 12 Mar 2026 08:50:24 -0700 Subject: [PATCH 1/4] feat: add orchestration workflows and harness skills --- .agents/skills/claude-api/SKILL.md | 333 ++++++++++++ .agents/skills/claude-api/agents/openai.yaml | 7 + .agents/skills/crosspost/SKILL.md | 187 +++++++ .agents/skills/crosspost/agents/openai.yaml | 7 + .agents/skills/deep-research/SKILL.md | 155 ++++++ .../skills/deep-research/agents/openai.yaml | 7 + .agents/skills/dmux-workflows/SKILL.md | 144 +++++ .../skills/dmux-workflows/agents/openai.yaml | 7 + .agents/skills/exa-search/SKILL.md | 165 ++++++ .agents/skills/exa-search/agents/openai.yaml | 7 + .agents/skills/fal-ai-media/SKILL.md | 277 ++++++++++ .../skills/fal-ai-media/agents/openai.yaml | 7 + .agents/skills/video-editing/SKILL.md | 308 +++++++++++ .../skills/video-editing/agents/openai.yaml | 7 + .agents/skills/x-api/SKILL.md | 211 ++++++++ .agents/skills/x-api/agents/openai.yaml | 7 + .codex/AGENTS.md | 11 +- .codex/config.toml | 47 +- .gitignore | 1 + commands/multi-workflow.md | 8 + commands/orchestrate.md | 55 ++ commands/sessions.md | 34 +- mcp-configs/mcp-servers.json | 48 +- package.json | 6 + rules/common/development-workflow.md | 3 +- scripts/hooks/session-end.js | 113 ++-- scripts/lib/orchestration-session.js | 295 +++++++++++ scripts/lib/session-manager.js | 19 + scripts/lib/tmux-worktree-orchestrator.js | 491 ++++++++++++++++++ scripts/orchestrate-codex-worker.sh | 92 ++++ scripts/orchestrate-worktrees.js | 108 ++++ scripts/orchestration-status.js | 59 +++ skills/claude-api/SKILL.md | 337 ++++++++++++ skills/configure-ecc/SKILL.md | 38 +- skills/crosspost/SKILL.md | 188 +++++++ skills/deep-research/SKILL.md | 155 ++++++ skills/dmux-workflows/SKILL.md | 191 +++++++ skills/exa-search/SKILL.md | 170 ++++++ skills/fal-ai-media/SKILL.md | 284 ++++++++++ skills/strategic-compact/SKILL.md | 28 + skills/video-editing/SKILL.md | 310 +++++++++++ 
skills/x-api/SKILL.md | 208 ++++++++ tests/hooks/hooks.test.js | 110 +++- tests/lib/orchestration-session.test.js | 214 ++++++++ tests/lib/session-manager.test.js | 6 + tests/lib/tmux-worktree-orchestrator.test.js | 233 +++++++++ 46 files changed, 5618 insertions(+), 80 deletions(-) create mode 100644 .agents/skills/claude-api/SKILL.md create mode 100644 .agents/skills/claude-api/agents/openai.yaml create mode 100644 .agents/skills/crosspost/SKILL.md create mode 100644 .agents/skills/crosspost/agents/openai.yaml create mode 100644 .agents/skills/deep-research/SKILL.md create mode 100644 .agents/skills/deep-research/agents/openai.yaml create mode 100644 .agents/skills/dmux-workflows/SKILL.md create mode 100644 .agents/skills/dmux-workflows/agents/openai.yaml create mode 100644 .agents/skills/exa-search/SKILL.md create mode 100644 .agents/skills/exa-search/agents/openai.yaml create mode 100644 .agents/skills/fal-ai-media/SKILL.md create mode 100644 .agents/skills/fal-ai-media/agents/openai.yaml create mode 100644 .agents/skills/video-editing/SKILL.md create mode 100644 .agents/skills/video-editing/agents/openai.yaml create mode 100644 .agents/skills/x-api/SKILL.md create mode 100644 .agents/skills/x-api/agents/openai.yaml create mode 100644 scripts/lib/orchestration-session.js create mode 100644 scripts/lib/tmux-worktree-orchestrator.js create mode 100755 scripts/orchestrate-codex-worker.sh create mode 100644 scripts/orchestrate-worktrees.js create mode 100644 scripts/orchestration-status.js create mode 100644 skills/claude-api/SKILL.md create mode 100644 skills/crosspost/SKILL.md create mode 100644 skills/deep-research/SKILL.md create mode 100644 skills/dmux-workflows/SKILL.md create mode 100644 skills/exa-search/SKILL.md create mode 100644 skills/fal-ai-media/SKILL.md create mode 100644 skills/video-editing/SKILL.md create mode 100644 skills/x-api/SKILL.md create mode 100644 tests/lib/orchestration-session.test.js create mode 100644 
tests/lib/tmux-worktree-orchestrator.test.js diff --git a/.agents/skills/claude-api/SKILL.md b/.agents/skills/claude-api/SKILL.md new file mode 100644 index 00000000..b42a3e35 --- /dev/null +++ b/.agents/skills/claude-api/SKILL.md @@ -0,0 +1,333 @@ +--- +name: claude-api +description: Anthropic Claude API patterns for Python and TypeScript. Covers Messages API, streaming, tool use, vision, extended thinking, batches, prompt caching, and Claude Agent SDK. Use when building applications with the Claude API or Anthropic SDKs. +origin: ECC +--- + +# Claude API + +Build applications with the Anthropic Claude API and SDKs. + +## When to Activate + +- Building applications that call the Claude API +- Code imports `anthropic` (Python) or `@anthropic-ai/sdk` (TypeScript) +- User asks about Claude API patterns, tool use, streaming, or vision +- Implementing agent workflows with Claude Agent SDK +- Optimizing API costs, token usage, or latency + +## Model Selection + +| Model | ID | Best For | +|-------|-----|----------| +| Opus 4.6 | `claude-opus-4-6` | Complex reasoning, architecture, research | +| Sonnet 4.6 | `claude-sonnet-4-6` | Balanced coding, most development tasks | +| Haiku 4.5 | `claude-haiku-4-5-20251001` | Fast responses, high-volume, cost-sensitive | + +Default to Sonnet 4.6 unless the task requires deep reasoning (Opus) or speed/cost optimization (Haiku). 
+ +## Python SDK + +### Installation + +```bash +pip install anthropic +``` + +### Basic Message + +```python +import anthropic + +client = anthropic.Anthropic() # reads ANTHROPIC_API_KEY from env + +message = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=1024, + messages=[ + {"role": "user", "content": "Explain async/await in Python"} + ] +) +print(message.content[0].text) +``` + +### Streaming + +```python +with client.messages.stream( + model="claude-sonnet-4-6", + max_tokens=1024, + messages=[{"role": "user", "content": "Write a haiku about coding"}] +) as stream: + for text in stream.text_stream: + print(text, end="", flush=True) +``` + +### System Prompt + +```python +message = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=1024, + system="You are a senior Python developer. Be concise.", + messages=[{"role": "user", "content": "Review this function"}] +) +``` + +## TypeScript SDK + +### Installation + +```bash +npm install @anthropic-ai/sdk +``` + +### Basic Message + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); // reads ANTHROPIC_API_KEY from env + +const message = await client.messages.create({ + model: "claude-sonnet-4-6", + max_tokens: 1024, + messages: [ + { role: "user", content: "Explain async/await in TypeScript" } + ], +}); +console.log(message.content[0].text); +``` + +### Streaming + +```typescript +const stream = client.messages.stream({ + model: "claude-sonnet-4-6", + max_tokens: 1024, + messages: [{ role: "user", content: "Write a haiku" }], +}); + +for await (const event of stream) { + if (event.type === "content_block_delta" && event.delta.type === "text_delta") { + process.stdout.write(event.delta.text); + } +} +``` + +## Tool Use + +Define tools and let Claude call them: + +```python +tools = [ + { + "name": "get_weather", + "description": "Get current weather for a location", + "input_schema": { + "type": "object", + "properties": { + "location": 
{"type": "string", "description": "City name"}, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} + }, + "required": ["location"] + } + } +] + +message = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=1024, + tools=tools, + messages=[{"role": "user", "content": "What's the weather in SF?"}] +) + +# Handle tool use response +for block in message.content: + if block.type == "tool_use": + # Execute the tool with block.input + result = get_weather(**block.input) + # Send result back + follow_up = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=1024, + tools=tools, + messages=[ + {"role": "user", "content": "What's the weather in SF?"}, + {"role": "assistant", "content": message.content}, + {"role": "user", "content": [ + {"type": "tool_result", "tool_use_id": block.id, "content": str(result)} + ]} + ] + ) +``` + +## Vision + +Send images for analysis: + +```python +import base64 + +with open("diagram.png", "rb") as f: + image_data = base64.standard_b64encode(f.read()).decode("utf-8") + +message = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + {"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": image_data}}, + {"type": "text", "text": "Describe this diagram"} + ] + }] +) +``` + +## Extended Thinking + +For complex reasoning tasks: + +```python +message = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=16000, + thinking={ + "type": "enabled", + "budget_tokens": 10000 + }, + messages=[{"role": "user", "content": "Solve this math problem step by step..."}] +) + +for block in message.content: + if block.type == "thinking": + print(f"Thinking: {block.thinking}") + elif block.type == "text": + print(f"Answer: {block.text}") +``` + +## Prompt Caching + +Cache large system prompts or context to reduce costs: + +```python +message = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=1024, + 
system=[ + {"type": "text", "text": large_system_prompt, "cache_control": {"type": "ephemeral"}} + ], + messages=[{"role": "user", "content": "Question about the cached context"}] +) +# Check cache usage +print(f"Cache read: {message.usage.cache_read_input_tokens}") +print(f"Cache creation: {message.usage.cache_creation_input_tokens}") +``` + +## Batches API + +Process large volumes asynchronously at 50% cost reduction: + +```python +batch = client.messages.batches.create( + requests=[ + { + "custom_id": f"request-{i}", + "params": { + "model": "claude-sonnet-4-6", + "max_tokens": 1024, + "messages": [{"role": "user", "content": prompt}] + } + } + for i, prompt in enumerate(prompts) + ] +) + +# Poll for completion +while True: + status = client.messages.batches.retrieve(batch.id) + if status.processing_status == "ended": + break + time.sleep(30) + +# Get results +for result in client.messages.batches.results(batch.id): + print(result.result.message.content[0].text) +``` + +## Claude Agent SDK + +Build multi-step agents: + +```python +# Note: Agent SDK API surface may change — check official docs +import anthropic + +# Define tools as functions +tools = [{ + "name": "search_codebase", + "description": "Search the codebase for relevant code", + "input_schema": { + "type": "object", + "properties": {"query": {"type": "string"}}, + "required": ["query"] + } +}] + +# Run an agentic loop with tool use +client = anthropic.Anthropic() +messages = [{"role": "user", "content": "Review the auth module for security issues"}] + +while True: + response = client.messages.create( + model="claude-sonnet-4-6", + max_tokens=4096, + tools=tools, + messages=messages, + ) + if response.stop_reason == "end_turn": + break + # Handle tool calls and continue the loop + messages.append({"role": "assistant", "content": response.content}) + # ... 
execute tools and append tool_result messages +``` + +## Cost Optimization + +| Strategy | Savings | When to Use | +|----------|---------|-------------| +| Prompt caching | Up to 90% on cached tokens | Repeated system prompts or context | +| Batches API | 50% | Non-time-sensitive bulk processing | +| Haiku instead of Sonnet | ~75% | Simple tasks, classification, extraction | +| Shorter max_tokens | Variable | When you know output will be short | +| Streaming | None (same cost) | Better UX, same price | + +## Error Handling + +```python +from anthropic import APIError, RateLimitError, APIConnectionError + +try: + message = client.messages.create(...) +except RateLimitError: + # Back off and retry + time.sleep(60) +except APIConnectionError: + # Network issue, retry with backoff + pass +except APIError as e: + print(f"API error {e.status_code}: {e.message}") +``` + +## Environment Setup + +```bash +# Required +export ANTHROPIC_API_KEY="your-api-key-here" + +# Optional: set default model +export ANTHROPIC_MODEL="claude-sonnet-4-6" +``` + +Never hardcode API keys. Always use environment variables. diff --git a/.agents/skills/claude-api/agents/openai.yaml b/.agents/skills/claude-api/agents/openai.yaml new file mode 100644 index 00000000..9db910fc --- /dev/null +++ b/.agents/skills/claude-api/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "Claude API" + short_description: "Anthropic Claude API patterns and SDKs" + brand_color: "#D97706" + default_prompt: "Build applications with the Claude API using Messages, tool use, streaming, and Agent SDK" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/crosspost/SKILL.md b/.agents/skills/crosspost/SKILL.md new file mode 100644 index 00000000..4d235933 --- /dev/null +++ b/.agents/skills/crosspost/SKILL.md @@ -0,0 +1,187 @@ +--- +name: crosspost +description: Multi-platform content distribution across X, LinkedIn, Threads, and Bluesky. Adapts content per platform using content-engine patterns. 
Never posts identical content cross-platform. Use when the user wants to distribute content across social platforms. +origin: ECC +--- + +# Crosspost + +Distribute content across multiple social platforms with platform-native adaptation. + +## When to Activate + +- User wants to post content to multiple platforms +- Publishing announcements, launches, or updates across social media +- Repurposing a post from one platform to others +- User says "crosspost", "post everywhere", "share on all platforms", or "distribute this" + +## Core Rules + +1. **Never post identical content cross-platform.** Each platform gets a native adaptation. +2. **Primary platform first.** Post to the main platform, then adapt for others. +3. **Respect platform conventions.** Length limits, formatting, link handling all differ. +4. **One idea per post.** If the source content has multiple ideas, split across posts. +5. **Attribution matters.** If crossposting someone else's content, credit the source. + +## Platform Specifications + +| Platform | Max Length | Link Handling | Hashtags | Media | +|----------|-----------|---------------|----------|-------| +| X | 280 chars (4000 for Premium) | Counted in length | Minimal (1-2 max) | Images, video, GIFs | +| LinkedIn | 3000 chars | Not counted in length | 3-5 relevant | Images, video, docs, carousels | +| Threads | 500 chars | Separate link attachment | None typical | Images, video | +| Bluesky | 300 chars | Via facets (rich text) | None (use feeds) | Images | + +## Workflow + +### Step 1: Create Source Content + +Start with the core idea. 
Use `content-engine` skill for high-quality drafts: +- Identify the single core message +- Determine the primary platform (where the audience is biggest) +- Draft the primary platform version first + +### Step 2: Identify Target Platforms + +Ask the user or determine from context: +- Which platforms to target +- Priority order (primary gets the best version) +- Any platform-specific requirements (e.g., LinkedIn needs professional tone) + +### Step 3: Adapt Per Platform + +For each target platform, transform the content: + +**X adaptation:** +- Open with a hook, not a summary +- Cut to the core insight fast +- Keep links out of main body when possible +- Use thread format for longer content + +**LinkedIn adaptation:** +- Strong first line (visible before "see more") +- Short paragraphs with line breaks +- Frame around lessons, results, or professional takeaways +- More explicit context than X (LinkedIn audience needs framing) + +**Threads adaptation:** +- Conversational, casual tone +- Shorter than LinkedIn, less compressed than X +- Visual-first if possible + +**Bluesky adaptation:** +- Direct and concise (300 char limit) +- Community-oriented tone +- Use feeds/lists for topic targeting instead of hashtags + +### Step 4: Post Primary Platform + +Post to the primary platform first: +- Use `x-api` skill for X +- Use platform-specific APIs or tools for others +- Capture the post URL for cross-referencing + +### Step 5: Post to Secondary Platforms + +Post adapted versions to remaining platforms: +- Stagger timing (not all at once — 30-60 min gaps) +- Include cross-platform references where appropriate ("longer thread on X" etc.) + +## Content Adaptation Examples + +### Source: Product Launch + +**X version:** +``` +We just shipped [feature]. + +[One specific thing it does that's impressive] + +[Link] +``` + +**LinkedIn version:** +``` +Excited to share: we just launched [feature] at [Company]. 
+ +Here's why it matters: + +[2-3 short paragraphs with context] + +[Takeaway for the audience] + +[Link] +``` + +**Threads version:** +``` +just shipped something cool — [feature] + +[casual explanation of what it does] + +link in bio +``` + +### Source: Technical Insight + +**X version:** +``` +TIL: [specific technical insight] + +[Why it matters in one sentence] +``` + +**LinkedIn version:** +``` +A pattern I've been using that's made a real difference: + +[Technical insight with professional framing] + +[How it applies to teams/orgs] + +#relevantHashtag +``` + +## API Integration + +### Batch Crossposting Service (Example Pattern) +If using a crossposting service (e.g., Postbridge, Buffer, or a custom API), the pattern looks like: + +```python +import requests + +resp = requests.post( + "https://api.postbridge.io/v1/posts", + headers={"Authorization": f"Bearer {os.environ['POSTBRIDGE_API_KEY']}"}, + json={ + "platforms": ["twitter", "linkedin", "threads"], + "content": { + "twitter": {"text": x_version}, + "linkedin": {"text": linkedin_version}, + "threads": {"text": threads_version} + } + } +) +``` + +### Manual Posting +Without Postbridge, post to each platform using its native API: +- X: Use `x-api` skill patterns +- LinkedIn: LinkedIn API v2 with OAuth 2.0 +- Threads: Threads API (Meta) +- Bluesky: AT Protocol API + +## Quality Gate + +Before posting: +- [ ] Each platform version reads naturally for that platform +- [ ] No identical content across platforms +- [ ] Length limits respected +- [ ] Links work and are placed appropriately +- [ ] Tone matches platform conventions +- [ ] Media is sized correctly for each platform + +## Related Skills + +- `content-engine` — Generate platform-native content +- `x-api` — X/Twitter API integration diff --git a/.agents/skills/crosspost/agents/openai.yaml b/.agents/skills/crosspost/agents/openai.yaml new file mode 100644 index 00000000..c2534142 --- /dev/null +++ b/.agents/skills/crosspost/agents/openai.yaml @@ -0,0 
+1,7 @@ +interface: + display_name: "Crosspost" + short_description: "Multi-platform content distribution with native adaptation" + brand_color: "#EC4899" + default_prompt: "Distribute content across X, LinkedIn, Threads, and Bluesky with platform-native adaptation" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/deep-research/SKILL.md b/.agents/skills/deep-research/SKILL.md new file mode 100644 index 00000000..5a412b7e --- /dev/null +++ b/.agents/skills/deep-research/SKILL.md @@ -0,0 +1,155 @@ +--- +name: deep-research +description: Multi-source deep research using firecrawl and exa MCPs. Searches the web, synthesizes findings, and delivers cited reports with source attribution. Use when the user wants thorough research on any topic with evidence and citations. +origin: ECC +--- + +# Deep Research + +Produce thorough, cited research reports from multiple web sources using firecrawl and exa MCP tools. + +## When to Activate + +- User asks to research any topic in depth +- Competitive analysis, technology evaluation, or market sizing +- Due diligence on companies, investors, or technologies +- Any question requiring synthesis from multiple sources +- User says "research", "deep dive", "investigate", or "what's the current state of" + +## MCP Requirements + +At least one of: +- **firecrawl** — `firecrawl_search`, `firecrawl_scrape`, `firecrawl_crawl` +- **exa** — `web_search_exa`, `web_search_advanced_exa`, `crawling_exa` + +Both together give the best coverage. Configure in `~/.claude.json` or `~/.codex/config.toml`. + +## Workflow + +### Step 1: Understand the Goal + +Ask 1-2 quick clarifying questions: +- "What's your goal — learning, making a decision, or writing something?" +- "Any specific angle or depth you want?" + +If the user says "just research it" — skip ahead with reasonable defaults. + +### Step 2: Plan the Research + +Break the topic into 3-5 research sub-questions. 
Example: +- Topic: "Impact of AI on healthcare" + - What are the main AI applications in healthcare today? + - What clinical outcomes have been measured? + - What are the regulatory challenges? + - What companies are leading this space? + - What's the market size and growth trajectory? + +### Step 3: Execute Multi-Source Search + +For EACH sub-question, search using available MCP tools: + +**With firecrawl:** +``` +firecrawl_search(query: "", limit: 8) +``` + +**With exa:** +``` +web_search_exa(query: "", numResults: 8) +web_search_advanced_exa(query: "", numResults: 5, startPublishedDate: "2025-01-01") +``` + +**Search strategy:** +- Use 2-3 different keyword variations per sub-question +- Mix general and news-focused queries +- Aim for 15-30 unique sources total +- Prioritize: academic, official, reputable news > blogs > forums + +### Step 4: Deep-Read Key Sources + +For the most promising URLs, fetch full content: + +**With firecrawl:** +``` +firecrawl_scrape(url: "") +``` + +**With exa:** +``` +crawling_exa(url: "", tokensNum: 5000) +``` + +Read 3-5 key sources in full for depth. Do not rely only on search snippets. + +### Step 5: Synthesize and Write Report + +Structure the report: + +```markdown +# [Topic]: Research Report +*Generated: [date] | Sources: [N] | Confidence: [High/Medium/Low]* + +## Executive Summary +[3-5 sentence overview of key findings] + +## 1. [First Major Theme] +[Findings with inline citations] +- Key point ([Source Name](url)) +- Supporting data ([Source Name](url)) + +## 2. [Second Major Theme] +... + +## 3. [Third Major Theme] +... + +## Key Takeaways +- [Actionable insight 1] +- [Actionable insight 2] +- [Actionable insight 3] + +## Sources +1. [Title](url) — [one-line summary] +2. ... + +## Methodology +Searched [N] queries across web and news. Analyzed [M] sources. 
+Sub-questions investigated: [list] +``` + +### Step 6: Deliver + +- **Short topics**: Post the full report in chat +- **Long reports**: Post the executive summary + key takeaways, save full report to a file + +## Parallel Research with Subagents + +For broad topics, use Claude Code's Task tool to parallelize: + +``` +Launch 3 research agents in parallel: +1. Agent 1: Research sub-questions 1-2 +2. Agent 2: Research sub-questions 3-4 +3. Agent 3: Research sub-question 5 + cross-cutting themes +``` + +Each agent searches, reads sources, and returns findings. The main session synthesizes into the final report. + +## Quality Rules + +1. **Every claim needs a source.** No unsourced assertions. +2. **Cross-reference.** If only one source says it, flag it as unverified. +3. **Recency matters.** Prefer sources from the last 12 months. +4. **Acknowledge gaps.** If you couldn't find good info on a sub-question, say so. +5. **No hallucination.** If you don't know, say "insufficient data found." +6. **Separate fact from inference.** Label estimates, projections, and opinions clearly. + +## Examples + +``` +"Research the current state of nuclear fusion energy" +"Deep dive into Rust vs Go for backend services in 2026" +"Research the best strategies for bootstrapping a SaaS business" +"What's happening with the US housing market right now?" 
+"Investigate the competitive landscape for AI code editors" +``` diff --git a/.agents/skills/deep-research/agents/openai.yaml b/.agents/skills/deep-research/agents/openai.yaml new file mode 100644 index 00000000..51ac12b1 --- /dev/null +++ b/.agents/skills/deep-research/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "Deep Research" + short_description: "Multi-source deep research with firecrawl and exa MCPs" + brand_color: "#6366F1" + default_prompt: "Research the given topic using firecrawl and exa, produce a cited report" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/dmux-workflows/SKILL.md b/.agents/skills/dmux-workflows/SKILL.md new file mode 100644 index 00000000..813cc91c --- /dev/null +++ b/.agents/skills/dmux-workflows/SKILL.md @@ -0,0 +1,144 @@ +--- +name: dmux-workflows +description: Multi-agent orchestration using dmux (tmux pane manager for AI agents). Patterns for parallel agent workflows across Claude Code, Codex, OpenCode, and other harnesses. Use when running multiple agent sessions in parallel or coordinating multi-agent development workflows. +origin: ECC +--- + +# dmux Workflows + +Orchestrate parallel AI agent sessions using dmux, a tmux pane manager for agent harnesses. 
+ +## When to Activate + +- Running multiple agent sessions in parallel +- Coordinating work across Claude Code, Codex, and other harnesses +- Complex tasks that benefit from divide-and-conquer parallelism +- User says "run in parallel", "split this work", "use dmux", or "multi-agent" + +## What is dmux + +dmux is a tmux-based orchestration tool that manages AI agent panes: +- Press `n` to create a new pane with a prompt +- Press `m` to merge pane output back to the main session +- Supports: Claude Code, Codex, OpenCode, Cline, Gemini, Qwen + +**Install:** `npm install -g dmux` or see [github.com/standardagents/dmux](https://github.com/standardagents/dmux) + +## Quick Start + +```bash +# Start dmux session +dmux + +# Create agent panes (press 'n' in dmux, then type prompt) +# Pane 1: "Implement the auth middleware in src/auth/" +# Pane 2: "Write tests for the user service" +# Pane 3: "Update API documentation" + +# Each pane runs its own agent session +# Press 'm' to merge results back +``` + +## Workflow Patterns + +### Pattern 1: Research + Implement + +Split research and implementation into parallel tracks: + +``` +Pane 1 (Research): "Research best practices for rate limiting in Node.js. + Check current libraries, compare approaches, and write findings to + /tmp/rate-limit-research.md" + +Pane 2 (Implement): "Implement rate limiting middleware for our Express API. + Start with a basic token bucket, we'll refine after research completes." 
+ +# After Pane 1 completes, merge findings into Pane 2's context +``` + +### Pattern 2: Multi-File Feature + +Parallelize work across independent files: + +``` +Pane 1: "Create the database schema and migrations for the billing feature" +Pane 2: "Build the billing API endpoints in src/api/billing/" +Pane 3: "Create the billing dashboard UI components" + +# Merge all, then do integration in main pane +``` + +### Pattern 3: Test + Fix Loop + +Run tests in one pane, fix in another: + +``` +Pane 1 (Watcher): "Run the test suite in watch mode. When tests fail, + summarize the failures." + +Pane 2 (Fixer): "Fix failing tests based on the error output from pane 1" +``` + +### Pattern 4: Cross-Harness + +Use different AI tools for different tasks: + +``` +Pane 1 (Claude Code): "Review the security of the auth module" +Pane 2 (Codex): "Refactor the utility functions for performance" +Pane 3 (Claude Code): "Write E2E tests for the checkout flow" +``` + +### Pattern 5: Code Review Pipeline + +Parallel review perspectives: + +``` +Pane 1: "Review src/api/ for security vulnerabilities" +Pane 2: "Review src/api/ for performance issues" +Pane 3: "Review src/api/ for test coverage gaps" + +# Merge all reviews into a single report +``` + +## Best Practices + +1. **Independent tasks only.** Don't parallelize tasks that depend on each other's output. +2. **Clear boundaries.** Each pane should work on distinct files or concerns. +3. **Merge strategically.** Review pane output before merging to avoid conflicts. +4. **Use git worktrees.** For file-conflict-prone work, use separate worktrees per pane. +5. **Resource awareness.** Each pane uses API tokens — keep total panes under 5-6. 
+ +## Git Worktree Integration + +For tasks that touch overlapping files: + +```bash +# Create worktrees for isolation +git worktree add ../feature-auth feat/auth +git worktree add ../feature-billing feat/billing + +# Run agents in separate worktrees +# Pane 1: cd ../feature-auth && claude +# Pane 2: cd ../feature-billing && claude + +# Merge branches when done +git merge feat/auth +git merge feat/billing +``` + +## Complementary Tools + +| Tool | What It Does | When to Use | +|------|-------------|-------------| +| **dmux** | tmux pane management for agents | Parallel agent sessions | +| **Superset** | Terminal IDE for 10+ parallel agents | Large-scale orchestration | +| **Claude Code Task tool** | In-process subagent spawning | Programmatic parallelism within a session | +| **Codex multi-agent** | Built-in agent roles | Codex-specific parallel work | + +## Troubleshooting + +- **Pane not responding:** Check if the agent session is waiting for input. Use `m` to read output. +- **Merge conflicts:** Use git worktrees to isolate file changes per pane. +- **High token usage:** Reduce number of parallel panes. Each pane is a full agent session. +- **tmux not found:** Install with `brew install tmux` (macOS) or `apt install tmux` (Linux). 
diff --git a/.agents/skills/dmux-workflows/agents/openai.yaml b/.agents/skills/dmux-workflows/agents/openai.yaml new file mode 100644 index 00000000..8147fea8 --- /dev/null +++ b/.agents/skills/dmux-workflows/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "dmux Workflows" + short_description: "Multi-agent orchestration with dmux" + brand_color: "#14B8A6" + default_prompt: "Orchestrate parallel agent sessions using dmux pane manager" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/exa-search/SKILL.md b/.agents/skills/exa-search/SKILL.md new file mode 100644 index 00000000..c386b2c6 --- /dev/null +++ b/.agents/skills/exa-search/SKILL.md @@ -0,0 +1,165 @@ +--- +name: exa-search +description: Neural search via Exa MCP for web, code, and company research. Use when the user needs web search, code examples, company intel, people lookup, or AI-powered deep research with Exa's neural search engine. +origin: ECC +--- + +# Exa Search + +Neural search for web content, code, companies, and people via the Exa MCP server. + +## When to Activate + +- User needs current web information or news +- Searching for code examples, API docs, or technical references +- Researching companies, competitors, or market players +- Finding professional profiles or people in a domain +- Running background research for any development task +- User says "search for", "look up", "find", or "what's the latest on" + +## MCP Requirement + +Exa MCP server must be configured. Add to `~/.claude.json`: + +```json +"exa-web-search": { + "command": "npx", + "args": ["-y", "exa-mcp-server"], + "env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" } +} +``` + +Get an API key at [exa.ai](https://exa.ai). + +## Core Tools + +### web_search_exa +General web search for current information, news, or facts. 
+ +``` +web_search_exa(query: "latest AI developments 2026", numResults: 5) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `query` | string | required | Search query | +| `numResults` | number | 8 | Number of results | + +### web_search_advanced_exa +Filtered search with domain and date constraints. + +``` +web_search_advanced_exa( + query: "React Server Components best practices", + numResults: 5, + includeDomains: ["github.com", "react.dev"], + startPublishedDate: "2025-01-01" +) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `query` | string | required | Search query | +| `numResults` | number | 8 | Number of results | +| `includeDomains` | string[] | none | Limit to specific domains | +| `excludeDomains` | string[] | none | Exclude specific domains | +| `startPublishedDate` | string | none | ISO date filter (start) | +| `endPublishedDate` | string | none | ISO date filter (end) | + +### get_code_context_exa +Find code examples and documentation from GitHub, Stack Overflow, and docs sites. + +``` +get_code_context_exa(query: "Python asyncio patterns", tokensNum: 3000) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `query` | string | required | Code or API search query | +| `tokensNum` | number | 5000 | Content tokens (1000-50000) | + +### company_research_exa +Research companies for business intelligence and news. + +``` +company_research_exa(companyName: "Anthropic", numResults: 5) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `companyName` | string | required | Company name | +| `numResults` | number | 5 | Number of results | + +### people_search_exa +Find professional profiles and bios. + +``` +people_search_exa(query: "AI safety researchers at Anthropic", numResults: 5) +``` + +### crawling_exa +Extract full page content from a URL. 
+ +``` +crawling_exa(url: "https://example.com/article", tokensNum: 5000) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `url` | string | required | URL to extract | +| `tokensNum` | number | 5000 | Content tokens | + +### deep_researcher_start / deep_researcher_check +Start an AI research agent that runs asynchronously. + +``` +# Start research +deep_researcher_start(query: "comprehensive analysis of AI code editors in 2026") + +# Check status (returns results when complete) +deep_researcher_check(researchId: "") +``` + +## Usage Patterns + +### Quick Lookup +``` +web_search_exa(query: "Node.js 22 new features", numResults: 3) +``` + +### Code Research +``` +get_code_context_exa(query: "Rust error handling patterns Result type", tokensNum: 3000) +``` + +### Company Due Diligence +``` +company_research_exa(companyName: "Vercel", numResults: 5) +web_search_advanced_exa(query: "Vercel funding valuation 2026", numResults: 3) +``` + +### Technical Deep Dive +``` +# Start async research +deep_researcher_start(query: "WebAssembly component model status and adoption") +# ... do other work ... 
+deep_researcher_check(researchId: "") +``` + +## Tips + +- Use `web_search_exa` for broad queries, `web_search_advanced_exa` for filtered results +- Lower `tokensNum` (1000-2000) for focused code snippets, higher (5000+) for comprehensive context +- Combine `company_research_exa` with `web_search_advanced_exa` for thorough company analysis +- Use `crawling_exa` to get full content from specific URLs found in search results +- `deep_researcher_start` is best for comprehensive topics that benefit from AI synthesis + +## Related Skills + +- `deep-research` — Full research workflow using firecrawl + exa together +- `market-research` — Business-oriented research with decision frameworks diff --git a/.agents/skills/exa-search/agents/openai.yaml b/.agents/skills/exa-search/agents/openai.yaml new file mode 100644 index 00000000..80d26bd3 --- /dev/null +++ b/.agents/skills/exa-search/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "Exa Search" + short_description: "Neural search via Exa MCP for web, code, and companies" + brand_color: "#8B5CF6" + default_prompt: "Search using Exa MCP tools for web content, code, or company research" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/fal-ai-media/SKILL.md b/.agents/skills/fal-ai-media/SKILL.md new file mode 100644 index 00000000..cfada8df --- /dev/null +++ b/.agents/skills/fal-ai-media/SKILL.md @@ -0,0 +1,277 @@ +--- +name: fal-ai-media +description: Unified media generation via fal.ai MCP — image, video, and audio. Covers text-to-image (Nano Banana), text/image-to-video (Seedance, Kling, Veo 3), text-to-speech (CSM-1B), and video-to-audio (ThinkSound). Use when the user wants to generate images, videos, or audio with AI. +origin: ECC +--- + +# fal.ai Media Generation + +Generate images, videos, and audio using fal.ai models via MCP. 
+ +## When to Activate + +- User wants to generate images from text prompts +- Creating videos from text or images +- Generating speech, music, or sound effects +- Any media generation task +- User says "generate image", "create video", "text to speech", "make a thumbnail", or similar + +## MCP Requirement + +fal.ai MCP server must be configured. Add to `~/.claude.json`: + +```json +"fal-ai": { + "command": "npx", + "args": ["-y", "fal-ai-mcp-server"], + "env": { "FAL_KEY": "YOUR_FAL_KEY_HERE" } +} +``` + +Get an API key at [fal.ai](https://fal.ai). + +## MCP Tools + +The fal.ai MCP provides these tools: +- `search` — Find available models by keyword +- `find` — Get model details and parameters +- `generate` — Run a model with parameters +- `result` — Check async generation status +- `status` — Check job status +- `cancel` — Cancel a running job +- `estimate_cost` — Estimate generation cost +- `models` — List popular models +- `upload` — Upload files for use as inputs + +--- + +## Image Generation + +### Nano Banana 2 (Fast) +Best for: quick iterations, drafts, text-to-image, image editing. + +``` +generate( + model_name: "fal-ai/nano-banana-2", + input: { + "prompt": "a futuristic cityscape at sunset, cyberpunk style", + "image_size": "landscape_16_9", + "num_images": 1, + "seed": 42 + } +) +``` + +### Nano Banana Pro (High Fidelity) +Best for: production images, realism, typography, detailed prompts. 
+ +``` +generate( + model_name: "fal-ai/nano-banana-pro", + input: { + "prompt": "professional product photo of wireless headphones on marble surface, studio lighting", + "image_size": "square", + "num_images": 1, + "guidance_scale": 7.5 + } +) +``` + +### Common Image Parameters + +| Param | Type | Options | Notes | +|-------|------|---------|-------| +| `prompt` | string | required | Describe what you want | +| `image_size` | string | `square`, `portrait_4_3`, `landscape_16_9`, `portrait_16_9`, `landscape_4_3` | Aspect ratio | +| `num_images` | number | 1-4 | How many to generate | +| `seed` | number | any integer | Reproducibility | +| `guidance_scale` | number | 1-20 | How closely to follow the prompt (higher = more literal) | + +### Image Editing +Use Nano Banana 2 with an input image for inpainting, outpainting, or style transfer: + +``` +# First upload the source image +upload(file_path: "/path/to/image.png") + +# Then generate with image input +generate( + model_name: "fal-ai/nano-banana-2", + input: { + "prompt": "same scene but in watercolor style", + "image_url": "", + "image_size": "landscape_16_9" + } +) +``` + +--- + +## Video Generation + +### Seedance 1.0 Pro (ByteDance) +Best for: text-to-video, image-to-video with high motion quality. + +``` +generate( + model_name: "fal-ai/seedance-1-0-pro", + input: { + "prompt": "a drone flyover of a mountain lake at golden hour, cinematic", + "duration": "5s", + "aspect_ratio": "16:9", + "seed": 42 + } +) +``` + +### Kling Video v3 Pro +Best for: text/image-to-video with native audio generation. + +``` +generate( + model_name: "fal-ai/kling-video/v3/pro", + input: { + "prompt": "ocean waves crashing on a rocky coast, dramatic clouds", + "duration": "5s", + "aspect_ratio": "16:9" + } +) +``` + +### Veo 3 (Google DeepMind) +Best for: video with generated sound, high visual quality. 
+ +``` +generate( + model_name: "fal-ai/veo-3", + input: { + "prompt": "a bustling Tokyo street market at night, neon signs, crowd noise", + "aspect_ratio": "16:9" + } +) +``` + +### Image-to-Video +Start from an existing image: + +``` +generate( + model_name: "fal-ai/seedance-1-0-pro", + input: { + "prompt": "camera slowly zooms out, gentle wind moves the trees", + "image_url": "", + "duration": "5s" + } +) +``` + +### Video Parameters + +| Param | Type | Options | Notes | +|-------|------|---------|-------| +| `prompt` | string | required | Describe the video | +| `duration` | string | `"5s"`, `"10s"` | Video length | +| `aspect_ratio` | string | `"16:9"`, `"9:16"`, `"1:1"` | Frame ratio | +| `seed` | number | any integer | Reproducibility | +| `image_url` | string | URL | Source image for image-to-video | + +--- + +## Audio Generation + +### CSM-1B (Conversational Speech) +Text-to-speech with natural, conversational quality. + +``` +generate( + model_name: "fal-ai/csm-1b", + input: { + "text": "Hello, welcome to the demo. Let me show you how this works.", + "speaker_id": 0 + } +) +``` + +### ThinkSound (Video-to-Audio) +Generate matching audio from video content. 
+ +``` +generate( + model_name: "fal-ai/thinksound", + input: { + "video_url": "", + "prompt": "ambient forest sounds with birds chirping" + } +) +``` + +### ElevenLabs (via API, no MCP) +For professional voice synthesis, use ElevenLabs directly: + +```python +import os +import requests + +resp = requests.post( + "https://api.elevenlabs.io/v1/text-to-speech/", + headers={ + "xi-api-key": os.environ["ELEVENLABS_API_KEY"], + "Content-Type": "application/json" + }, + json={ + "text": "Your text here", + "model_id": "eleven_turbo_v2_5", + "voice_settings": {"stability": 0.5, "similarity_boost": 0.75} + } +) +with open("output.mp3", "wb") as f: + f.write(resp.content) +``` + +### VideoDB Generative Audio +If VideoDB is configured, use its generative audio: + +```python +# Voice generation +audio = coll.generate_voice(text="Your narration here", voice="alloy") + +# Music generation +music = coll.generate_music(prompt="upbeat electronic background music", duration=30) + +# Sound effects +sfx = coll.generate_sound_effect(prompt="thunder crack followed by rain") +``` + +--- + +## Cost Estimation + +Before generating, check estimated cost: + +``` +estimate_cost(model_name: "fal-ai/nano-banana-pro", input: {...}) +``` + +## Model Discovery + +Find models for specific tasks: + +``` +search(query: "text to video") +find(model_name: "fal-ai/seedance-1-0-pro") +models() +``` + +## Tips + +- Use `seed` for reproducible results when iterating on prompts +- Start with lower-cost models (Nano Banana 2) for prompt iteration, then switch to Pro for finals +- For video, keep prompts descriptive but concise — focus on motion and scene +- Image-to-video produces more controlled results than pure text-to-video +- Check `estimate_cost` before running expensive video generations + +## Related Skills + +- `videodb` — Video processing, editing, and streaming +- `video-editing` — AI-powered video editing workflows +- `content-engine` — Content creation for social platforms diff --git 
a/.agents/skills/fal-ai-media/agents/openai.yaml b/.agents/skills/fal-ai-media/agents/openai.yaml new file mode 100644 index 00000000..d20f8ab0 --- /dev/null +++ b/.agents/skills/fal-ai-media/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "fal.ai Media" + short_description: "AI image, video, and audio generation via fal.ai" + brand_color: "#F43F5E" + default_prompt: "Generate images, videos, or audio using fal.ai models" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/video-editing/SKILL.md b/.agents/skills/video-editing/SKILL.md new file mode 100644 index 00000000..fb47d9ad --- /dev/null +++ b/.agents/skills/video-editing/SKILL.md @@ -0,0 +1,308 @@ +--- +name: video-editing +description: AI-assisted video editing workflows for cutting, structuring, and augmenting real footage. Covers the full pipeline from raw capture through FFmpeg, Remotion, ElevenLabs, fal.ai, and final polish in Descript or CapCut. Use when the user wants to edit video, cut footage, create vlogs, or build video content. +origin: ECC +--- + +# Video Editing + +AI-assisted editing for real footage. Not generation from prompts. Editing existing video fast. + +## When to Activate + +- User wants to edit, cut, or structure video footage +- Turning long recordings into short-form content +- Building vlogs, tutorials, or demo videos from raw capture +- Adding overlays, subtitles, music, or voiceover to existing video +- Reframing video for different platforms (YouTube, TikTok, Instagram) +- User says "edit video", "cut this footage", "make a vlog", or "video workflow" + +## Core Thesis + +AI video editing is useful when you stop asking it to create the whole video and start using it to compress, structure, and augment real footage. The value is not generation. The value is compression. 
+ +## The Pipeline + +``` +Screen Studio / raw footage + → Claude / Codex + → FFmpeg + → Remotion + → ElevenLabs / fal.ai + → Descript or CapCut +``` + +Each layer has a specific job. Do not skip layers. Do not try to make one tool do everything. + +## Layer 1: Capture (Screen Studio / Raw Footage) + +Collect the source material: +- **Screen Studio**: polished screen recordings for app demos, coding sessions, browser workflows +- **Raw camera footage**: vlog footage, interviews, event recordings +- **Desktop capture via VideoDB**: session recording with real-time context (see `videodb` skill) + +Output: raw files ready for organization. + +## Layer 2: Organization (Claude / Codex) + +Use Claude Code or Codex to: +- **Transcribe and label**: generate transcript, identify topics and themes +- **Plan structure**: decide what stays, what gets cut, what order works +- **Identify dead sections**: find pauses, tangents, repeated takes +- **Generate edit decision list**: timestamps for cuts, segments to keep +- **Scaffold FFmpeg and Remotion code**: generate the commands and compositions + +``` +Example prompt: +"Here's the transcript of a 4-hour recording. Identify the 8 strongest segments +for a 24-minute vlog. Give me FFmpeg cut commands for each segment." +``` + +This layer is about structure, not final creative taste. + +## Layer 3: Deterministic Cuts (FFmpeg) + +FFmpeg handles the boring but critical work: splitting, trimming, concatenating, and preprocessing. 
+ +### Extract segment by timestamp + +```bash +ffmpeg -i raw.mp4 -ss 00:12:30 -to 00:15:45 -c copy segment_01.mp4 +``` + +### Batch cut from edit decision list + +```bash +#!/bin/bash +# cuts.txt: start,end,label +while IFS=, read -r start end label; do + ffmpeg -i raw.mp4 -ss "$start" -to "$end" -c copy "segments/${label}.mp4" +done < cuts.txt +``` + +### Concatenate segments + +```bash +# Create file list +for f in segments/*.mp4; do echo "file '$f'"; done > concat.txt +ffmpeg -f concat -safe 0 -i concat.txt -c copy assembled.mp4 +``` + +### Create proxy for faster editing + +```bash +ffmpeg -i raw.mp4 -vf "scale=960:-2" -c:v libx264 -preset ultrafast -crf 28 proxy.mp4 +``` + +### Extract audio for transcription + +```bash +ffmpeg -i raw.mp4 -vn -acodec pcm_s16le -ar 16000 audio.wav +``` + +### Normalize audio levels + +```bash +ffmpeg -i segment.mp4 -af loudnorm=I=-16:TP=-1.5:LRA=11 -c:v copy normalized.mp4 +``` + +## Layer 4: Programmable Composition (Remotion) + +Remotion turns editing problems into composable code. Use it for things that traditional editors make painful: + +### When to use Remotion + +- Overlays: text, images, branding, lower thirds +- Data visualizations: charts, stats, animated numbers +- Motion graphics: transitions, explainer animations +- Composable scenes: reusable templates across videos +- Product demos: annotated screenshots, UI highlights + +### Basic Remotion composition + +```tsx +import { AbsoluteFill, Sequence, Video, useCurrentFrame } from "remotion"; + +export const VlogComposition: React.FC = () => { + const frame = useCurrentFrame(); + + return ( + + {/* Main footage */} + + + + {/* Title overlay */} + + +

+          <h1 style={{ color: "white", fontSize: 80 }}>
+            The AI Editing Stack
+          </h1>
+        </AbsoluteFill>
+      </Sequence>
+
+      {/* Next segment */}
+      <Sequence from={300} durationInFrames={300}>
+        <Video src="segments/segment_02.mp4" />
+      </Sequence>
+    </AbsoluteFill>
+ ); +}; +``` + +### Render output + +```bash +npx remotion render src/index.ts VlogComposition output.mp4 +``` + +See the [Remotion docs](https://www.remotion.dev/docs) for detailed patterns and API reference. + +## Layer 5: Generated Assets (ElevenLabs / fal.ai) + +Generate only what you need. Do not generate the whole video. + +### Voiceover with ElevenLabs + +```python +import os +import requests + +resp = requests.post( + f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}", + headers={ + "xi-api-key": os.environ["ELEVENLABS_API_KEY"], + "Content-Type": "application/json" + }, + json={ + "text": "Your narration text here", + "model_id": "eleven_turbo_v2_5", + "voice_settings": {"stability": 0.5, "similarity_boost": 0.75} + } +) +with open("voiceover.mp3", "wb") as f: + f.write(resp.content) +``` + +### Music and SFX with fal.ai + +Use the `fal-ai-media` skill for: +- Background music generation +- Sound effects (ThinkSound model for video-to-audio) +- Transition sounds + +### Generated visuals with fal.ai + +Use for insert shots, thumbnails, or b-roll that doesn't exist: +``` +generate(model_name: "fal-ai/nano-banana-pro", input: { + "prompt": "professional thumbnail for tech vlog, dark background, code on screen", + "image_size": "landscape_16_9" +}) +``` + +### VideoDB generative audio + +If VideoDB is configured: +```python +voiceover = coll.generate_voice(text="Narration here", voice="alloy") +music = coll.generate_music(prompt="lo-fi background for coding vlog", duration=120) +sfx = coll.generate_sound_effect(prompt="subtle whoosh transition") +``` + +## Layer 6: Final Polish (Descript / CapCut) + +The last layer is human. 
Use a traditional editor for: +- **Pacing**: adjust cuts that feel too fast or slow +- **Captions**: auto-generated, then manually cleaned +- **Color grading**: basic correction and mood +- **Final audio mix**: balance voice, music, and SFX levels +- **Export**: platform-specific formats and quality settings + +This is where taste lives. AI clears the repetitive work. You make the final calls. + +## Social Media Reframing + +Different platforms need different aspect ratios: + +| Platform | Aspect Ratio | Resolution | +|----------|-------------|------------| +| YouTube | 16:9 | 1920x1080 | +| TikTok / Reels | 9:16 | 1080x1920 | +| Instagram Feed | 1:1 | 1080x1080 | +| X / Twitter | 16:9 or 1:1 | 1280x720 or 720x720 | + +### Reframe with FFmpeg + +```bash +# 16:9 to 9:16 (center crop) +ffmpeg -i input.mp4 -vf "crop=ih*9/16:ih,scale=1080:1920" vertical.mp4 + +# 16:9 to 1:1 (center crop) +ffmpeg -i input.mp4 -vf "crop=ih:ih,scale=1080:1080" square.mp4 +``` + +### Reframe with VideoDB + +```python +# Smart reframe (AI-guided subject tracking) +reframed = video.reframe(start=0, end=60, target="vertical", mode=ReframeMode.smart) +``` + +## Scene Detection and Auto-Cut + +### FFmpeg scene detection + +```bash +# Detect scene changes (threshold 0.3 = moderate sensitivity) +ffmpeg -i input.mp4 -vf "select='gt(scene,0.3)',showinfo" -vsync vfr -f null - 2>&1 | grep showinfo +``` + +### Silence detection for auto-cut + +```bash +# Find silent segments (useful for cutting dead air) +ffmpeg -i input.mp4 -af silencedetect=noise=-30dB:d=2 -f null - 2>&1 | grep silence +``` + +### Highlight extraction + +Use Claude to analyze transcript + scene timestamps: +``` +"Given this transcript with timestamps and these scene change points, +identify the 5 most engaging 30-second clips for social media." 
+``` + +## What Each Tool Does Best + +| Tool | Strength | Weakness | +|------|----------|----------| +| Claude / Codex | Organization, planning, code generation | Not the creative taste layer | +| FFmpeg | Deterministic cuts, batch processing, format conversion | No visual editing UI | +| Remotion | Programmable overlays, composable scenes, reusable templates | Learning curve for non-devs | +| Screen Studio | Polished screen recordings immediately | Only screen capture | +| ElevenLabs | Voice, narration, music, SFX | Not the center of the workflow | +| Descript / CapCut | Final pacing, captions, polish | Manual, not automatable | + +## Key Principles + +1. **Edit, don't generate.** This workflow is for cutting real footage, not creating from prompts. +2. **Structure before style.** Get the story right in Layer 2 before touching anything visual. +3. **FFmpeg is the backbone.** Boring but critical. Where long footage becomes manageable. +4. **Remotion for repeatability.** If you'll do it more than once, make it a Remotion component. +5. **Generate selectively.** Only use AI generation for assets that don't exist, not for everything. +6. **Taste is the last layer.** AI clears repetitive work. You make the final creative calls. 
+ +## Related Skills + +- `fal-ai-media` — AI image, video, and audio generation +- `videodb` — Server-side video processing, indexing, and streaming +- `content-engine` — Platform-native content distribution diff --git a/.agents/skills/video-editing/agents/openai.yaml b/.agents/skills/video-editing/agents/openai.yaml new file mode 100644 index 00000000..e943e019 --- /dev/null +++ b/.agents/skills/video-editing/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "Video Editing" + short_description: "AI-assisted video editing for real footage" + brand_color: "#EF4444" + default_prompt: "Edit video using AI-assisted pipeline: organize, cut, compose, generate assets, polish" +policy: + allow_implicit_invocation: true diff --git a/.agents/skills/x-api/SKILL.md b/.agents/skills/x-api/SKILL.md new file mode 100644 index 00000000..d0e3ad65 --- /dev/null +++ b/.agents/skills/x-api/SKILL.md @@ -0,0 +1,211 @@ +--- +name: x-api +description: X/Twitter API integration for posting tweets, threads, reading timelines, search, and analytics. Covers OAuth auth patterns, rate limits, and platform-native content posting. Use when the user wants to interact with X programmatically. +origin: ECC +--- + +# X API + +Programmatic interaction with X (Twitter) for posting, reading, searching, and analytics. + +## When to Activate + +- User wants to post tweets or threads programmatically +- Reading timeline, mentions, or user data from X +- Searching X for content, trends, or conversations +- Building X integrations or bots +- Analytics and engagement tracking +- User says "post to X", "tweet", "X API", or "Twitter API" + +## Authentication + +### OAuth 2.0 (App-Only / User Context) + +Best for: read-heavy operations, search, public data. 
+ +```bash +# Environment setup +export X_BEARER_TOKEN="your-bearer-token" +``` + +```python +import os +import requests + +bearer = os.environ["X_BEARER_TOKEN"] +headers = {"Authorization": f"Bearer {bearer}"} + +# Search recent tweets +resp = requests.get( + "https://api.x.com/2/tweets/search/recent", + headers=headers, + params={"query": "claude code", "max_results": 10} +) +tweets = resp.json() +``` + +### OAuth 1.0a (User Context) + +Required for: posting tweets, managing account, DMs. + +```bash +# Environment setup — source before use +export X_API_KEY="your-api-key" +export X_API_SECRET="your-api-secret" +export X_ACCESS_TOKEN="your-access-token" +export X_ACCESS_SECRET="your-access-secret" +``` + +```python +import os +from requests_oauthlib import OAuth1Session + +oauth = OAuth1Session( + os.environ["X_API_KEY"], + client_secret=os.environ["X_API_SECRET"], + resource_owner_key=os.environ["X_ACCESS_TOKEN"], + resource_owner_secret=os.environ["X_ACCESS_SECRET"], +) +``` + +## Core Operations + +### Post a Tweet + +```python +resp = oauth.post( + "https://api.x.com/2/tweets", + json={"text": "Hello from Claude Code"} +) +resp.raise_for_status() +tweet_id = resp.json()["data"]["id"] +``` + +### Post a Thread + +```python +def post_thread(oauth, tweets: list[str]) -> list[str]: + ids = [] + reply_to = None + for text in tweets: + payload = {"text": text} + if reply_to: + payload["reply"] = {"in_reply_to_tweet_id": reply_to} + resp = oauth.post("https://api.x.com/2/tweets", json=payload) + tweet_id = resp.json()["data"]["id"] + ids.append(tweet_id) + reply_to = tweet_id + return ids +``` + +### Read User Timeline + +```python +resp = requests.get( + f"https://api.x.com/2/users/{user_id}/tweets", + headers=headers, + params={ + "max_results": 10, + "tweet.fields": "created_at,public_metrics", + } +) +``` + +### Search Tweets + +```python +resp = requests.get( + "https://api.x.com/2/tweets/search/recent", + headers=headers, + params={ + "query": 
"from:affaanmustafa -is:retweet", + "max_results": 10, + "tweet.fields": "public_metrics,created_at", + } +) +``` + +### Get User by Username + +```python +resp = requests.get( + "https://api.x.com/2/users/by/username/affaanmustafa", + headers=headers, + params={"user.fields": "public_metrics,description,created_at"} +) +``` + +### Upload Media and Post + +```python +# Media upload uses v1.1 endpoint + +# Step 1: Upload media +media_resp = oauth.post( + "https://upload.twitter.com/1.1/media/upload.json", + files={"media": open("image.png", "rb")} +) +media_id = media_resp.json()["media_id_string"] + +# Step 2: Post with media +resp = oauth.post( + "https://api.x.com/2/tweets", + json={"text": "Check this out", "media": {"media_ids": [media_id]}} +) +``` + +## Rate Limits Reference + +| Endpoint | Limit | Window | +|----------|-------|--------| +| POST /2/tweets | 200 | 15 min | +| GET /2/tweets/search/recent | 450 | 15 min | +| GET /2/users/:id/tweets | 1500 | 15 min | +| GET /2/users/by/username | 300 | 15 min | +| POST media/upload | 415 | 15 min | + +Always check `x-rate-limit-remaining` and `x-rate-limit-reset` headers. + +```python +remaining = int(resp.headers.get("x-rate-limit-remaining", 0)) +if remaining < 5: + reset = int(resp.headers.get("x-rate-limit-reset", 0)) + wait = max(0, reset - int(time.time())) + print(f"Rate limit approaching. Resets in {wait}s") +``` + +## Error Handling + +```python +resp = oauth.post("https://api.x.com/2/tweets", json={"text": content}) +if resp.status_code == 201: + return resp.json()["data"]["id"] +elif resp.status_code == 429: + reset = int(resp.headers["x-rate-limit-reset"]) + raise Exception(f"Rate limited. Resets at {reset}") +elif resp.status_code == 403: + raise Exception(f"Forbidden: {resp.json().get('detail', 'check permissions')}") +else: + raise Exception(f"X API error {resp.status_code}: {resp.text}") +``` + +## Security + +- **Never hardcode tokens.** Use environment variables or `.env` files. 
+- **Never commit `.env` files.** Add to `.gitignore`. +- **Rotate tokens** if exposed. Regenerate at developer.x.com. +- **Use read-only tokens** when write access is not needed. +- **Store OAuth secrets securely** — not in source code or logs. + +## Integration with Content Engine + +Use `content-engine` skill to generate platform-native content, then post via X API: +1. Generate content with content-engine (X platform format) +2. Validate length (280 chars for single tweet) +3. Post via X API using patterns above +4. Track engagement via public_metrics + +## Related Skills + +- `content-engine` — Generate platform-native content for X +- `crosspost` — Distribute content across X, LinkedIn, and other platforms diff --git a/.agents/skills/x-api/agents/openai.yaml b/.agents/skills/x-api/agents/openai.yaml new file mode 100644 index 00000000..2875958f --- /dev/null +++ b/.agents/skills/x-api/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "X API" + short_description: "X/Twitter API integration for posting, threads, and analytics" + brand_color: "#000000" + default_prompt: "Use X API to post tweets, threads, or retrieve timeline and search data" +policy: + allow_implicit_invocation: true diff --git a/.codex/AGENTS.md b/.codex/AGENTS.md index 6de01b1e..60f63920 100644 --- a/.codex/AGENTS.md +++ b/.codex/AGENTS.md @@ -34,10 +34,17 @@ Available skills: - strategic-compact — Context management - api-design — REST API design patterns - verification-loop — Build, test, lint, typecheck, security +- deep-research — Multi-source research with firecrawl and exa MCPs +- exa-search — Neural search via Exa MCP for web, code, and companies +- claude-api — Anthropic Claude API patterns and SDKs +- x-api — X/Twitter API integration for posting, threads, and analytics +- crosspost — Multi-platform content distribution +- fal-ai-media — AI image/video/audio generation via fal.ai +- dmux-workflows — Multi-agent orchestration with dmux ## MCP Servers -Configure in 
`~/.codex/config.toml` under `[mcp_servers]`. See `.codex/config.toml` for reference configuration with GitHub, Context7, Memory, and Sequential Thinking servers. +Treat the project-local `.codex/config.toml` as the default Codex baseline for ECC. The current ECC baseline enables GitHub, Context7, Exa, Memory, Playwright, and Sequential Thinking; add heavier extras in `~/.codex/config.toml` only when a task actually needs them. ## Multi-Agent Support @@ -63,7 +70,7 @@ Sample role configs in this repo: | Commands | `/slash` commands | Instruction-based | | Agents | Subagent Task tool | Multi-agent via `/agent` and `[agents.]` roles | | Security | Hook-based enforcement | Instruction + sandbox | -| MCP | Full support | Command-based only | +| MCP | Full support | Supported via `config.toml` and `codex mcp add` | ## Security Without Hooks diff --git a/.codex/config.toml b/.codex/config.toml index 3a5dd147..e1e1bf52 100644 --- a/.codex/config.toml +++ b/.codex/config.toml @@ -33,6 +33,8 @@ notify = [ # model_instructions_file = "/absolute/path/to/instructions.md" # MCP servers +# Keep the default project set lean. API-backed servers inherit credentials from +# the launching environment or can be supplied by a user-level ~/.codex/config.toml. 
[mcp_servers.github] command = "npx" args = ["-y", "@modelcontextprotocol/server-github"] @@ -41,10 +43,17 @@ args = ["-y", "@modelcontextprotocol/server-github"] command = "npx" args = ["-y", "@upstash/context7-mcp@latest"] +[mcp_servers.exa] +url = "https://mcp.exa.ai/mcp" + [mcp_servers.memory] command = "npx" args = ["-y", "@modelcontextprotocol/server-memory"] +[mcp_servers.playwright] +command = "npx" +args = ["-y", "@playwright/mcp@latest", "--extension"] + [mcp_servers.sequential-thinking] command = "npx" args = ["-y", "@modelcontextprotocol/server-sequential-thinking"] @@ -58,6 +67,10 @@ args = ["-y", "@modelcontextprotocol/server-sequential-thinking"] # command = "npx" # args = ["-y", "firecrawl-mcp"] # +# [mcp_servers.fal-ai] +# command = "npx" +# args = ["-y", "fal-ai-mcp-server"] +# # [mcp_servers.cloudflare] # command = "npx" # args = ["-y", "@cloudflare/mcp-server-cloudflare"] @@ -77,22 +90,18 @@ approval_policy = "never" sandbox_mode = "workspace-write" web_search = "live" -# Optional project-local multi-agent roles. -# Keep these commented in global config, because config_file paths are resolved -# relative to the config.toml file and must exist at load time. -# -# [agents] -# max_threads = 6 -# max_depth = 1 -# -# [agents.explorer] -# description = "Read-only codebase explorer for gathering evidence before changes are proposed." -# config_file = "agents/explorer.toml" -# -# [agents.reviewer] -# description = "PR reviewer focused on correctness, security, and missing tests." -# config_file = "agents/reviewer.toml" -# -# [agents.docs_researcher] -# description = "Documentation specialist that verifies APIs, framework behavior, and release notes." -# config_file = "agents/docs-researcher.toml" +[agents] +max_threads = 6 +max_depth = 1 + +[agents.explorer] +description = "Read-only codebase explorer for gathering evidence before changes are proposed." 
+config_file = "agents/explorer.toml" + +[agents.reviewer] +description = "PR reviewer focused on correctness, security, and missing tests." +config_file = "agents/reviewer.toml" + +[agents.docs_researcher] +description = "Documentation specialist that verifies APIs, framework behavior, and release notes." +config_file = "agents/docs-researcher.toml" diff --git a/.gitignore b/.gitignore index 7e37f0fd..c7aaa2cf 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ examples/sessions/*.tmp # Local drafts marketing/ +.dmux/ diff --git a/commands/multi-workflow.md b/commands/multi-workflow.md index 8ca12a9a..52509d51 100644 --- a/commands/multi-workflow.md +++ b/commands/multi-workflow.md @@ -101,6 +101,14 @@ TaskOutput({ task_id: "", block: true, timeout: 600000 }) 4. Force stop when score < 7 or user does not approve. 5. Use `AskUserQuestion` tool for user interaction when needed (e.g., confirmation/selection/approval). +## When to Use External Orchestration + +Use external tmux/worktree orchestration when the work must be split across parallel workers that need isolated git state, independent terminals, or separate build/test execution. Use in-process subagents for lightweight analysis, planning, or review where the main session remains the only writer. + +```bash +node scripts/orchestrate-worktrees.js .claude/plan/workflow-e2e-test.json --execute +``` + --- ## Execution Workflow diff --git a/commands/orchestrate.md b/commands/orchestrate.md index 3a629ec5..2db80d66 100644 --- a/commands/orchestrate.md +++ b/commands/orchestrate.md @@ -148,6 +148,61 @@ Run simultaneously: Combine outputs into single report ``` +For external tmux-pane workers with separate git worktrees, use `node scripts/orchestrate-worktrees.js plan.json --execute`. The built-in orchestration pattern stays in-process; the helper is for long-running or cross-harness sessions. + +When workers need to see dirty or untracked local files from the main checkout, add `seedPaths` to the plan file. 
ECC overlays only those selected paths into each worker worktree after `git worktree add`, which keeps the branch isolated while still exposing in-flight local scripts, plans, or docs. + +```json +{ + "sessionName": "workflow-e2e", + "seedPaths": [ + "scripts/orchestrate-worktrees.js", + "scripts/lib/tmux-worktree-orchestrator.js", + ".claude/plan/workflow-e2e-test.json" + ], + "workers": [ + { "name": "docs", "task": "Update orchestration docs." } + ] +} +``` + +To export a control-plane snapshot for a live tmux/worktree session, run: + +```bash +node scripts/orchestration-status.js .claude/plan/workflow-visual-proof.json +``` + +The snapshot includes session activity, tmux pane metadata, worker states, objectives, seeded overlays, and recent handoff summaries in JSON form. + +## Operator Command-Center Handoff + +When the workflow spans multiple sessions, worktrees, or tmux panes, append a control-plane block to the final handoff: + +```markdown +CONTROL PLANE +------------- +Sessions: +- active session ID or alias +- branch + worktree path for each active worker +- tmux pane or detached session name when applicable + +Diffs: +- git status summary +- git diff --stat for touched files +- merge/conflict risk notes + +Approvals: +- pending user approvals +- blocked steps awaiting confirmation + +Telemetry: +- last activity timestamp or idle signal +- estimated token or cost drift +- policy events raised by hooks or reviewers +``` + +This keeps planner, implementer, reviewer, and loop workers legible from the operator surface. + ## Arguments $ARGUMENTS: diff --git a/commands/sessions.md b/commands/sessions.md index d54f02ec..3acfd418 100644 --- a/commands/sessions.md +++ b/commands/sessions.md @@ -12,6 +12,8 @@ Manage Claude Code session history - list, load, alias, and edit sessions stored Display all sessions with metadata, filtering, and pagination. 
+Use `/sessions info` when you need operator-surface context for a swarm: branch, worktree path, and session recency. + ```bash /sessions # List all sessions (default) /sessions list # Same as above @@ -25,6 +27,7 @@ Display all sessions with metadata, filtering, and pagination. node -e " const sm = require((process.env.CLAUDE_PLUGIN_ROOT||require('path').join(require('os').homedir(),'.claude'))+'/scripts/lib/session-manager'); const aa = require((process.env.CLAUDE_PLUGIN_ROOT||require('path').join(require('os').homedir(),'.claude'))+'/scripts/lib/session-aliases'); +const path = require('path'); const result = sm.getAllSessions({ limit: 20 }); const aliases = aa.listAliases(); @@ -33,17 +36,18 @@ for (const a of aliases) aliasMap[a.sessionPath] = a.name; console.log('Sessions (showing ' + result.sessions.length + ' of ' + result.total + '):'); console.log(''); -console.log('ID Date Time Size Lines Alias'); -console.log('────────────────────────────────────────────────────'); +console.log('ID Date Time Branch Worktree Alias'); +console.log('────────────────────────────────────────────────────────────────────'); for (const s of result.sessions) { const alias = aliasMap[s.filename] || ''; - const size = sm.getSessionSize(s.sessionPath); - const stats = sm.getSessionStats(s.sessionPath); + const metadata = sm.parseSessionMetadata(sm.getSessionContent(s.sessionPath)); const id = s.shortId === 'no-id' ? '(none)' : s.shortId.slice(0, 8); const time = s.modifiedTime.toTimeString().slice(0, 5); + const branch = (metadata.branch || '-').slice(0, 12); + const worktree = metadata.worktree ? 
path.basename(metadata.worktree).slice(0, 18) : '-'; - console.log(id.padEnd(8) + ' ' + s.date + ' ' + time + ' ' + size.padEnd(7) + ' ' + String(stats.lineCount).padEnd(5) + ' ' + alias); + console.log(id.padEnd(8) + ' ' + s.date + ' ' + time + ' ' + branch.padEnd(12) + ' ' + worktree.padEnd(18) + ' ' + alias); } " ``` @@ -108,6 +112,18 @@ if (session.metadata.started) { if (session.metadata.lastUpdated) { console.log('Last Updated: ' + session.metadata.lastUpdated); } + +if (session.metadata.project) { + console.log('Project: ' + session.metadata.project); +} + +if (session.metadata.branch) { + console.log('Branch: ' + session.metadata.branch); +} + +if (session.metadata.worktree) { + console.log('Worktree: ' + session.metadata.worktree); +} " "$ARGUMENTS" ``` @@ -215,6 +231,9 @@ console.log('ID: ' + (session.shortId === 'no-id' ? '(none)' : session. console.log('Filename: ' + session.filename); console.log('Date: ' + session.date); console.log('Modified: ' + session.modifiedTime.toISOString().slice(0, 19).replace('T', ' ')); +console.log('Project: ' + (session.metadata.project || '-')); +console.log('Branch: ' + (session.metadata.branch || '-')); +console.log('Worktree: ' + (session.metadata.worktree || '-')); console.log(''); console.log('Content:'); console.log(' Lines: ' + stats.lineCount); @@ -236,6 +255,11 @@ Show all session aliases. /sessions aliases # List all aliases ``` +## Operator Notes + +- Session files persist `Project`, `Branch`, and `Worktree` in the header so `/sessions info` can disambiguate parallel tmux/worktree runs. +- For command-center style monitoring, combine `/sessions info`, `git diff --stat`, and the cost metrics emitted by `scripts/hooks/cost-tracker.js`. 
+ **Script:** ```bash node -e " diff --git a/mcp-configs/mcp-servers.json b/mcp-configs/mcp-servers.json index 64b1ad00..db6e4f34 100644 --- a/mcp-configs/mcp-servers.json +++ b/mcp-configs/mcp-servers.json @@ -72,11 +72,11 @@ "env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" }, - "description": "Web search, research, and data ingestion via Exa API — recommended for research-first development workflow" + "description": "Web search, research, and data ingestion via Exa API — prefer task-scoped use for broader research after GitHub search and primary docs" }, "context7": { "command": "npx", - "args": ["-y", "@context7/mcp-server"], + "args": ["-y", "@upstash/context7-mcp@latest"], "description": "Live documentation lookup" }, "magic": { @@ -93,6 +93,50 @@ "command": "python3", "args": ["-m", "insa_its.mcp_server"], "description": "AI-to-AI security monitoring — anomaly detection, credential exposure, hallucination checks, forensic tracing. 23 anomaly types, OWASP MCP Top 10 coverage. 100% local. 
Install: pip install insa-its" + }, + "playwright": { + "command": "npx", + "args": ["-y", "@playwright/mcp", "--browser", "chrome"], + "description": "Browser automation and testing via Playwright" + }, + "fal-ai": { + "command": "npx", + "args": ["-y", "fal-ai-mcp-server"], + "env": { + "FAL_KEY": "YOUR_FAL_KEY_HERE" + }, + "description": "AI image/video/audio generation via fal.ai models" + }, + "browserbase": { + "command": "npx", + "args": ["-y", "@browserbasehq/mcp-server-browserbase"], + "env": { + "BROWSERBASE_API_KEY": "YOUR_BROWSERBASE_KEY_HERE" + }, + "description": "Cloud browser sessions via Browserbase" + }, + "browser-use": { + "type": "http", + "url": "https://api.browser-use.com/mcp", + "headers": { + "x-browser-use-api-key": "YOUR_BROWSER_USE_KEY_HERE" + }, + "description": "AI browser agent for web tasks" + }, + "token-optimizer": { + "command": "npx", + "args": ["-y", "token-optimizer-mcp"], + "description": "Token optimization for 95%+ context reduction via content deduplication and compression" + }, + "confluence": { + "command": "npx", + "args": ["-y", "confluence-mcp-server"], + "env": { + "CONFLUENCE_BASE_URL": "YOUR_CONFLUENCE_URL_HERE", + "CONFLUENCE_EMAIL": "YOUR_EMAIL_HERE", + "CONFLUENCE_API_TOKEN": "YOUR_CONFLUENCE_TOKEN_HERE" + }, + "description": "Confluence Cloud integration — search pages, retrieve content, explore spaces" } }, "_comments": { diff --git a/package.json b/package.json index 352d41fc..ff5f7d35 100644 --- a/package.json +++ b/package.json @@ -67,6 +67,9 @@ "scripts/hooks/", "scripts/lib/", "scripts/claw.js", + "scripts/orchestration-status.js", + "scripts/orchestrate-codex-worker.sh", + "scripts/orchestrate-worktrees.js", "scripts/setup-package-manager.js", "scripts/skill-create-output.js", "skills/", @@ -83,6 +86,9 @@ "postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'", "lint": "eslint . 
&& markdownlint '**/*.md' --ignore node_modules", "claw": "node scripts/claw.js", + "orchestrate:status": "node scripts/orchestration-status.js", + "orchestrate:worker": "bash scripts/orchestrate-codex-worker.sh", + "orchestrate:tmux": "node scripts/orchestrate-worktrees.js", "test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js", "coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js" }, diff --git a/rules/common/development-workflow.md b/rules/common/development-workflow.md index 89372795..d97c1b1d 100644 --- a/rules/common/development-workflow.md +++ b/rules/common/development-workflow.md @@ -8,7 +8,8 @@ The Feature Implementation Workflow describes the development pipeline: research 0. **Research & Reuse** _(mandatory before any new implementation)_ - **GitHub code search first:** Run `gh search repos` and `gh search code` to find existing implementations, templates, and patterns before writing anything new. - - **Exa MCP for research:** Use `exa-web-search` MCP during the planning phase for broader research, data ingestion, and discovering prior art. + - **Library docs second:** Use Context7 or primary vendor docs to confirm API behavior, package usage, and version-specific details before implementing. + - **Exa only when the first two are insufficient:** Use Exa for broader web research or discovery after GitHub search and primary docs. - **Check package registries:** Search npm, PyPI, crates.io, and other registries before writing utility code. Prefer battle-tested libraries over hand-rolled solutions. 
- **Search for adaptable implementations:** Look for open-source projects that solve 80%+ of the problem and can be forked, ported, or wrapped. - Prefer adopting or porting a proven approach over writing net-new code when it meets the requirement. diff --git a/scripts/hooks/session-end.js b/scripts/hooks/session-end.js index ccbc0b42..301ced97 100644 --- a/scripts/hooks/session-end.js +++ b/scripts/hooks/session-end.js @@ -16,15 +16,17 @@ const { getDateString, getTimeString, getSessionIdShort, + getProjectName, ensureDir, readFile, writeFile, - replaceInFile, + runCommand, log } = require('../lib/utils'); const SUMMARY_START_MARKER = ''; const SUMMARY_END_MARKER = ''; +const SESSION_SEPARATOR = '\n---\n'; /** * Extract a meaningful summary from the session transcript. @@ -128,6 +130,51 @@ function runMain() { }); } +function getSessionMetadata() { + const branchResult = runCommand('git rev-parse --abbrev-ref HEAD'); + + return { + project: getProjectName() || 'unknown', + branch: branchResult.success ? branchResult.output : 'unknown', + worktree: process.cwd() + }; +} + +function extractHeaderField(header, label) { + const match = header.match(new RegExp(`\\*\\*${escapeRegExp(label)}:\\*\\*\\s*(.+)$`, 'm')); + return match ? match[1].trim() : null; +} + +function buildSessionHeader(today, currentTime, metadata, existingContent = '') { + const headingMatch = existingContent.match(/^#\s+.+$/m); + const heading = headingMatch ? 
headingMatch[0] : `# Session: ${today}`; + const date = extractHeaderField(existingContent, 'Date') || today; + const started = extractHeaderField(existingContent, 'Started') || currentTime; + + return [ + heading, + `**Date:** ${date}`, + `**Started:** ${started}`, + `**Last Updated:** ${currentTime}`, + `**Project:** ${metadata.project}`, + `**Branch:** ${metadata.branch}`, + `**Worktree:** ${metadata.worktree}`, + '' + ].join('\n'); +} + +function mergeSessionHeader(content, today, currentTime, metadata) { + const separatorIndex = content.indexOf(SESSION_SEPARATOR); + if (separatorIndex === -1) { + return null; + } + + const existingHeader = content.slice(0, separatorIndex); + const body = content.slice(separatorIndex + SESSION_SEPARATOR.length); + const nextHeader = buildSessionHeader(today, currentTime, metadata, existingHeader); + return `${nextHeader}${SESSION_SEPARATOR}${body}`; +} + async function main() { // Parse stdin JSON to get transcript_path let transcriptPath = null; @@ -143,6 +190,7 @@ async function main() { const today = getDateString(); const shortId = getSessionIdShort(); const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); + const sessionMetadata = getSessionMetadata(); ensureDir(sessionsDir); @@ -160,42 +208,42 @@ async function main() { } if (fs.existsSync(sessionFile)) { - // Update existing session file - const updated = replaceInFile( - sessionFile, - /\*\*Last Updated:\*\*.*/, - `**Last Updated:** ${currentTime}` - ); - if (!updated) { - log(`[SessionEnd] Failed to update timestamp in ${sessionFile}`); + const existing = readFile(sessionFile); + let updatedContent = existing; + + if (existing) { + const merged = mergeSessionHeader(existing, today, currentTime, sessionMetadata); + if (merged) { + updatedContent = merged; + } else { + log(`[SessionEnd] Failed to normalize header in ${sessionFile}`); + } } // If we have a new summary, update only the generated summary block. 
// This keeps repeated Stop invocations idempotent and preserves // user-authored sections in the same session file. - if (summary) { - const existing = readFile(sessionFile); - if (existing) { - const summaryBlock = buildSummaryBlock(summary); - let updatedContent = existing; + if (summary && updatedContent) { + const summaryBlock = buildSummaryBlock(summary); - if (existing.includes(SUMMARY_START_MARKER) && existing.includes(SUMMARY_END_MARKER)) { - updatedContent = existing.replace( - new RegExp(`${escapeRegExp(SUMMARY_START_MARKER)}[\\s\\S]*?${escapeRegExp(SUMMARY_END_MARKER)}`), - summaryBlock - ); - } else { - // Migration path for files created before summary markers existed. - updatedContent = existing.replace( - /## (?:Session Summary|Current State)[\s\S]*?$/, - `${summaryBlock}\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n` - ); - } - - writeFile(sessionFile, updatedContent); + if (updatedContent.includes(SUMMARY_START_MARKER) && updatedContent.includes(SUMMARY_END_MARKER)) { + updatedContent = updatedContent.replace( + new RegExp(`${escapeRegExp(SUMMARY_START_MARKER)}[\\s\\S]*?${escapeRegExp(SUMMARY_END_MARKER)}`), + summaryBlock + ); + } else { + // Migration path for files created before summary markers existed. + updatedContent = updatedContent.replace( + /## (?:Session Summary|Current State)[\s\S]*?$/, + `${summaryBlock}\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`\n` + ); } } + if (updatedContent) { + writeFile(sessionFile, updatedContent); + } + log(`[SessionEnd] Updated session file: ${sessionFile}`); } else { // Create new session file @@ -203,14 +251,7 @@ async function main() { ? 
`${buildSummaryBlock(summary)}\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\`` : `## Current State\n\n[Session context goes here]\n\n### Completed\n- [ ]\n\n### In Progress\n- [ ]\n\n### Notes for Next Session\n-\n\n### Context to Load\n\`\`\`\n[relevant files]\n\`\`\``; - const template = `# Session: ${today} -**Date:** ${today} -**Started:** ${currentTime} -**Last Updated:** ${currentTime} - ---- - -${summarySection} + const template = `${buildSessionHeader(today, currentTime, sessionMetadata)}${SESSION_SEPARATOR}${summarySection} `; writeFile(sessionFile, template); diff --git a/scripts/lib/orchestration-session.js b/scripts/lib/orchestration-session.js new file mode 100644 index 00000000..f544714d --- /dev/null +++ b/scripts/lib/orchestration-session.js @@ -0,0 +1,295 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +function stripCodeTicks(value) { + if (typeof value !== 'string') { + return value; + } + + const trimmed = value.trim(); + if (trimmed.startsWith('`') && trimmed.endsWith('`') && trimmed.length >= 2) { + return trimmed.slice(1, -1); + } + + return trimmed; +} + +function parseSection(content, heading) { + if (typeof content !== 'string' || content.length === 0) { + return ''; + } + + const lines = content.split('\n'); + const headingLines = new Set([`## ${heading}`, `**${heading}**`]); + const startIndex = lines.findIndex(line => headingLines.has(line.trim())); + + if (startIndex === -1) { + return ''; + } + + const collected = []; + for (let index = startIndex + 1; index < lines.length; index += 1) { + const line = lines[index]; + const trimmed = line.trim(); + if (trimmed.startsWith('## ') || (/^\*\*.+\*\*$/.test(trimmed) && !headingLines.has(trimmed))) { + break; + } + collected.push(line); + } + + return collected.join('\n').trim(); +} + +function parseBullets(section) { + if (!section) { + return []; + } + + return 
section + .split('\n') + .map(line => line.trim()) + .filter(line => line.startsWith('- ')) + .map(line => stripCodeTicks(line.replace(/^- /, '').trim())); +} + +function parseWorkerStatus(content) { + const status = { + state: null, + updated: null, + branch: null, + worktree: null, + taskFile: null, + handoffFile: null + }; + + if (typeof content !== 'string' || content.length === 0) { + return status; + } + + for (const line of content.split('\n')) { + const match = line.match(/^- ([A-Za-z ]+):\s*(.+)$/); + if (!match) { + continue; + } + + const key = match[1].trim().toLowerCase().replace(/\s+/g, ''); + const value = stripCodeTicks(match[2]); + + if (key === 'state') status.state = value; + if (key === 'updated') status.updated = value; + if (key === 'branch') status.branch = value; + if (key === 'worktree') status.worktree = value; + if (key === 'taskfile') status.taskFile = value; + if (key === 'handofffile') status.handoffFile = value; + } + + return status; +} + +function parseWorkerTask(content) { + return { + objective: parseSection(content, 'Objective'), + seedPaths: parseBullets(parseSection(content, 'Seeded Local Overlays')) + }; +} + +function parseWorkerHandoff(content) { + return { + summary: parseBullets(parseSection(content, 'Summary')), + validation: parseBullets(parseSection(content, 'Validation')), + remainingRisks: parseBullets(parseSection(content, 'Remaining Risks')) + }; +} + +function readTextIfExists(filePath) { + if (!filePath || !fs.existsSync(filePath)) { + return ''; + } + + return fs.readFileSync(filePath, 'utf8'); +} + +function listWorkerDirectories(coordinationDir) { + if (!coordinationDir || !fs.existsSync(coordinationDir)) { + return []; + } + + return fs.readdirSync(coordinationDir, { withFileTypes: true }) + .filter(entry => entry.isDirectory()) + .filter(entry => { + const workerDir = path.join(coordinationDir, entry.name); + return ['status.md', 'task.md', 'handoff.md'] + .some(filename => fs.existsSync(path.join(workerDir, 
filename))); + }) + .map(entry => entry.name) + .sort(); +} + +function loadWorkerSnapshots(coordinationDir) { + return listWorkerDirectories(coordinationDir).map(workerSlug => { + const workerDir = path.join(coordinationDir, workerSlug); + const statusPath = path.join(workerDir, 'status.md'); + const taskPath = path.join(workerDir, 'task.md'); + const handoffPath = path.join(workerDir, 'handoff.md'); + + const status = parseWorkerStatus(readTextIfExists(statusPath)); + const task = parseWorkerTask(readTextIfExists(taskPath)); + const handoff = parseWorkerHandoff(readTextIfExists(handoffPath)); + + return { + workerSlug, + workerDir, + status, + task, + handoff, + files: { + status: statusPath, + task: taskPath, + handoff: handoffPath + } + }; + }); +} + +function listTmuxPanes(sessionName) { + const format = [ + '#{pane_id}', + '#{window_index}', + '#{pane_index}', + '#{pane_title}', + '#{pane_current_command}', + '#{pane_current_path}', + '#{pane_active}', + '#{pane_dead}', + '#{pane_pid}' + ].join('\t'); + + const result = spawnSync('tmux', ['list-panes', '-t', sessionName, '-F', format], { + encoding: 'utf8', + stdio: ['ignore', 'pipe', 'pipe'] + }); + + if (result.error) { + throw result.error; + } + + if (result.status !== 0) { + return []; + } + + return (result.stdout || '') + .split('\n') + .map(line => line.trim()) + .filter(Boolean) + .map(line => { + const [ + paneId, + windowIndex, + paneIndex, + title, + currentCommand, + currentPath, + active, + dead, + pid + ] = line.split('\t'); + + return { + paneId, + windowIndex: Number(windowIndex), + paneIndex: Number(paneIndex), + title, + currentCommand, + currentPath, + active: active === '1', + dead: dead === '1', + pid: pid ? 
Number(pid) : null + }; + }); +} + +function summarizeWorkerStates(workers) { + return workers.reduce((counts, worker) => { + const state = worker.status.state || 'unknown'; + counts[state] = (counts[state] || 0) + 1; + return counts; + }, {}); +} + +function buildSessionSnapshot({ sessionName, coordinationDir, panes }) { + const workerSnapshots = loadWorkerSnapshots(coordinationDir); + const paneMap = new Map(panes.map(pane => [pane.title, pane])); + + const workers = workerSnapshots.map(worker => ({ + ...worker, + pane: paneMap.get(worker.workerSlug) || null + })); + + return { + sessionName, + coordinationDir, + sessionActive: panes.length > 0, + paneCount: panes.length, + workerCount: workers.length, + workerStates: summarizeWorkerStates(workers), + panes, + workers + }; +} + +function resolveSnapshotTarget(targetPath, cwd = process.cwd()) { + const absoluteTarget = path.resolve(cwd, targetPath); + + if (fs.existsSync(absoluteTarget) && fs.statSync(absoluteTarget).isFile()) { + const config = JSON.parse(fs.readFileSync(absoluteTarget, 'utf8')); + const repoRoot = path.resolve(config.repoRoot || cwd); + const coordinationRoot = path.resolve( + config.coordinationRoot || path.join(repoRoot, '.orchestration') + ); + + return { + sessionName: config.sessionName, + coordinationDir: path.join(coordinationRoot, config.sessionName), + repoRoot, + targetType: 'plan' + }; + } + + return { + sessionName: targetPath, + coordinationDir: path.join(cwd, '.claude', 'orchestration', targetPath), + repoRoot: cwd, + targetType: 'session' + }; +} + +function collectSessionSnapshot(targetPath, cwd = process.cwd()) { + const target = resolveSnapshotTarget(targetPath, cwd); + const panes = listTmuxPanes(target.sessionName); + const snapshot = buildSessionSnapshot({ + sessionName: target.sessionName, + coordinationDir: target.coordinationDir, + panes + }); + + return { + ...snapshot, + repoRoot: target.repoRoot, + targetType: target.targetType + }; +} + +module.exports = { + 
buildSessionSnapshot, + collectSessionSnapshot, + listTmuxPanes, + loadWorkerSnapshots, + normalizeText: stripCodeTicks, + parseWorkerHandoff, + parseWorkerStatus, + parseWorkerTask, + resolveSnapshotTarget +}; diff --git a/scripts/lib/session-manager.js b/scripts/lib/session-manager.js index d4a3fe74..88913b36 100644 --- a/scripts/lib/session-manager.js +++ b/scripts/lib/session-manager.js @@ -85,6 +85,9 @@ function parseSessionMetadata(content) { date: null, started: null, lastUpdated: null, + project: null, + branch: null, + worktree: null, completed: [], inProgress: [], notes: '', @@ -117,6 +120,22 @@ function parseSessionMetadata(content) { metadata.lastUpdated = updatedMatch[1]; } + // Extract control-plane metadata + const projectMatch = content.match(/\*\*Project:\*\*\s*(.+)$/m); + if (projectMatch) { + metadata.project = projectMatch[1].trim(); + } + + const branchMatch = content.match(/\*\*Branch:\*\*\s*(.+)$/m); + if (branchMatch) { + metadata.branch = branchMatch[1].trim(); + } + + const worktreeMatch = content.match(/\*\*Worktree:\*\*\s*(.+)$/m); + if (worktreeMatch) { + metadata.worktree = worktreeMatch[1].trim(); + } + // Extract completed items const completedSection = content.match(/### Completed\s*\n([\s\S]*?)(?=###|\n\n|$)/); if (completedSection) { diff --git a/scripts/lib/tmux-worktree-orchestrator.js b/scripts/lib/tmux-worktree-orchestrator.js new file mode 100644 index 00000000..d89a803d --- /dev/null +++ b/scripts/lib/tmux-worktree-orchestrator.js @@ -0,0 +1,491 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +function slugify(value, fallback = 'worker') { + const normalized = String(value || '') + .trim() + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-+|-+$/g, ''); + return normalized || fallback; +} + +function renderTemplate(template, variables) { + if (typeof template !== 'string' || template.trim().length === 0) { + throw new 
Error('launcherCommand must be a non-empty string'); + } + + return template.replace(/\{([a-z_]+)\}/g, (match, key) => { + if (!(key in variables)) { + throw new Error(`Unknown template variable: ${key}`); + } + return String(variables[key]); + }); +} + +function shellQuote(value) { + return `'${String(value).replace(/'/g, `'\\''`)}'`; +} + +function formatCommand(program, args) { + return [program, ...args.map(shellQuote)].join(' '); +} + +function normalizeSeedPaths(seedPaths, repoRoot) { + const resolvedRepoRoot = path.resolve(repoRoot); + const entries = Array.isArray(seedPaths) ? seedPaths : []; + const seen = new Set(); + const normalized = []; + + for (const entry of entries) { + if (typeof entry !== 'string' || entry.trim().length === 0) { + continue; + } + + const absolutePath = path.resolve(resolvedRepoRoot, entry); + const relativePath = path.relative(resolvedRepoRoot, absolutePath); + + if ( + relativePath.startsWith('..') || + path.isAbsolute(relativePath) + ) { + throw new Error(`seedPaths entries must stay inside repoRoot: ${entry}`); + } + + const normalizedPath = relativePath.split(path.sep).join('/'); + if (seen.has(normalizedPath)) { + continue; + } + + seen.add(normalizedPath); + normalized.push(normalizedPath); + } + + return normalized; +} + +function overlaySeedPaths({ repoRoot, seedPaths, worktreePath }) { + const normalizedSeedPaths = normalizeSeedPaths(seedPaths, repoRoot); + + for (const seedPath of normalizedSeedPaths) { + const sourcePath = path.join(repoRoot, seedPath); + const destinationPath = path.join(worktreePath, seedPath); + + if (!fs.existsSync(sourcePath)) { + throw new Error(`Seed path does not exist in repoRoot: ${seedPath}`); + } + + fs.mkdirSync(path.dirname(destinationPath), { recursive: true }); + fs.rmSync(destinationPath, { force: true, recursive: true }); + fs.cpSync(sourcePath, destinationPath, { + dereference: false, + force: true, + preserveTimestamps: true, + recursive: true + }); + } +} + +function 
buildWorkerArtifacts(workerPlan) { + const seededPathsSection = workerPlan.seedPaths.length > 0 + ? [ + '', + '## Seeded Local Overlays', + ...workerPlan.seedPaths.map(seedPath => `- \`${seedPath}\``) + ] + : []; + + return { + dir: workerPlan.coordinationDir, + files: [ + { + path: workerPlan.taskFilePath, + content: [ + `# Worker Task: ${workerPlan.workerName}`, + '', + `- Session: \`${workerPlan.sessionName}\``, + `- Repo root: \`${workerPlan.repoRoot}\``, + `- Worktree: \`${workerPlan.worktreePath}\``, + `- Branch: \`${workerPlan.branchName}\``, + `- Launcher status file: \`${workerPlan.statusFilePath}\``, + `- Launcher handoff file: \`${workerPlan.handoffFilePath}\``, + ...seededPathsSection, + '', + '## Objective', + workerPlan.task, + '', + '## Completion', + 'Do not spawn subagents or external agents for this task.', + 'Report results in your final response.', + `The worker launcher captures your response in \`${workerPlan.handoffFilePath}\` automatically.`, + `The worker launcher updates \`${workerPlan.statusFilePath}\` automatically.` + ].join('\n') + }, + { + path: workerPlan.handoffFilePath, + content: [ + `# Handoff: ${workerPlan.workerName}`, + '', + '## Summary', + '- Pending', + '', + '## Files Changed', + '- Pending', + '', + '## Tests / Verification', + '- Pending', + '', + '## Follow-ups', + '- Pending' + ].join('\n') + }, + { + path: workerPlan.statusFilePath, + content: [ + `# Status: ${workerPlan.workerName}`, + '', + '- State: not started', + `- Worktree: \`${workerPlan.worktreePath}\``, + `- Branch: \`${workerPlan.branchName}\`` + ].join('\n') + } + ] + }; +} + +function buildOrchestrationPlan(config = {}) { + const repoRoot = path.resolve(config.repoRoot || process.cwd()); + const repoName = path.basename(repoRoot); + const workers = Array.isArray(config.workers) ? 
config.workers : []; + const globalSeedPaths = normalizeSeedPaths(config.seedPaths, repoRoot); + const sessionName = slugify(config.sessionName || repoName, 'session'); + const worktreeRoot = path.resolve(config.worktreeRoot || path.dirname(repoRoot)); + const coordinationRoot = path.resolve( + config.coordinationRoot || path.join(repoRoot, '.orchestration') + ); + const coordinationDir = path.join(coordinationRoot, sessionName); + const baseRef = config.baseRef || 'HEAD'; + const defaultLauncher = config.launcherCommand || ''; + + if (workers.length === 0) { + throw new Error('buildOrchestrationPlan requires at least one worker'); + } + + const workerPlans = workers.map((worker, index) => { + if (!worker || typeof worker.task !== 'string' || worker.task.trim().length === 0) { + throw new Error(`Worker ${index + 1} is missing a task`); + } + + const workerName = worker.name || `worker-${index + 1}`; + const workerSlug = slugify(workerName, `worker-${index + 1}`); + const branchName = `orchestrator-${sessionName}-${workerSlug}`; + const worktreePath = path.join(worktreeRoot, `${repoName}-${sessionName}-${workerSlug}`); + const workerCoordinationDir = path.join(coordinationDir, workerSlug); + const taskFilePath = path.join(workerCoordinationDir, 'task.md'); + const handoffFilePath = path.join(workerCoordinationDir, 'handoff.md'); + const statusFilePath = path.join(workerCoordinationDir, 'status.md'); + const launcherCommand = worker.launcherCommand || defaultLauncher; + const workerSeedPaths = normalizeSeedPaths(worker.seedPaths, repoRoot); + const seedPaths = normalizeSeedPaths([...globalSeedPaths, ...workerSeedPaths], repoRoot); + const templateVariables = { + branch_name: branchName, + handoff_file: handoffFilePath, + repo_root: repoRoot, + session_name: sessionName, + status_file: statusFilePath, + task_file: taskFilePath, + worker_name: workerName, + worker_slug: workerSlug, + worktree_path: worktreePath + }; + + if (!launcherCommand) { + throw new Error(`Worker 
${workerName} is missing a launcherCommand`); + } + + const gitArgs = ['worktree', 'add', '-b', branchName, worktreePath, baseRef]; + + return { + branchName, + coordinationDir: workerCoordinationDir, + gitArgs, + gitCommand: formatCommand('git', gitArgs), + handoffFilePath, + launchCommand: renderTemplate(launcherCommand, templateVariables), + repoRoot, + sessionName, + seedPaths, + statusFilePath, + task: worker.task.trim(), + taskFilePath, + workerName, + workerSlug, + worktreePath + }; + }); + + const tmuxCommands = [ + { + cmd: 'tmux', + args: ['new-session', '-d', '-s', sessionName, '-n', 'orchestrator', '-c', repoRoot], + description: 'Create detached tmux session' + }, + { + cmd: 'tmux', + args: [ + 'send-keys', + '-t', + sessionName, + `printf '%s\\n' 'Session: ${sessionName}' 'Coordination: ${coordinationDir}'`, + 'C-m' + ], + description: 'Print orchestrator session details' + } + ]; + + for (const workerPlan of workerPlans) { + tmuxCommands.push( + { + cmd: 'tmux', + args: ['split-window', '-d', '-t', sessionName, '-c', workerPlan.worktreePath], + description: `Create pane for ${workerPlan.workerName}` + }, + { + cmd: 'tmux', + args: ['select-layout', '-t', sessionName, 'tiled'], + description: 'Arrange panes in tiled layout' + }, + { + cmd: 'tmux', + args: ['select-pane', '-t', '', '-T', workerPlan.workerSlug], + description: `Label pane ${workerPlan.workerSlug}` + }, + { + cmd: 'tmux', + args: [ + 'send-keys', + '-t', + '', + `cd ${shellQuote(workerPlan.worktreePath)} && ${workerPlan.launchCommand}`, + 'C-m' + ], + description: `Launch worker ${workerPlan.workerName}` + } + ); + } + + return { + baseRef, + coordinationDir, + replaceExisting: Boolean(config.replaceExisting), + repoRoot, + sessionName, + tmuxCommands, + workerPlans + }; +} + +function materializePlan(plan) { + for (const workerPlan of plan.workerPlans) { + const artifacts = buildWorkerArtifacts(workerPlan); + fs.mkdirSync(artifacts.dir, { recursive: true }); + for (const file of 
artifacts.files) { + fs.writeFileSync(file.path, file.content + '\n', 'utf8'); + } + } +} + +function runCommand(program, args, options = {}) { + const result = spawnSync(program, args, { + cwd: options.cwd, + encoding: 'utf8', + stdio: ['ignore', 'pipe', 'pipe'] + }); + + if (result.error) { + throw result.error; + } + if (result.status !== 0) { + const stderr = (result.stderr || '').trim(); + throw new Error(`${program} ${args.join(' ')} failed${stderr ? `: ${stderr}` : ''}`); + } + return result; +} + +function commandSucceeds(program, args, options = {}) { + const result = spawnSync(program, args, { + cwd: options.cwd, + encoding: 'utf8', + stdio: ['ignore', 'pipe', 'pipe'] + }); + return result.status === 0; +} + +function canonicalizePath(targetPath) { + const resolvedPath = path.resolve(targetPath); + + try { + return fs.realpathSync.native(resolvedPath); + } catch (error) { + const parentPath = path.dirname(resolvedPath); + + try { + return path.join(fs.realpathSync.native(parentPath), path.basename(resolvedPath)); + } catch (parentError) { + return resolvedPath; + } + } +} + +function branchExists(repoRoot, branchName) { + return commandSucceeds('git', ['show-ref', '--verify', '--quiet', `refs/heads/${branchName}`], { + cwd: repoRoot + }); +} + +function listWorktrees(repoRoot) { + const listed = runCommand('git', ['worktree', 'list', '--porcelain'], { cwd: repoRoot }); + const lines = (listed.stdout || '').split('\n'); + const worktrees = []; + + for (const line of lines) { + if (line.startsWith('worktree ')) { + const listedPath = line.slice('worktree '.length).trim(); + worktrees.push({ + listedPath, + canonicalPath: canonicalizePath(listedPath) + }); + } + } + + return worktrees; +} + +function cleanupExisting(plan) { + runCommand('git', ['worktree', 'prune', '--expire', 'now'], { cwd: plan.repoRoot }); + + const hasSession = spawnSync('tmux', ['has-session', '-t', plan.sessionName], { + encoding: 'utf8', + stdio: ['ignore', 'pipe', 'pipe'] + }); + + 
if (hasSession.status === 0) { + runCommand('tmux', ['kill-session', '-t', plan.sessionName], { cwd: plan.repoRoot }); + } + + for (const workerPlan of plan.workerPlans) { + const expectedWorktreePath = canonicalizePath(workerPlan.worktreePath); + const existingWorktree = listWorktrees(plan.repoRoot).find( + worktree => worktree.canonicalPath === expectedWorktreePath + ); + + if (existingWorktree) { + runCommand('git', ['worktree', 'remove', '--force', existingWorktree.listedPath], { + cwd: plan.repoRoot + }); + } + + if (fs.existsSync(workerPlan.worktreePath)) { + fs.rmSync(workerPlan.worktreePath, { force: true, recursive: true }); + } + + runCommand('git', ['worktree', 'prune', '--expire', 'now'], { cwd: plan.repoRoot }); + + if (branchExists(plan.repoRoot, workerPlan.branchName)) { + runCommand('git', ['branch', '-D', workerPlan.branchName], { cwd: plan.repoRoot }); + } + } +} + +function executePlan(plan) { + runCommand('git', ['rev-parse', '--is-inside-work-tree'], { cwd: plan.repoRoot }); + runCommand('tmux', ['-V']); + + if (plan.replaceExisting) { + cleanupExisting(plan); + } else { + const hasSession = spawnSync('tmux', ['has-session', '-t', plan.sessionName], { + encoding: 'utf8', + stdio: ['ignore', 'pipe', 'pipe'] + }); + if (hasSession.status === 0) { + throw new Error(`tmux session already exists: ${plan.sessionName}`); + } + } + + materializePlan(plan); + + for (const workerPlan of plan.workerPlans) { + runCommand('git', workerPlan.gitArgs, { cwd: plan.repoRoot }); + overlaySeedPaths({ + repoRoot: plan.repoRoot, + seedPaths: workerPlan.seedPaths, + worktreePath: workerPlan.worktreePath + }); + } + + runCommand( + 'tmux', + ['new-session', '-d', '-s', plan.sessionName, '-n', 'orchestrator', '-c', plan.repoRoot], + { cwd: plan.repoRoot } + ); + runCommand( + 'tmux', + [ + 'send-keys', + '-t', + plan.sessionName, + `printf '%s\\n' 'Session: ${plan.sessionName}' 'Coordination: ${plan.coordinationDir}'`, + 'C-m' + ], + { cwd: plan.repoRoot } + ); + + for 
(const workerPlan of plan.workerPlans) { + const splitResult = runCommand( + 'tmux', + ['split-window', '-d', '-P', '-F', '#{pane_id}', '-t', plan.sessionName, '-c', workerPlan.worktreePath], + { cwd: plan.repoRoot } + ); + const paneId = splitResult.stdout.trim(); + + if (!paneId) { + throw new Error(`tmux split-window did not return a pane id for ${workerPlan.workerName}`); + } + + runCommand('tmux', ['select-layout', '-t', plan.sessionName, 'tiled'], { cwd: plan.repoRoot }); + runCommand('tmux', ['select-pane', '-t', paneId, '-T', workerPlan.workerSlug], { + cwd: plan.repoRoot + }); + runCommand( + 'tmux', + [ + 'send-keys', + '-t', + paneId, + `cd ${shellQuote(workerPlan.worktreePath)} && ${workerPlan.launchCommand}`, + 'C-m' + ], + { cwd: plan.repoRoot } + ); + } + + return { + coordinationDir: plan.coordinationDir, + sessionName: plan.sessionName, + workerCount: plan.workerPlans.length + }; +} + +module.exports = { + buildOrchestrationPlan, + executePlan, + materializePlan, + normalizeSeedPaths, + overlaySeedPaths, + renderTemplate, + slugify +}; diff --git a/scripts/orchestrate-codex-worker.sh b/scripts/orchestrate-codex-worker.sh new file mode 100755 index 00000000..5461596b --- /dev/null +++ b/scripts/orchestrate-codex-worker.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +set -euo pipefail + +if [[ $# -ne 3 ]]; then + echo "Usage: bash scripts/orchestrate-codex-worker.sh <task_file> <handoff_file> <status_file>" >&2 + exit 1 +fi + +task_file="$1" +handoff_file="$2" +status_file="$3" + +timestamp() { + date -u +"%Y-%m-%dT%H:%M:%SZ" +} + +write_status() { + local state="$1" + local details="$2" + + cat > "$status_file" < "$prompt_file" < "$handoff_file" + write_status "completed" "- Handoff file: \`$handoff_file\`" +else + { + echo "# Handoff" + echo + echo "- Failed: $(timestamp)" + echo "- Branch: \`$(git rev-parse --abbrev-ref HEAD)\`" + echo "- Worktree: \`$(pwd)\`" + echo + echo "The Codex worker exited with a non-zero status." 
+ } > "$handoff_file" + write_status "failed" "- Handoff file: \`$handoff_file\`" + exit 1 +fi diff --git a/scripts/orchestrate-worktrees.js b/scripts/orchestrate-worktrees.js new file mode 100644 index 00000000..0368825f --- /dev/null +++ b/scripts/orchestrate-worktrees.js @@ -0,0 +1,108 @@ +#!/usr/bin/env node +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const { + buildOrchestrationPlan, + executePlan, + materializePlan +} = require('./lib/tmux-worktree-orchestrator'); + +function usage() { + console.log([ + 'Usage:', + ' node scripts/orchestrate-worktrees.js <plan.json> [--execute]', + ' node scripts/orchestrate-worktrees.js <plan.json> [--write-only]', + '', + 'Placeholders supported in launcherCommand:', + ' {worker_name} {worker_slug} {session_name} {repo_root}', + ' {worktree_path} {branch_name} {task_file} {handoff_file} {status_file}', + '', + 'Without flags the script prints a dry-run plan only.' + ].join('\n')); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const planPath = args.find(arg => !arg.startsWith('--')); + return { + execute: args.includes('--execute'), + planPath, + writeOnly: args.includes('--write-only') + }; +} + +function loadPlanConfig(planPath) { + const absolutePath = path.resolve(planPath); + const raw = fs.readFileSync(absolutePath, 'utf8'); + const config = JSON.parse(raw); + config.repoRoot = config.repoRoot || process.cwd(); + return { absolutePath, config }; +} + +function printDryRun(plan, absolutePath) { + const preview = { + planFile: absolutePath, + sessionName: plan.sessionName, + repoRoot: plan.repoRoot, + coordinationDir: plan.coordinationDir, + workers: plan.workerPlans.map(worker => ({ + workerName: worker.workerName, + branchName: worker.branchName, + worktreePath: worker.worktreePath, + seedPaths: worker.seedPaths, + taskFilePath: worker.taskFilePath, + handoffFilePath: worker.handoffFilePath, + launchCommand: worker.launchCommand + })), + commands: [ + ...plan.workerPlans.map(worker => 
worker.gitCommand), + ...plan.tmuxCommands.map(command => [command.cmd, ...command.args].join(' ')) + ] + }; + + console.log(JSON.stringify(preview, null, 2)); +} + +function main() { + const { execute, planPath, writeOnly } = parseArgs(process.argv); + + if (!planPath) { + usage(); + process.exit(1); + } + + const { absolutePath, config } = loadPlanConfig(planPath); + const plan = buildOrchestrationPlan(config); + + if (writeOnly) { + materializePlan(plan); + console.log(`Wrote orchestration files to ${plan.coordinationDir}`); + return; + } + + if (!execute) { + printDryRun(plan, absolutePath); + return; + } + + const result = executePlan(plan); + console.log([ + `Started tmux session '${result.sessionName}' with ${result.workerCount} worker panes.`, + `Coordination files: ${result.coordinationDir}`, + `Attach with: tmux attach -t ${result.sessionName}` + ].join('\n')); +} + +if (require.main === module) { + try { + main(); + } catch (error) { + console.error(`[orchestrate-worktrees] ${error.message}`); + process.exit(1); + } +} + +module.exports = { main }; diff --git a/scripts/orchestration-status.js b/scripts/orchestration-status.js new file mode 100644 index 00000000..309a9f3d --- /dev/null +++ b/scripts/orchestration-status.js @@ -0,0 +1,59 @@ +#!/usr/bin/env node +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const { collectSessionSnapshot } = require('./lib/orchestration-session'); + +function usage() { + console.log([ + 'Usage:', + ' node scripts/orchestration-status.js <session-name|plan-file> [--write <path>]', + '', + 'Examples:', + ' node scripts/orchestration-status.js workflow-visual-proof', + ' node scripts/orchestration-status.js .claude/plan/workflow-visual-proof.json', + ' node scripts/orchestration-status.js .claude/plan/workflow-visual-proof.json --write /tmp/snapshot.json' + ].join('\n')); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const target = args.find(arg => !arg.startsWith('--')); + const writeIndex = 
args.indexOf('--write'); + const writePath = writeIndex >= 0 ? args[writeIndex + 1] : null; + + return { target, writePath }; +} + +function main() { + const { target, writePath } = parseArgs(process.argv); + + if (!target) { + usage(); + process.exit(1); + } + + const snapshot = collectSessionSnapshot(target, process.cwd()); + const json = JSON.stringify(snapshot, null, 2); + + if (writePath) { + const absoluteWritePath = path.resolve(writePath); + fs.mkdirSync(path.dirname(absoluteWritePath), { recursive: true }); + fs.writeFileSync(absoluteWritePath, json + '\n', 'utf8'); + } + + console.log(json); +} + +if (require.main === module) { + try { + main(); + } catch (error) { + console.error(`[orchestration-status] ${error.message}`); + process.exit(1); + } +} + +module.exports = { main }; diff --git a/skills/claude-api/SKILL.md b/skills/claude-api/SKILL.md new file mode 100644 index 00000000..6759e978 --- /dev/null +++ b/skills/claude-api/SKILL.md @@ -0,0 +1,337 @@ +--- +name: claude-api +description: Anthropic Claude API patterns for Python and TypeScript. Covers Messages API, streaming, tool use, vision, extended thinking, batches, prompt caching, and Claude Agent SDK. Use when building applications with the Claude API or Anthropic SDKs. +origin: ECC +--- + +# Claude API + +Build applications with the Anthropic Claude API and SDKs. 
+ +## When to Activate + +- Building applications that call the Claude API +- Code imports `anthropic` (Python) or `@anthropic-ai/sdk` (TypeScript) +- User asks about Claude API patterns, tool use, streaming, or vision +- Implementing agent workflows with Claude Agent SDK +- Optimizing API costs, token usage, or latency + +## Model Selection + +| Model | ID | Best For | +|-------|-----|----------| +| Opus 4.1 | `claude-opus-4-1` | Complex reasoning, architecture, research | +| Sonnet 4 | `claude-sonnet-4-0` | Balanced coding, most development tasks | +| Haiku 3.5 | `claude-3-5-haiku-latest` | Fast responses, high-volume, cost-sensitive | + +Default to Sonnet 4 unless the task requires deep reasoning (Opus) or speed/cost optimization (Haiku). For production, prefer pinned snapshot IDs over aliases. + +## Python SDK + +### Installation + +```bash +pip install anthropic +``` + +### Basic Message + +```python +import anthropic + +client = anthropic.Anthropic() # reads ANTHROPIC_API_KEY from env + +message = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=1024, + messages=[ + {"role": "user", "content": "Explain async/await in Python"} + ] +) +print(message.content[0].text) +``` + +### Streaming + +```python +with client.messages.stream( + model="claude-sonnet-4-0", + max_tokens=1024, + messages=[{"role": "user", "content": "Write a haiku about coding"}] +) as stream: + for text in stream.text_stream: + print(text, end="", flush=True) +``` + +### System Prompt + +```python +message = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=1024, + system="You are a senior Python developer. 
Be concise.", + messages=[{"role": "user", "content": "Review this function"}] +) +``` + +## TypeScript SDK + +### Installation + +```bash +npm install @anthropic-ai/sdk +``` + +### Basic Message + +```typescript +import Anthropic from "@anthropic-ai/sdk"; + +const client = new Anthropic(); // reads ANTHROPIC_API_KEY from env + +const message = await client.messages.create({ + model: "claude-sonnet-4-0", + max_tokens: 1024, + messages: [ + { role: "user", content: "Explain async/await in TypeScript" } + ], +}); +console.log(message.content[0].text); +``` + +### Streaming + +```typescript +const stream = client.messages.stream({ + model: "claude-sonnet-4-0", + max_tokens: 1024, + messages: [{ role: "user", content: "Write a haiku" }], +}); + +for await (const event of stream) { + if (event.type === "content_block_delta" && event.delta.type === "text_delta") { + process.stdout.write(event.delta.text); + } +} +``` + +## Tool Use + +Define tools and let Claude call them: + +```python +tools = [ + { + "name": "get_weather", + "description": "Get current weather for a location", + "input_schema": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City name"}, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} + }, + "required": ["location"] + } + } +] + +message = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=1024, + tools=tools, + messages=[{"role": "user", "content": "What's the weather in SF?"}] +) + +# Handle tool use response +for block in message.content: + if block.type == "tool_use": + # Execute the tool with block.input + result = get_weather(**block.input) + # Send result back + follow_up = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=1024, + tools=tools, + messages=[ + {"role": "user", "content": "What's the weather in SF?"}, + {"role": "assistant", "content": message.content}, + {"role": "user", "content": [ + {"type": "tool_result", "tool_use_id": block.id, "content": 
str(result)} + ]} + ] + ) +``` + +## Vision + +Send images for analysis: + +```python +import base64 + +with open("diagram.png", "rb") as f: + image_data = base64.standard_b64encode(f.read()).decode("utf-8") + +message = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=1024, + messages=[{ + "role": "user", + "content": [ + {"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": image_data}}, + {"type": "text", "text": "Describe this diagram"} + ] + }] +) +``` + +## Extended Thinking + +For complex reasoning tasks: + +```python +message = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=16000, + thinking={ + "type": "enabled", + "budget_tokens": 10000 + }, + messages=[{"role": "user", "content": "Solve this math problem step by step..."}] +) + +for block in message.content: + if block.type == "thinking": + print(f"Thinking: {block.thinking}") + elif block.type == "text": + print(f"Answer: {block.text}") +``` + +## Prompt Caching + +Cache large system prompts or context to reduce costs: + +```python +message = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=1024, + system=[ + {"type": "text", "text": large_system_prompt, "cache_control": {"type": "ephemeral"}} + ], + messages=[{"role": "user", "content": "Question about the cached context"}] +) +# Check cache usage +print(f"Cache read: {message.usage.cache_read_input_tokens}") +print(f"Cache creation: {message.usage.cache_creation_input_tokens}") +``` + +## Batches API + +Process large volumes asynchronously at 50% cost reduction: + +```python +import time + +batch = client.messages.batches.create( + requests=[ + { + "custom_id": f"request-{i}", + "params": { + "model": "claude-sonnet-4-0", + "max_tokens": 1024, + "messages": [{"role": "user", "content": prompt}] + } + } + for i, prompt in enumerate(prompts) + ] +) + +# Poll for completion +while True: + status = client.messages.batches.retrieve(batch.id) + if status.processing_status == 
"ended": + break + time.sleep(30) + +# Get results +for result in client.messages.batches.results(batch.id): + print(result.result.message.content[0].text) +``` + +## Claude Agent SDK + +Build multi-step agents: + +```python +# Note: Agent SDK API surface may change — check official docs +import anthropic + +# Define tools as functions +tools = [{ + "name": "search_codebase", + "description": "Search the codebase for relevant code", + "input_schema": { + "type": "object", + "properties": {"query": {"type": "string"}}, + "required": ["query"] + } +}] + +# Run an agentic loop with tool use +client = anthropic.Anthropic() +messages = [{"role": "user", "content": "Review the auth module for security issues"}] + +while True: + response = client.messages.create( + model="claude-sonnet-4-0", + max_tokens=4096, + tools=tools, + messages=messages, + ) + if response.stop_reason == "end_turn": + break + # Handle tool calls and continue the loop + messages.append({"role": "assistant", "content": response.content}) + # ... execute tools and append tool_result messages +``` + +## Cost Optimization + +| Strategy | Savings | When to Use | +|----------|---------|-------------| +| Prompt caching | Up to 90% on cached tokens | Repeated system prompts or context | +| Batches API | 50% | Non-time-sensitive bulk processing | +| Haiku instead of Sonnet | ~75% | Simple tasks, classification, extraction | +| Shorter max_tokens | Variable | When you know output will be short | +| Streaming | None (same cost) | Better UX, same price | + +## Error Handling + +```python +import time + +from anthropic import APIError, RateLimitError, APIConnectionError + +try: + message = client.messages.create(...) 
+except RateLimitError: + # Back off and retry + time.sleep(60) +except APIConnectionError: + # Network issue, retry with backoff + pass +except APIError as e: + print(f"API error {e.status_code}: {e.message}") +``` + +## Environment Setup + +```bash +# Required +export ANTHROPIC_API_KEY="your-api-key-here" + +# Optional: set default model +export ANTHROPIC_MODEL="claude-sonnet-4-0" +``` + +Never hardcode API keys. Always use environment variables. diff --git a/skills/configure-ecc/SKILL.md b/skills/configure-ecc/SKILL.md index 42786a53..a762868b 100644 --- a/skills/configure-ecc/SKILL.md +++ b/skills/configure-ecc/SKILL.md @@ -82,7 +82,7 @@ If the user chooses niche or core + niche, continue to category selection below ### 2b: Choose Skill Categories -There are 27 skills organized into 4 categories. Use `AskUserQuestion` with `multiSelect: true`: +There are 35 skills organized into 7 categories. Use `AskUserQuestion` with `multiSelect: true`: ``` Question: "Which skill categories do you want to install?" 
@@ -90,6 +90,10 @@ Options: - "Framework & Language" — "Django, Spring Boot, Go, Python, Java, Frontend, Backend patterns" - "Database" — "PostgreSQL, ClickHouse, JPA/Hibernate patterns" - "Workflow & Quality" — "TDD, verification, learning, security review, compaction" + - "Research & APIs" — "Deep research, Exa search, Claude API patterns" + - "Social & Content Distribution" — "X/Twitter API, crossposting alongside content-engine" + - "Media Generation" — "fal.ai image/video/audio alongside VideoDB" + - "Orchestration" — "dmux multi-agent workflows" - "All skills" — "Install every available skill" ``` @@ -150,6 +154,34 @@ For each selected category, print the full list of skills below and ask the user | `investor-materials` | Pitch decks, one-pagers, investor memos, and financial models | | `investor-outreach` | Personalized investor cold emails, warm intros, and follow-ups | +**Category: Research & APIs (3 skills)** + +| Skill | Description | +|-------|-------------| +| `deep-research` | Multi-source deep research using firecrawl and exa MCPs with cited reports | +| `exa-search` | Neural search via Exa MCP for web, code, company, and people research | +| `claude-api` | Anthropic Claude API patterns: Messages, streaming, tool use, vision, batches, Agent SDK | + +**Category: Social & Content Distribution (2 skills)** + +| Skill | Description | +|-------|-------------| +| `x-api` | X/Twitter API integration for posting, threads, search, and analytics | +| `crosspost` | Multi-platform content distribution with platform-native adaptation | + +**Category: Media Generation (2 skills)** + +| Skill | Description | +|-------|-------------| +| `fal-ai-media` | Unified AI media generation (image, video, audio) via fal.ai MCP | +| `video-editing` | AI-assisted video editing for cutting, structuring, and augmenting real footage | + +**Category: Orchestration (1 skill)** + +| Skill | Description | +|-------|-------------| +| `dmux-workflows` | Multi-agent orchestration using 
dmux for parallel agent sessions | + **Standalone** | Skill | Description | @@ -230,6 +262,10 @@ Some skills reference others. Verify these dependencies: - `continuous-learning-v2` references `~/.claude/homunculus/` directory - `python-testing` may reference `python-patterns` - `golang-testing` may reference `golang-patterns` +- `crosspost` references `content-engine` and `x-api` +- `deep-research` references `exa-search` (complementary MCP tools) +- `fal-ai-media` references `videodb` (complementary media skill) +- `x-api` references `content-engine` and `crosspost` - Language-specific rules reference `common/` counterparts ### 4d: Report Issues diff --git a/skills/crosspost/SKILL.md b/skills/crosspost/SKILL.md new file mode 100644 index 00000000..81e2a29d --- /dev/null +++ b/skills/crosspost/SKILL.md @@ -0,0 +1,188 @@ +--- +name: crosspost +description: Multi-platform content distribution across X, LinkedIn, Threads, and Bluesky. Adapts content per platform using content-engine patterns. Never posts identical content cross-platform. Use when the user wants to distribute content across social platforms. +origin: ECC +--- + +# Crosspost + +Distribute content across multiple social platforms with platform-native adaptation. + +## When to Activate + +- User wants to post content to multiple platforms +- Publishing announcements, launches, or updates across social media +- Repurposing a post from one platform to others +- User says "crosspost", "post everywhere", "share on all platforms", or "distribute this" + +## Core Rules + +1. **Never post identical content cross-platform.** Each platform gets a native adaptation. +2. **Primary platform first.** Post to the main platform, then adapt for others. +3. **Respect platform conventions.** Length limits, formatting, link handling all differ. +4. **One idea per post.** If the source content has multiple ideas, split across posts. +5. **Attribution matters.** If crossposting someone else's content, credit the source. 
+ +## Platform Specifications + +| Platform | Max Length | Link Handling | Hashtags | Media | +|----------|-----------|---------------|----------|-------| +| X | 280 chars (4000 for Premium) | Counted in length | Minimal (1-2 max) | Images, video, GIFs | +| LinkedIn | 3000 chars | Not counted in length | 3-5 relevant | Images, video, docs, carousels | +| Threads | 500 chars | Separate link attachment | None typical | Images, video | +| Bluesky | 300 chars | Via facets (rich text) | None (use feeds) | Images | + +## Workflow + +### Step 1: Create Source Content + +Start with the core idea. Use `content-engine` skill for high-quality drafts: +- Identify the single core message +- Determine the primary platform (where the audience is biggest) +- Draft the primary platform version first + +### Step 2: Identify Target Platforms + +Ask the user or determine from context: +- Which platforms to target +- Priority order (primary gets the best version) +- Any platform-specific requirements (e.g., LinkedIn needs professional tone) + +### Step 3: Adapt Per Platform + +For each target platform, transform the content: + +**X adaptation:** +- Open with a hook, not a summary +- Cut to the core insight fast +- Keep links out of main body when possible +- Use thread format for longer content + +**LinkedIn adaptation:** +- Strong first line (visible before "see more") +- Short paragraphs with line breaks +- Frame around lessons, results, or professional takeaways +- More explicit context than X (LinkedIn audience needs framing) + +**Threads adaptation:** +- Conversational, casual tone +- Shorter than LinkedIn, less compressed than X +- Visual-first if possible + +**Bluesky adaptation:** +- Direct and concise (300 char limit) +- Community-oriented tone +- Use feeds/lists for topic targeting instead of hashtags + +### Step 4: Post Primary Platform + +Post to the primary platform first: +- Use `x-api` skill for X +- Use platform-specific APIs or tools for others +- Capture the post URL 
for cross-referencing + +### Step 5: Post to Secondary Platforms + +Post adapted versions to remaining platforms: +- Stagger timing (not all at once — 30-60 min gaps) +- Include cross-platform references where appropriate ("longer thread on X" etc.) + +## Content Adaptation Examples + +### Source: Product Launch + +**X version:** +``` +We just shipped [feature]. + +[One specific thing it does that's impressive] + +[Link] +``` + +**LinkedIn version:** +``` +Excited to share: we just launched [feature] at [Company]. + +Here's why it matters: + +[2-3 short paragraphs with context] + +[Takeaway for the audience] + +[Link] +``` + +**Threads version:** +``` +just shipped something cool — [feature] + +[casual explanation of what it does] + +link in bio +``` + +### Source: Technical Insight + +**X version:** +``` +TIL: [specific technical insight] + +[Why it matters in one sentence] +``` + +**LinkedIn version:** +``` +A pattern I've been using that's made a real difference: + +[Technical insight with professional framing] + +[How it applies to teams/orgs] + +#relevantHashtag +``` + +## API Integration + +### Batch Crossposting Service (Example Pattern) +If using a crossposting service (e.g., Postbridge, Buffer, or a custom API), the pattern looks like: + +```python +import os +import requests + +resp = requests.post( + "https://your-crosspost-service.example/api/posts", + headers={"Authorization": f"Bearer {os.environ['POSTBRIDGE_API_KEY']}"}, + json={ + "platforms": ["twitter", "linkedin", "threads"], + "content": { + "twitter": {"text": x_version}, + "linkedin": {"text": linkedin_version}, + "threads": {"text": threads_version} + } + } +) +``` + +### Manual Posting +Without Postbridge, post to each platform using its native API: +- X: Use `x-api` skill patterns +- LinkedIn: LinkedIn API v2 with OAuth 2.0 +- Threads: Threads API (Meta) +- Bluesky: AT Protocol API + +## Quality Gate + +Before posting: +- [ ] Each platform version reads naturally for that platform +- [ ] No 
identical content across platforms +- [ ] Length limits respected +- [ ] Links work and are placed appropriately +- [ ] Tone matches platform conventions +- [ ] Media is sized correctly for each platform + +## Related Skills + +- `content-engine` — Generate platform-native content +- `x-api` — X/Twitter API integration diff --git a/skills/deep-research/SKILL.md b/skills/deep-research/SKILL.md new file mode 100644 index 00000000..5a412b7e --- /dev/null +++ b/skills/deep-research/SKILL.md @@ -0,0 +1,155 @@ +--- +name: deep-research +description: Multi-source deep research using firecrawl and exa MCPs. Searches the web, synthesizes findings, and delivers cited reports with source attribution. Use when the user wants thorough research on any topic with evidence and citations. +origin: ECC +--- + +# Deep Research + +Produce thorough, cited research reports from multiple web sources using firecrawl and exa MCP tools. + +## When to Activate + +- User asks to research any topic in depth +- Competitive analysis, technology evaluation, or market sizing +- Due diligence on companies, investors, or technologies +- Any question requiring synthesis from multiple sources +- User says "research", "deep dive", "investigate", or "what's the current state of" + +## MCP Requirements + +At least one of: +- **firecrawl** — `firecrawl_search`, `firecrawl_scrape`, `firecrawl_crawl` +- **exa** — `web_search_exa`, `web_search_advanced_exa`, `crawling_exa` + +Both together give the best coverage. Configure in `~/.claude.json` or `~/.codex/config.toml`. + +## Workflow + +### Step 1: Understand the Goal + +Ask 1-2 quick clarifying questions: +- "What's your goal — learning, making a decision, or writing something?" +- "Any specific angle or depth you want?" + +If the user says "just research it" — skip ahead with reasonable defaults. + +### Step 2: Plan the Research + +Break the topic into 3-5 research sub-questions. 
Example: + - Topic: "Impact of AI on healthcare" + - What are the main AI applications in healthcare today? + - What clinical outcomes have been measured? + - What are the regulatory challenges? + - What companies are leading this space? + - What's the market size and growth trajectory? + +### Step 3: Execute Multi-Source Search + +For EACH sub-question, search using available MCP tools: + +**With firecrawl:** +``` +firecrawl_search(query: "<sub-question>", limit: 8) +``` + +**With exa:** +``` +web_search_exa(query: "<sub-question>", numResults: 8) +web_search_advanced_exa(query: "<sub-question>", numResults: 5, startPublishedDate: "2025-01-01") +``` + +**Search strategy:** +- Use 2-3 different keyword variations per sub-question +- Mix general and news-focused queries +- Aim for 15-30 unique sources total +- Prioritize: academic, official, reputable news > blogs > forums + +### Step 4: Deep-Read Key Sources + +For the most promising URLs, fetch full content: + +**With firecrawl:** +``` +firecrawl_scrape(url: "<url>") +``` + +**With exa:** +``` +crawling_exa(url: "<url>", tokensNum: 5000) +``` + +Read 3-5 key sources in full for depth. Do not rely only on search snippets. + +### Step 5: Synthesize and Write Report + +Structure the report: + +```markdown +# [Topic]: Research Report +*Generated: [date] | Sources: [N] | Confidence: [High/Medium/Low]* + +## Executive Summary +[3-5 sentence overview of key findings] + +## 1. [First Major Theme] +[Findings with inline citations] +- Key point ([Source Name](url)) +- Supporting data ([Source Name](url)) + +## 2. [Second Major Theme] +... + +## 3. [Third Major Theme] +... + +## Key Takeaways +- [Actionable insight 1] +- [Actionable insight 2] +- [Actionable insight 3] + +## Sources +1. [Title](url) — [one-line summary] +2. ... + +## Methodology +Searched [N] queries across web and news. Analyzed [M] sources. 
+Sub-questions investigated: [list] +``` + +### Step 6: Deliver + +- **Short topics**: Post the full report in chat +- **Long reports**: Post the executive summary + key takeaways, save full report to a file + +## Parallel Research with Subagents + +For broad topics, use Claude Code's Task tool to parallelize: + +``` +Launch 3 research agents in parallel: +1. Agent 1: Research sub-questions 1-2 +2. Agent 2: Research sub-questions 3-4 +3. Agent 3: Research sub-question 5 + cross-cutting themes +``` + +Each agent searches, reads sources, and returns findings. The main session synthesizes into the final report. + +## Quality Rules + +1. **Every claim needs a source.** No unsourced assertions. +2. **Cross-reference.** If only one source says it, flag it as unverified. +3. **Recency matters.** Prefer sources from the last 12 months. +4. **Acknowledge gaps.** If you couldn't find good info on a sub-question, say so. +5. **No hallucination.** If you don't know, say "insufficient data found." +6. **Separate fact from inference.** Label estimates, projections, and opinions clearly. + +## Examples + +``` +"Research the current state of nuclear fusion energy" +"Deep dive into Rust vs Go for backend services in 2026" +"Research the best strategies for bootstrapping a SaaS business" +"What's happening with the US housing market right now?" +"Investigate the competitive landscape for AI code editors" +``` diff --git a/skills/dmux-workflows/SKILL.md b/skills/dmux-workflows/SKILL.md new file mode 100644 index 00000000..6e6c5544 --- /dev/null +++ b/skills/dmux-workflows/SKILL.md @@ -0,0 +1,191 @@ +--- +name: dmux-workflows +description: Multi-agent orchestration using dmux (tmux pane manager for AI agents). Patterns for parallel agent workflows across Claude Code, Codex, OpenCode, and other harnesses. Use when running multiple agent sessions in parallel or coordinating multi-agent development workflows. 
+origin: ECC +--- + +# dmux Workflows + +Orchestrate parallel AI agent sessions using dmux, a tmux pane manager for agent harnesses. + +## When to Activate + +- Running multiple agent sessions in parallel +- Coordinating work across Claude Code, Codex, and other harnesses +- Complex tasks that benefit from divide-and-conquer parallelism +- User says "run in parallel", "split this work", "use dmux", or "multi-agent" + +## What is dmux + +dmux is a tmux-based orchestration tool that manages AI agent panes: +- Press `n` to create a new pane with a prompt +- Press `m` to merge pane output back to the main session +- Supports: Claude Code, Codex, OpenCode, Cline, Gemini, Qwen + +**Install:** `npm install -g dmux` or see [github.com/standardagents/dmux](https://github.com/standardagents/dmux) + +## Quick Start + +```bash +# Start dmux session +dmux + +# Create agent panes (press 'n' in dmux, then type prompt) +# Pane 1: "Implement the auth middleware in src/auth/" +# Pane 2: "Write tests for the user service" +# Pane 3: "Update API documentation" + +# Each pane runs its own agent session +# Press 'm' to merge results back +``` + +## Workflow Patterns + +### Pattern 1: Research + Implement + +Split research and implementation into parallel tracks: + +``` +Pane 1 (Research): "Research best practices for rate limiting in Node.js. + Check current libraries, compare approaches, and write findings to + /tmp/rate-limit-research.md" + +Pane 2 (Implement): "Implement rate limiting middleware for our Express API. + Start with a basic token bucket, we'll refine after research completes." 
+ +# After Pane 1 completes, merge findings into Pane 2's context +``` + +### Pattern 2: Multi-File Feature + +Parallelize work across independent files: + +``` +Pane 1: "Create the database schema and migrations for the billing feature" +Pane 2: "Build the billing API endpoints in src/api/billing/" +Pane 3: "Create the billing dashboard UI components" + +# Merge all, then do integration in main pane +``` + +### Pattern 3: Test + Fix Loop + +Run tests in one pane, fix in another: + +``` +Pane 1 (Watcher): "Run the test suite in watch mode. When tests fail, + summarize the failures." + +Pane 2 (Fixer): "Fix failing tests based on the error output from pane 1" +``` + +### Pattern 4: Cross-Harness + +Use different AI tools for different tasks: + +``` +Pane 1 (Claude Code): "Review the security of the auth module" +Pane 2 (Codex): "Refactor the utility functions for performance" +Pane 3 (Claude Code): "Write E2E tests for the checkout flow" +``` + +### Pattern 5: Code Review Pipeline + +Parallel review perspectives: + +``` +Pane 1: "Review src/api/ for security vulnerabilities" +Pane 2: "Review src/api/ for performance issues" +Pane 3: "Review src/api/ for test coverage gaps" + +# Merge all reviews into a single report +``` + +## Best Practices + +1. **Independent tasks only.** Don't parallelize tasks that depend on each other's output. +2. **Clear boundaries.** Each pane should work on distinct files or concerns. +3. **Merge strategically.** Review pane output before merging to avoid conflicts. +4. **Use git worktrees.** For file-conflict-prone work, use separate worktrees per pane. +5. **Resource awareness.** Each pane uses API tokens — keep total panes under 5-6. 
+ +## Git Worktree Integration + +For tasks that touch overlapping files: + +```bash +# Create worktrees for isolation +git worktree add -b feat/auth ../feature-auth HEAD +git worktree add -b feat/billing ../feature-billing HEAD + +# Run agents in separate worktrees +# Pane 1: cd ../feature-auth && claude +# Pane 2: cd ../feature-billing && claude + +# Merge branches when done +git merge feat/auth +git merge feat/billing +``` + +## Complementary Tools + +| Tool | What It Does | When to Use | +|------|-------------|-------------| +| **dmux** | tmux pane management for agents | Parallel agent sessions | +| **Superset** | Terminal IDE for 10+ parallel agents | Large-scale orchestration | +| **Claude Code Task tool** | In-process subagent spawning | Programmatic parallelism within a session | +| **Codex multi-agent** | Built-in agent roles | Codex-specific parallel work | + +## ECC Helper + +ECC now includes a helper for external tmux-pane orchestration with separate git worktrees: + +```bash +node scripts/orchestrate-worktrees.js plan.json --execute +``` + +Example `plan.json`: + +```json +{ + "sessionName": "skill-audit", + "baseRef": "HEAD", + "launcherCommand": "codex exec --cwd {worktree_path} --task-file {task_file}", + "workers": [ + { "name": "docs-a", "task": "Fix skills 1-4 and write handoff notes." }, + { "name": "docs-b", "task": "Fix skills 5-8 and write handoff notes." 
}
+  ]
+}
+```
+
+The helper:
+- Creates one branch-backed git worktree per worker
+- Optionally overlays selected `seedPaths` from the main checkout into each worker worktree
+- Writes per-worker `task.md`, `handoff.md`, and `status.md` files under `.orchestration/<worker-name>/`
+- Starts a tmux session with one pane per worker
+- Launches each worker command in its own pane
+- Leaves the main pane free for the orchestrator
+
+Use `seedPaths` when workers need access to dirty or untracked local files that are not yet part of `HEAD`, such as local orchestration scripts, draft plans, or docs:
+
+```json
+{
+  "sessionName": "workflow-e2e",
+  "seedPaths": [
+    "scripts/orchestrate-worktrees.js",
+    "scripts/lib/tmux-worktree-orchestrator.js",
+    ".claude/plan/workflow-e2e-test.json"
+  ],
+  "launcherCommand": "bash {repo_root}/scripts/orchestrate-codex-worker.sh {task_file} {handoff_file} {status_file}",
+  "workers": [
+    { "name": "seed-check", "task": "Verify seeded files are present before starting work." }
+  ]
+}
+```
+
+## Troubleshooting
+
+- **Pane not responding:** Switch to the pane directly or inspect it with `tmux capture-pane -pt <session>:0.<pane-index>`.
+- **Merge conflicts:** Use git worktrees to isolate file changes per pane.
+- **High token usage:** Reduce number of parallel panes. Each pane is a full agent session.
+- **tmux not found:** Install with `brew install tmux` (macOS) or `apt install tmux` (Linux).
diff --git a/skills/exa-search/SKILL.md b/skills/exa-search/SKILL.md
new file mode 100644
index 00000000..ebe6f001
--- /dev/null
+++ b/skills/exa-search/SKILL.md
@@ -0,0 +1,170 @@
+---
+name: exa-search
+description: Neural search via Exa MCP for web, code, and company research. Use when the user needs web search, code examples, company intel, people lookup, or AI-powered deep research with Exa's neural search engine.
+origin: ECC
+---
+
+# Exa Search
+
+Neural search for web content, code, companies, and people via the Exa MCP server.
+ +## When to Activate + +- User needs current web information or news +- Searching for code examples, API docs, or technical references +- Researching companies, competitors, or market players +- Finding professional profiles or people in a domain +- Running background research for any development task +- User says "search for", "look up", "find", or "what's the latest on" + +## MCP Requirement + +Exa MCP server must be configured. Add to `~/.claude.json`: + +```json +"exa-web-search": { + "command": "npx", + "args": [ + "-y", + "exa-mcp-server", + "tools=web_search_exa,get_code_context_exa,crawling_exa,company_research_exa,linkedin_search_exa,deep_researcher_start,deep_researcher_check" + ], + "env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" } +} +``` + +Get an API key at [exa.ai](https://exa.ai). +If you omit the `tools=...` argument, only a smaller default tool set may be enabled. + +## Core Tools + +### web_search_exa +General web search for current information, news, or facts. + +``` +web_search_exa(query: "latest AI developments 2026", numResults: 5) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `query` | string | required | Search query | +| `numResults` | number | 8 | Number of results | + +### web_search_advanced_exa +Filtered search with domain and date constraints. 
+ +``` +web_search_advanced_exa( + query: "React Server Components best practices", + numResults: 5, + includeDomains: ["github.com", "react.dev"], + startPublishedDate: "2025-01-01" +) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `query` | string | required | Search query | +| `numResults` | number | 8 | Number of results | +| `includeDomains` | string[] | none | Limit to specific domains | +| `excludeDomains` | string[] | none | Exclude specific domains | +| `startPublishedDate` | string | none | ISO date filter (start) | +| `endPublishedDate` | string | none | ISO date filter (end) | + +### get_code_context_exa +Find code examples and documentation from GitHub, Stack Overflow, and docs sites. + +``` +get_code_context_exa(query: "Python asyncio patterns", tokensNum: 3000) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `query` | string | required | Code or API search query | +| `tokensNum` | number | 5000 | Content tokens (1000-50000) | + +### company_research_exa +Research companies for business intelligence and news. + +``` +company_research_exa(companyName: "Anthropic", numResults: 5) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `companyName` | string | required | Company name | +| `numResults` | number | 5 | Number of results | + +### linkedin_search_exa +Find professional profiles and company-adjacent people research. + +``` +linkedin_search_exa(query: "AI safety researchers at Anthropic", numResults: 5) +``` + +### crawling_exa +Extract full page content from a URL. 
+ +``` +crawling_exa(url: "https://example.com/article", tokensNum: 5000) +``` + +**Parameters:** +| Param | Type | Default | Notes | +|-------|------|---------|-------| +| `url` | string | required | URL to extract | +| `tokensNum` | number | 5000 | Content tokens | + +### deep_researcher_start / deep_researcher_check +Start an AI research agent that runs asynchronously. + +``` +# Start research +deep_researcher_start(query: "comprehensive analysis of AI code editors in 2026") + +# Check status (returns results when complete) +deep_researcher_check(researchId: "") +``` + +## Usage Patterns + +### Quick Lookup +``` +web_search_exa(query: "Node.js 22 new features", numResults: 3) +``` + +### Code Research +``` +get_code_context_exa(query: "Rust error handling patterns Result type", tokensNum: 3000) +``` + +### Company Due Diligence +``` +company_research_exa(companyName: "Vercel", numResults: 5) +web_search_advanced_exa(query: "Vercel funding valuation 2026", numResults: 3) +``` + +### Technical Deep Dive +``` +# Start async research +deep_researcher_start(query: "WebAssembly component model status and adoption") +# ... do other work ... 
+deep_researcher_check(researchId: "") +``` + +## Tips + +- Use `web_search_exa` for broad queries, `web_search_advanced_exa` for filtered results +- Lower `tokensNum` (1000-2000) for focused code snippets, higher (5000+) for comprehensive context +- Combine `company_research_exa` with `web_search_advanced_exa` for thorough company analysis +- Use `crawling_exa` to get full content from specific URLs found in search results +- `deep_researcher_start` is best for comprehensive topics that benefit from AI synthesis + +## Related Skills + +- `deep-research` — Full research workflow using firecrawl + exa together +- `market-research` — Business-oriented research with decision frameworks diff --git a/skills/fal-ai-media/SKILL.md b/skills/fal-ai-media/SKILL.md new file mode 100644 index 00000000..254be99e --- /dev/null +++ b/skills/fal-ai-media/SKILL.md @@ -0,0 +1,284 @@ +--- +name: fal-ai-media +description: Unified media generation via fal.ai MCP — image, video, and audio. Covers text-to-image (Nano Banana), text/image-to-video (Seedance, Kling, Veo 3), text-to-speech (CSM-1B), and video-to-audio (ThinkSound). Use when the user wants to generate images, videos, or audio with AI. +origin: ECC +--- + +# fal.ai Media Generation + +Generate images, videos, and audio using fal.ai models via MCP. + +## When to Activate + +- User wants to generate images from text prompts +- Creating videos from text or images +- Generating speech, music, or sound effects +- Any media generation task +- User says "generate image", "create video", "text to speech", "make a thumbnail", or similar + +## MCP Requirement + +fal.ai MCP server must be configured. Add to `~/.claude.json`: + +```json +"fal-ai": { + "command": "npx", + "args": ["-y", "fal-ai-mcp-server"], + "env": { "FAL_KEY": "YOUR_FAL_KEY_HERE" } +} +``` + +Get an API key at [fal.ai](https://fal.ai). 
+ +## MCP Tools + +The fal.ai MCP provides these tools: +- `search` — Find available models by keyword +- `find` — Get model details and parameters +- `generate` — Run a model with parameters +- `result` — Check async generation status +- `status` — Check job status +- `cancel` — Cancel a running job +- `estimate_cost` — Estimate generation cost +- `models` — List popular models +- `upload` — Upload files for use as inputs + +--- + +## Image Generation + +### Nano Banana 2 (Fast) +Best for: quick iterations, drafts, text-to-image, image editing. + +``` +generate( + app_id: "fal-ai/nano-banana-2", + input_data: { + "prompt": "a futuristic cityscape at sunset, cyberpunk style", + "image_size": "landscape_16_9", + "num_images": 1, + "seed": 42 + } +) +``` + +### Nano Banana Pro (High Fidelity) +Best for: production images, realism, typography, detailed prompts. + +``` +generate( + app_id: "fal-ai/nano-banana-pro", + input_data: { + "prompt": "professional product photo of wireless headphones on marble surface, studio lighting", + "image_size": "square", + "num_images": 1, + "guidance_scale": 7.5 + } +) +``` + +### Common Image Parameters + +| Param | Type | Options | Notes | +|-------|------|---------|-------| +| `prompt` | string | required | Describe what you want | +| `image_size` | string | `square`, `portrait_4_3`, `landscape_16_9`, `portrait_16_9`, `landscape_4_3` | Aspect ratio | +| `num_images` | number | 1-4 | How many to generate | +| `seed` | number | any integer | Reproducibility | +| `guidance_scale` | number | 1-20 | How closely to follow the prompt (higher = more literal) | + +### Image Editing +Use Nano Banana 2 with an input image for inpainting, outpainting, or style transfer: + +``` +# First upload the source image +upload(file_path: "/path/to/image.png") + +# Then generate with image input +generate( + app_id: "fal-ai/nano-banana-2", + input_data: { + "prompt": "same scene but in watercolor style", + "image_url": "", + "image_size": 
"landscape_16_9" + } +) +``` + +--- + +## Video Generation + +### Seedance 1.0 Pro (ByteDance) +Best for: text-to-video, image-to-video with high motion quality. + +``` +generate( + app_id: "fal-ai/seedance-1-0-pro", + input_data: { + "prompt": "a drone flyover of a mountain lake at golden hour, cinematic", + "duration": "5s", + "aspect_ratio": "16:9", + "seed": 42 + } +) +``` + +### Kling Video v3 Pro +Best for: text/image-to-video with native audio generation. + +``` +generate( + app_id: "fal-ai/kling-video/v3/pro", + input_data: { + "prompt": "ocean waves crashing on a rocky coast, dramatic clouds", + "duration": "5s", + "aspect_ratio": "16:9" + } +) +``` + +### Veo 3 (Google DeepMind) +Best for: video with generated sound, high visual quality. + +``` +generate( + app_id: "fal-ai/veo-3", + input_data: { + "prompt": "a bustling Tokyo street market at night, neon signs, crowd noise", + "aspect_ratio": "16:9" + } +) +``` + +### Image-to-Video +Start from an existing image: + +``` +generate( + app_id: "fal-ai/seedance-1-0-pro", + input_data: { + "prompt": "camera slowly zooms out, gentle wind moves the trees", + "image_url": "", + "duration": "5s" + } +) +``` + +### Video Parameters + +| Param | Type | Options | Notes | +|-------|------|---------|-------| +| `prompt` | string | required | Describe the video | +| `duration` | string | `"5s"`, `"10s"` | Video length | +| `aspect_ratio` | string | `"16:9"`, `"9:16"`, `"1:1"` | Frame ratio | +| `seed` | number | any integer | Reproducibility | +| `image_url` | string | URL | Source image for image-to-video | + +--- + +## Audio Generation + +### CSM-1B (Conversational Speech) +Text-to-speech with natural, conversational quality. + +``` +generate( + app_id: "fal-ai/csm-1b", + input_data: { + "text": "Hello, welcome to the demo. Let me show you how this works.", + "speaker_id": 0 + } +) +``` + +### ThinkSound (Video-to-Audio) +Generate matching audio from video content. 
+
+```
+generate(
+  app_id: "fal-ai/thinksound",
+  input_data: {
+    "video_url": "<video-url>",
+    "prompt": "ambient forest sounds with birds chirping"
+  }
+)
+```
+
+### ElevenLabs (via API, no MCP)
+For professional voice synthesis, use ElevenLabs directly:
+
+```python
+import os
+import requests
+
+resp = requests.post(
+    f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}",
+    headers={
+        "xi-api-key": os.environ["ELEVENLABS_API_KEY"],
+        "Content-Type": "application/json"
+    },
+    json={
+        "text": "Your text here",
+        "model_id": "eleven_turbo_v2_5",
+        "voice_settings": {"stability": 0.5, "similarity_boost": 0.75}
+    }
+)
+with open("output.mp3", "wb") as f:
+    f.write(resp.content)
+```
+
+### VideoDB Generative Audio
+If VideoDB is configured, use its generative audio:
+
+```python
+# Voice generation
+audio = coll.generate_voice(text="Your narration here", voice="alloy")
+
+# Music generation
+music = coll.generate_music(prompt="upbeat electronic background music", duration=30)
+
+# Sound effects
+sfx = coll.generate_sound_effect(prompt="thunder crack followed by rain")
+```
+
+---
+
+## Cost Estimation
+
+Before generating, check estimated cost:
+
+```
+estimate_cost(
+  estimate_type: "unit_price",
+  endpoints: {
+    "fal-ai/nano-banana-pro": {
+      "num_images": 1
+    }
+  }
+)
+```
+
+## Model Discovery
+
+Find models for specific tasks:
+
+```
+search(query: "text to video")
+find(endpoint_ids: ["fal-ai/seedance-1-0-pro"])
+models()
+```
+
+## Tips
+
+- Use `seed` for reproducible results when iterating on prompts
+- Start with lower-cost models (Nano Banana 2) for prompt iteration, then switch to Pro for finals
+- For video, keep prompts descriptive but concise — focus on motion and scene
+- Image-to-video produces more controlled results than pure text-to-video
+- Check `estimate_cost` before running expensive video generations
+
+## Related Skills
+
+- `videodb` — Video processing, editing, and streaming
+- `video-editing` — AI-powered video editing workflows
+- `content-engine` —
Content creation for social platforms diff --git a/skills/strategic-compact/SKILL.md b/skills/strategic-compact/SKILL.md index 67bbb31e..ddb99753 100644 --- a/skills/strategic-compact/SKILL.md +++ b/skills/strategic-compact/SKILL.md @@ -96,6 +96,34 @@ Understanding what persists helps you compact with confidence: 5. **Write before compacting** — Save important context to files or memory before compacting 6. **Use `/compact` with a summary** — Add a custom message: `/compact Focus on implementing auth middleware next` +## Token Optimization Patterns + +### Trigger-Table Lazy Loading +Instead of loading full skill content at session start, use a trigger table that maps keywords to skill paths. Skills load only when triggered, reducing baseline context by 50%+: + +| Trigger | Skill | Load When | +|---------|-------|-----------| +| "test", "tdd", "coverage" | tdd-workflow | User mentions testing | +| "security", "auth", "xss" | security-review | Security-related work | +| "deploy", "ci/cd" | deployment-patterns | Deployment context | + +### Context Composition Awareness +Monitor what's consuming your context window: +- **CLAUDE.md files** — Always loaded, keep lean +- **Loaded skills** — Each skill adds 1-5K tokens +- **Conversation history** — Grows with each exchange +- **Tool results** — File reads, search results add bulk + +### Duplicate Instruction Detection +Common sources of duplicate context: +- Same rules in both `~/.claude/rules/` and project `.claude/rules/` +- Skills that repeat CLAUDE.md instructions +- Multiple skills covering overlapping domains + +### Context Optimization Tools +- `token-optimizer` MCP — Automated 95%+ token reduction via content deduplication +- `context-mode` — Context virtualization (315KB to 5.4KB demonstrated) + ## Related - [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) — Token optimization section diff --git a/skills/video-editing/SKILL.md b/skills/video-editing/SKILL.md new file mode 100644 index 
00000000..7dd60416 --- /dev/null +++ b/skills/video-editing/SKILL.md @@ -0,0 +1,310 @@ +--- +name: video-editing +description: AI-assisted video editing workflows for cutting, structuring, and augmenting real footage. Covers the full pipeline from raw capture through FFmpeg, Remotion, ElevenLabs, fal.ai, and final polish in Descript or CapCut. Use when the user wants to edit video, cut footage, create vlogs, or build video content. +origin: ECC +--- + +# Video Editing + +AI-assisted editing for real footage. Not generation from prompts. Editing existing video fast. + +## When to Activate + +- User wants to edit, cut, or structure video footage +- Turning long recordings into short-form content +- Building vlogs, tutorials, or demo videos from raw capture +- Adding overlays, subtitles, music, or voiceover to existing video +- Reframing video for different platforms (YouTube, TikTok, Instagram) +- User says "edit video", "cut this footage", "make a vlog", or "video workflow" + +## Core Thesis + +AI video editing is useful when you stop asking it to create the whole video and start using it to compress, structure, and augment real footage. The value is not generation. The value is compression. + +## The Pipeline + +``` +Screen Studio / raw footage + → Claude / Codex + → FFmpeg + → Remotion + → ElevenLabs / fal.ai + → Descript or CapCut +``` + +Each layer has a specific job. Do not skip layers. Do not try to make one tool do everything. + +## Layer 1: Capture (Screen Studio / Raw Footage) + +Collect the source material: +- **Screen Studio**: polished screen recordings for app demos, coding sessions, browser workflows +- **Raw camera footage**: vlog footage, interviews, event recordings +- **Desktop capture via VideoDB**: session recording with real-time context (see `videodb` skill) + +Output: raw files ready for organization. 
+ +## Layer 2: Organization (Claude / Codex) + +Use Claude Code or Codex to: +- **Transcribe and label**: generate transcript, identify topics and themes +- **Plan structure**: decide what stays, what gets cut, what order works +- **Identify dead sections**: find pauses, tangents, repeated takes +- **Generate edit decision list**: timestamps for cuts, segments to keep +- **Scaffold FFmpeg and Remotion code**: generate the commands and compositions + +``` +Example prompt: +"Here's the transcript of a 4-hour recording. Identify the 8 strongest segments +for a 24-minute vlog. Give me FFmpeg cut commands for each segment." +``` + +This layer is about structure, not final creative taste. + +## Layer 3: Deterministic Cuts (FFmpeg) + +FFmpeg handles the boring but critical work: splitting, trimming, concatenating, and preprocessing. + +### Extract segment by timestamp + +```bash +ffmpeg -i raw.mp4 -ss 00:12:30 -to 00:15:45 -c copy segment_01.mp4 +``` + +### Batch cut from edit decision list + +```bash +#!/bin/bash +# cuts.txt: start,end,label +while IFS=, read -r start end label; do + ffmpeg -i raw.mp4 -ss "$start" -to "$end" -c copy "segments/${label}.mp4" +done < cuts.txt +``` + +### Concatenate segments + +```bash +# Create file list +for f in segments/*.mp4; do echo "file '$f'"; done > concat.txt +ffmpeg -f concat -safe 0 -i concat.txt -c copy assembled.mp4 +``` + +### Create proxy for faster editing + +```bash +ffmpeg -i raw.mp4 -vf "scale=960:-2" -c:v libx264 -preset ultrafast -crf 28 proxy.mp4 +``` + +### Extract audio for transcription + +```bash +ffmpeg -i raw.mp4 -vn -acodec pcm_s16le -ar 16000 audio.wav +``` + +### Normalize audio levels + +```bash +ffmpeg -i segment.mp4 -af loudnorm=I=-16:TP=-1.5:LRA=11 -c:v copy normalized.mp4 +``` + +## Layer 4: Programmable Composition (Remotion) + +Remotion turns editing problems into composable code. 
Use it for things that traditional editors make painful:
+
+### When to use Remotion
+
+- Overlays: text, images, branding, lower thirds
+- Data visualizations: charts, stats, animated numbers
+- Motion graphics: transitions, explainer animations
+- Composable scenes: reusable templates across videos
+- Product demos: annotated screenshots, UI highlights
+
+### Basic Remotion composition
+
+```tsx
+import { AbsoluteFill, Sequence, Video, useCurrentFrame } from "remotion";
+
+export const VlogComposition: React.FC = () => {
+  const frame = useCurrentFrame();
+
+  return (
+    <AbsoluteFill>
+      {/* Main footage */}
+      <Sequence from={0} durationInFrames={600}>
+        <Video src="segments/segment_01.mp4" />
+      </Sequence>
+
+      {/* Title overlay */}
+      <Sequence from={0} durationInFrames={90}>
+        <AbsoluteFill style={{ justifyContent: "center", alignItems: "center", opacity: Math.min(1, frame / 30) }}>
+          <h1 style={{ color: "white", fontSize: 80 }}>
+            The AI Editing Stack
+          </h1>
+        </AbsoluteFill>
+      </Sequence>
+
+      {/* Next segment */}
+      <Sequence from={600} durationInFrames={600}>
+        <Video src="segments/segment_02.mp4" />
+      </Sequence>
+    </AbsoluteFill>
+ ); +}; +``` + +### Render output + +```bash +npx remotion render src/index.ts VlogComposition output.mp4 +``` + +See the [Remotion docs](https://www.remotion.dev/docs) for detailed patterns and API reference. + +## Layer 5: Generated Assets (ElevenLabs / fal.ai) + +Generate only what you need. Do not generate the whole video. + +### Voiceover with ElevenLabs + +```python +import os +import requests + +resp = requests.post( + f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}", + headers={ + "xi-api-key": os.environ["ELEVENLABS_API_KEY"], + "Content-Type": "application/json" + }, + json={ + "text": "Your narration text here", + "model_id": "eleven_turbo_v2_5", + "voice_settings": {"stability": 0.5, "similarity_boost": 0.75} + } +) +with open("voiceover.mp3", "wb") as f: + f.write(resp.content) +``` + +### Music and SFX with fal.ai + +Use the `fal-ai-media` skill for: +- Background music generation +- Sound effects (ThinkSound model for video-to-audio) +- Transition sounds + +### Generated visuals with fal.ai + +Use for insert shots, thumbnails, or b-roll that doesn't exist: +``` +generate(app_id: "fal-ai/nano-banana-pro", input_data: { + "prompt": "professional thumbnail for tech vlog, dark background, code on screen", + "image_size": "landscape_16_9" +}) +``` + +### VideoDB generative audio + +If VideoDB is configured: +```python +voiceover = coll.generate_voice(text="Narration here", voice="alloy") +music = coll.generate_music(prompt="lo-fi background for coding vlog", duration=120) +sfx = coll.generate_sound_effect(prompt="subtle whoosh transition") +``` + +## Layer 6: Final Polish (Descript / CapCut) + +The last layer is human. 
Use a traditional editor for: +- **Pacing**: adjust cuts that feel too fast or slow +- **Captions**: auto-generated, then manually cleaned +- **Color grading**: basic correction and mood +- **Final audio mix**: balance voice, music, and SFX levels +- **Export**: platform-specific formats and quality settings + +This is where taste lives. AI clears the repetitive work. You make the final calls. + +## Social Media Reframing + +Different platforms need different aspect ratios: + +| Platform | Aspect Ratio | Resolution | +|----------|-------------|------------| +| YouTube | 16:9 | 1920x1080 | +| TikTok / Reels | 9:16 | 1080x1920 | +| Instagram Feed | 1:1 | 1080x1080 | +| X / Twitter | 16:9 or 1:1 | 1280x720 or 720x720 | + +### Reframe with FFmpeg + +```bash +# 16:9 to 9:16 (center crop) +ffmpeg -i input.mp4 -vf "crop=ih*9/16:ih,scale=1080:1920" vertical.mp4 + +# 16:9 to 1:1 (center crop) +ffmpeg -i input.mp4 -vf "crop=ih:ih,scale=1080:1080" square.mp4 +``` + +### Reframe with VideoDB + +```python +from videodb import ReframeMode + +# Smart reframe (AI-guided subject tracking) +reframed = video.reframe(start=0, end=60, target="vertical", mode=ReframeMode.smart) +``` + +## Scene Detection and Auto-Cut + +### FFmpeg scene detection + +```bash +# Detect scene changes (threshold 0.3 = moderate sensitivity) +ffmpeg -i input.mp4 -vf "select='gt(scene,0.3)',showinfo" -vsync vfr -f null - 2>&1 | grep showinfo +``` + +### Silence detection for auto-cut + +```bash +# Find silent segments (useful for cutting dead air) +ffmpeg -i input.mp4 -af silencedetect=noise=-30dB:d=2 -f null - 2>&1 | grep silence +``` + +### Highlight extraction + +Use Claude to analyze transcript + scene timestamps: +``` +"Given this transcript with timestamps and these scene change points, +identify the 5 most engaging 30-second clips for social media." 
+``` + +## What Each Tool Does Best + +| Tool | Strength | Weakness | +|------|----------|----------| +| Claude / Codex | Organization, planning, code generation | Not the creative taste layer | +| FFmpeg | Deterministic cuts, batch processing, format conversion | No visual editing UI | +| Remotion | Programmable overlays, composable scenes, reusable templates | Learning curve for non-devs | +| Screen Studio | Polished screen recordings immediately | Only screen capture | +| ElevenLabs | Voice, narration, music, SFX | Not the center of the workflow | +| Descript / CapCut | Final pacing, captions, polish | Manual, not automatable | + +## Key Principles + +1. **Edit, don't generate.** This workflow is for cutting real footage, not creating from prompts. +2. **Structure before style.** Get the story right in Layer 2 before touching anything visual. +3. **FFmpeg is the backbone.** Boring but critical. Where long footage becomes manageable. +4. **Remotion for repeatability.** If you'll do it more than once, make it a Remotion component. +5. **Generate selectively.** Only use AI generation for assets that don't exist, not for everything. +6. **Taste is the last layer.** AI clears repetitive work. You make the final creative calls. + +## Related Skills + +- `fal-ai-media` — AI image, video, and audio generation +- `videodb` — Server-side video processing, indexing, and streaming +- `content-engine` — Platform-native content distribution diff --git a/skills/x-api/SKILL.md b/skills/x-api/SKILL.md new file mode 100644 index 00000000..23346c49 --- /dev/null +++ b/skills/x-api/SKILL.md @@ -0,0 +1,208 @@ +--- +name: x-api +description: X/Twitter API integration for posting tweets, threads, reading timelines, search, and analytics. Covers OAuth auth patterns, rate limits, and platform-native content posting. Use when the user wants to interact with X programmatically. 
+origin: ECC +--- + +# X API + +Programmatic interaction with X (Twitter) for posting, reading, searching, and analytics. + +## When to Activate + +- User wants to post tweets or threads programmatically +- Reading timeline, mentions, or user data from X +- Searching X for content, trends, or conversations +- Building X integrations or bots +- Analytics and engagement tracking +- User says "post to X", "tweet", "X API", or "Twitter API" + +## Authentication + +### OAuth 2.0 Bearer Token (App-Only) + +Best for: read-heavy operations, search, public data. + +```bash +# Environment setup +export X_BEARER_TOKEN="your-bearer-token" +``` + +```python +import os +import requests + +bearer = os.environ["X_BEARER_TOKEN"] +headers = {"Authorization": f"Bearer {bearer}"} + +# Search recent tweets +resp = requests.get( + "https://api.x.com/2/tweets/search/recent", + headers=headers, + params={"query": "claude code", "max_results": 10} +) +tweets = resp.json() +``` + +### OAuth 1.0a (User Context) + +Required for: posting tweets, managing account, DMs. 
+ +```bash +# Environment setup — source before use +export X_API_KEY="your-api-key" +export X_API_SECRET="your-api-secret" +export X_ACCESS_TOKEN="your-access-token" +export X_ACCESS_SECRET="your-access-secret" +``` + +```python +import os +from requests_oauthlib import OAuth1Session + +oauth = OAuth1Session( + os.environ["X_API_KEY"], + client_secret=os.environ["X_API_SECRET"], + resource_owner_key=os.environ["X_ACCESS_TOKEN"], + resource_owner_secret=os.environ["X_ACCESS_SECRET"], +) +``` + +## Core Operations + +### Post a Tweet + +```python +resp = oauth.post( + "https://api.x.com/2/tweets", + json={"text": "Hello from Claude Code"} +) +resp.raise_for_status() +tweet_id = resp.json()["data"]["id"] +``` + +### Post a Thread + +```python +def post_thread(oauth, tweets: list[str]) -> list[str]: + ids = [] + reply_to = None + for text in tweets: + payload = {"text": text} + if reply_to: + payload["reply"] = {"in_reply_to_tweet_id": reply_to} + resp = oauth.post("https://api.x.com/2/tweets", json=payload) + tweet_id = resp.json()["data"]["id"] + ids.append(tweet_id) + reply_to = tweet_id + return ids +``` + +### Read User Timeline + +```python +resp = requests.get( + f"https://api.x.com/2/users/{user_id}/tweets", + headers=headers, + params={ + "max_results": 10, + "tweet.fields": "created_at,public_metrics", + } +) +``` + +### Search Tweets + +```python +resp = requests.get( + "https://api.x.com/2/tweets/search/recent", + headers=headers, + params={ + "query": "from:affaanmustafa -is:retweet", + "max_results": 10, + "tweet.fields": "public_metrics,created_at", + } +) +``` + +### Get User by Username + +```python +resp = requests.get( + "https://api.x.com/2/users/by/username/affaanmustafa", + headers=headers, + params={"user.fields": "public_metrics,description,created_at"} +) +``` + +### Upload Media and Post + +```python +# Media upload uses v1.1 endpoint + +# Step 1: Upload media +media_resp = oauth.post( + "https://upload.twitter.com/1.1/media/upload.json", + 
files={"media": open("image.png", "rb")} +) +media_id = media_resp.json()["media_id_string"] + +# Step 2: Post with media +resp = oauth.post( + "https://api.x.com/2/tweets", + json={"text": "Check this out", "media": {"media_ids": [media_id]}} +) +``` + +## Rate Limits + +X API rate limits vary by endpoint, auth method, and account tier, and they change over time. Always: +- Check the current X developer docs before hardcoding assumptions +- Read `x-rate-limit-remaining` and `x-rate-limit-reset` headers at runtime +- Back off automatically instead of relying on static tables in code + +```python +import time + +remaining = int(resp.headers.get("x-rate-limit-remaining", 0)) +if remaining < 5: + reset = int(resp.headers.get("x-rate-limit-reset", 0)) + wait = max(0, reset - int(time.time())) + print(f"Rate limit approaching. Resets in {wait}s") +``` + +## Error Handling + +```python +resp = oauth.post("https://api.x.com/2/tweets", json={"text": content}) +if resp.status_code == 201: + return resp.json()["data"]["id"] +elif resp.status_code == 429: + reset = int(resp.headers["x-rate-limit-reset"]) + raise Exception(f"Rate limited. Resets at {reset}") +elif resp.status_code == 403: + raise Exception(f"Forbidden: {resp.json().get('detail', 'check permissions')}") +else: + raise Exception(f"X API error {resp.status_code}: {resp.text}") +``` + +## Security + +- **Never hardcode tokens.** Use environment variables or `.env` files. +- **Never commit `.env` files.** Add to `.gitignore`. +- **Rotate tokens** if exposed. Regenerate at developer.x.com. +- **Use read-only tokens** when write access is not needed. +- **Store OAuth secrets securely** — not in source code or logs. + +## Integration with Content Engine + +Use `content-engine` skill to generate platform-native content, then post via X API: +1. Generate content with content-engine (X platform format) +2. Validate length (280 chars for single tweet) +3. Post via X API using patterns above +4. 
Track engagement via public_metrics + +## Related Skills + +- `content-engine` — Generate platform-native content for X +- `crosspost` — Distribute content across X, LinkedIn, and other platforms diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index 540d5084..6e814bd3 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -360,22 +360,30 @@ async function runTests() { if ( await asyncTest('creates or updates session file', async () => { - // Run the script - await runScript(path.join(scriptsDir, 'session-end.js')); + const isoHome = path.join(os.tmpdir(), `ecc-session-create-${Date.now()}`); - // Check if session file was created - // Note: Without CLAUDE_SESSION_ID, falls back to project name (not 'default') - // Use local time to match the script's getDateString() function - const sessionsDir = path.join(os.homedir(), '.claude', 'sessions'); - const now = new Date(); - const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; + try { + await runScript(path.join(scriptsDir, 'session-end.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome + }); - // Get the expected session ID (project name fallback) - const utils = require('../../scripts/lib/utils'); - const expectedId = utils.getSessionIdShort(); - const sessionFile = path.join(sessionsDir, `${today}-${expectedId}-session.tmp`); + // Check if session file was created + // Note: Without CLAUDE_SESSION_ID, falls back to project/worktree name (not 'default') + // Use local time to match the script's getDateString() function + const sessionsDir = path.join(isoHome, '.claude', 'sessions'); + const now = new Date(); + const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; - assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`); + // Get the expected session ID (project name fallback) + const utils = 
require('../../scripts/lib/utils'); + const expectedId = utils.getSessionIdShort(); + const sessionFile = path.join(sessionsDir, `${today}-${expectedId}-session.tmp`); + + assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } }) ) passed++; @@ -404,6 +412,39 @@ async function runTests() { passed++; else failed++; + if ( + await asyncTest('writes project, branch, and worktree metadata into new session files', async () => { + const isoHome = path.join(os.tmpdir(), `ecc-session-metadata-${Date.now()}`); + const testSessionId = 'test-session-meta1234'; + const expectedShortId = testSessionId.slice(-8); + const topLevel = spawnSync('git', ['rev-parse', '--show-toplevel'], { encoding: 'utf8' }).stdout.trim(); + const branch = spawnSync('git', ['rev-parse', '--abbrev-ref', 'HEAD'], { encoding: 'utf8' }).stdout.trim(); + const project = path.basename(topLevel); + + try { + const result = await runScript(path.join(scriptsDir, 'session-end.js'), '', { + HOME: isoHome, + USERPROFILE: isoHome, + CLAUDE_SESSION_ID: testSessionId + }); + assert.strictEqual(result.code, 0, 'Hook should exit 0'); + + const now = new Date(); + const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`; + const sessionFile = path.join(isoHome, '.claude', 'sessions', `${today}-${expectedShortId}-session.tmp`); + const content = fs.readFileSync(sessionFile, 'utf8'); + + assert.ok(content.includes(`**Project:** ${project}`), 'Should persist project metadata'); + assert.ok(content.includes(`**Branch:** ${branch}`), 'Should persist branch metadata'); + assert.ok(content.includes(`**Worktree:** ${process.cwd()}`), 'Should persist worktree metadata'); + } finally { + fs.rmSync(isoHome, { recursive: true, force: true }); + } + }) + ) + passed++; + else failed++; + // pre-compact.js tests console.log('\npre-compact.js:'); @@ -1218,7 
+1259,10 @@ async function runTests() { fs.writeFileSync(transcriptPath, lines.join('\n')); const stdinJson = JSON.stringify({ transcript_path: transcriptPath }); - const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson); + const result = await runScript(path.join(scriptsDir, 'session-end.js'), stdinJson, { + HOME: testDir, + USERPROFILE: testDir + }); assert.strictEqual(result.code, 0); // Session file should contain summary with tools used assert.ok(result.stderr.includes('Created session file') || result.stderr.includes('Updated session file'), 'Should create/update session file'); @@ -2448,6 +2492,42 @@ async function runTests() { passed++; else failed++; + if ( + await asyncTest('normalizes existing session headers with project, branch, and worktree metadata', async () => { + const testDir = createTestDir(); + const sessionsDir = path.join(testDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + + const utils = require('../../scripts/lib/utils'); + const today = utils.getDateString(); + const shortId = 'update04'; + const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`); + const branch = spawnSync('git', ['rev-parse', '--abbrev-ref', 'HEAD'], { encoding: 'utf8' }).stdout.trim(); + const project = path.basename(spawnSync('git', ['rev-parse', '--show-toplevel'], { encoding: 'utf8' }).stdout.trim()); + + fs.writeFileSync( + sessionFile, + `# Session: ${today}\n**Date:** ${today}\n**Started:** 09:00\n**Last Updated:** 09:00\n\n---\n\n## Current State\n\n[Session context goes here]\n` + ); + + const result = await runScript(path.join(scriptsDir, 'session-end.js'), '', { + HOME: testDir, + USERPROFILE: testDir, + CLAUDE_SESSION_ID: `session-${shortId}` + }); + assert.strictEqual(result.code, 0); + + const updated = fs.readFileSync(sessionFile, 'utf8'); + assert.ok(updated.includes(`**Project:** ${project}`), 'Should inject project metadata into existing headers'); + 
assert.ok(updated.includes(`**Branch:** ${branch}`), 'Should inject branch metadata into existing headers'); + assert.ok(updated.includes(`**Worktree:** ${process.cwd()}`), 'Should inject worktree metadata into existing headers'); + + cleanupTestDir(testDir); + }) + ) + passed++; + else failed++; + if ( await asyncTest('replaces blank template with summary when updating existing file', async () => { const testDir = createTestDir(); @@ -3815,6 +3895,8 @@ async function runTests() { try { const result = await runScript(path.join(scriptsDir, 'session-end.js'), oversizedPayload, { + HOME: testDir, + USERPROFILE: testDir, CLAUDE_TRANSCRIPT_PATH: transcriptPath }); assert.strictEqual(result.code, 0, 'Should exit 0 even with oversized stdin'); diff --git a/tests/lib/orchestration-session.test.js b/tests/lib/orchestration-session.test.js new file mode 100644 index 00000000..21e7bfae --- /dev/null +++ b/tests/lib/orchestration-session.test.js @@ -0,0 +1,214 @@ +'use strict'; + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { + buildSessionSnapshot, + loadWorkerSnapshots, + parseWorkerHandoff, + parseWorkerStatus, + parseWorkerTask, + resolveSnapshotTarget +} = require('../../scripts/lib/orchestration-session'); + +console.log('=== Testing orchestration-session.js ===\n'); + +let passed = 0; +let failed = 0; + +function test(desc, fn) { + try { + fn(); + console.log(` ✓ ${desc}`); + passed++; + } catch (error) { + console.log(` ✗ ${desc}: ${error.message}`); + failed++; + } +} + +test('parseWorkerStatus extracts structured status fields', () => { + const status = parseWorkerStatus([ + '# Status', + '', + '- State: completed', + '- Updated: 2026-03-12T14:09:15Z', + '- Branch: feature-branch', + '- Worktree: `/tmp/worktree`', + '', + '- Handoff file: `/tmp/handoff.md`' + ].join('\n')); + + assert.deepStrictEqual(status, { + state: 'completed', + updated: '2026-03-12T14:09:15Z', + branch: 
'feature-branch', + worktree: '/tmp/worktree', + taskFile: null, + handoffFile: '/tmp/handoff.md' + }); +}); + +test('parseWorkerTask extracts objective and seeded overlays', () => { + const task = parseWorkerTask([ + '# Worker Task', + '', + '## Seeded Local Overlays', + '- `scripts/orchestrate-worktrees.js`', + '- `commands/orchestrate.md`', + '', + '## Objective', + 'Verify seeded files and summarize status.' + ].join('\n')); + + assert.deepStrictEqual(task.seedPaths, [ + 'scripts/orchestrate-worktrees.js', + 'commands/orchestrate.md' + ]); + assert.strictEqual(task.objective, 'Verify seeded files and summarize status.'); +}); + +test('parseWorkerHandoff extracts summary, validation, and risks', () => { + const handoff = parseWorkerHandoff([ + '# Handoff', + '', + '## Summary', + '- Worker completed successfully', + '', + '## Validation', + '- Ran tests', + '', + '## Remaining Risks', + '- No runtime screenshot' + ].join('\n')); + + assert.deepStrictEqual(handoff.summary, ['Worker completed successfully']); + assert.deepStrictEqual(handoff.validation, ['Ran tests']); + assert.deepStrictEqual(handoff.remainingRisks, ['No runtime screenshot']); +}); + +test('parseWorkerHandoff also supports bold section headers', () => { + const handoff = parseWorkerHandoff([ + '# Handoff', + '', + '**Summary**', + '- Worker completed successfully', + '', + '**Validation**', + '- Ran tests', + '', + '**Remaining Risks**', + '- No runtime screenshot' + ].join('\n')); + + assert.deepStrictEqual(handoff.summary, ['Worker completed successfully']); + assert.deepStrictEqual(handoff.validation, ['Ran tests']); + assert.deepStrictEqual(handoff.remainingRisks, ['No runtime screenshot']); +}); + +test('loadWorkerSnapshots reads coordination worker directories', () => { + const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-session-')); + const coordinationDir = path.join(tempRoot, 'coordination'); + const workerDir = path.join(coordinationDir, 'seed-check'); + const proofDir = 
path.join(coordinationDir, 'proof'); + fs.mkdirSync(workerDir, { recursive: true }); + fs.mkdirSync(proofDir, { recursive: true }); + + try { + fs.writeFileSync(path.join(workerDir, 'status.md'), [ + '# Status', + '', + '- State: running', + '- Branch: seed-branch', + '- Worktree: `/tmp/seed-worktree`' + ].join('\n')); + fs.writeFileSync(path.join(workerDir, 'task.md'), [ + '# Worker Task', + '', + '## Objective', + 'Inspect seed paths.' + ].join('\n')); + fs.writeFileSync(path.join(workerDir, 'handoff.md'), [ + '# Handoff', + '', + '## Summary', + '- Pending' + ].join('\n')); + + const workers = loadWorkerSnapshots(coordinationDir); + assert.strictEqual(workers.length, 1); + assert.strictEqual(workers[0].workerSlug, 'seed-check'); + assert.strictEqual(workers[0].status.branch, 'seed-branch'); + assert.strictEqual(workers[0].task.objective, 'Inspect seed paths.'); + } finally { + fs.rmSync(tempRoot, { recursive: true, force: true }); + } +}); + +test('buildSessionSnapshot merges tmux panes with worker metadata', () => { + const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-snapshot-')); + const coordinationDir = path.join(tempRoot, 'coordination'); + const workerDir = path.join(coordinationDir, 'seed-check'); + fs.mkdirSync(workerDir, { recursive: true }); + + try { + fs.writeFileSync(path.join(workerDir, 'status.md'), '- State: completed\n- Branch: seed-branch\n'); + fs.writeFileSync(path.join(workerDir, 'task.md'), '## Objective\nInspect seed paths.\n'); + fs.writeFileSync(path.join(workerDir, 'handoff.md'), '## Summary\n- ok\n'); + + const snapshot = buildSessionSnapshot({ + sessionName: 'workflow-visual-proof', + coordinationDir, + panes: [ + { + paneId: '%95', + windowIndex: 1, + paneIndex: 2, + title: 'seed-check', + currentCommand: 'codex', + currentPath: '/tmp/worktree', + active: false, + dead: false, + pid: 1234 + } + ] + }); + + assert.strictEqual(snapshot.sessionActive, true); + assert.strictEqual(snapshot.workerCount, 1); + 
assert.strictEqual(snapshot.workerStates.completed, 1); + assert.strictEqual(snapshot.workers[0].pane.paneId, '%95'); + } finally { + fs.rmSync(tempRoot, { recursive: true, force: true }); + } +}); + +test('resolveSnapshotTarget handles plan files and direct session names', () => { + const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-target-')); + const repoRoot = path.join(tempRoot, 'repo'); + fs.mkdirSync(repoRoot, { recursive: true }); + const planPath = path.join(repoRoot, 'plan.json'); + fs.writeFileSync(planPath, JSON.stringify({ + sessionName: 'workflow-visual-proof', + repoRoot, + coordinationRoot: path.join(repoRoot, '.claude', 'orchestration') + })); + + try { + const fromPlan = resolveSnapshotTarget(planPath, repoRoot); + assert.strictEqual(fromPlan.targetType, 'plan'); + assert.strictEqual(fromPlan.sessionName, 'workflow-visual-proof'); + + const fromSession = resolveSnapshotTarget('workflow-visual-proof', repoRoot); + assert.strictEqual(fromSession.targetType, 'session'); + assert.ok(fromSession.coordinationDir.endsWith(path.join('.claude', 'orchestration', 'workflow-visual-proof'))); + } finally { + fs.rmSync(tempRoot, { recursive: true, force: true }); + } +}); + +console.log(`\n=== Results: ${passed} passed, ${failed} failed ===`); +if (failed > 0) process.exit(1); diff --git a/tests/lib/session-manager.test.js b/tests/lib/session-manager.test.js index 611c6485..d33c3874 100644 --- a/tests/lib/session-manager.test.js +++ b/tests/lib/session-manager.test.js @@ -94,6 +94,9 @@ function runTests() { **Date:** 2026-02-01 **Started:** 10:30 **Last Updated:** 14:45 +**Project:** everything-claude-code +**Branch:** feature/session-metadata +**Worktree:** /tmp/ecc-worktree ### Completed - [x] Set up project @@ -114,6 +117,9 @@ src/main.ts assert.strictEqual(meta.date, '2026-02-01'); assert.strictEqual(meta.started, '10:30'); assert.strictEqual(meta.lastUpdated, '14:45'); + assert.strictEqual(meta.project, 'everything-claude-code'); + 
assert.strictEqual(meta.branch, 'feature/session-metadata'); + assert.strictEqual(meta.worktree, '/tmp/ecc-worktree'); assert.strictEqual(meta.completed.length, 2); assert.strictEqual(meta.completed[0], 'Set up project'); assert.strictEqual(meta.inProgress.length, 1); diff --git a/tests/lib/tmux-worktree-orchestrator.test.js b/tests/lib/tmux-worktree-orchestrator.test.js new file mode 100644 index 00000000..fcbe1b71 --- /dev/null +++ b/tests/lib/tmux-worktree-orchestrator.test.js @@ -0,0 +1,233 @@ +'use strict'; + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { + slugify, + renderTemplate, + buildOrchestrationPlan, + materializePlan, + normalizeSeedPaths, + overlaySeedPaths +} = require('../../scripts/lib/tmux-worktree-orchestrator'); + +console.log('=== Testing tmux-worktree-orchestrator.js ===\n'); + +let passed = 0; +let failed = 0; + +function test(desc, fn) { + try { + fn(); + console.log(` ✓ ${desc}`); + passed++; + } catch (error) { + console.log(` ✗ ${desc}: ${error.message}`); + failed++; + } +} + +console.log('Helpers:'); +test('slugify normalizes mixed punctuation and casing', () => { + assert.strictEqual(slugify('Feature Audit: Docs + Tmux'), 'feature-audit-docs-tmux'); +}); + +test('renderTemplate replaces supported placeholders', () => { + const rendered = renderTemplate('run {worker_name} in {worktree_path}', { + worker_name: 'Docs Fixer', + worktree_path: '/tmp/repo-worker' + }); + assert.strictEqual(rendered, 'run Docs Fixer in /tmp/repo-worker'); +}); + +test('renderTemplate rejects unknown placeholders', () => { + assert.throws( + () => renderTemplate('missing {unknown}', { worker_name: 'docs' }), + /Unknown template variable/ + ); +}); + +console.log('\nPlan generation:'); +test('buildOrchestrationPlan creates worktrees, branches, and tmux commands', () => { + const repoRoot = path.join('/tmp', 'ecc'); + const plan = buildOrchestrationPlan({ + repoRoot, + 
sessionName: 'Skill Audit', + baseRef: 'main', + launcherCommand: 'codex exec --cwd {worktree_path} --task-file {task_file}', + workers: [ + { name: 'Docs A', task: 'Fix skills 1-4' }, + { name: 'Docs B', task: 'Fix skills 5-8' } + ] + }); + + assert.strictEqual(plan.sessionName, 'skill-audit'); + assert.strictEqual(plan.workerPlans.length, 2); + assert.strictEqual(plan.workerPlans[0].branchName, 'orchestrator-skill-audit-docs-a'); + assert.strictEqual(plan.workerPlans[1].branchName, 'orchestrator-skill-audit-docs-b'); + assert.deepStrictEqual( + plan.workerPlans[0].gitArgs.slice(0, 4), + ['worktree', 'add', '-b', 'orchestrator-skill-audit-docs-a'], + 'Should create branch-backed worktrees' + ); + assert.ok( + plan.workerPlans[0].worktreePath.endsWith(path.join('ecc-skill-audit-docs-a')), + 'Should create sibling worktree path' + ); + assert.ok( + plan.workerPlans[0].taskFilePath.endsWith(path.join('.orchestration', 'skill-audit', 'docs-a', 'task.md')), + 'Should create per-worker task file' + ); + assert.ok( + plan.workerPlans[0].handoffFilePath.endsWith(path.join('.orchestration', 'skill-audit', 'docs-a', 'handoff.md')), + 'Should create per-worker handoff file' + ); + assert.ok( + plan.workerPlans[0].launchCommand.includes(plan.workerPlans[0].taskFilePath), + 'Launch command should interpolate task file' + ); + assert.ok( + plan.workerPlans[0].launchCommand.includes(plan.workerPlans[0].worktreePath), + 'Launch command should interpolate worktree path' + ); + assert.ok( + plan.tmuxCommands.some(command => command.args.includes('split-window')), + 'Should include tmux split commands' + ); + assert.ok( + plan.tmuxCommands.some(command => command.args.includes('select-layout')), + 'Should include tiled layout command' + ); +}); + +test('buildOrchestrationPlan requires at least one worker', () => { + assert.throws( + () => buildOrchestrationPlan({ + repoRoot: '/tmp/ecc', + sessionName: 'empty', + launcherCommand: 'codex exec --task-file {task_file}', + workers: [] + 
}), + /at least one worker/ + ); +}); + +test('buildOrchestrationPlan normalizes global and worker seed paths', () => { + const plan = buildOrchestrationPlan({ + repoRoot: '/tmp/ecc', + sessionName: 'seeded', + launcherCommand: 'echo run', + seedPaths: ['scripts/orchestrate-worktrees.js', './.claude/plan/workflow-e2e-test.json'], + workers: [ + { + name: 'Docs', + task: 'Update docs', + seedPaths: ['commands/multi-workflow.md'] + } + ] + }); + + assert.deepStrictEqual(plan.workerPlans[0].seedPaths, [ + 'scripts/orchestrate-worktrees.js', + '.claude/plan/workflow-e2e-test.json', + 'commands/multi-workflow.md' + ]); +}); + +test('normalizeSeedPaths rejects paths outside the repo root', () => { + assert.throws( + () => normalizeSeedPaths(['../outside.txt'], '/tmp/ecc'), + /inside repoRoot/ + ); +}); + +test('materializePlan keeps worker instructions inside the worktree boundary', () => { + const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orchestrator-test-')); + + try { + const plan = buildOrchestrationPlan({ + repoRoot: tempRoot, + coordinationRoot: path.join(tempRoot, '.claude', 'orchestration'), + sessionName: 'Workflow E2E', + launcherCommand: 'bash {repo_root}/scripts/orchestrate-codex-worker.sh {task_file} {handoff_file} {status_file}', + workers: [{ name: 'Docs', task: 'Update the workflow docs.' 
}] + }); + + materializePlan(plan); + + const taskFile = fs.readFileSync(plan.workerPlans[0].taskFilePath, 'utf8'); + + assert.ok( + taskFile.includes('Report results in your final response.'), + 'Task file should tell the worker to report in stdout' + ); + assert.ok( + taskFile.includes('Do not spawn subagents or external agents for this task.'), + 'Task file should keep nested workers single-session' + ); + assert.ok( + !taskFile.includes('Write results and handoff notes to'), + 'Task file should not require writing handoff files outside the worktree' + ); + assert.ok( + !taskFile.includes('Update `'), + 'Task file should not instruct the nested worker to update orchestration status files' + ); + } finally { + fs.rmSync(tempRoot, { recursive: true, force: true }); + } +}); + +test('overlaySeedPaths copies local overlays into the worker worktree', () => { + const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orchestrator-overlay-')); + const repoRoot = path.join(tempRoot, 'repo'); + const worktreePath = path.join(tempRoot, 'worktree'); + + try { + fs.mkdirSync(path.join(repoRoot, 'scripts'), { recursive: true }); + fs.mkdirSync(path.join(repoRoot, '.claude', 'plan'), { recursive: true }); + fs.mkdirSync(path.join(worktreePath, 'scripts'), { recursive: true }); + + fs.writeFileSync( + path.join(repoRoot, 'scripts', 'orchestrate-worktrees.js'), + 'local-version\n', + 'utf8' + ); + fs.writeFileSync( + path.join(repoRoot, '.claude', 'plan', 'workflow-e2e-test.json'), + '{"seeded":true}\n', + 'utf8' + ); + fs.writeFileSync( + path.join(worktreePath, 'scripts', 'orchestrate-worktrees.js'), + 'head-version\n', + 'utf8' + ); + + overlaySeedPaths({ + repoRoot, + seedPaths: [ + 'scripts/orchestrate-worktrees.js', + '.claude/plan/workflow-e2e-test.json' + ], + worktreePath + }); + + assert.strictEqual( + fs.readFileSync(path.join(worktreePath, 'scripts', 'orchestrate-worktrees.js'), 'utf8'), + 'local-version\n' + ); + assert.strictEqual( + 
fs.readFileSync(path.join(worktreePath, '.claude', 'plan', 'workflow-e2e-test.json'), 'utf8'), + '{"seeded":true}\n' + ); + } finally { + fs.rmSync(tempRoot, { recursive: true, force: true }); + } +}); + +console.log(`\n=== Results: ${passed} passed, ${failed} failed ===`); +if (failed > 0) process.exit(1); From bdf4befb3e5855cd22983ce158bf4d1e8c6313ee Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Thu, 12 Mar 2026 09:35:03 -0700 Subject: [PATCH 2/4] fix: resolve orchestration lint errors --- scripts/lib/tmux-worktree-orchestrator.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/lib/tmux-worktree-orchestrator.js b/scripts/lib/tmux-worktree-orchestrator.js index d89a803d..4cf8bb4d 100644 --- a/scripts/lib/tmux-worktree-orchestrator.js +++ b/scripts/lib/tmux-worktree-orchestrator.js @@ -329,12 +329,12 @@ function canonicalizePath(targetPath) { try { return fs.realpathSync.native(resolvedPath); - } catch (error) { + } catch (_error) { const parentPath = path.dirname(resolvedPath); try { return path.join(fs.realpathSync.native(parentPath), path.basename(resolvedPath)); - } catch (parentError) { + } catch (_parentError) { return resolvedPath; } } From 424f3b3729957561d4381a2f6fa75c410a84f209 Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Thu, 12 Mar 2026 10:20:42 -0700 Subject: [PATCH 3/4] fix: resolve exa skill markdown lint --- .agents/skills/exa-search/SKILL.md | 5 +++++ skills/exa-search/SKILL.md | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.agents/skills/exa-search/SKILL.md b/.agents/skills/exa-search/SKILL.md index c386b2c6..37ea2b1b 100644 --- a/.agents/skills/exa-search/SKILL.md +++ b/.agents/skills/exa-search/SKILL.md @@ -41,6 +41,7 @@ web_search_exa(query: "latest AI developments 2026", numResults: 5) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `query` | string | required | Search query | @@ -59,6 +60,7 @@ web_search_advanced_exa( ``` **Parameters:** + | 
Param | Type | Default | Notes | |-------|------|---------|-------| | `query` | string | required | Search query | @@ -76,6 +78,7 @@ get_code_context_exa(query: "Python asyncio patterns", tokensNum: 3000) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `query` | string | required | Code or API search query | @@ -89,6 +92,7 @@ company_research_exa(companyName: "Anthropic", numResults: 5) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `companyName` | string | required | Company name | @@ -109,6 +113,7 @@ crawling_exa(url: "https://example.com/article", tokensNum: 5000) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `url` | string | required | URL to extract | diff --git a/skills/exa-search/SKILL.md b/skills/exa-search/SKILL.md index ebe6f001..f3cffb8d 100644 --- a/skills/exa-search/SKILL.md +++ b/skills/exa-search/SKILL.md @@ -46,6 +46,7 @@ web_search_exa(query: "latest AI developments 2026", numResults: 5) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `query` | string | required | Search query | @@ -64,6 +65,7 @@ web_search_advanced_exa( ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `query` | string | required | Search query | @@ -81,6 +83,7 @@ get_code_context_exa(query: "Python asyncio patterns", tokensNum: 3000) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `query` | string | required | Code or API search query | @@ -94,6 +97,7 @@ company_research_exa(companyName: "Anthropic", numResults: 5) ``` **Parameters:** + | Param | Type | Default | Notes | |-------|------|---------|-------| | `companyName` | string | required | Company name | @@ -114,6 +118,7 @@ crawling_exa(url: "https://example.com/article", tokensNum: 5000) ``` **Parameters:** + | Param | Type | Default | Notes | 
|-------|------|---------|-------| | `url` | string | required | URL to extract | From 4e028bd2d20cfc514a571aa7f91ff0639e55e7fc Mon Sep 17 00:00:00 2001 From: Affaan Mustafa Date: Sat, 14 Mar 2026 12:55:25 -0700 Subject: [PATCH 4/4] feat: orchestration harness, selective install, observer improvements --- .agents/skills/claude-api/SKILL.md | 4 + .agents/skills/crosspost/SKILL.md | 1 + .agents/skills/x-api/SKILL.md | 3 + docs/ECC-2.0-SESSION-ADAPTER-DISCOVERY.md | 322 ++++++ docs/MEGA-PLAN-REPO-PROMPTS-2026-03-12.md | 286 ++++++ docs/PHASE1-ISSUE-BUNDLE-2026-03-12.md | 272 +++++ docs/PR-399-REVIEW-2026-03-12.md | 59 ++ docs/PR-QUEUE-TRIAGE-2026-03-13.md | 355 +++++++ docs/SELECTIVE-INSTALL-ARCHITECTURE.md | 933 ++++++++++++++++++ docs/SELECTIVE-INSTALL-DESIGN.md | 489 +++++++++ install.sh | 237 +---- manifests/install-components.json | 173 ++++ manifests/install-modules.json | 335 +++++++ manifests/install-profiles.json | 75 ++ package.json | 14 +- schemas/ecc-install-config.schema.json | 58 ++ schemas/install-components.schema.json | 56 ++ schemas/install-modules.schema.json | 105 ++ schemas/install-profiles.schema.json | 45 + schemas/install-state.schema.json | 210 ++++ scripts/ci/validate-install-manifests.js | 211 ++++ scripts/doctor.js | 110 +++ scripts/ecc.js | 194 ++++ scripts/install-apply.js | 137 +++ scripts/install-plan.js | 254 +++++ scripts/lib/install-executor.js | 605 ++++++++++++ scripts/lib/install-lifecycle.js | 763 ++++++++++++++ scripts/lib/install-manifests.js | 305 ++++++ scripts/lib/install-state.js | 120 +++ .../install-targets/antigravity-project.js | 9 + scripts/lib/install-targets/claude-home.js | 10 + scripts/lib/install-targets/codex-home.js | 10 + scripts/lib/install-targets/cursor-project.js | 10 + scripts/lib/install-targets/helpers.js | 89 ++ scripts/lib/install-targets/opencode-home.js | 10 + scripts/lib/install-targets/registry.js | 64 ++ scripts/lib/install/apply.js | 23 + scripts/lib/install/config.js | 82 ++ 
scripts/lib/install/request.js | 113 +++ scripts/lib/install/runtime.js | 42 + scripts/lib/orchestration-session.js | 8 +- .../lib/session-adapters/canonical-session.js | 138 +++ .../lib/session-adapters/claude-history.js | 147 +++ scripts/lib/session-adapters/dmux-tmux.js | 78 ++ scripts/lib/session-adapters/registry.js | 42 + scripts/list-installed.js | 90 ++ scripts/orchestration-status.js | 7 +- scripts/repair.js | 97 ++ scripts/session-inspect.js | 77 ++ scripts/uninstall.js | 96 ++ skills/configure-ecc/SKILL.md | 2 +- .../agents/observer-loop.sh | 3 +- .../continuous-learning-v2/hooks/observe.sh | 50 +- skills/crosspost/SKILL.md | 4 +- skills/exa-search/SKILL.md | 100 +- skills/fal-ai-media/SKILL.md | 2 +- tests/ci/validators.test.js | 379 +++++++ tests/hooks/hooks.test.js | 84 +- tests/lib/install-config.test.js | 117 +++ tests/lib/install-lifecycle.test.js | 357 +++++++ tests/lib/install-manifests.test.js | 196 ++++ tests/lib/install-request.test.js | 147 +++ tests/lib/install-state.test.js | 139 +++ tests/lib/install-targets.test.js | 110 +++ tests/lib/orchestration-session.test.js | 11 + tests/lib/session-adapters.test.js | 214 ++++ tests/scripts/doctor.test.js | 190 ++++ tests/scripts/ecc.test.js | 139 +++ tests/scripts/install-apply.test.js | 309 ++++++ tests/scripts/install-plan.test.js | 154 +++ tests/scripts/install-sh.test.js | 87 ++ tests/scripts/list-installed.test.js | 139 +++ tests/scripts/orchestration-status.test.js | 76 ++ tests/scripts/repair.test.js | 159 +++ tests/scripts/session-inspect.test.js | 116 +++ tests/scripts/uninstall.test.js | 133 +++ 76 files changed, 11050 insertions(+), 340 deletions(-) create mode 100644 docs/ECC-2.0-SESSION-ADAPTER-DISCOVERY.md create mode 100644 docs/MEGA-PLAN-REPO-PROMPTS-2026-03-12.md create mode 100644 docs/PHASE1-ISSUE-BUNDLE-2026-03-12.md create mode 100644 docs/PR-399-REVIEW-2026-03-12.md create mode 100644 docs/PR-QUEUE-TRIAGE-2026-03-13.md create mode 100644 docs/SELECTIVE-INSTALL-ARCHITECTURE.md 
create mode 100644 docs/SELECTIVE-INSTALL-DESIGN.md create mode 100644 manifests/install-components.json create mode 100644 manifests/install-modules.json create mode 100644 manifests/install-profiles.json create mode 100644 schemas/ecc-install-config.schema.json create mode 100644 schemas/install-components.schema.json create mode 100644 schemas/install-modules.schema.json create mode 100644 schemas/install-profiles.schema.json create mode 100644 schemas/install-state.schema.json create mode 100644 scripts/ci/validate-install-manifests.js create mode 100644 scripts/doctor.js create mode 100644 scripts/ecc.js create mode 100644 scripts/install-apply.js create mode 100644 scripts/install-plan.js create mode 100644 scripts/lib/install-executor.js create mode 100644 scripts/lib/install-lifecycle.js create mode 100644 scripts/lib/install-manifests.js create mode 100644 scripts/lib/install-state.js create mode 100644 scripts/lib/install-targets/antigravity-project.js create mode 100644 scripts/lib/install-targets/claude-home.js create mode 100644 scripts/lib/install-targets/codex-home.js create mode 100644 scripts/lib/install-targets/cursor-project.js create mode 100644 scripts/lib/install-targets/helpers.js create mode 100644 scripts/lib/install-targets/opencode-home.js create mode 100644 scripts/lib/install-targets/registry.js create mode 100644 scripts/lib/install/apply.js create mode 100644 scripts/lib/install/config.js create mode 100644 scripts/lib/install/request.js create mode 100644 scripts/lib/install/runtime.js create mode 100644 scripts/lib/session-adapters/canonical-session.js create mode 100644 scripts/lib/session-adapters/claude-history.js create mode 100644 scripts/lib/session-adapters/dmux-tmux.js create mode 100644 scripts/lib/session-adapters/registry.js create mode 100644 scripts/list-installed.js create mode 100644 scripts/repair.js create mode 100644 scripts/session-inspect.js create mode 100644 scripts/uninstall.js create mode 100644 
tests/lib/install-config.test.js create mode 100644 tests/lib/install-lifecycle.test.js create mode 100644 tests/lib/install-manifests.test.js create mode 100644 tests/lib/install-request.test.js create mode 100644 tests/lib/install-state.test.js create mode 100644 tests/lib/install-targets.test.js create mode 100644 tests/lib/session-adapters.test.js create mode 100644 tests/scripts/doctor.test.js create mode 100644 tests/scripts/ecc.test.js create mode 100644 tests/scripts/install-apply.test.js create mode 100644 tests/scripts/install-plan.test.js create mode 100644 tests/scripts/install-sh.test.js create mode 100644 tests/scripts/list-installed.test.js create mode 100644 tests/scripts/orchestration-status.test.js create mode 100644 tests/scripts/repair.test.js create mode 100644 tests/scripts/session-inspect.test.js create mode 100644 tests/scripts/uninstall.test.js diff --git a/.agents/skills/claude-api/SKILL.md b/.agents/skills/claude-api/SKILL.md index b42a3e35..0c09b740 100644 --- a/.agents/skills/claude-api/SKILL.md +++ b/.agents/skills/claude-api/SKILL.md @@ -230,6 +230,8 @@ print(f"Cache creation: {message.usage.cache_creation_input_tokens}") Process large volumes asynchronously at 50% cost reduction: ```python +import time + batch = client.messages.batches.create( requests=[ { @@ -306,6 +308,8 @@ while True: ## Error Handling ```python +import time + from anthropic import APIError, RateLimitError, APIConnectionError try: diff --git a/.agents/skills/crosspost/SKILL.md b/.agents/skills/crosspost/SKILL.md index 4d235933..b67192e7 100644 --- a/.agents/skills/crosspost/SKILL.md +++ b/.agents/skills/crosspost/SKILL.md @@ -148,6 +148,7 @@ A pattern I've been using that's made a real difference: If using a crossposting service (e.g., Postbridge, Buffer, or a custom API), the pattern looks like: ```python +import os import requests resp = requests.post( diff --git a/.agents/skills/x-api/SKILL.md b/.agents/skills/x-api/SKILL.md index d0e3ad65..a88a2e8f 100644 --- 
a/.agents/skills/x-api/SKILL.md +++ b/.agents/skills/x-api/SKILL.md @@ -92,6 +92,7 @@ def post_thread(oauth, tweets: list[str]) -> list[str]: if reply_to: payload["reply"] = {"in_reply_to_tweet_id": reply_to} resp = oauth.post("https://api.x.com/2/tweets", json=payload) + resp.raise_for_status() tweet_id = resp.json()["data"]["id"] ids.append(tweet_id) reply_to = tweet_id @@ -167,6 +168,8 @@ resp = oauth.post( Always check `x-rate-limit-remaining` and `x-rate-limit-reset` headers. ```python +import time + remaining = int(resp.headers.get("x-rate-limit-remaining", 0)) if remaining < 5: reset = int(resp.headers.get("x-rate-limit-reset", 0)) diff --git a/docs/ECC-2.0-SESSION-ADAPTER-DISCOVERY.md b/docs/ECC-2.0-SESSION-ADAPTER-DISCOVERY.md new file mode 100644 index 00000000..68124fd1 --- /dev/null +++ b/docs/ECC-2.0-SESSION-ADAPTER-DISCOVERY.md @@ -0,0 +1,322 @@ +# ECC 2.0 Session Adapter Discovery + +## Purpose + +This document turns the March 11 ECC 2.0 control-plane direction into a +concrete adapter and snapshot design grounded in the orchestration code that +already exists in this repo. 
+ +## Current Implemented Substrate + +The repo already has a real first-pass orchestration substrate: + +- `scripts/lib/tmux-worktree-orchestrator.js` + provisions tmux panes plus isolated git worktrees +- `scripts/orchestrate-worktrees.js` + is the current session launcher +- `scripts/lib/orchestration-session.js` + collects machine-readable session snapshots +- `scripts/orchestration-status.js` + exports those snapshots from a session name or plan file +- `commands/sessions.md` + already exposes adjacent session-history concepts from Claude's local store +- `scripts/lib/session-adapters/canonical-session.js` + defines the canonical `ecc.session.v1` normalization layer +- `scripts/lib/session-adapters/dmux-tmux.js` + wraps the current orchestration snapshot collector as adapter `dmux-tmux` +- `scripts/lib/session-adapters/claude-history.js` + normalizes Claude local session history as a second adapter +- `scripts/lib/session-adapters/registry.js` + selects adapters from explicit targets and target types +- `scripts/session-inspect.js` + emits canonical read-only session snapshots through the adapter registry + +In practice, ECC can already answer: + +- what workers exist in a tmux-orchestrated session +- what pane each worker is attached to +- what task, status, and handoff files exist for each worker +- whether the session is active and how many panes/workers exist +- what the most recent Claude local session looked like in the same canonical + snapshot shape as orchestration sessions + +That is enough to prove the substrate. It is not yet enough to qualify as a +general ECC 2.0 control plane. 
+ +## What The Current Snapshot Actually Models + +The current snapshot model coming out of `scripts/lib/orchestration-session.js` +has these effective fields: + +```json +{ + "sessionName": "workflow-visual-proof", + "coordinationDir": ".../.claude/orchestration/workflow-visual-proof", + "repoRoot": "...", + "targetType": "plan", + "sessionActive": true, + "paneCount": 2, + "workerCount": 2, + "workerStates": { + "running": 1, + "completed": 1 + }, + "panes": [ + { + "paneId": "%95", + "windowIndex": 1, + "paneIndex": 0, + "title": "seed-check", + "currentCommand": "codex", + "currentPath": "/tmp/worktree", + "active": false, + "dead": false, + "pid": 1234 + } + ], + "workers": [ + { + "workerSlug": "seed-check", + "workerDir": ".../seed-check", + "status": { + "state": "running", + "updated": "...", + "branch": "...", + "worktree": "...", + "taskFile": "...", + "handoffFile": "..." + }, + "task": { + "objective": "...", + "seedPaths": ["scripts/orchestrate-worktrees.js"] + }, + "handoff": { + "summary": [], + "validation": [], + "remainingRisks": [] + }, + "files": { + "status": ".../status.md", + "task": ".../task.md", + "handoff": ".../handoff.md" + }, + "pane": { + "paneId": "%95", + "title": "seed-check" + } + } + ] +} +``` + +This is already a useful operator payload. The main limitation is that it is +implicitly tied to one execution style: + +- tmux pane identity +- worker slug equals pane title +- markdown coordination files +- plan-file or session-name lookup rules + +## Gap Between ECC 1.x And ECC 2.0 + +ECC 1.x currently has two different "session" surfaces: + +1. Claude local session history +2. Orchestration runtime/session snapshots + +Those surfaces are adjacent but not unified. 
+ +The missing ECC 2.0 layer is a harness-neutral session adapter boundary that +can normalize: + +- tmux-orchestrated workers +- plain Claude sessions +- Codex worktree sessions +- OpenCode sessions +- future GitHub/App or remote-control sessions + +Without that adapter layer, any future operator UI would be forced to read +tmux-specific details and coordination markdown directly. + +## Adapter Boundary + +ECC 2.0 should introduce a canonical session adapter contract. + +Suggested minimal interface: + +```ts +type SessionAdapter = { + id: string; + canOpen(target: SessionTarget): boolean; + open(target: SessionTarget): Promise<AdapterHandle>; +}; + +type AdapterHandle = { + getSnapshot(): Promise<SessionSnapshot>; + streamEvents?(onEvent: (event: SessionEvent) => void): Promise<() => void>; + runAction?(action: SessionAction): Promise<SessionActionResult>; +}; +``` + +### Canonical Snapshot Shape + +Suggested first-pass canonical payload: + +```json +{ + "schemaVersion": "ecc.session.v1", + "adapterId": "dmux-tmux", + "session": { + "id": "workflow-visual-proof", + "kind": "orchestrated", + "state": "active", + "repoRoot": "...", + "sourceTarget": { + "type": "plan", + "value": ".claude/plan/workflow-visual-proof.json" + } + }, + "workers": [ + { + "id": "seed-check", + "label": "seed-check", + "state": "running", + "branch": "...", + "worktree": "...", + "runtime": { + "kind": "tmux-pane", + "command": "codex", + "pid": 1234, + "active": false, + "dead": false + }, + "intent": { + "objective": "...", + "seedPaths": ["scripts/orchestrate-worktrees.js"] + }, + "outputs": { + "summary": [], + "validation": [], + "remainingRisks": [] + }, + "artifacts": { + "statusFile": "...", + "taskFile": "...", + "handoffFile": "..." + } + } + ], + "aggregates": { + "workerCount": 2, + "states": { + "running": 1, + "completed": 1 + } + } +} +``` + +This preserves the useful signal already present while removing tmux-specific +details from the control-plane contract. + +## First Adapters To Support + +### 1. 
`dmux-tmux` + +Wrap the logic already living in +`scripts/lib/orchestration-session.js`. + +This is the easiest first adapter because the substrate is already real. + +### 2. `claude-history` + +Normalize the data that +`commands/sessions.md` +and the existing session-manager utilities already expose: + +- session id / alias +- branch +- worktree +- project path +- recency / file size / item counts + +This provides a non-orchestrated baseline for ECC 2.0. + +### 3. `codex-worktree` + +Use the same canonical shape, but back it with Codex-native execution metadata +instead of tmux assumptions where available. + +### 4. `opencode` + +Use the same adapter boundary once OpenCode session metadata is stable enough to +normalize. + +## What Should Stay Out Of The Adapter Layer + +The adapter layer should not own: + +- business logic for merge sequencing +- operator UI layout +- pricing or monetization decisions +- install profile selection +- tmux lifecycle orchestration itself + +Its job is narrower: + +- detect session targets +- load normalized snapshots +- optionally stream runtime events +- optionally expose safe actions + +## Current File Layout + +The adapter layer now lives in: + +```text +scripts/lib/session-adapters/ + canonical-session.js + dmux-tmux.js + claude-history.js + registry.js +scripts/session-inspect.js +tests/lib/session-adapters.test.js +tests/scripts/session-inspect.test.js +``` + +The current orchestration snapshot parser is now being consumed as an adapter +implementation rather than remaining the only product contract. + +## Immediate Next Steps + +1. Add a third adapter, likely `codex-worktree`, so the abstraction moves + beyond tmux plus Claude-history. +2. Decide whether canonical snapshots need separate `state` and `health` + fields before UI work starts. +3. Decide whether event streaming belongs in v1 or stays out until after the + snapshot layer proves itself. +4. 
Build operator-facing panels only on top of the adapter registry, not by + reading orchestration internals directly. + +## Open Questions + +1. Should worker identity be keyed by worker slug, branch, or stable UUID? +2. Do we need separate `state` and `health` fields at the canonical layer? +3. Should event streaming be part of v1, or should ECC 2.0 ship snapshot-only + first? +4. How much path information should be redacted before snapshots leave the local + machine? +5. Should the adapter registry live inside this repo long-term, or move into the + eventual ECC 2.0 control-plane app once the interface stabilizes? + +## Recommendation + +Treat the current tmux/worktree implementation as adapter `0`, not as the final +product surface. + +The shortest path to ECC 2.0 is: + +1. preserve the current orchestration substrate +2. wrap it in a canonical session adapter contract +3. add one non-tmux adapter +4. only then start building operator panels on top diff --git a/docs/MEGA-PLAN-REPO-PROMPTS-2026-03-12.md b/docs/MEGA-PLAN-REPO-PROMPTS-2026-03-12.md new file mode 100644 index 00000000..4830deb5 --- /dev/null +++ b/docs/MEGA-PLAN-REPO-PROMPTS-2026-03-12.md @@ -0,0 +1,286 @@ +# Mega Plan Repo Prompt List — March 12, 2026 + +## Purpose + +Use these prompts to split the remaining March 11 mega-plan work by repo. +They are written for parallel agents and assume the March 12 orchestration and +Windows CI lane is already merged via `#417`. + +## Current Snapshot + +- `everything-claude-code` has finished the orchestration, Codex baseline, and + Windows CI recovery lane. +- The next open ECC Phase 1 items are: + - review `#399` + - convert recurring discussion pressure into tracked issues + - define selective-install architecture + - write the ECC 2.0 discovery doc +- `agentshield`, `ECC-website`, and `skill-creator-app` all have dirty + `main` worktrees and should not be edited directly on `main`. +- `applications/` is not a standalone git repo. 
It lives inside the parent + workspace repo at ``. + +## Repo: `everything-claude-code` + +### Prompt A — PR `#399` Review and Merge Readiness + +```text +Work in: /everything-claude-code + +Goal: +Review PR #399 ("fix(observe): 5-layer automated session guard to prevent +self-loop observations") against the actual loop problem described in issue +#398 and the March 11 mega plan. Do not assume the old failing CI on the PR is +still meaningful, because the Windows baseline was repaired later in #417. + +Tasks: +1. Read issue #398 and PR #399 in full. +2. Inspect the observe hook implementation and tests locally. +3. Determine whether the PR really prevents observer self-observation, + automated-session observation, and runaway recursive loops. +4. Identify any missing env-based bypass, idle gating, or session exclusion + behavior. +5. Produce a merge recommendation with findings ordered by severity. + +Constraints: +- Do not merge automatically. +- Do not rewrite unrelated hook behavior. +- If you make code changes, keep them tightly scoped to observe behavior and + tests. + +Deliverables: +- review summary +- exact findings with file references +- recommended merge / rework decision +- test commands run +``` + +### Prompt B — Roadmap Issues Extraction + +```text +Work in: /everything-claude-code + +Goal: +Convert recurring discussion pressure from the mega plan into concrete GitHub +issues. Focus on high-signal roadmap items that unblock ECC 1.x and ECC 2.0. + +Create issue drafts or a ready-to-post issue bundle for: +1. selective install profiles +2. uninstall / doctor / repair lifecycle +3. generated skill placement and provenance policy +4. governance past the tool call +5. ECC 2.0 discovery doc / adapter contracts + +Tasks: +1. Read the March 11 mega plan and March 12 handoff. +2. Deduplicate against already-open issues. +3. Draft issue titles, problem statements, scope, non-goals, acceptance + criteria, and file/system areas affected. 
+ +Constraints: +- Do not create filler issues. +- Prefer 4-6 high-value issues over a large backlog dump. +- Keep each issue scoped so it could plausibly land in one focused PR series. + +Deliverables: +- issue shortlist +- ready-to-post issue bodies +- duplication notes against existing issues +``` + +### Prompt C — ECC 2.0 Discovery and Adapter Spec + +```text +Work in: /everything-claude-code + +Goal: +Turn the existing ECC 2.0 vision into a first concrete discovery doc focused on +adapter contracts, session/task state, token accounting, and security/policy +events. + +Tasks: +1. Use the current orchestration/session snapshot code as the baseline. +2. Define a normalized adapter contract for Claude Code, Codex, OpenCode, and + later Cursor / GitHub App integration. +3. Define the initial SQLite-backed data model for sessions, tasks, worktrees, + events, findings, and approvals. +4. Define what stays in ECC 1.x versus what belongs in ECC 2.0. +5. Call out unresolved product decisions separately from implementation + requirements. + +Constraints: +- Treat the current tmux/worktree/session snapshot substrate as the starting + point, not a blank slate. +- Keep the doc implementation-oriented. + +Deliverables: +- discovery doc +- adapter contract sketch +- event model sketch +- unresolved questions list +``` + +## Repo: `agentshield` + +### Prompt — False Positive Audit and Regression Plan + +```text +Work in: /agentshield + +Goal: +Advance the AgentShield Phase 2 workstream from the mega plan: reduce false +positives, especially where declarative deny rules, block hooks, docs examples, +or config snippets are misclassified as executable risk. + +Important repo state: +- branch is currently main +- dirty files exist in CLAUDE.md and README.md +- classify or park existing edits before broader changes + +Tasks: +1. 
Inspect the current false-positive behavior around: + - .claude hook configs + - AGENTS.md / CLAUDE.md + - .cursor rules + - .opencode plugin configs + - sample deny-list patterns +2. Separate parser behavior for declarative patterns vs executable commands. +3. Propose regression coverage additions and the exact fixture set needed. +4. If safe after branch setup, implement the first pass of the classifier fix. + +Constraints: +- do not work directly on dirty main +- keep fixes parser/classifier-scoped +- document any remaining ambiguity explicitly + +Deliverables: +- branch recommendation +- false-positive taxonomy +- proposed or landed regression tests +- remaining edge cases +``` + +## Repo: `ECC-website` + +### Prompt — Landing Rewrite and Product Framing + +```text +Work in: /ECC-website + +Goal: +Execute the website lane from the mega plan by rewriting the landing/product +framing away from "config repo" and toward "open agent harness system" plus +future control-plane direction. + +Important repo state: +- branch is currently main +- dirty files exist in favicon assets and multiple page/component files +- branch before meaningful work and preserve existing edits unless explicitly + classified as stale + +Tasks: +1. Classify the dirty main worktree state. +2. Rewrite the landing page narrative around: + - open agent harness system + - runtime guardrails + - cross-harness parity + - operator visibility and security +3. Define or update the next key pages: + - /skills + - /security + - /platforms + - /system or /dashboard +4. Keep the page visually intentional and product-forward, not generic SaaS. 
+ +Constraints: +- do not silently overwrite existing dirty work +- preserve existing design system where it is coherent +- distinguish ECC 1.x toolkit from ECC 2.0 control plane clearly + +Deliverables: +- branch recommendation +- landing-page rewrite diff or content spec +- follow-up page map +- deployment readiness notes +``` + +## Repo: `skill-creator-app` + +### Prompt — Skill Import Pipeline and Product Fit + +```text +Work in: /skill-creator-app + +Goal: +Align skill-creator-app with the mega-plan external skill sourcing and audited +import pipeline workstream. + +Important repo state: +- branch is currently main +- dirty files exist in README.md and src/lib/github.ts +- classify or park existing changes before broader work + +Tasks: +1. Assess whether the app should support: + - inventorying external skills + - provenance tagging + - dependency/risk audit fields + - ECC convention adaptation workflows +2. Review the existing GitHub integration surface in src/lib/github.ts. +3. Produce a concrete product/technical scope for an audited import pipeline. +4. If safe after branching, land the smallest enabling changes for metadata + capture or GitHub ingestion. + +Constraints: +- do not turn this into a generic prompt-builder +- keep the focus on audited skill ingestion and ECC-compatible output + +Deliverables: +- product-fit summary +- recommended scope for v1 +- data fields / workflow steps for the import pipeline +- code changes if they are small and clearly justified +``` + +## Repo: `ECC` Workspace (`applications/`, `knowledge/`, `tasks/`) + +### Prompt — Example Apps and Workflow Reliability Proofs + +```text +Work in: + +Goal: +Use the parent ECC workspace to support the mega-plan hosted/workflow lanes. +This is not a standalone applications repo; it is the umbrella workspace that +contains applications/, knowledge/, tasks/, and related planning assets. + +Tasks: +1. Inventory what in applications/ is real product code vs placeholder. +2. 
Identify where example repos or demo apps should live for: + - GitHub App workflow proofs + - ECC 2.0 prototype spikes + - example install / setup reliability checks +3. Propose a clean workspace structure so product code, research, and planning + stop bleeding into each other. +4. Recommend which proof-of-concept should be built first. + +Constraints: +- do not move large directories blindly +- distinguish repo structure recommendations from immediate code changes +- keep recommendations compatible with the current multi-repo ECC setup + +Deliverables: +- workspace inventory +- proposed structure +- first demo/app recommendation +- follow-up branch/worktree plan +``` + +## Local Continuation + +The current worktree should stay on ECC-native Phase 1 work that does not touch +the existing dirty skill-file changes here. The best next local tasks are: + +1. selective-install architecture +2. ECC 2.0 discovery doc +3. PR `#399` review diff --git a/docs/PHASE1-ISSUE-BUNDLE-2026-03-12.md b/docs/PHASE1-ISSUE-BUNDLE-2026-03-12.md new file mode 100644 index 00000000..d1594a3a --- /dev/null +++ b/docs/PHASE1-ISSUE-BUNDLE-2026-03-12.md @@ -0,0 +1,272 @@ +# Phase 1 Issue Bundle — March 12, 2026 + +## Status + +These issue drafts were prepared from the March 11 mega plan plus the March 12 +handoff. I attempted to open them directly in GitHub, but issue creation was +blocked by missing GitHub authentication in the MCP session. + +## GitHub Status + +These drafts were later posted via `gh`: + +- `#423` Implement manifest-driven selective install profiles for ECC +- `#421` Add ECC install-state plus uninstall / doctor / repair lifecycle +- `#424` Define canonical session adapter contract for ECC 2.0 control plane +- `#422` Define generated skill placement and provenance policy +- `#425` Define governance and visibility past the tool call + +The bodies below are preserved as the local source bundle used to create the +issues. 
+ +## Issue 1 + +### Title + +Implement manifest-driven selective install profiles for ECC + +### Labels + +- `enhancement` + +### Body + +```md +## Problem + +ECC still installs primarily by target and language. The repo now has first-pass +selective-install manifests and a non-mutating plan resolver, but the installer +itself does not yet consume those profiles. + +Current groundwork already landed in-repo: + +- `manifests/install-modules.json` +- `manifests/install-profiles.json` +- `scripts/ci/validate-install-manifests.js` +- `scripts/lib/install-manifests.js` +- `scripts/install-plan.js` + +That means the missing step is no longer design discovery. The missing step is +execution: wire profile/module resolution into the actual install flow while +preserving backward compatibility. + +## Scope + +Implement manifest-driven install execution for current ECC targets: + +- `claude` +- `cursor` +- `antigravity` + +Add first-pass support for: + +- `ecc-install --profile ` +- `ecc-install --modules ` +- target-aware filtering based on module target support +- backward-compatible legacy language installs during rollout + +## Non-Goals + +- Full uninstall/doctor/repair lifecycle in the same issue +- Codex/OpenCode install targets in the first pass if that blocks rollout +- Reorganizing the repository into separate published packages + +## Acceptance Criteria + +- `install.sh` can resolve and install a named profile +- `install.sh` can resolve explicit module IDs +- Unsupported modules for a target are skipped or rejected deterministically +- Legacy language-based install mode still works +- Tests cover profile resolution and installer behavior +- Docs explain the new preferred profile/module install path +``` + +## Issue 2 + +### Title + +Add ECC install-state plus uninstall / doctor / repair lifecycle + +### Labels + +- `enhancement` + +### Body + +```md +## Problem + +ECC has no canonical installed-state record. 
That makes uninstall, repair, and +post-install inspection nondeterministic. + +Today the repo can classify installable content, but it still cannot reliably +answer: + +- what profile/modules were installed +- what target they were installed into +- what paths ECC owns +- how to remove or repair only ECC-managed files + +Without install-state, lifecycle commands are guesswork. + +## Scope + +Introduce a durable install-state contract and the first lifecycle commands: + +- `ecc list-installed` +- `ecc uninstall` +- `ecc doctor` +- `ecc repair` + +Suggested state locations: + +- Claude: `~/.claude/ecc/install-state.json` +- Cursor: `./.cursor/ecc-install-state.json` +- Antigravity: `./.agent/ecc-install-state.json` + +The state file should capture at minimum: + +- installed version +- timestamp +- target +- profile +- resolved modules +- copied/managed paths +- source repo version or package version + +## Non-Goals + +- Rebuilding the installer architecture from scratch +- Full remote/cloud control-plane functionality +- Target support expansion beyond the current local installers unless it falls + out naturally + +## Acceptance Criteria + +- Successful installs write install-state deterministically +- `list-installed` reports target/profile/modules/version cleanly +- `doctor` reports missing or drifted managed paths +- `repair` restores missing managed files from recorded install-state +- `uninstall` removes only ECC-managed files and leaves unrelated local files + alone +- Tests cover install-state creation and lifecycle behavior +``` + +## Issue 3 + +### Title + +Define canonical session adapter contract for ECC 2.0 control plane + +### Labels + +- `enhancement` + +### Body + +```md +## Problem + +ECC now has real orchestration/session substrate, but it is still +implementation-specific. 
+ +Current state: + +- tmux/worktree orchestration exists +- machine-readable session snapshots exist +- Claude local session-history commands exist + +What does not exist yet is a harness-neutral adapter boundary that can normalize +session/task state across: + +- tmux-orchestrated workers +- plain Claude sessions +- Codex worktrees +- OpenCode sessions +- later remote or GitHub-integrated operator surfaces + +Without that adapter contract, any future ECC 2.0 operator shell will be forced +to read tmux-specific and markdown-coordination details directly. + +## Scope + +Define and implement the first-pass canonical session adapter layer. + +Suggested deliverables: + +- adapter registry +- canonical session snapshot schema +- `dmux-tmux` adapter backed by current orchestration code +- `claude-history` adapter backed by current session history utilities +- read-only inspection CLI for canonical session snapshots + +## Non-Goals + +- Full ECC 2.0 UI in the same issue +- Monetization/GitHub App implementation +- Remote multi-user control plane + +## Acceptance Criteria + +- There is a documented canonical snapshot contract +- Current tmux orchestration snapshot code is wrapped as an adapter rather than + the top-level product contract +- A second non-tmux adapter exists to prove the abstraction is real +- Tests cover adapter selection and normalized snapshot output +- The design clearly separates adapter concerns from orchestration and UI + concerns +``` + +## Issue 4 + +### Title + +Define generated skill placement and provenance policy + +### Labels + +- `enhancement` + +### Body + +```md +## Problem + +ECC now has a large and growing skill surface, but generated/imported/learned +skills do not yet have a clear long-term placement and provenance policy. 
+ +This creates several problems: + +- unclear separation between curated skills and generated/learned skills +- validator noise around directories that may or may not exist locally +- weak provenance for imported or machine-generated skill content +- uncertainty about where future automated learning outputs should live + +As ECC grows, the repo needs explicit rules for where generated skill artifacts +belong and how they are identified. + +## Scope + +Define a repo-wide policy for: + +- curated vs generated vs imported skill placement +- provenance metadata requirements +- validator behavior for optional/generated skill directories +- whether generated skills are shipped, ignored, or materialized during + install/build steps + +## Non-Goals + +- Building a full external skill marketplace +- Rewriting all existing skill content in one pass +- Solving every content-quality issue in the same issue + +## Acceptance Criteria + +- A documented placement policy exists for generated/imported skills +- Provenance requirements are explicit +- Validators no longer produce ambiguous behavior around optional/generated + skill locations +- The policy clearly states what is publishable vs local-only +- Follow-on implementation work is split into concrete, bounded PR-sized steps +``` diff --git a/docs/PR-399-REVIEW-2026-03-12.md b/docs/PR-399-REVIEW-2026-03-12.md new file mode 100644 index 00000000..98a2ef23 --- /dev/null +++ b/docs/PR-399-REVIEW-2026-03-12.md @@ -0,0 +1,59 @@ +# PR 399 Review — March 12, 2026 + +## Scope + +Reviewed `#399`: + +- title: `fix(observe): 5-layer automated session guard to prevent self-loop observations` +- head: `e7df0e588ceecfcd1072ef616034ccd33bb0f251` +- files changed: + - `skills/continuous-learning-v2/hooks/observe.sh` + - `skills/continuous-learning-v2/agents/observer-loop.sh` + +## Findings + +### Medium + +1. 
`skills/continuous-learning-v2/hooks/observe.sh` + +The new `CLAUDE_CODE_ENTRYPOINT` guard uses a finite allowlist of known +non-`cli` values (`sdk-ts`, `sdk-py`, `sdk-cli`, `mcp`, `remote`). + +That leaves a forward-compatibility hole: any future non-`cli` entrypoint value +will fall through and be treated as interactive. That reintroduces the exact +class of automated-session observation the PR is trying to prevent. + +The safer rule is: + +- allow only `cli` +- treat every other explicit entrypoint as automated +- keep the default fallback as `cli` when the variable is unset + +Suggested shape: + +```bash +case "${CLAUDE_CODE_ENTRYPOINT:-cli}" in + cli) ;; + *) exit 0 ;; +esac +``` + +## Merge Recommendation + +`Needs one follow-up change before merge.` + +The PR direction is correct: + +- it closes the ECC self-observation loop in `observer-loop.sh` +- it adds multiple guard layers in the right area of `observe.sh` +- it already addressed the cheaper-first ordering and skip-path trimming issues + +But the entrypoint guard should be generalized before merge so the automation +filter does not silently age out when Claude Code introduces additional +non-interactive entrypoints. + +## Residual Risk + +- There is still no dedicated regression test coverage around the new shell + guard behavior, so the final merge should include at least one executable + verification pass for the entrypoint and skip-path cases. diff --git a/docs/PR-QUEUE-TRIAGE-2026-03-13.md b/docs/PR-QUEUE-TRIAGE-2026-03-13.md new file mode 100644 index 00000000..892ff579 --- /dev/null +++ b/docs/PR-QUEUE-TRIAGE-2026-03-13.md @@ -0,0 +1,355 @@ +# PR Review And Queue Triage — March 13, 2026 + +## Snapshot + +This document records a live GitHub triage snapshot for the +`everything-claude-code` pull-request queue as of `2026-03-13T08:33:31Z`. 
+ +Sources used: + +- `gh pr view` +- `gh pr checks` +- `gh pr diff --name-only` +- targeted local verification against the merged `#399` head + +Stale threshold used for this pass: + +- `last updated before 2026-02-11` (`>30` days before March 13, 2026) + +## PR `#399` Retrospective Review + +PR: + +- `#399` — `fix(observe): 5-layer automated session guard to prevent self-loop observations` +- state: `MERGED` +- merged at: `2026-03-13T06:40:03Z` +- merge commit: `c52a28ace9e7e84c00309fc7b629955dfc46ecf9` + +Files changed: + +- `skills/continuous-learning-v2/hooks/observe.sh` +- `skills/continuous-learning-v2/agents/observer-loop.sh` + +Validation performed against merged head `546628182200c16cc222b97673ddd79e942eacce`: + +- `bash -n` on both changed shell scripts +- `node tests/hooks/hooks.test.js` (`204` passed, `0` failed) +- targeted hook invocations for: + - interactive CLI session + - `CLAUDE_CODE_ENTRYPOINT=mcp` + - `ECC_HOOK_PROFILE=minimal` + - `ECC_SKIP_OBSERVE=1` + - `agent_id` payload + - trimmed `ECC_OBSERVE_SKIP_PATHS` + +Behavioral result: + +- the core self-loop fix works +- automated-session guard branches suppress observation writes as intended +- the final `non-cli => exit` entrypoint logic is the correct fail-closed shape + +Remaining findings: + +1. Medium: skipped automated sessions still create homunculus project state + before the new guards exit. + `observe.sh` resolves `cwd` and sources project detection before reaching the + automated-session guard block, so `detect-project.sh` still creates + `projects//...` directories and updates `projects.json` for sessions that + later exit early. +2. Low: the new guard matrix shipped without direct regression coverage. + The hook test suite still validates adjacent behavior, but it does not + directly assert the new `CLAUDE_CODE_ENTRYPOINT`, `ECC_HOOK_PROFILE`, + `ECC_SKIP_OBSERVE`, `agent_id`, or trimmed skip-path branches. 
+ +Verdict: + +- `#399` is technically correct for its primary goal and was safe to merge as + the urgent loop-stop fix. +- It still warrants a follow-up issue or patch to move automated-session guards + ahead of project-registration side effects and to add explicit guard-path + tests. + +## Open PR Inventory + +There are currently `4` open PRs. + +### Queue Table + +| PR | Title | Draft | Mergeable | Merge State | Updated | Stale | Current Verdict | +| --- | --- | --- | --- | --- | --- | --- | --- | +| `#292` | `chore(config): governance and config foundation (PR #272 split 1/6)` | `false` | `MERGEABLE` | `UNSTABLE` | `2026-03-13T07:26:55Z` | `No` | `Best current merge candidate` | +| `#298` | `feat(agents,skills,rules): add Rust, Java, mobile, DevOps, and performance content` | `false` | `CONFLICTING` | `DIRTY` | `2026-03-11T04:29:07Z` | `No` | `Needs changes before review can finish` | +| `#336` | `Customisation for Codex CLI - Features from Claude Code and OpenCode` | `true` | `MERGEABLE` | `UNSTABLE` | `2026-03-13T07:26:12Z` | `No` | `Needs manual review and draft exit` | +| `#420` | `feat: add laravel skills` | `true` | `MERGEABLE` | `UNSTABLE` | `2026-03-12T22:57:36Z` | `No` | `Low-risk draft, review after draft exit` | + +No currently open PR is stale by the `>30 days since last update` rule. + +## Per-PR Assessment + +### `#292` — Governance / Config Foundation + +Live state: + +- open +- non-draft +- `MERGEABLE` +- merge state `UNSTABLE` +- visible checks: + - `CodeRabbit` passed + - `GitGuardian Security Checks` passed + +Scope: + +- `.env.example` +- `.github/ISSUE_TEMPLATE/copilot-task.md` +- `.github/PULL_REQUEST_TEMPLATE.md` +- `.gitignore` +- `.markdownlint.json` +- `.tool-versions` +- `VERSION` + +Assessment: + +- This is the cleanest merge candidate in the current queue. +- The branch was already refreshed onto current `main`. +- The currently visible bot feedback is minor/nit-level rather than obviously + merge-blocking. 
+- The main caution is that only external bot checks are visible right now; no + GitHub Actions matrix run appears in the current PR checks output. + +Current recommendation: + +- `Mergeable after one final owner pass.` +- If you want a conservative path, do one quick human review of the remaining + `.env.example`, PR-template, and `.tool-versions` nitpicks before merge. + +### `#298` — Large Multi-Domain Content Expansion + +Live state: + +- open +- non-draft +- `CONFLICTING` +- merge state `DIRTY` +- visible checks: + - `CodeRabbit` passed + - `GitGuardian Security Checks` passed + - `cubic · AI code reviewer` passed + +Scope: + +- `35` files +- large documentation and skill/rule expansion across Java, Rust, mobile, + DevOps, performance, data, and MLOps + +Assessment: + +- This PR is not ready for merge. +- It conflicts with current `main`, so it is not even mergeable at the branch + level yet. +- cubic identified `34` issues across `35` files in the current review. + Those findings are substantive and technical, not just style cleanup, and + they cover broken or misleading examples across several new skills. +- Even without the conflict, the scope is large enough that it needs a deliberate + content-fix pass rather than a quick merge decision. + +Current recommendation: + +- `Needs changes.` +- Rebase or restack first, then resolve the substantive example-quality issues. +- If momentum matters, split by domain rather than carrying one very large PR. 
+ +### `#336` — Codex CLI Customization + +Live state: + +- open +- draft +- `MERGEABLE` +- merge state `UNSTABLE` +- visible checks: + - `CodeRabbit` passed + - `GitGuardian Security Checks` passed + +Scope: + +- `scripts/codex-git-hooks/pre-commit` +- `scripts/codex-git-hooks/pre-push` +- `scripts/codex/check-codex-global-state.sh` +- `scripts/codex/install-global-git-hooks.sh` +- `scripts/sync-ecc-to-codex.sh` + +Assessment: + +- This PR is no longer conflicting, but it is still draft-only and has not had + a meaningful first-party review pass. +- It modifies user-global Codex setup behavior and git-hook installation, so the + operational blast radius is higher than a docs-only PR. +- The visible checks are only external bots; there is no full GitHub Actions run + shown in the current check set. +- Because the branch comes from a contributor fork `main`, it also deserves an + extra sanity pass on what exactly is being proposed before changing status. + +Current recommendation: + +- `Needs changes before merge readiness`, where the required changes are process + and review oriented rather than an already-proven code defect: + - finish manual review + - run or confirm validation on the global-state scripts + - take it out of draft only after that review is complete + +### `#420` — Laravel Skills + +Live state: + +- open +- draft +- `MERGEABLE` +- merge state `UNSTABLE` +- visible checks: + - `CodeRabbit` passed + - `GitGuardian Security Checks` passed + +Scope: + +- `README.md` +- `examples/laravel-api-CLAUDE.md` +- `rules/php/patterns.md` +- `rules/php/security.md` +- `rules/php/testing.md` +- `skills/configure-ecc/SKILL.md` +- `skills/laravel-patterns/SKILL.md` +- `skills/laravel-security/SKILL.md` +- `skills/laravel-tdd/SKILL.md` +- `skills/laravel-verification/SKILL.md` + +Assessment: + +- This is content-heavy and operationally lower risk than `#336`. +- It is still draft and has not had a substantive human review pass yet. 
+- The visible checks are external bots only. +- Nothing in the live PR state suggests a merge blocker yet, but it is not ready + to be merged simply because it is still draft and under-reviewed. + +Current recommendation: + +- `Review next after the highest-priority non-draft work.` +- Likely a good review candidate once the author is ready to exit draft. + +## Mergeability Buckets + +### Mergeable Now Or After A Final Owner Pass + +- `#292` + +### Needs Changes Before Merge + +- `#298` +- `#336` + +### Draft / Needs Review Before Any Merge Decision + +- `#420` + +### Stale `>30 Days` + +- none + +## Recommended Order + +1. `#292` + This is the cleanest live merge candidate. +2. `#420` + Low runtime risk, but wait for draft exit and a real review pass. +3. `#336` + Review carefully because it changes global Codex sync and hook behavior. +4. `#298` + Rebase and fix the substantive content issues before spending more review time + on it. + +## Bottom Line + +- `#399`: safe bugfix merge with one follow-up cleanup still warranted +- `#292`: highest-priority merge candidate in the current open queue +- `#298`: not mergeable; conflicts plus substantive content defects +- `#336`: no longer conflicting, but not ready while still draft and lightly + validated +- `#420`: draft, low-risk content lane, review after the non-draft queue + +## Live Refresh + +Refreshed at `2026-03-13T22:11:40Z`. + +### Main Branch + +- `origin/main` is green right now, including the Windows test matrix. +- Mainline CI repair is not the current bottleneck. 
+ +### Updated Queue Read + +#### `#292` — Governance / Config Foundation + +- open +- non-draft +- `MERGEABLE` +- visible checks: + - `CodeRabbit` passed + - `GitGuardian Security Checks` passed +- highest-signal remaining work is not CI repair; it is the small correctness + pass on `.env.example` and PR-template alignment before merge + +Current recommendation: + +- `Next actionable PR.` +- Either patch the remaining doc/config correctness issues, or do one final + owner pass and merge if you accept the current tradeoffs. + +#### `#420` — Laravel Skills + +- open +- draft +- `MERGEABLE` +- visible checks: + - `CodeRabbit` skipped because the PR is draft + - `GitGuardian Security Checks` passed +- no substantive human review is visible yet + +Current recommendation: + +- `Review after the non-draft queue.` +- Low implementation risk, but not merge-ready while still draft and + under-reviewed. + +#### `#336` — Codex CLI Customization + +- open +- draft +- `MERGEABLE` +- visible checks: + - `CodeRabbit` passed + - `GitGuardian Security Checks` passed +- still needs a deliberate manual review because it touches global Codex sync + and git-hook installation behavior + +Current recommendation: + +- `Manual-review lane, not immediate merge lane.` + +#### `#298` — Large Content Expansion + +- open +- non-draft +- `CONFLICTING` +- still the hardest remaining PR in the queue + +Current recommendation: + +- `Last priority among current open PRs.` +- Rebase first, then handle the substantive content/example corrections. + +### Current Order + +1. `#292` +2. `#420` +3. `#336` +4. `#298` diff --git a/docs/SELECTIVE-INSTALL-ARCHITECTURE.md b/docs/SELECTIVE-INSTALL-ARCHITECTURE.md new file mode 100644 index 00000000..77c03375 --- /dev/null +++ b/docs/SELECTIVE-INSTALL-ARCHITECTURE.md @@ -0,0 +1,933 @@ +# ECC 2.0 Selective Install Discovery + +## Purpose + +This document turns the March 11 mega-plan selective-install requirement into a +concrete ECC 2.0 discovery design. 
+ +The goal is not just "fewer files copied during install." The actual target is +an install system that can answer, deterministically: + +- what was requested +- what was resolved +- what was copied or generated +- what target-specific transforms were applied +- what ECC owns and may safely remove or repair later + +That is the missing contract between ECC 1.x installation and an ECC 2.0 +control plane. + +## Current Implemented Foundation + +The first selective-install substrate already exists in-repo: + +- `manifests/install-modules.json` +- `manifests/install-profiles.json` +- `schemas/install-modules.schema.json` +- `schemas/install-profiles.schema.json` +- `schemas/install-state.schema.json` +- `scripts/ci/validate-install-manifests.js` +- `scripts/lib/install-manifests.js` +- `scripts/lib/install/request.js` +- `scripts/lib/install/runtime.js` +- `scripts/lib/install/apply.js` +- `scripts/lib/install-targets/` +- `scripts/lib/install-state.js` +- `scripts/lib/install-executor.js` +- `scripts/lib/install-lifecycle.js` +- `scripts/ecc.js` +- `scripts/install-apply.js` +- `scripts/install-plan.js` +- `scripts/list-installed.js` +- `scripts/doctor.js` + +Current capabilities: + +- machine-readable module and profile catalogs +- CI validation that manifest entries point at real repo paths +- dependency expansion and target filtering +- adapter-aware operation planning +- canonical request normalization for legacy and manifest install modes +- explicit runtime dispatch from normalized requests into plan creation +- legacy and manifest installs both write durable install-state +- read-only inspection of install plans before any mutation +- unified `ecc` CLI routing install, planning, and lifecycle commands +- lifecycle inspection and mutation via `list-installed`, `doctor`, `repair`, + and `uninstall` + +Current limitation: + +- target-specific merge/remove semantics are still scaffold-level for some modules +- legacy `ecc-install` compatibility still points at 
`install.sh` +- publish surface is still broad in `package.json` + +## Current Code Review + +The current installer stack is already much healthier than the original +language-first shell installer, but it still concentrates too much +responsibility in a few files. + +### Current Runtime Path + +The runtime flow today is: + +1. `install.sh` + thin shell wrapper that resolves the real package root +2. `scripts/install-apply.js` + user-facing installer CLI for legacy and manifest modes +3. `scripts/lib/install/request.js` + CLI parsing plus canonical request normalization +4. `scripts/lib/install/runtime.js` + runtime dispatch from normalized requests into install plans +5. `scripts/lib/install-executor.js` + argument translation, legacy compatibility, operation materialization, + filesystem mutation, and install-state write +6. `scripts/lib/install-manifests.js` + module/profile catalog loading plus dependency expansion +7. `scripts/lib/install-targets/` + target root and destination-path scaffolding +8. `scripts/lib/install-state.js` + schema-backed install-state read/write +9. `scripts/lib/install-lifecycle.js` + doctor/repair/uninstall behavior derived from stored operations + +That is enough to prove the selective-install substrate, but not enough to make +the installer architecture feel settled. + +### Current Strengths + +- install intent is now explicit through `--profile` and `--modules` +- request parsing and request normalization are now split from the CLI shell +- target root resolution is already adapterized +- lifecycle commands now use durable install-state instead of guessing +- the repo already has a unified Node entrypoint through `ecc` and + `install-apply.js` + +### Current Coupling Still Present + +1. `install-executor.js` is smaller than before, but still carrying too many + planning and materialization layers at once. 
+ The request boundary is now extracted, but legacy request translation, + manifest-plan expansion, and operation materialization still live together. +2. target adapters are still too thin. + Today they mostly resolve roots and scaffold destination paths. The real + install semantics still live in executor branches and path heuristics. +3. the planner/executor boundary is not clean enough yet. + `install-manifests.js` resolves modules, but the final install operation set + is still partly constructed in executor-specific logic. +4. lifecycle behavior depends on low-level recorded operations more than on + stable module semantics. + That works for plain file copy, but becomes brittle for merge/generate/remove + behaviors. +5. compatibility mode is mixed directly into the main installer runtime. + Legacy language installs should behave like a request adapter, not as a + parallel installer architecture. + +## Proposed Modular Architecture Changes + +The next architectural step is to separate the installer into explicit layers, +with each layer returning stable data instead of immediately mutating files. + +### Target State + +The desired install pipeline is: + +1. CLI surface +2. request normalization +3. module resolution +4. target planning +5. operation planning +6. execution +7. install-state persistence +8. lifecycle services built on the same operation contract + +The main idea is simple: + +- manifests describe content +- adapters describe target-specific landing semantics +- planners describe what should happen +- executors apply those plans +- lifecycle commands reuse the same plan/state model instead of reinventing it + +### Proposed Runtime Layers + +#### 1. 
CLI Surface + +Responsibility: + +- parse user intent only +- route to install, plan, doctor, repair, uninstall +- render human or JSON output + +Should not own: + +- legacy language translation +- target-specific install rules +- operation construction + +Suggested files: + +```text +scripts/ecc.js +scripts/install-apply.js +scripts/install-plan.js +scripts/doctor.js +scripts/repair.js +scripts/uninstall.js +``` + +These stay as entrypoints, but become thin wrappers around library modules. + +#### 2. Request Normalizer + +Responsibility: + +- translate raw CLI flags into a canonical install request +- convert legacy language installs into a compatibility request shape +- reject mixed or ambiguous inputs early + +Suggested canonical request: + +```json +{ + "mode": "manifest", + "target": "cursor", + "profile": "developer", + "modules": [], + "legacyLanguages": [], + "dryRun": false +} +``` + +or, in compatibility mode: + +```json +{ + "mode": "legacy-compat", + "target": "claude", + "profile": null, + "modules": [], + "legacyLanguages": ["typescript", "python"], + "dryRun": false +} +``` + +This lets the rest of the pipeline ignore whether the request came from old or +new CLI syntax. + +#### 3. Module Resolver + +Responsibility: + +- load manifest catalogs +- expand dependencies +- reject conflicts +- filter unsupported modules per target +- return a canonical resolution object + +This layer should stay pure and read-only. + +It should not know: + +- destination filesystem paths +- merge semantics +- copy strategies + +Current nearest file: + +- `scripts/lib/install-manifests.js` + +Suggested split: + +```text +scripts/lib/install/catalog.js +scripts/lib/install/resolve-request.js +scripts/lib/install/resolve-modules.js +``` + +#### 4. 
Target Planner + +Responsibility: + +- select the install target adapter +- resolve target root +- resolve install-state path +- expand module-to-target mapping rules +- emit target-aware operation intents + +This is where target-specific meaning should live. + +Examples: + +- Claude may preserve native hierarchy under `~/.claude` +- Cursor may sync bundled `.cursor` root children differently from rules +- generated configs may require merge or replace semantics depending on target + +Current nearest files: + +- `scripts/lib/install-targets/helpers.js` +- `scripts/lib/install-targets/registry.js` + +Suggested evolution: + +```text +scripts/lib/install/targets/registry.js +scripts/lib/install/targets/claude-home.js +scripts/lib/install/targets/cursor-project.js +scripts/lib/install/targets/antigravity-project.js +``` + +Each adapter should eventually expose more than `resolveRoot`. +It should own path and strategy mapping for its target family. + +#### 5. Operation Planner + +Responsibility: + +- turn module resolution plus adapter rules into a typed operation graph +- emit first-class operations such as: + - `copy-file` + - `copy-tree` + - `merge-json` + - `render-template` + - `remove` +- attach ownership and validation metadata + +This is the missing architectural seam in the current installer. + +Today, operations are partly scaffold-level and partly executor-specific. +ECC 2.0 should make operation planning a standalone phase so that: + +- `plan` becomes a true preview of execution +- `doctor` can validate intended behavior, not just current files +- `repair` can rebuild exact missing work safely +- `uninstall` can reverse only managed operations + +#### 6. Execution Engine + +Responsibility: + +- apply a typed operation graph +- enforce overwrite and ownership rules +- stage writes safely +- collect final applied-operation results + +This layer should not decide *what* to do. +It should only decide *how* to apply a provided operation kind safely. 
+ +Current nearest file: + +- `scripts/lib/install-executor.js` + +Recommended refactor: + +```text +scripts/lib/install/executor/apply-plan.js +scripts/lib/install/executor/apply-copy.js +scripts/lib/install/executor/apply-merge-json.js +scripts/lib/install/executor/apply-remove.js +``` + +That turns executor logic from one large branching runtime into a set of small +operation handlers. + +#### 7. Install-State Store + +Responsibility: + +- validate and persist install-state +- record canonical request, resolution, and applied operations +- support lifecycle commands without forcing them to reverse-engineer installs + +Current nearest file: + +- `scripts/lib/install-state.js` + +This layer is already close to the right shape. The main remaining change is to +store richer operation metadata once merge/generate semantics are real. + +#### 8. Lifecycle Services + +Responsibility: + +- `list-installed`: inspect state only +- `doctor`: compare desired/install-state view against current filesystem +- `repair`: regenerate a plan from state and reapply safe operations +- `uninstall`: remove only ECC-owned outputs + +Current nearest file: + +- `scripts/lib/install-lifecycle.js` + +This layer should eventually operate on operation kinds and ownership policies, +not just on raw `copy-file` records. + +## Proposed File Layout + +The clean modular end state should look roughly like this: + +```text +scripts/lib/install/ + catalog.js + request.js + resolve-modules.js + plan-operations.js + state-store.js + targets/ + registry.js + claude-home.js + cursor-project.js + antigravity-project.js + codex-home.js + opencode-home.js + executor/ + apply-plan.js + apply-copy.js + apply-merge-json.js + apply-render-template.js + apply-remove.js + lifecycle/ + discover.js + doctor.js + repair.js + uninstall.js +``` + +This is not a packaging split. +It is a code-ownership split inside the current repo so each layer has one job. 
+ +## Migration Map From Current Files + +The lowest-risk migration path is evolutionary, not a rewrite. + +### Keep + +- `install.sh` as the public compatibility shim +- `scripts/ecc.js` as the unified CLI +- `scripts/lib/install-state.js` as the starting point for the state store +- current target adapter IDs and state locations + +### Extract + +- request parsing and compatibility translation out of + `scripts/lib/install-executor.js` +- target-aware operation planning out of executor branches and into target + adapters plus planner modules +- lifecycle-specific analysis out of the shared lifecycle monolith into smaller + services + +### Replace Gradually + +- broad path-copy heuristics with typed operations +- scaffold-only adapter planning with adapter-owned semantics +- legacy language install branches with legacy request translation into the same + planner/executor pipeline + +## Immediate Architecture Changes To Make Next + +If the goal is ECC 2.0 and not just “working enough,” the next modularization +steps should be: + +1. split `install-executor.js` into request normalization, operation planning, + and execution modules +2. move target-specific strategy decisions into adapter-owned planning methods +3. make `repair` and `uninstall` operate on typed operation handlers rather than + only plain `copy-file` records +4. teach manifests about install strategy and ownership so the planner no + longer depends on path heuristics +5. 
narrow the npm publish surface only after the internal module boundaries are + stable + +## Why The Current Model Is Not Enough + +Today ECC still behaves like a broad payload copier: + +- `install.sh` is language-first and target-branch-heavy +- targets are partly implicit in directory layout +- uninstall, repair, and doctor now exist but are still early lifecycle commands +- the repo cannot prove what a prior install actually wrote +- publish surface is still broad in `package.json` + +That creates the problems already called out in the mega plan: + +- users pull more content than their harness or workflow needs +- support and upgrades are harder because installs are not recorded +- target behavior drifts because install logic is duplicated in shell branches +- future targets like Codex or OpenCode require more special-case logic instead + of reusing a stable install contract + +## ECC 2.0 Design Thesis + +Selective install should be modeled as: + +1. resolve requested intent into a canonical module graph +2. translate that graph through a target adapter +3. execute a deterministic install operation set +4. write install-state as the durable source of truth + +That means ECC 2.0 needs two contracts, not one: + +- a content contract + what modules exist and how they depend on each other +- a target contract + how those modules land inside Claude, Cursor, Antigravity, Codex, or OpenCode + +The current repo only had the first half in early form. +The current repo now has the first full vertical slice, but not the full +target-specific semantics. + +## Design Constraints + +1. Keep `everything-claude-code` as the canonical source repo. +2. Preserve existing `install.sh` flows during migration. +3. Support home-scoped and project-scoped targets from the same planner. +4. Make uninstall/repair/doctor possible without guessing. +5. Avoid per-target copy logic leaking back into module definitions. +6. Keep future Codex and OpenCode support additive, not a rewrite. 
+ +## Canonical Artifacts + +### 1. Module Catalog + +The module catalog is the canonical content graph. + +Current fields already implemented: + +- `id` +- `kind` +- `description` +- `paths` +- `targets` +- `dependencies` +- `defaultInstall` +- `cost` +- `stability` + +Fields still needed for ECC 2.0: + +- `installStrategy` + for example `copy`, `flatten-rules`, `generate`, `merge-config` +- `ownership` + whether ECC fully owns the target path or only generated files under it +- `pathMode` + for example `preserve`, `flatten`, `target-template` +- `conflicts` + modules or path families that cannot coexist on one target +- `publish` + whether the module is packaged by default, optional, or generated post-install + +Suggested future shape: + +```json +{ + "id": "hooks-runtime", + "kind": "hooks", + "paths": ["hooks", "scripts/hooks"], + "targets": ["claude", "cursor", "opencode"], + "dependencies": [], + "installStrategy": "copy", + "pathMode": "preserve", + "ownership": "managed", + "defaultInstall": true, + "cost": "medium", + "stability": "stable" +} +``` + +### 2. Profile Catalog + +Profiles stay thin. + +They should express user intent, not duplicate target logic. + +Current examples already implemented: + +- `core` +- `developer` +- `security` +- `research` +- `full` + +Fields still needed: + +- `defaultTargets` +- `recommendedFor` +- `excludes` +- `requiresConfirmation` + +That lets ECC 2.0 say things like: + +- `developer` is the recommended default for Claude and Cursor +- `research` may be heavy for narrow local installs +- `full` is allowed but not default + +### 3. Target Adapters + +This is the main missing layer. + +The module graph should not know: + +- where Claude home lives +- how Cursor flattens or remaps content +- which config files need merge semantics instead of blind copy + +That belongs to a target adapter. 
+
+Suggested interface:
+
+```ts
+type InstallTargetAdapter = {
+  id: string;
+  kind: "home" | "project";
+  supports(target: string): boolean;
+  resolveRoot(input?: string): Promise<string>;
+  planOperations(input: InstallOperationInput): Promise<InstallOperation[]>;
+  validate?(input: InstallOperationInput): Promise<void>;
+};
+```
+
+Suggested first adapters:
+
+1. `claude-home`
+   writes into `~/.claude/...`
+2. `cursor-project`
+   writes into `./.cursor/...`
+3. `antigravity-project`
+   writes into `./.agent/...`
+4. `codex-home`
+   later
+5. `opencode-home`
+   later
+
+This matches the same pattern already proposed in the session-adapter discovery
+doc: canonical contract first, harness-specific adapter second.
+
+## Install Planning Model
+
+The current `scripts/install-plan.js` CLI proves the repo can resolve requested
+modules into a filtered module set.
+
+ECC 2.0 needs the next layer: operation planning.
+
+Suggested phases:
+
+1. input normalization
+   - parse `--target`
+   - parse `--profile`
+   - parse `--modules`
+   - optionally translate legacy language args
+2. module resolution
+   - expand dependencies
+   - reject conflicts
+   - filter by supported targets
+3. adapter planning
+   - resolve target root
+   - derive exact copy or generation operations
+   - identify config merges and target remaps
+4. dry-run output
+   - show selected modules
+   - show skipped modules
+   - show exact file operations
+5. mutation
+   - execute the operation plan
+6. 
state write + - persist install-state only after successful completion + +Suggested operation shape: + +```json +{ + "kind": "copy", + "moduleId": "rules-core", + "source": "rules/common/coding-style.md", + "destination": "/Users/example/.claude/rules/common/coding-style.md", + "ownership": "managed", + "overwritePolicy": "replace" +} +``` + +Other operation kinds: + +- `copy` +- `copy-tree` +- `flatten-copy` +- `render-template` +- `merge-json` +- `merge-jsonc` +- `mkdir` +- `remove` + +## Install-State Contract + +Install-state is the durable contract that ECC 1.x is missing. + +Suggested path conventions: + +- Claude target: + `~/.claude/ecc/install-state.json` +- Cursor target: + `./.cursor/ecc-install-state.json` +- Antigravity target: + `./.agent/ecc-install-state.json` +- future Codex target: + `~/.codex/ecc-install-state.json` + +Suggested payload: + +```json +{ + "schemaVersion": "ecc.install.v1", + "installedAt": "2026-03-13T00:00:00Z", + "lastValidatedAt": "2026-03-13T00:00:00Z", + "target": { + "id": "claude-home", + "root": "/Users/example/.claude" + }, + "request": { + "profile": "developer", + "modules": ["orchestration"], + "legacyLanguages": ["typescript", "python"] + }, + "resolution": { + "selectedModules": [ + "rules-core", + "agents-core", + "commands-core", + "hooks-runtime", + "platform-configs", + "workflow-quality", + "framework-language", + "database", + "orchestration" + ], + "skippedModules": [] + }, + "source": { + "repoVersion": "1.9.0", + "repoCommit": "git-sha", + "manifestVersion": 1 + }, + "operations": [ + { + "kind": "copy", + "moduleId": "rules-core", + "destination": "/Users/example/.claude/rules/common/coding-style.md", + "digest": "sha256:..." 
+ } + ] +} +``` + +State requirements: + +- enough detail for uninstall to remove only ECC-managed outputs +- enough detail for repair to compare desired versus actual installed files +- enough detail for doctor to explain drift instead of guessing + +## Lifecycle Commands + +The following commands are the lifecycle surface for install-state: + +1. `ecc list-installed` +2. `ecc uninstall` +3. `ecc doctor` +4. `ecc repair` + +Current implementation status: + +- `ecc list-installed` routes to `node scripts/list-installed.js` +- `ecc uninstall` routes to `node scripts/uninstall.js` +- `ecc doctor` routes to `node scripts/doctor.js` +- `ecc repair` routes to `node scripts/repair.js` +- legacy script entrypoints remain available during migration + +### `list-installed` + +Responsibilities: + +- show target id and root +- show requested profile/modules +- show resolved modules +- show source version and install time + +### `uninstall` + +Responsibilities: + +- load install-state +- remove only ECC-managed destinations recorded in state +- leave user-authored unrelated files untouched +- delete install-state only after successful cleanup + +### `doctor` + +Responsibilities: + +- detect missing managed files +- detect unexpected config drift +- detect target roots that no longer exist +- detect manifest/version mismatch + +### `repair` + +Responsibilities: + +- rebuild the desired operation plan from install-state +- re-copy missing or drifted managed files +- refuse repair if requested modules no longer exist in the current manifest + unless a compatibility map exists + +## Legacy Compatibility Layer + +Current `install.sh` accepts: + +- `--target ` +- a list of language names + +That behavior cannot disappear in one cut because users already depend on it. + +ECC 2.0 should translate legacy language arguments into a compatibility request. + +Suggested approach: + +1. keep existing CLI shape for legacy mode +2. 
map language names to module requests such as: + - `rules-core` + - target-compatible rule subsets +3. write install-state even for legacy installs +4. label the request as `legacyMode: true` + +Example: + +```json +{ + "request": { + "legacyMode": true, + "legacyLanguages": ["typescript", "python"] + } +} +``` + +This keeps old behavior available while moving all installs onto the same state +contract. + +## Publish Boundary + +The current npm package still publishes a broad payload through `package.json`. + +ECC 2.0 should improve this carefully. + +Recommended sequence: + +1. keep one canonical npm package first +2. use manifests to drive install-time selection before changing publish shape +3. only later consider reducing packaged surface where safe + +Why: + +- selective install can ship before aggressive package surgery +- uninstall and repair depend on install-state more than publish changes +- Codex/OpenCode support is easier if the package source remains unified + +Possible later directions: + +- generated slim bundles per profile +- generated target-specific tarballs +- optional remote fetch of heavy modules + +Those are Phase 3 or later, not prerequisites for profile-aware installs. + +## File Layout Recommendation + +Suggested next files: + +```text +scripts/lib/install-targets/ + claude-home.js + cursor-project.js + antigravity-project.js + registry.js +scripts/lib/install-state.js +scripts/ecc.js +scripts/install-apply.js +scripts/list-installed.js +scripts/uninstall.js +scripts/doctor.js +scripts/repair.js +tests/lib/install-targets.test.js +tests/lib/install-state.test.js +tests/lib/install-lifecycle.test.js +``` + +`install.sh` can remain the user-facing entry point during migration, but it +should become a thin shell around a Node-based planner and executor rather than +keep growing per-target shell branches. + +## Implementation Sequence + +### Phase 1: Planner To Contract + +1. keep current manifest schema and resolver +2. 
add operation planning on top of resolved modules +3. define `ecc.install.v1` state schema +4. write install-state on successful install + +### Phase 2: Target Adapters + +1. extract Claude install behavior into `claude-home` adapter +2. extract Cursor install behavior into `cursor-project` adapter +3. extract Antigravity install behavior into `antigravity-project` adapter +4. reduce `install.sh` to argument parsing plus adapter invocation + +### Phase 3: Lifecycle + +1. add stronger target-specific merge/remove semantics +2. extend repair/uninstall coverage for non-copy operations +3. reduce package shipping surface to the module graph instead of broad folders +4. decide when `ecc-install` should become a thin alias for `ecc install` + +### Phase 4: Publish And Future Targets + +1. evaluate safe reduction of `package.json` publish surface +2. add `codex-home` +3. add `opencode-home` +4. consider generated profile bundles if packaging pressure remains high + +## Immediate Repo-Local Next Steps + +The highest-signal next implementation moves in this repo are: + +1. add target-specific merge/remove semantics for config-like modules +2. extend repair and uninstall beyond simple copy-file operations +3. reduce package shipping surface to the module graph instead of broad folders +4. decide whether `ecc-install` remains separate or becomes `ecc install` +5. add tests that lock down: + - target-specific merge/remove behavior + - repair and uninstall safety for non-copy operations + - unified `ecc` CLI routing and compatibility guarantees + +## Open Questions + +1. Should rules stay language-addressable in legacy mode forever, or only during + the migration window? +2. Should `platform-configs` always install with `core`, or be split into + smaller target-specific modules? +3. Do we want config merge semantics recorded at the operation level or only in + adapter logic? +4. Should heavy skill families eventually move to fetch-on-demand rather than + package-time inclusion? 
+5. Should Codex and OpenCode target adapters ship only after the Claude/Cursor + lifecycle commands are stable? + +## Recommendation + +Treat the current manifest resolver as adapter `0` for installs: + +1. preserve the current install surface +2. move real copy behavior behind target adapters +3. write install-state for every successful install +4. make uninstall, doctor, and repair depend only on install-state +5. only then shrink packaging or add more targets + +That is the shortest path from ECC 1.x installer sprawl to an ECC 2.0 +install/control contract that is deterministic, supportable, and extensible. diff --git a/docs/SELECTIVE-INSTALL-DESIGN.md b/docs/SELECTIVE-INSTALL-DESIGN.md new file mode 100644 index 00000000..817210ce --- /dev/null +++ b/docs/SELECTIVE-INSTALL-DESIGN.md @@ -0,0 +1,489 @@ +# ECC Selective Install Design + +## Purpose + +This document defines the user-facing selective-install design for ECC. + +It complements +`docs/SELECTIVE-INSTALL-ARCHITECTURE.md`, which focuses on internal runtime +architecture and code boundaries. + +This document answers the product and operator questions first: + +- how users choose ECC components +- what the CLI should feel like +- what config file should exist +- how installation should behave across harness targets +- how the design maps onto the current ECC codebase without requiring a rewrite + +## Problem + +Today ECC still feels like a large payload installer even though the repo now +has first-pass manifest and lifecycle support. + +Users need a simpler mental model: + +- install the baseline +- add the language packs they actually use +- add the framework configs they actually want +- add optional capability packs like security, research, or orchestration + +The selective-install system should make ECC feel composable instead of +all-or-nothing. + +In the current substrate, user-facing components are still an alias layer over +coarser internal install modules. 
That means include/exclude is already useful +at the module-selection level, but some file-level boundaries remain imperfect +until the underlying module graph is split more finely. + +## Goals + +1. Let users install a small default ECC footprint quickly. +2. Let users compose installs from reusable component families: + - core rules + - language packs + - framework packs + - capability packs + - target/platform configs +3. Keep one consistent UX across Claude, Cursor, Antigravity, Codex, and + OpenCode. +4. Keep installs inspectable, repairable, and uninstallable. +5. Preserve backward compatibility with the current `ecc-install typescript` + style during rollout. + +## Non-Goals + +- packaging ECC into multiple npm packages in the first phase +- building a remote marketplace +- full control-plane UI in the same phase +- solving every skill-classification problem before selective install ships + +## User Experience Principles + +### 1. Start Small + +A user should be able to get a useful ECC install with one command: + +```bash +ecc install --target claude --profile core +``` + +The default experience should not assume the user wants every skill family and +every framework. + +### 2. Build Up By Intent + +The user should think in terms of: + +- "I want the developer baseline" +- "I need TypeScript and Python" +- "I want Next.js and Django" +- "I want the security pack" + +The user should not have to know raw internal repo paths. + +### 3. Preview Before Mutation + +Every install path should support dry-run planning: + +```bash +ecc install --target cursor --profile developer --with lang:typescript --with framework:nextjs --dry-run +``` + +The plan should clearly show: + +- selected components +- skipped components +- target root +- managed paths +- expected install-state location + +### 4. 
Local Configuration Should Be First-Class + +Teams should be able to commit a project-level install config and use: + +```bash +ecc install --config ecc-install.json +``` + +That allows deterministic installs across contributors and CI. + +## Component Model + +The current manifest already uses install modules and profiles. The user-facing +design should keep that internal structure, but present it as four main +component families. + +Near-term implementation note: some user-facing component IDs still resolve to +shared internal modules, especially in the language/framework layer. The +catalog improves UX immediately while preserving a clean path toward finer +module granularity in later phases. + +### 1. Baseline + +These are the default ECC building blocks: + +- core rules +- baseline agents +- core commands +- runtime hooks +- platform configs +- workflow quality primitives + +Examples of current internal modules: + +- `rules-core` +- `agents-core` +- `commands-core` +- `hooks-runtime` +- `platform-configs` +- `workflow-quality` + +### 2. Language Packs + +Language packs group rules, guidance, and workflows for a language ecosystem. + +Examples: + +- `lang:typescript` +- `lang:python` +- `lang:go` +- `lang:java` +- `lang:rust` + +Each language pack should resolve to one or more internal modules plus +target-specific assets. + +### 3. Framework Packs + +Framework packs sit above language packs and pull in framework-specific rules, +skills, and optional setup. + +Examples: + +- `framework:react` +- `framework:nextjs` +- `framework:django` +- `framework:springboot` +- `framework:laravel` + +Framework packs should depend on the correct language pack or baseline +primitives where appropriate. + +### 4. Capability Packs + +Capability packs are cross-cutting ECC feature bundles. 
+ +Examples: + +- `capability:security` +- `capability:research` +- `capability:orchestration` +- `capability:media` +- `capability:content` + +These should map onto the current module families already being introduced in +the manifests. + +## Profiles + +Profiles remain the fastest on-ramp. + +Recommended user-facing profiles: + +- `core` + minimal baseline, safe default for most users trying ECC +- `developer` + best default for active software engineering work +- `security` + baseline plus security-heavy guidance +- `research` + baseline plus research/content/investigation tools +- `full` + everything classified and currently supported + +Profiles should be composable with additional `--with` and `--without` flags. + +Example: + +```bash +ecc install --target claude --profile developer --with lang:typescript --with framework:nextjs --without capability:orchestration +``` + +## Proposed CLI Design + +### Primary Commands + +```bash +ecc install +ecc plan +ecc list-installed +ecc doctor +ecc repair +ecc uninstall +ecc catalog +``` + +### Install CLI + +Recommended shape: + +```bash +ecc install [--target ] [--profile ] [--with ]... [--without ]... 
[--config ] [--dry-run] [--json] +``` + +Examples: + +```bash +ecc install --target claude --profile core +ecc install --target cursor --profile developer --with lang:typescript --with framework:nextjs +ecc install --target antigravity --with capability:security --with lang:python +ecc install --config ecc-install.json +``` + +### Plan CLI + +Recommended shape: + +```bash +ecc plan [same selection flags as install] +``` + +Purpose: + +- produce a preview without mutation +- act as the canonical debugging surface for selective install + +### Catalog CLI + +Recommended shape: + +```bash +ecc catalog profiles +ecc catalog components +ecc catalog components --family language +ecc catalog show framework:nextjs +``` + +Purpose: + +- let users discover valid component names without reading docs +- keep config authoring approachable + +### Compatibility CLI + +These legacy flows should still work during migration: + +```bash +ecc-install typescript +ecc-install --target cursor typescript +ecc typescript +``` + +Internally these should normalize into the new request model and write +install-state the same way as modern installs. 
+ +## Proposed Config File + +### Filename + +Recommended default: + +- `ecc-install.json` + +Optional future support: + +- `.ecc/install.json` + +### Config Shape + +```json +{ + "$schema": "./schemas/ecc-install-config.schema.json", + "version": 1, + "target": "cursor", + "profile": "developer", + "include": [ + "lang:typescript", + "lang:python", + "framework:nextjs", + "capability:security" + ], + "exclude": [ + "capability:media" + ], + "options": { + "hooksProfile": "standard", + "mcpCatalog": "baseline", + "includeExamples": false + } +} +``` + +### Field Semantics + +- `target` + selected harness target such as `claude`, `cursor`, or `antigravity` +- `profile` + baseline profile to start from +- `include` + additional components to add +- `exclude` + components to subtract from the profile result +- `options` + target/runtime tuning flags that do not change component identity + +### Precedence Rules + +1. CLI arguments override config file values. +2. config file overrides profile defaults. +3. profile defaults override internal module defaults. + +This keeps the behavior predictable and easy to explain. + +## Modular Installation Flow + +The user-facing flow should be: + +1. load config file if provided or auto-detected +2. merge CLI intent on top of config intent +3. normalize the request into a canonical selection +4. expand profile into baseline components +5. add `include` components +6. subtract `exclude` components +7. resolve dependencies and target compatibility +8. render a plan +9. apply operations if not in dry-run mode +10. write install-state + +The important UX property is that the exact same flow powers: + +- `install` +- `plan` +- `repair` +- `uninstall` + +The commands differ in action, not in how ECC understands the selected install. + +## Target Behavior + +Selective install should preserve the same conceptual component graph across all +targets, while letting target adapters decide how content lands. 
+ +### Claude + +Best fit for: + +- home-scoped ECC baseline +- commands, agents, rules, hooks, platform config, orchestration + +### Cursor + +Best fit for: + +- project-scoped installs +- rules plus project-local automation and config + +### Antigravity + +Best fit for: + +- project-scoped agent/rule/workflow installs + +### Codex / OpenCode + +Should remain additive targets rather than special forks of the installer. + +The selective-install design should make these just new adapters plus new +target-specific mapping rules, not new installer architectures. + +## Technical Feasibility + +This design is feasible because the repo already has: + +- install module and profile manifests +- target adapters with install-state paths +- plan inspection +- install-state recording +- lifecycle commands +- a unified `ecc` CLI surface + +The missing work is not conceptual invention. The missing work is productizing +the current substrate into a cleaner user-facing component model. + +### Feasible In Phase 1 + +- profile + include/exclude selection +- `ecc-install.json` config file parsing +- catalog/discovery command +- alias mapping from user-facing component IDs to internal module sets +- dry-run and JSON planning + +### Feasible In Phase 2 + +- richer target adapter semantics +- merge-aware operations for config-like assets +- stronger repair/uninstall behavior for non-copy operations + +### Later + +- reduced publish surface +- generated slim bundles +- remote component fetch + +## Mapping To Current ECC Manifests + +The current manifests do not yet expose a true user-facing `lang:*` / +`framework:*` / `capability:*` taxonomy. That should be introduced as a +presentation layer on top of the existing modules, not as a second installer +engine. 
+ +Recommended approach: + +- keep `install-modules.json` as the internal resolution catalog +- add a user-facing component catalog that maps friendly component IDs to one or + more internal modules +- let profiles reference either internal modules or user-facing component IDs + during the migration window + +That avoids breaking the current selective-install substrate while improving UX. + +## Suggested Rollout + +### Phase 1: Design And Discovery + +- finalize the user-facing component taxonomy +- add the config schema +- add CLI design and precedence rules + +### Phase 2: User-Facing Resolution Layer + +- implement component aliases +- implement config-file parsing +- implement `include` / `exclude` +- implement `catalog` + +### Phase 3: Stronger Target Semantics + +- move more logic into target-owned planning +- support merge/generate operations cleanly +- improve repair/uninstall fidelity + +### Phase 4: Packaging Optimization + +- narrow published surface +- evaluate generated bundles + +## Recommendation + +The next implementation move should not be "rewrite the installer." + +It should be: + +1. keep the current manifest/runtime substrate +2. add a user-facing component catalog and config file +3. add `include` / `exclude` selection and catalog discovery +4. let the existing planner and lifecycle stack consume that model + +That is the shortest path from the current ECC codebase to a real selective +install experience that feels like ECC 2.0 instead of a large legacy installer. diff --git a/install.sh b/install.sh index f1e303d7..d0abc14f 100755 --- a/install.sh +++ b/install.sh @@ -1,246 +1,17 @@ #!/usr/bin/env bash -# install.sh — Install claude rules while preserving directory structure. +# install.sh — Legacy shell entrypoint for the ECC installer. # -# Usage: -# ./install.sh [--target ] [ ...] 
-# -# Examples: -# ./install.sh typescript -# ./install.sh typescript python golang -# ./install.sh --target cursor typescript -# ./install.sh --target cursor typescript python golang -# -# Targets: -# claude (default) — Install rules to ~/.claude/rules/ -# cursor — Install rules, agents, skills, commands, and MCP to ./.cursor/ -# antigravity — Install configs to .agent/ -# -# This script copies rules into the target directory keeping the common/ and -# language-specific subdirectories intact so that: -# 1. Files with the same name in common/ and / don't overwrite -# each other. -# 2. Relative references (e.g. ../common/coding-style.md) remain valid. +# This wrapper resolves the real repo/package root when invoked through a +# symlinked npm bin, then delegates to the Node-based installer runtime. set -euo pipefail -# Resolve symlinks — needed when invoked as `ecc-install` via npm/bun bin symlink SCRIPT_PATH="$0" while [ -L "$SCRIPT_PATH" ]; do link_dir="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" SCRIPT_PATH="$(readlink "$SCRIPT_PATH")" - # Resolve relative symlinks [[ "$SCRIPT_PATH" != /* ]] && SCRIPT_PATH="$link_dir/$SCRIPT_PATH" done SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)" -RULES_DIR="$SCRIPT_DIR/rules" -# --- Parse --target flag --- -TARGET="claude" -if [[ "${1:-}" == "--target" ]]; then - if [[ -z "${2:-}" ]]; then - echo "Error: --target requires a value (claude or cursor)" >&2 - exit 1 - fi - TARGET="$2" - shift 2 -fi - -if [[ "$TARGET" != "claude" && "$TARGET" != "cursor" && "$TARGET" != "antigravity" ]]; then - echo "Error: unknown target '$TARGET'. Must be 'claude', 'cursor', or 'antigravity'." 
>&2 - exit 1 -fi - -# --- Usage --- -if [[ $# -eq 0 ]]; then - echo "Usage: $0 [--target ] [ ...]" - echo "" - echo "Targets:" - echo " claude (default) — Install rules to ~/.claude/rules/" - echo " cursor — Install rules, agents, skills, commands, and MCP to ./.cursor/" - echo " antigravity — Install configs to .agent/" - echo "" - echo "Available languages:" - for dir in "$RULES_DIR"/*/; do - name="$(basename "$dir")" - [[ "$name" == "common" ]] && continue - echo " - $name" - done - exit 1 -fi - -# --- Claude target (existing behavior) --- -if [[ "$TARGET" == "claude" ]]; then - DEST_DIR="${CLAUDE_RULES_DIR:-$HOME/.claude/rules}" - - # Warn if destination already exists (user may have local customizations) - if [[ -d "$DEST_DIR" ]] && [[ "$(ls -A "$DEST_DIR" 2>/dev/null)" ]]; then - echo "Note: $DEST_DIR/ already exists. Existing files will be overwritten." - echo " Back up any local customizations before proceeding." - fi - - # Always install common rules - echo "Installing common rules -> $DEST_DIR/common/" - mkdir -p "$DEST_DIR/common" - cp -r "$RULES_DIR/common/." "$DEST_DIR/common/" - - # Install each requested language - for lang in "$@"; do - # Validate language name to prevent path traversal - if [[ ! "$lang" =~ ^[a-zA-Z0-9_-]+$ ]]; then - echo "Error: invalid language name '$lang'. Only alphanumeric, dash, and underscore allowed." >&2 - continue - fi - lang_dir="$RULES_DIR/$lang" - if [[ ! -d "$lang_dir" ]]; then - echo "Warning: rules/$lang/ does not exist, skipping." >&2 - continue - fi - echo "Installing $lang rules -> $DEST_DIR/$lang/" - mkdir -p "$DEST_DIR/$lang" - cp -r "$lang_dir/." "$DEST_DIR/$lang/" - done - - echo "Done. 
Rules installed to $DEST_DIR/" -fi - -# --- Cursor target --- -if [[ "$TARGET" == "cursor" ]]; then - DEST_DIR=".cursor" - CURSOR_SRC="$SCRIPT_DIR/.cursor" - - echo "Installing Cursor configs to $DEST_DIR/" - - # --- Rules --- - echo "Installing common rules -> $DEST_DIR/rules/" - mkdir -p "$DEST_DIR/rules" - # Copy common rules (flattened names like common-coding-style.md) - if [[ -d "$CURSOR_SRC/rules" ]]; then - for f in "$CURSOR_SRC/rules"/common-*.md; do - [[ -f "$f" ]] && cp "$f" "$DEST_DIR/rules/" - done - fi - - # Install language-specific rules - for lang in "$@"; do - # Validate language name to prevent path traversal - if [[ ! "$lang" =~ ^[a-zA-Z0-9_-]+$ ]]; then - echo "Error: invalid language name '$lang'. Only alphanumeric, dash, and underscore allowed." >&2 - continue - fi - if [[ -d "$CURSOR_SRC/rules" ]]; then - found=false - for f in "$CURSOR_SRC/rules"/${lang}-*.md; do - if [[ -f "$f" ]]; then - cp "$f" "$DEST_DIR/rules/" - found=true - fi - done - if $found; then - echo "Installing $lang rules -> $DEST_DIR/rules/" - else - echo "Warning: no Cursor rules for '$lang' found, skipping." >&2 - fi - fi - done - - # --- Agents --- - if [[ -d "$CURSOR_SRC/agents" ]]; then - echo "Installing agents -> $DEST_DIR/agents/" - mkdir -p "$DEST_DIR/agents" - cp -r "$CURSOR_SRC/agents/." "$DEST_DIR/agents/" - fi - - # --- Skills --- - if [[ -d "$CURSOR_SRC/skills" ]]; then - echo "Installing skills -> $DEST_DIR/skills/" - mkdir -p "$DEST_DIR/skills" - cp -r "$CURSOR_SRC/skills/." "$DEST_DIR/skills/" - fi - - # --- Commands --- - if [[ -d "$CURSOR_SRC/commands" ]]; then - echo "Installing commands -> $DEST_DIR/commands/" - mkdir -p "$DEST_DIR/commands" - cp -r "$CURSOR_SRC/commands/." 
"$DEST_DIR/commands/" - fi - - # --- Hooks --- - if [[ -f "$CURSOR_SRC/hooks.json" ]]; then - echo "Installing hooks config -> $DEST_DIR/hooks.json" - cp "$CURSOR_SRC/hooks.json" "$DEST_DIR/hooks.json" - fi - if [[ -d "$CURSOR_SRC/hooks" ]]; then - echo "Installing hook scripts -> $DEST_DIR/hooks/" - mkdir -p "$DEST_DIR/hooks" - cp -r "$CURSOR_SRC/hooks/." "$DEST_DIR/hooks/" - fi - - # --- MCP Config --- - if [[ -f "$CURSOR_SRC/mcp.json" ]]; then - echo "Installing MCP config -> $DEST_DIR/mcp.json" - cp "$CURSOR_SRC/mcp.json" "$DEST_DIR/mcp.json" - fi - - echo "Done. Cursor configs installed to $DEST_DIR/" -fi - -# --- Antigravity target --- -if [[ "$TARGET" == "antigravity" ]]; then - DEST_DIR=".agent" - - if [[ -d "$DEST_DIR/rules" ]] && [[ "$(ls -A "$DEST_DIR/rules" 2>/dev/null)" ]]; then - echo "Note: $DEST_DIR/rules/ already exists. Existing files will be overwritten." - echo " Back up any local customizations before proceeding." - fi - - # --- Rules --- - echo "Installing common rules -> $DEST_DIR/rules/" - mkdir -p "$DEST_DIR/rules" - if [[ -d "$RULES_DIR/common" ]]; then - for f in "$RULES_DIR/common"/*.md; do - if [[ -f "$f" ]]; then - cp "$f" "$DEST_DIR/rules/common-$(basename "$f")" - fi - done - fi - - for lang in "$@"; do - # Validate language name to prevent path traversal - if [[ ! "$lang" =~ ^[a-zA-Z0-9_-]+$ ]]; then - echo "Error: invalid language name '$lang'. Only alphanumeric, dash, and underscore allowed." >&2 - continue - fi - lang_dir="$RULES_DIR/$lang" - if [[ ! -d "$lang_dir" ]]; then - echo "Warning: rules/$lang/ does not exist, skipping." >&2 - continue - fi - - echo "Installing $lang rules -> $DEST_DIR/rules/" - for f in "$lang_dir"/*.md; do - if [[ -f "$f" ]]; then - cp "$f" "$DEST_DIR/rules/${lang}-$(basename "$f")" - fi - done - done - - # --- Workflows (Commands) --- - if [[ -d "$SCRIPT_DIR/commands" ]]; then - echo "Installing commands -> $DEST_DIR/workflows/" - mkdir -p "$DEST_DIR/workflows" - cp -r "$SCRIPT_DIR/commands/." 
"$DEST_DIR/workflows/" - fi - - # --- Skills and Agents --- - mkdir -p "$DEST_DIR/skills" - if [[ -d "$SCRIPT_DIR/agents" ]]; then - echo "Installing agents -> $DEST_DIR/skills/" - cp -r "$SCRIPT_DIR/agents/." "$DEST_DIR/skills/" - fi - if [[ -d "$SCRIPT_DIR/skills" ]]; then - echo "Installing skills -> $DEST_DIR/skills/" - cp -r "$SCRIPT_DIR/skills/." "$DEST_DIR/skills/" - fi - - echo "Done. Antigravity configs installed to $DEST_DIR/" -fi +exec node "$SCRIPT_DIR/scripts/install-apply.js" "$@" diff --git a/manifests/install-components.json b/manifests/install-components.json new file mode 100644 index 00000000..6da4992a --- /dev/null +++ b/manifests/install-components.json @@ -0,0 +1,173 @@ +{ + "version": 1, + "components": [ + { + "id": "baseline:rules", + "family": "baseline", + "description": "Core shared rules and supported language rule packs.", + "modules": [ + "rules-core" + ] + }, + { + "id": "baseline:agents", + "family": "baseline", + "description": "Baseline agent definitions and shared AGENTS guidance.", + "modules": [ + "agents-core" + ] + }, + { + "id": "baseline:commands", + "family": "baseline", + "description": "Core command library and workflow command docs.", + "modules": [ + "commands-core" + ] + }, + { + "id": "baseline:hooks", + "family": "baseline", + "description": "Hook runtime configs and hook helper scripts.", + "modules": [ + "hooks-runtime" + ] + }, + { + "id": "baseline:platform", + "family": "baseline", + "description": "Platform configs, package-manager setup, and MCP catalog defaults.", + "modules": [ + "platform-configs" + ] + }, + { + "id": "baseline:workflow", + "family": "baseline", + "description": "Evaluation, TDD, verification, and compaction workflow support.", + "modules": [ + "workflow-quality" + ] + }, + { + "id": "lang:typescript", + "family": "language", + "description": "TypeScript and frontend/backend application-engineering guidance. 
Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "lang:python", + "family": "language", + "description": "Python and Django-oriented engineering guidance. Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "lang:go", + "family": "language", + "description": "Go-focused coding and testing guidance. Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "lang:java", + "family": "language", + "description": "Java and Spring application guidance. Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "framework:react", + "family": "framework", + "description": "React-focused engineering guidance. Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "framework:nextjs", + "family": "framework", + "description": "Next.js-focused engineering guidance. Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "framework:django", + "family": "framework", + "description": "Django-focused engineering guidance. Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "framework:springboot", + "family": "framework", + "description": "Spring Boot-focused engineering guidance. 
Currently resolves through the shared framework-language module.", + "modules": [ + "framework-language" + ] + }, + { + "id": "capability:database", + "family": "capability", + "description": "Database and persistence-oriented skills.", + "modules": [ + "database" + ] + }, + { + "id": "capability:security", + "family": "capability", + "description": "Security review and security-focused framework guidance.", + "modules": [ + "security" + ] + }, + { + "id": "capability:research", + "family": "capability", + "description": "Research and API-integration skills for deep investigations and external tooling.", + "modules": [ + "research-apis" + ] + }, + { + "id": "capability:content", + "family": "capability", + "description": "Business, writing, market, and investor communication skills.", + "modules": [ + "business-content" + ] + }, + { + "id": "capability:social", + "family": "capability", + "description": "Social publishing and distribution skills.", + "modules": [ + "social-distribution" + ] + }, + { + "id": "capability:media", + "family": "capability", + "description": "Media generation and AI-assisted editing skills.", + "modules": [ + "media-generation" + ] + }, + { + "id": "capability:orchestration", + "family": "capability", + "description": "Worktree and tmux orchestration runtime and workflow docs.", + "modules": [ + "orchestration" + ] + } + ] +} diff --git a/manifests/install-modules.json b/manifests/install-modules.json new file mode 100644 index 00000000..e4fe0bbf --- /dev/null +++ b/manifests/install-modules.json @@ -0,0 +1,335 @@ +{ + "version": 1, + "modules": [ + { + "id": "rules-core", + "kind": "rules", + "description": "Shared and language rules for supported harness targets.", + "paths": [ + "rules" + ], + "targets": [ + "claude", + "cursor", + "antigravity" + ], + "dependencies": [], + "defaultInstall": true, + "cost": "light", + "stability": "stable" + }, + { + "id": "agents-core", + "kind": "agents", + "description": "Agent definitions and 
project-level agent guidance.", + "paths": [ + ".agents", + "agents", + "AGENTS.md" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": [], + "defaultInstall": true, + "cost": "light", + "stability": "stable" + }, + { + "id": "commands-core", + "kind": "commands", + "description": "Core slash-command library and command docs.", + "paths": [ + "commands" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "opencode" + ], + "dependencies": [], + "defaultInstall": true, + "cost": "medium", + "stability": "stable" + }, + { + "id": "hooks-runtime", + "kind": "hooks", + "description": "Runtime hook configs and hook script helpers.", + "paths": [ + "hooks", + "scripts/hooks" + ], + "targets": [ + "claude", + "cursor", + "opencode" + ], + "dependencies": [], + "defaultInstall": true, + "cost": "medium", + "stability": "stable" + }, + { + "id": "platform-configs", + "kind": "platform", + "description": "Baseline platform configs, package-manager setup, and MCP catalog.", + "paths": [ + ".claude-plugin", + ".codex", + ".cursor", + ".opencode", + "mcp-configs", + "scripts/setup-package-manager.js" + ], + "targets": [ + "claude", + "cursor", + "codex", + "opencode" + ], + "dependencies": [], + "defaultInstall": true, + "cost": "light", + "stability": "stable" + }, + { + "id": "framework-language", + "kind": "skills", + "description": "Core framework, language, and application-engineering skills.", + "paths": [ + "skills/backend-patterns", + "skills/coding-standards", + "skills/frontend-patterns", + "skills/frontend-slides", + "skills/golang-patterns", + "skills/golang-testing", + "skills/python-patterns", + "skills/python-testing", + "skills/django-patterns", + "skills/django-tdd", + "skills/django-verification", + "skills/java-coding-standards", + "skills/springboot-patterns", + "skills/springboot-tdd", + "skills/springboot-verification" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + 
"opencode" + ], + "dependencies": [ + "rules-core", + "agents-core", + "commands-core", + "platform-configs" + ], + "defaultInstall": false, + "cost": "medium", + "stability": "stable" + }, + { + "id": "database", + "kind": "skills", + "description": "Database and persistence-focused skills.", + "paths": [ + "skills/clickhouse-io", + "skills/jpa-patterns", + "skills/postgres-patterns" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": [ + "platform-configs" + ], + "defaultInstall": false, + "cost": "medium", + "stability": "stable" + }, + { + "id": "workflow-quality", + "kind": "skills", + "description": "Evaluation, TDD, verification, learning, and compaction skills.", + "paths": [ + "skills/continuous-learning", + "skills/continuous-learning-v2", + "skills/eval-harness", + "skills/iterative-retrieval", + "skills/strategic-compact", + "skills/tdd-workflow", + "skills/verification-loop" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": [ + "platform-configs" + ], + "defaultInstall": true, + "cost": "medium", + "stability": "stable" + }, + { + "id": "security", + "kind": "skills", + "description": "Security review and security-focused framework guidance.", + "paths": [ + "skills/security-review", + "skills/security-scan", + "skills/django-security", + "skills/springboot-security", + "the-security-guide.md" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": [ + "workflow-quality" + ], + "defaultInstall": false, + "cost": "medium", + "stability": "stable" + }, + { + "id": "research-apis", + "kind": "skills", + "description": "Research and API integration skills for deep investigations and model integrations.", + "paths": [ + "skills/claude-api", + "skills/deep-research", + "skills/exa-search" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": 
[ + "platform-configs" + ], + "defaultInstall": false, + "cost": "medium", + "stability": "stable" + }, + { + "id": "business-content", + "kind": "skills", + "description": "Business, writing, market, and investor communication skills.", + "paths": [ + "skills/article-writing", + "skills/content-engine", + "skills/investor-materials", + "skills/investor-outreach", + "skills/market-research" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": [ + "platform-configs" + ], + "defaultInstall": false, + "cost": "heavy", + "stability": "stable" + }, + { + "id": "social-distribution", + "kind": "skills", + "description": "Social publishing and distribution skills.", + "paths": [ + "skills/crosspost", + "skills/x-api" + ], + "targets": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ], + "dependencies": [ + "business-content" + ], + "defaultInstall": false, + "cost": "medium", + "stability": "stable" + }, + { + "id": "media-generation", + "kind": "skills", + "description": "Media generation and AI-assisted editing skills.", + "paths": [ + "skills/fal-ai-media", + "skills/video-editing", + "skills/videodb" + ], + "targets": [ + "claude", + "cursor", + "codex", + "opencode" + ], + "dependencies": [ + "platform-configs" + ], + "defaultInstall": false, + "cost": "heavy", + "stability": "beta" + }, + { + "id": "orchestration", + "kind": "orchestration", + "description": "Worktree/tmux orchestration runtime and workflow docs.", + "paths": [ + "commands/multi-workflow.md", + "commands/orchestrate.md", + "commands/sessions.md", + "scripts/lib/orchestration-session.js", + "scripts/lib/tmux-worktree-orchestrator.js", + "scripts/orchestrate-codex-worker.sh", + "scripts/orchestrate-worktrees.js", + "scripts/orchestration-status.js", + "skills/dmux-workflows" + ], + "targets": [ + "claude", + "codex", + "opencode" + ], + "dependencies": [ + "commands-core", + "platform-configs" + ], + "defaultInstall": false, + 
"cost": "medium", + "stability": "beta" + } + ] +} diff --git a/manifests/install-profiles.json b/manifests/install-profiles.json new file mode 100644 index 00000000..34d80ee0 --- /dev/null +++ b/manifests/install-profiles.json @@ -0,0 +1,75 @@ +{ + "version": 1, + "profiles": { + "core": { + "description": "Minimal harness baseline with commands, hooks, platform configs, and quality workflow support.", + "modules": [ + "rules-core", + "agents-core", + "commands-core", + "hooks-runtime", + "platform-configs", + "workflow-quality" + ] + }, + "developer": { + "description": "Default engineering profile for most ECC users working across app codebases.", + "modules": [ + "rules-core", + "agents-core", + "commands-core", + "hooks-runtime", + "platform-configs", + "workflow-quality", + "framework-language", + "database", + "orchestration" + ] + }, + "security": { + "description": "Security-heavy setup with baseline runtime support and security-specific guidance.", + "modules": [ + "rules-core", + "agents-core", + "commands-core", + "hooks-runtime", + "platform-configs", + "workflow-quality", + "security" + ] + }, + "research": { + "description": "Research and content-oriented setup for investigation, synthesis, and publishing workflows.", + "modules": [ + "rules-core", + "agents-core", + "commands-core", + "hooks-runtime", + "platform-configs", + "workflow-quality", + "research-apis", + "business-content", + "social-distribution" + ] + }, + "full": { + "description": "Complete ECC install with all currently classified modules.", + "modules": [ + "rules-core", + "agents-core", + "commands-core", + "hooks-runtime", + "platform-configs", + "framework-language", + "database", + "workflow-quality", + "security", + "research-apis", + "business-content", + "social-distribution", + "media-generation", + "orchestration" + ] + } + } +} diff --git a/package.json b/package.json index ff5f7d35..50d77ede 100644 --- a/package.json +++ b/package.json @@ -59,19 +59,28 @@ 
"examples/user-CLAUDE.md", "examples/statusline.json", "hooks/", + "manifests/", "mcp-configs/", "plugins/", "rules/", "schemas/", "scripts/ci/", + "scripts/ecc.js", "scripts/hooks/", "scripts/lib/", "scripts/claw.js", + "scripts/doctor.js", + "scripts/install-apply.js", + "scripts/install-plan.js", + "scripts/list-installed.js", "scripts/orchestration-status.js", "scripts/orchestrate-codex-worker.sh", "scripts/orchestrate-worktrees.js", "scripts/setup-package-manager.js", "scripts/skill-create-output.js", + "scripts/repair.js", + "scripts/session-inspect.js", + "scripts/uninstall.js", "skills/", "AGENTS.md", ".claude-plugin/plugin.json", @@ -80,16 +89,17 @@ "llms.txt" ], "bin": { + "ecc": "scripts/ecc.js", "ecc-install": "install.sh" }, "scripts": { - "postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'", + "postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc typescript\\n Compat: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'", "lint": "eslint . 
&& markdownlint '**/*.md' --ignore node_modules", "claw": "node scripts/claw.js", "orchestrate:status": "node scripts/orchestration-status.js", "orchestrate:worker": "bash scripts/orchestrate-codex-worker.sh", "orchestrate:tmux": "node scripts/orchestrate-worktrees.js", - "test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js", + "test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-install-manifests.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js", "coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js" }, "devDependencies": { diff --git a/schemas/ecc-install-config.schema.json b/schemas/ecc-install-config.schema.json new file mode 100644 index 00000000..89fd7c72 --- /dev/null +++ b/schemas/ecc-install-config.schema.json @@ -0,0 +1,58 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ECC Install Config", + "type": "object", + "additionalProperties": false, + "required": [ + "version" + ], + "properties": { + "$schema": { + "type": "string", + "minLength": 1 + }, + "version": { + "type": "integer", + "const": 1 + }, + "target": { + "type": "string", + "enum": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ] + }, + "profile": { + "type": "string", + "pattern": "^[a-z0-9-]+$" + }, + "modules": { + "type": "array", + "items": { + "type": "string", + "pattern": "^[a-z0-9-]+$" + } + }, + "include": { + "type": "array", + "items": { + "type": "string", + "pattern": 
"^(baseline|lang|framework|capability):[a-z0-9-]+$" + } + }, + "exclude": { + "type": "array", + "items": { + "type": "string", + "pattern": "^(baseline|lang|framework|capability):[a-z0-9-]+$" + } + }, + "options": { + "type": "object", + "additionalProperties": true + } + } +} diff --git a/schemas/install-components.schema.json b/schemas/install-components.schema.json new file mode 100644 index 00000000..c90fefc8 --- /dev/null +++ b/schemas/install-components.schema.json @@ -0,0 +1,56 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ECC Install Components", + "type": "object", + "additionalProperties": false, + "required": [ + "version", + "components" + ], + "properties": { + "version": { + "type": "integer", + "minimum": 1 + }, + "components": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "family", + "description", + "modules" + ], + "properties": { + "id": { + "type": "string", + "pattern": "^(baseline|lang|framework|capability):[a-z0-9-]+$" + }, + "family": { + "type": "string", + "enum": [ + "baseline", + "language", + "framework", + "capability" + ] + }, + "description": { + "type": "string", + "minLength": 1 + }, + "modules": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "pattern": "^[a-z0-9-]+$" + } + } + } + } + } + } +} diff --git a/schemas/install-modules.schema.json b/schemas/install-modules.schema.json new file mode 100644 index 00000000..5c92fdbe --- /dev/null +++ b/schemas/install-modules.schema.json @@ -0,0 +1,105 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ECC Install Modules", + "type": "object", + "properties": { + "version": { + "type": "integer", + "minimum": 1 + }, + "modules": { + "type": "array", + "minItems": 1, + "items": { + "type": "object", + "properties": { + "id": { + "type": "string", + "pattern": "^[a-z0-9-]+$" + }, + "kind": { + "type": "string", + "enum": [ + "rules", + "agents", + 
"commands", + "hooks", + "platform", + "orchestration", + "skills" + ] + }, + "description": { + "type": "string", + "minLength": 1 + }, + "paths": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "minLength": 1 + } + }, + "targets": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "enum": [ + "claude", + "cursor", + "antigravity", + "codex", + "opencode" + ] + } + }, + "dependencies": { + "type": "array", + "items": { + "type": "string", + "pattern": "^[a-z0-9-]+$" + } + }, + "defaultInstall": { + "type": "boolean" + }, + "cost": { + "type": "string", + "enum": [ + "light", + "medium", + "heavy" + ] + }, + "stability": { + "type": "string", + "enum": [ + "experimental", + "beta", + "stable" + ] + } + }, + "required": [ + "id", + "kind", + "description", + "paths", + "targets", + "dependencies", + "defaultInstall", + "cost", + "stability" + ], + "additionalProperties": false + } + } + }, + "required": [ + "version", + "modules" + ], + "additionalProperties": false +} diff --git a/schemas/install-profiles.schema.json b/schemas/install-profiles.schema.json new file mode 100644 index 00000000..426c9820 --- /dev/null +++ b/schemas/install-profiles.schema.json @@ -0,0 +1,45 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ECC Install Profiles", + "type": "object", + "properties": { + "version": { + "type": "integer", + "minimum": 1 + }, + "profiles": { + "type": "object", + "minProperties": 1, + "propertyNames": { + "pattern": "^[a-z0-9-]+$" + }, + "additionalProperties": { + "type": "object", + "properties": { + "description": { + "type": "string", + "minLength": 1 + }, + "modules": { + "type": "array", + "minItems": 1, + "items": { + "type": "string", + "pattern": "^[a-z0-9-]+$" + } + } + }, + "required": [ + "description", + "modules" + ], + "additionalProperties": false + } + } + }, + "required": [ + "version", + "profiles" + ], + "additionalProperties": false +} diff --git 
a/schemas/install-state.schema.json b/schemas/install-state.schema.json new file mode 100644 index 00000000..b293c512 --- /dev/null +++ b/schemas/install-state.schema.json @@ -0,0 +1,210 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ECC install state", + "type": "object", + "additionalProperties": false, + "required": [ + "schemaVersion", + "installedAt", + "target", + "request", + "resolution", + "source", + "operations" + ], + "properties": { + "schemaVersion": { + "type": "string", + "const": "ecc.install.v1" + }, + "installedAt": { + "type": "string", + "minLength": 1 + }, + "lastValidatedAt": { + "type": "string", + "minLength": 1 + }, + "target": { + "type": "object", + "additionalProperties": false, + "required": [ + "id", + "root", + "installStatePath" + ], + "properties": { + "id": { + "type": "string", + "minLength": 1 + }, + "target": { + "type": "string", + "minLength": 1 + }, + "kind": { + "type": "string", + "enum": [ + "home", + "project" + ] + }, + "root": { + "type": "string", + "minLength": 1 + }, + "installStatePath": { + "type": "string", + "minLength": 1 + } + } + }, + "request": { + "type": "object", + "additionalProperties": false, + "required": [ + "profile", + "modules", + "includeComponents", + "excludeComponents", + "legacyLanguages", + "legacyMode" + ], + "properties": { + "profile": { + "type": [ + "string", + "null" + ] + }, + "modules": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "includeComponents": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "excludeComponents": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "legacyLanguages": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "legacyMode": { + "type": "boolean" + } + } + }, + "resolution": { + "type": "object", + "additionalProperties": false, + "required": [ + "selectedModules", + "skippedModules" + ], + 
"properties": { + "selectedModules": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + }, + "skippedModules": { + "type": "array", + "items": { + "type": "string", + "minLength": 1 + } + } + } + }, + "source": { + "type": "object", + "additionalProperties": false, + "required": [ + "repoVersion", + "repoCommit", + "manifestVersion" + ], + "properties": { + "repoVersion": { + "type": [ + "string", + "null" + ] + }, + "repoCommit": { + "type": [ + "string", + "null" + ] + }, + "manifestVersion": { + "type": "integer", + "minimum": 1 + } + } + }, + "operations": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "required": [ + "kind", + "moduleId", + "sourceRelativePath", + "destinationPath", + "strategy", + "ownership", + "scaffoldOnly" + ], + "properties": { + "kind": { + "type": "string", + "minLength": 1 + }, + "moduleId": { + "type": "string", + "minLength": 1 + }, + "sourceRelativePath": { + "type": "string", + "minLength": 1 + }, + "destinationPath": { + "type": "string", + "minLength": 1 + }, + "strategy": { + "type": "string", + "minLength": 1 + }, + "ownership": { + "type": "string", + "minLength": 1 + }, + "scaffoldOnly": { + "type": "boolean" + } + } + } + } + } +} diff --git a/scripts/ci/validate-install-manifests.js b/scripts/ci/validate-install-manifests.js new file mode 100644 index 00000000..f25aabc7 --- /dev/null +++ b/scripts/ci/validate-install-manifests.js @@ -0,0 +1,211 @@ +#!/usr/bin/env node +/** + * Validate selective-install manifests and profile/module relationships. 
+ */ + +const fs = require('fs'); +const path = require('path'); +const Ajv = require('ajv'); + +const REPO_ROOT = path.join(__dirname, '../..'); +const MODULES_MANIFEST_PATH = path.join(REPO_ROOT, 'manifests/install-modules.json'); +const PROFILES_MANIFEST_PATH = path.join(REPO_ROOT, 'manifests/install-profiles.json'); +const COMPONENTS_MANIFEST_PATH = path.join(REPO_ROOT, 'manifests/install-components.json'); +const MODULES_SCHEMA_PATH = path.join(REPO_ROOT, 'schemas/install-modules.schema.json'); +const PROFILES_SCHEMA_PATH = path.join(REPO_ROOT, 'schemas/install-profiles.schema.json'); +const COMPONENTS_SCHEMA_PATH = path.join(REPO_ROOT, 'schemas/install-components.schema.json'); +const COMPONENT_FAMILY_PREFIXES = { + baseline: 'baseline:', + language: 'lang:', + framework: 'framework:', + capability: 'capability:', +}; + +function readJson(filePath, label) { + try { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); + } catch (error) { + throw new Error(`Invalid JSON in ${label}: ${error.message}`); + } +} + +function normalizeRelativePath(relativePath) { + return String(relativePath).replace(/\\/g, '/').replace(/\/+$/, ''); +} + +function validateSchema(ajv, schemaPath, data, label) { + const schema = readJson(schemaPath, `${label} schema`); + const validate = ajv.compile(schema); + const valid = validate(data); + + if (!valid) { + for (const error of validate.errors) { + console.error( + `ERROR: ${label} schema: ${error.instancePath || '/'} ${error.message}` + ); + } + return true; + } + + return false; +} + +function validateInstallManifests() { + if (!fs.existsSync(MODULES_MANIFEST_PATH) || !fs.existsSync(PROFILES_MANIFEST_PATH)) { + console.log('Install manifests not found, skipping validation'); + process.exit(0); + } + + let hasErrors = false; + let modulesData; + let profilesData; + let componentsData = { version: null, components: [] }; + + try { + modulesData = readJson(MODULES_MANIFEST_PATH, 'install-modules.json'); + profilesData = 
readJson(PROFILES_MANIFEST_PATH, 'install-profiles.json'); + if (fs.existsSync(COMPONENTS_MANIFEST_PATH)) { + componentsData = readJson(COMPONENTS_MANIFEST_PATH, 'install-components.json'); + } + } catch (error) { + console.error(`ERROR: ${error.message}`); + process.exit(1); + } + + const ajv = new Ajv({ allErrors: true }); + hasErrors = validateSchema(ajv, MODULES_SCHEMA_PATH, modulesData, 'install-modules.json') || hasErrors; + hasErrors = validateSchema(ajv, PROFILES_SCHEMA_PATH, profilesData, 'install-profiles.json') || hasErrors; + if (fs.existsSync(COMPONENTS_MANIFEST_PATH)) { + hasErrors = validateSchema(ajv, COMPONENTS_SCHEMA_PATH, componentsData, 'install-components.json') || hasErrors; + } + + if (hasErrors) { + process.exit(1); + } + + const modules = Array.isArray(modulesData.modules) ? modulesData.modules : []; + const moduleIds = new Set(); + const claimedPaths = new Map(); + + for (const module of modules) { + if (moduleIds.has(module.id)) { + console.error(`ERROR: Duplicate install module id: ${module.id}`); + hasErrors = true; + } + moduleIds.add(module.id); + + for (const dependency of module.dependencies) { + if (!moduleIds.has(dependency) && !modules.some(candidate => candidate.id === dependency)) { + console.error(`ERROR: Module ${module.id} depends on unknown module ${dependency}`); + hasErrors = true; + } + if (dependency === module.id) { + console.error(`ERROR: Module ${module.id} cannot depend on itself`); + hasErrors = true; + } + } + + for (const relativePath of module.paths) { + const normalizedPath = normalizeRelativePath(relativePath); + const absolutePath = path.join(REPO_ROOT, normalizedPath); + + if (!fs.existsSync(absolutePath)) { + console.error( + `ERROR: Module ${module.id} references missing path: ${normalizedPath}` + ); + hasErrors = true; + } + + if (claimedPaths.has(normalizedPath)) { + console.error( + `ERROR: Install path ${normalizedPath} is claimed by both ${claimedPaths.get(normalizedPath)} and ${module.id}` + ); + 
hasErrors = true; + } else { + claimedPaths.set(normalizedPath, module.id); + } + } + } + + const profiles = profilesData.profiles || {}; + const components = Array.isArray(componentsData.components) ? componentsData.components : []; + const expectedProfileIds = ['core', 'developer', 'security', 'research', 'full']; + + for (const profileId of expectedProfileIds) { + if (!profiles[profileId]) { + console.error(`ERROR: Missing required install profile: ${profileId}`); + hasErrors = true; + } + } + + for (const [profileId, profile] of Object.entries(profiles)) { + const seenModules = new Set(); + for (const moduleId of profile.modules) { + if (!moduleIds.has(moduleId)) { + console.error( + `ERROR: Profile ${profileId} references unknown module ${moduleId}` + ); + hasErrors = true; + } + + if (seenModules.has(moduleId)) { + console.error( + `ERROR: Profile ${profileId} contains duplicate module ${moduleId}` + ); + hasErrors = true; + } + seenModules.add(moduleId); + } + } + + if (profiles.full) { + const fullModules = new Set(profiles.full.modules); + for (const moduleId of moduleIds) { + if (!fullModules.has(moduleId)) { + console.error(`ERROR: full profile is missing module ${moduleId}`); + hasErrors = true; + } + } + } + + const componentIds = new Set(); + for (const component of components) { + if (componentIds.has(component.id)) { + console.error(`ERROR: Duplicate install component id: ${component.id}`); + hasErrors = true; + } + componentIds.add(component.id); + + const expectedPrefix = COMPONENT_FAMILY_PREFIXES[component.family]; + if (expectedPrefix && !component.id.startsWith(expectedPrefix)) { + console.error( + `ERROR: Component ${component.id} does not match expected ${component.family} prefix ${expectedPrefix}` + ); + hasErrors = true; + } + + const seenModules = new Set(); + for (const moduleId of component.modules) { + if (!moduleIds.has(moduleId)) { + console.error(`ERROR: Component ${component.id} references unknown module ${moduleId}`); + hasErrors = 
true; + } + + if (seenModules.has(moduleId)) { + console.error(`ERROR: Component ${component.id} contains duplicate module ${moduleId}`); + hasErrors = true; + } + seenModules.add(moduleId); + } + } + + if (hasErrors) { + process.exit(1); + } + + console.log( + `Validated ${modules.length} install modules, ${components.length} install components, and ${Object.keys(profiles).length} profiles` + ); +} + +validateInstallManifests(); diff --git a/scripts/doctor.js b/scripts/doctor.js new file mode 100644 index 00000000..0da3ff74 --- /dev/null +++ b/scripts/doctor.js @@ -0,0 +1,110 @@ +#!/usr/bin/env node + +const { buildDoctorReport } = require('./lib/install-lifecycle'); +const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests'); + +function showHelp(exitCode = 0) { + console.log(` +Usage: node scripts/doctor.js [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--json] + +Diagnose drift and missing managed files for ECC install-state in the current context. +`); + process.exit(exitCode); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const parsed = { + targets: [], + json: false, + help: false, + }; + + for (let index = 0; index < args.length; index += 1) { + const arg = args[index]; + + if (arg === '--target') { + parsed.targets.push(args[index + 1] || null); + index += 1; + } else if (arg === '--json') { + parsed.json = true; + } else if (arg === '--help' || arg === '-h') { + parsed.help = true; + } else { + throw new Error(`Unknown argument: ${arg}`); + } + } + + return parsed; +} + +function statusLabel(status) { + if (status === 'ok') { + return 'OK'; + } + + if (status === 'warning') { + return 'WARNING'; + } + + if (status === 'error') { + return 'ERROR'; + } + + return status.toUpperCase(); +} + +function printHuman(report) { + if (report.results.length === 0) { + console.log('No ECC install-state files found for the current home/project context.'); + return; + } + + console.log('Doctor report:\n'); + for (const result of 
report.results) { + console.log(`- ${result.adapter.id}`); + console.log(` Status: ${statusLabel(result.status)}`); + console.log(` Install-state: ${result.installStatePath}`); + + if (result.issues.length === 0) { + console.log(' Issues: none'); + continue; + } + + for (const issue of result.issues) { + console.log(` - [${issue.severity}] ${issue.code}: ${issue.message}`); + } + } + + console.log(`\nSummary: checked=${report.summary.checkedCount}, ok=${report.summary.okCount}, warnings=${report.summary.warningCount}, errors=${report.summary.errorCount}`); +} + +function main() { + try { + const options = parseArgs(process.argv); + if (options.help) { + showHelp(0); + } + + const report = buildDoctorReport({ + repoRoot: require('path').join(__dirname, '..'), + homeDir: process.env.HOME, + projectRoot: process.cwd(), + targets: options.targets, + }); + const hasIssues = report.summary.errorCount > 0 || report.summary.warningCount > 0; + + if (options.json) { + console.log(JSON.stringify(report, null, 2)); + } else { + printHuman(report); + } + + process.exitCode = hasIssues ? 
1 : 0; + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/ecc.js b/scripts/ecc.js new file mode 100644 index 00000000..504273d5 --- /dev/null +++ b/scripts/ecc.js @@ -0,0 +1,194 @@ +#!/usr/bin/env node + +const { spawnSync } = require('child_process'); +const path = require('path'); +const { listAvailableLanguages } = require('./lib/install-executor'); + +const COMMANDS = { + install: { + script: 'install-apply.js', + description: 'Install ECC content into a supported target', + }, + plan: { + script: 'install-plan.js', + description: 'Inspect selective-install manifests and resolved plans', + }, + 'install-plan': { + script: 'install-plan.js', + description: 'Alias for plan', + }, + 'list-installed': { + script: 'list-installed.js', + description: 'Inspect install-state files for the current context', + }, + doctor: { + script: 'doctor.js', + description: 'Diagnose missing or drifted ECC-managed files', + }, + repair: { + script: 'repair.js', + description: 'Restore drifted or missing ECC-managed files', + }, + 'session-inspect': { + script: 'session-inspect.js', + description: 'Emit canonical ECC session snapshots from dmux or Claude history targets', + }, + uninstall: { + script: 'uninstall.js', + description: 'Remove ECC-managed files recorded in install-state', + }, +}; + +const PRIMARY_COMMANDS = [ + 'install', + 'plan', + 'list-installed', + 'doctor', + 'repair', + 'session-inspect', + 'uninstall', +]; + +function showHelp(exitCode = 0) { + console.log(` +ECC selective-install CLI + +Usage: + ecc [args...] + ecc [install args...] + +Commands: +${PRIMARY_COMMANDS.map(command => ` ${command.padEnd(15)} ${COMMANDS[command].description}`).join('\n')} + +Compatibility: + ecc-install Legacy install entrypoint retained for existing flows + ecc [args...] 
Without a command, args are routed to "install" + ecc help Show help for a specific command + +Examples: + ecc typescript + ecc install --profile developer --target claude + ecc plan --profile core --target cursor + ecc list-installed --json + ecc doctor --target cursor + ecc repair --dry-run + ecc session-inspect claude:latest + ecc uninstall --target antigravity --dry-run +`); + + process.exit(exitCode); +} + +function resolveCommand(argv) { + const args = argv.slice(2); + + if (args.length === 0) { + return { mode: 'help' }; + } + + const [firstArg, ...restArgs] = args; + + if (firstArg === '--help' || firstArg === '-h') { + return { mode: 'help' }; + } + + if (firstArg === 'help') { + return { + mode: 'help-command', + command: restArgs[0] || null, + }; + } + + if (COMMANDS[firstArg]) { + return { + mode: 'command', + command: firstArg, + args: restArgs, + }; + } + + const knownLegacyLanguages = listAvailableLanguages(); + const shouldTreatAsImplicitInstall = ( + firstArg.startsWith('-') + || knownLegacyLanguages.includes(firstArg) + ); + + if (!shouldTreatAsImplicitInstall) { + throw new Error(`Unknown command: ${firstArg}`); + } + + return { + mode: 'command', + command: 'install', + args, + }; +} + +function runCommand(commandName, args) { + const command = COMMANDS[commandName]; + if (!command) { + throw new Error(`Unknown command: ${commandName}`); + } + + const result = spawnSync( + process.execPath, + [path.join(__dirname, command.script), ...args], + { + cwd: process.cwd(), + env: process.env, + encoding: 'utf8', + } + ); + + if (result.error) { + throw result.error; + } + + if (result.stdout) { + process.stdout.write(result.stdout); + } + + if (result.stderr) { + process.stderr.write(result.stderr); + } + + if (typeof result.status === 'number') { + return result.status; + } + + if (result.signal) { + throw new Error(`Command "${commandName}" terminated by signal ${result.signal}`); + } + + return 1; +} + +function main() { + try { + const resolution = 
resolveCommand(process.argv); + + if (resolution.mode === 'help') { + showHelp(0); + } + + if (resolution.mode === 'help-command') { + if (!resolution.command) { + showHelp(0); + } + + if (!COMMANDS[resolution.command]) { + throw new Error(`Unknown command: ${resolution.command}`); + } + + process.exitCode = runCommand(resolution.command, ['--help']); + return; + } + + process.exitCode = runCommand(resolution.command, resolution.args); + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/install-apply.js b/scripts/install-apply.js new file mode 100644 index 00000000..d3041c20 --- /dev/null +++ b/scripts/install-apply.js @@ -0,0 +1,137 @@ +#!/usr/bin/env node +/** + * Refactored ECC installer runtime. + * + * Keeps the legacy language-based install entrypoint intact while moving + * target-specific mutation logic into testable Node code. + */ + +const { + SUPPORTED_INSTALL_TARGETS, + listAvailableLanguages, +} = require('./lib/install-executor'); +const { + LEGACY_INSTALL_TARGETS, + normalizeInstallRequest, + parseInstallArgs, +} = require('./lib/install/request'); +const { loadInstallConfig } = require('./lib/install/config'); +const { applyInstallPlan } = require('./lib/install/apply'); +const { createInstallPlanFromRequest } = require('./lib/install/runtime'); + +function showHelp(exitCode = 0) { + const languages = listAvailableLanguages(); + + console.log(` +Usage: install.sh [--target <${LEGACY_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] [ ...] + install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --profile [--with ]... [--without ]... + install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --modules [--with ]... [--without ]... 
+ install.sh [--dry-run] [--json] --config + +Targets: + claude (default) - Install rules to ~/.claude/rules/ + cursor - Install rules, hooks, and bundled Cursor configs to ./.cursor/ + antigravity - Install rules, workflows, skills, and agents to ./.agent/ + +Options: + --profile Resolve and install a manifest profile + --modules Resolve and install explicit module IDs + --with Include a user-facing install component + --without + Exclude a user-facing install component + --config Load install intent from ecc-install.json + --dry-run Show the install plan without copying files + --json Emit machine-readable plan/result JSON + --help Show this help text + +Available languages: +${languages.map(language => ` - ${language}`).join('\n')} +`); + + process.exit(exitCode); +} + +function printHumanPlan(plan, dryRun) { + console.log(`${dryRun ? 'Dry-run install plan' : 'Applying install plan'}:\n`); + console.log(`Mode: ${plan.mode}`); + console.log(`Target: ${plan.target}`); + console.log(`Adapter: ${plan.adapter.id}`); + console.log(`Install root: ${plan.installRoot}`); + console.log(`Install-state: ${plan.installStatePath}`); + if (plan.mode === 'legacy') { + console.log(`Languages: ${plan.languages.join(', ')}`); + } else { + console.log(`Profile: ${plan.profileId || '(custom modules)'}`); + console.log(`Included components: ${plan.includedComponentIds.join(', ') || '(none)'}`); + console.log(`Excluded components: ${plan.excludedComponentIds.join(', ') || '(none)'}`); + console.log(`Requested modules: ${plan.requestedModuleIds.join(', ') || '(none)'}`); + console.log(`Selected modules: ${plan.selectedModuleIds.join(', ') || '(none)'}`); + if (plan.skippedModuleIds.length > 0) { + console.log(`Skipped modules: ${plan.skippedModuleIds.join(', ')}`); + } + if (plan.excludedModuleIds.length > 0) { + console.log(`Excluded modules: ${plan.excludedModuleIds.join(', ')}`); + } + } + console.log(`Operations: ${plan.operations.length}`); + + if (plan.warnings.length > 0) { + 
console.log('\nWarnings:'); + for (const warning of plan.warnings) { + console.log(`- ${warning}`); + } + } + + console.log('\nPlanned file operations:'); + for (const operation of plan.operations) { + console.log(`- ${operation.sourceRelativePath} -> ${operation.destinationPath}`); + } + + if (!dryRun) { + console.log(`\nDone. Install-state written to ${plan.installStatePath}`); + } +} + +function main() { + try { + const options = parseInstallArgs(process.argv); + + if (options.help) { + showHelp(0); + } + + const config = options.configPath + ? loadInstallConfig(options.configPath, { cwd: process.cwd() }) + : null; + const request = normalizeInstallRequest({ + ...options, + config, + }); + const plan = createInstallPlanFromRequest(request, { + projectRoot: process.cwd(), + homeDir: process.env.HOME, + claudeRulesDir: process.env.CLAUDE_RULES_DIR || null, + }); + + if (options.dryRun) { + if (options.json) { + console.log(JSON.stringify({ dryRun: true, plan }, null, 2)); + } else { + printHumanPlan(plan, true); + } + return; + } + + const result = applyInstallPlan(plan); + if (options.json) { + console.log(JSON.stringify({ dryRun: false, result }, null, 2)); + } else { + printHumanPlan(result, false); + } + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/install-plan.js b/scripts/install-plan.js new file mode 100644 index 00000000..dfe02ee5 --- /dev/null +++ b/scripts/install-plan.js @@ -0,0 +1,254 @@ +#!/usr/bin/env node +/** + * Inspect selective-install profiles and module plans without mutating targets. 
+ */ + +const { + listInstallComponents, + listInstallModules, + listInstallProfiles, + resolveInstallPlan, +} = require('./lib/install-manifests'); +const { loadInstallConfig } = require('./lib/install/config'); +const { normalizeInstallRequest } = require('./lib/install/request'); + +function showHelp() { + console.log(` +Inspect ECC selective-install manifests + +Usage: + node scripts/install-plan.js --list-profiles + node scripts/install-plan.js --list-modules + node scripts/install-plan.js --list-components [--family ] [--target ] [--json] + node scripts/install-plan.js --profile [--with ]... [--without ]... [--target ] [--json] + node scripts/install-plan.js --modules [--with ]... [--without ]... [--target ] [--json] + node scripts/install-plan.js --config [--json] + +Options: + --list-profiles List available install profiles + --list-modules List install modules + --list-components List user-facing install components + --family Filter listed components by family + --profile Resolve an install profile + --modules Resolve explicit module IDs (comma-separated) + --with Include a user-facing install component + --without + Exclude a user-facing install component + --config Load install intent from ecc-install.json + --target Filter plan for a specific target + --json Emit machine-readable JSON + --help Show this help text +`); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const parsed = { + json: false, + help: false, + profileId: null, + moduleIds: [], + includeComponentIds: [], + excludeComponentIds: [], + configPath: null, + target: null, + family: null, + listProfiles: false, + listModules: false, + listComponents: false, + }; + + for (let index = 0; index < args.length; index += 1) { + const arg = args[index]; + if (arg === '--help' || arg === '-h') { + parsed.help = true; + } else if (arg === '--json') { + parsed.json = true; + } else if (arg === '--list-profiles') { + parsed.listProfiles = true; + } else if (arg === '--list-modules') { 
+ parsed.listModules = true; + } else if (arg === '--list-components') { + parsed.listComponents = true; + } else if (arg === '--family') { + parsed.family = args[index + 1] || null; + index += 1; + } else if (arg === '--profile') { + parsed.profileId = args[index + 1] || null; + index += 1; + } else if (arg === '--modules') { + const raw = args[index + 1] || ''; + parsed.moduleIds = raw.split(',').map(value => value.trim()).filter(Boolean); + index += 1; + } else if (arg === '--with') { + const componentId = args[index + 1] || ''; + if (componentId.trim()) { + parsed.includeComponentIds.push(componentId.trim()); + } + index += 1; + } else if (arg === '--without') { + const componentId = args[index + 1] || ''; + if (componentId.trim()) { + parsed.excludeComponentIds.push(componentId.trim()); + } + index += 1; + } else if (arg === '--config') { + parsed.configPath = args[index + 1] || null; + index += 1; + } else if (arg === '--target') { + parsed.target = args[index + 1] || null; + index += 1; + } else { + throw new Error(`Unknown argument: ${arg}`); + } + } + + return parsed; +} + +function printProfiles(profiles) { + console.log('Install profiles:\n'); + for (const profile of profiles) { + console.log(`- ${profile.id} (${profile.moduleCount} modules)`); + console.log(` ${profile.description}`); + } +} + +function printModules(modules) { + console.log('Install modules:\n'); + for (const module of modules) { + console.log(`- ${module.id} [${module.kind}]`); + console.log( + ` targets=${module.targets.join(', ')} default=${module.defaultInstall} cost=${module.cost} stability=${module.stability}` + ); + console.log(` ${module.description}`); + } +} + +function printComponents(components) { + console.log('Install components:\n'); + for (const component of components) { + console.log(`- ${component.id} [${component.family}]`); + console.log(` targets=${component.targets.join(', ')} modules=${component.moduleIds.join(', ')}`); + console.log(` ${component.description}`); 
+ } +} + +function printPlan(plan) { + console.log('Install plan:\n'); + console.log( + 'Note: target filtering and operation output currently reflect scaffold-level adapter planning, not a byte-for-byte mirror of legacy install.sh copy paths.\n' + ); + console.log(`Profile: ${plan.profileId || '(custom modules)'}`); + console.log(`Target: ${plan.target || '(all targets)'}`); + console.log(`Included components: ${plan.includedComponentIds.join(', ') || '(none)'}`); + console.log(`Excluded components: ${plan.excludedComponentIds.join(', ') || '(none)'}`); + console.log(`Requested: ${plan.requestedModuleIds.join(', ')}`); + if (plan.targetAdapterId) { + console.log(`Adapter: ${plan.targetAdapterId}`); + console.log(`Target root: ${plan.targetRoot}`); + console.log(`Install-state: ${plan.installStatePath}`); + } + console.log(''); + console.log(`Selected modules (${plan.selectedModuleIds.length}):`); + for (const module of plan.selectedModules) { + console.log(`- ${module.id} [${module.kind}]`); + } + + if (plan.skippedModuleIds.length > 0) { + console.log(''); + console.log(`Skipped for target ${plan.target} (${plan.skippedModuleIds.length}):`); + for (const module of plan.skippedModules) { + console.log(`- ${module.id} [${module.kind}]`); + } + } + + if (plan.excludedModuleIds.length > 0) { + console.log(''); + console.log(`Excluded by selection (${plan.excludedModuleIds.length}):`); + for (const module of plan.excludedModules) { + console.log(`- ${module.id} [${module.kind}]`); + } + } + + if (plan.operations.length > 0) { + console.log(''); + console.log(`Operation plan (${plan.operations.length}):`); + for (const operation of plan.operations) { + console.log( + `- ${operation.moduleId}: ${operation.sourceRelativePath} -> ${operation.destinationPath} [${operation.strategy}]` + ); + } + } +} + +function main() { + try { + const options = parseArgs(process.argv); + + if (options.help || process.argv.length <= 2) { + showHelp(); + process.exit(0); + } + + if 
(options.listProfiles) { + const profiles = listInstallProfiles(); + if (options.json) { + console.log(JSON.stringify({ profiles }, null, 2)); + } else { + printProfiles(profiles); + } + return; + } + + if (options.listModules) { + const modules = listInstallModules(); + if (options.json) { + console.log(JSON.stringify({ modules }, null, 2)); + } else { + printModules(modules); + } + return; + } + + if (options.listComponents) { + const components = listInstallComponents({ + family: options.family, + target: options.target, + }); + if (options.json) { + console.log(JSON.stringify({ components }, null, 2)); + } else { + printComponents(components); + } + return; + } + + const config = options.configPath + ? loadInstallConfig(options.configPath, { cwd: process.cwd() }) + : null; + const request = normalizeInstallRequest({ + ...options, + languages: [], + config, + }); + const plan = resolveInstallPlan({ + profileId: request.profileId, + moduleIds: request.moduleIds, + includeComponentIds: request.includeComponentIds, + excludeComponentIds: request.excludeComponentIds, + target: request.target, + }); + + if (options.json) { + console.log(JSON.stringify(plan, null, 2)); + } else { + printPlan(plan); + } + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/lib/install-executor.js b/scripts/lib/install-executor.js new file mode 100644 index 00000000..3f823801 --- /dev/null +++ b/scripts/lib/install-executor.js @@ -0,0 +1,605 @@ +const fs = require('fs'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const { applyInstallPlan } = require('./install/apply'); +const { LEGACY_INSTALL_TARGETS, parseInstallArgs } = require('./install/request'); +const { + SUPPORTED_INSTALL_TARGETS, + resolveInstallPlan, +} = require('./install-manifests'); +const { getInstallTargetAdapter } = require('./install-targets/registry'); +const { createInstallState } = 
// ----- scripts/lib/install-executor.js (helpers & legacy planners) -----

// Language directory names may only contain alphanumerics, dash, underscore.
const LANGUAGE_NAME_PATTERN = /^[a-zA-Z0-9_-]+$/;
// Generated runtime artifacts that must never be treated as install sources.
const EXCLUDED_GENERATED_SOURCE_SUFFIXES = [
  '/ecc-install-state.json',
  '/ecc/install-state.json',
];

// Repo root sits two levels above this file (scripts/lib -> repo root).
function getSourceRoot() {
  return path.join(__dirname, '../..');
}

// Version from the repo's package.json; null when unreadable/unparseable.
function getPackageVersion(sourceRoot) {
  try {
    const packageJson = JSON.parse(
      fs.readFileSync(path.join(sourceRoot, 'package.json'), 'utf8')
    );
    return packageJson.version || null;
  } catch (_error) {
    return null;
  }
}

// Version of the install-modules manifest; defaults to 1 when unreadable.
function getManifestVersion(sourceRoot) {
  try {
    const modulesManifest = JSON.parse(
      fs.readFileSync(path.join(sourceRoot, 'manifests', 'install-modules.json'), 'utf8')
    );
    return modulesManifest.version || 1;
  } catch (_error) {
    return 1;
  }
}

// Current HEAD commit of the source repo; null when git is unavailable.
function getRepoCommit(sourceRoot) {
  try {
    const output = execFileSync('git', ['rev-parse', 'HEAD'], {
      cwd: sourceRoot,
      encoding: 'utf8',
      stdio: ['ignore', 'pipe', 'ignore'],
      timeout: 5000,
    });
    return output.trim();
  } catch (_error) {
    return null;
  }
}

// Sorted names of the immediate subdirectories of dirPath ([] when absent).
function readDirectoryNames(dirPath) {
  if (!fs.existsSync(dirPath)) {
    return [];
  }

  const names = [];
  for (const entry of fs.readdirSync(dirPath, { withFileTypes: true })) {
    if (entry.isDirectory()) {
      names.push(entry.name);
    }
  }
  return names.sort();
}

// Language rule sets shipped with the repo (everything in rules/ but common).
function listAvailableLanguages(sourceRoot = getSourceRoot()) {
  return readDirectoryNames(path.join(sourceRoot, 'rules'))
    .filter(name => name !== 'common');
}

// Throw when target is not one of the legacy install targets.
function validateLegacyTarget(target) {
  if (LEGACY_INSTALL_TARGETS.includes(target)) {
    return;
  }
  throw new Error(
    `Unknown install target: ${target}. Expected one of ${LEGACY_INSTALL_TARGETS.join(', ')}`
  );
}

// All file paths under dirPath, relative to it, sorted ([] when absent).
function listFilesRecursive(dirPath) {
  if (!fs.existsSync(dirPath)) {
    return [];
  }

  const collected = [];
  for (const entry of fs.readdirSync(dirPath, { withFileTypes: true })) {
    const absolutePath = path.join(dirPath, entry.name);
    if (entry.isDirectory()) {
      for (const childFile of listFilesRecursive(absolutePath)) {
        collected.push(path.join(entry.name, childFile));
      }
    } else if (entry.isFile()) {
      collected.push(entry.name);
    }
  }

  return collected.sort();
}

// True when the (normalized) source path is a generated runtime artifact.
function isGeneratedRuntimeSourcePath(sourceRelativePath) {
  const normalizedPath = String(sourceRelativePath || '').replace(/\\/g, '/');
  return EXCLUDED_GENERATED_SOURCE_SUFFIXES.some(suffix => normalizedPath.endsWith(suffix));
}

// Canonical shape of a managed copy-file operation.
function buildCopyFileOperation({ moduleId, sourcePath, sourceRelativePath, destinationPath, strategy }) {
  return {
    kind: 'copy-file',
    moduleId,
    sourcePath,
    sourceRelativePath,
    destinationPath,
    strategy,
    ownership: 'managed',
    scaffoldOnly: false,
  };
}

// Append copy operations for every file under spec.sourceRelativeDir,
// preserving relative layout. Returns the number of files planned.
function addRecursiveCopyOperations(operations, spec) {
  const sourceDir = path.join(spec.sourceRoot, spec.sourceRelativeDir);
  if (!fs.existsSync(sourceDir)) {
    return 0;
  }

  const relativeFiles = listFilesRecursive(sourceDir);
  for (const relativeFile of relativeFiles) {
    const sourceRelativePath = path.join(spec.sourceRelativeDir, relativeFile);
    operations.push(buildCopyFileOperation({
      moduleId: spec.moduleId,
      sourcePath: path.join(spec.sourceRoot, sourceRelativePath),
      sourceRelativePath,
      destinationPath: path.join(spec.destinationDir, relativeFile),
      strategy: spec.strategy || 'preserve-relative-path',
    }));
  }

  return relativeFiles.length;
}

// Append a single-file copy operation; false when the source is missing.
function addFileCopyOperation(operations, spec) {
  const sourcePath = path.join(spec.sourceRoot, spec.sourceRelativePath);
  if (!fs.existsSync(sourcePath)) {
    return false;
  }

  operations.push(buildCopyFileOperation({
    moduleId: spec.moduleId,
    sourcePath,
    sourceRelativePath: spec.sourceRelativePath,
    destinationPath: spec.destinationPath,
    strategy: spec.strategy || 'flatten-copy' === spec.strategy ? spec.strategy : (spec.strategy || 'preserve-relative-path'),
  }));

  return true;
}

// Append copy operations for files in spec.sourceRelativeDir whose names
// satisfy spec.matcher, optionally renamed. Returns the match count.
function addMatchingRuleOperations(operations, spec) {
  const sourceDir = path.join(spec.sourceRoot, spec.sourceRelativeDir);
  if (!fs.existsSync(sourceDir)) {
    return 0;
  }

  const fileNames = fs.readdirSync(sourceDir, { withFileTypes: true })
    .filter(entry => entry.isFile() && spec.matcher(entry.name))
    .map(entry => entry.name)
    .sort();

  for (const fileName of fileNames) {
    const sourceRelativePath = path.join(spec.sourceRelativeDir, fileName);
    operations.push(buildCopyFileOperation({
      moduleId: spec.moduleId,
      sourcePath: path.join(spec.sourceRoot, sourceRelativePath),
      sourceRelativePath,
      destinationPath: path.join(
        spec.destinationDir,
        spec.rename ? spec.rename(fileName) : fileName
      ),
      strategy: spec.strategy || 'flatten-copy',
    }));
  }

  return fileNames.length;
}

// True when dirPath exists, is a directory, and contains at least one entry.
function isDirectoryNonEmpty(dirPath) {
  if (!fs.existsSync(dirPath)) {
    return false;
  }
  if (!fs.statSync(dirPath).isDirectory()) {
    return false;
  }
  return fs.readdirSync(dirPath).length > 0;
}

// Legacy plan for the Claude target: rules/common plus requested languages
// are mirrored into the rules directory under the adapter's root.
function planClaudeLegacyInstall(context) {
  const adapter = getInstallTargetAdapter('claude');
  const targetRoot = adapter.resolveRoot({ homeDir: context.homeDir });
  const rulesDir = context.claudeRulesDir || path.join(targetRoot, 'rules');
  const installStatePath = adapter.getInstallStatePath({ homeDir: context.homeDir });
  const operations = [];
  const warnings = [];

  if (isDirectoryNonEmpty(rulesDir)) {
    warnings.push(
      `Destination ${rulesDir}/ already exists and files may be overwritten`
    );
  }

  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-claude-rules',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('rules', 'common'),
    destinationDir: path.join(rulesDir, 'common'),
  });

  for (const language of context.languages) {
    if (!LANGUAGE_NAME_PATTERN.test(language)) {
      warnings.push(
        `Invalid language name '${language}'. Only alphanumeric, dash, and underscore are allowed`
      );
      continue;
    }

    const sourceDir = path.join(context.sourceRoot, 'rules', language);
    if (!fs.existsSync(sourceDir)) {
      warnings.push(`rules/${language}/ does not exist, skipping`);
      continue;
    }

    addRecursiveCopyOperations(operations, {
      moduleId: 'legacy-claude-rules',
      sourceRoot: context.sourceRoot,
      sourceRelativeDir: path.join('rules', language),
      destinationDir: path.join(rulesDir, language),
    });
  }

  return {
    mode: 'legacy',
    adapter,
    target: 'claude',
    targetRoot,
    installRoot: rulesDir,
    installStatePath,
    operations,
    warnings,
    selectedModules: ['legacy-claude-rules'],
  };
}

// Legacy plan for the Cursor target: flattened common-/language- prefixed
// rules, plus the .cursor agents/skills/commands/hooks trees and configs.
function planCursorLegacyInstall(context) {
  const adapter = getInstallTargetAdapter('cursor');
  const targetRoot = adapter.resolveRoot({ repoRoot: context.projectRoot });
  const installStatePath = adapter.getInstallStatePath({ repoRoot: context.projectRoot });
  const operations = [];
  const warnings = [];

  addMatchingRuleOperations(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('.cursor', 'rules'),
    destinationDir: path.join(targetRoot, 'rules'),
    matcher: fileName => /^common-.*\.md$/.test(fileName),
  });

  for (const language of context.languages) {
    if (!LANGUAGE_NAME_PATTERN.test(language)) {
      warnings.push(
        `Invalid language name '${language}'. Only alphanumeric, dash, and underscore are allowed`
      );
      continue;
    }

    const matches = addMatchingRuleOperations(operations, {
      moduleId: 'legacy-cursor-install',
      sourceRoot: context.sourceRoot,
      sourceRelativeDir: path.join('.cursor', 'rules'),
      destinationDir: path.join(targetRoot, 'rules'),
      matcher: fileName => fileName.startsWith(`${language}-`) && fileName.endsWith('.md'),
    });

    if (matches === 0) {
      warnings.push(`No Cursor rules for '${language}' found, skipping`);
    }
  }

  // Mirror each supporting .cursor subtree into the target root.
  for (const dirName of ['agents', 'skills', 'commands', 'hooks']) {
    addRecursiveCopyOperations(operations, {
      moduleId: 'legacy-cursor-install',
      sourceRoot: context.sourceRoot,
      sourceRelativeDir: path.join('.cursor', dirName),
      destinationDir: path.join(targetRoot, dirName),
    });
  }

  for (const fileName of ['hooks.json', 'mcp.json']) {
    addFileCopyOperation(operations, {
      moduleId: 'legacy-cursor-install',
      sourceRoot: context.sourceRoot,
      sourceRelativePath: path.join('.cursor', fileName),
      destinationPath: path.join(targetRoot, fileName),
    });
  }

  return {
    mode: 'legacy',
    adapter,
    target: 'cursor',
    targetRoot,
    installRoot: targetRoot,
    installStatePath,
    operations,
    warnings,
    selectedModules: ['legacy-cursor-install'],
  };
}
warnings, + selectedModules: ['legacy-cursor-install'], + }; +} + +function planAntigravityLegacyInstall(context) { + const adapter = getInstallTargetAdapter('antigravity'); + const targetRoot = adapter.resolveRoot({ repoRoot: context.projectRoot }); + const installStatePath = adapter.getInstallStatePath({ repoRoot: context.projectRoot }); + const operations = []; + const warnings = []; + + if (isDirectoryNonEmpty(path.join(targetRoot, 'rules'))) { + warnings.push( + `Destination ${path.join(targetRoot, 'rules')}/ already exists and files may be overwritten` + ); + } + + addMatchingRuleOperations(operations, { + moduleId: 'legacy-antigravity-install', + sourceRoot: context.sourceRoot, + sourceRelativeDir: path.join('rules', 'common'), + destinationDir: path.join(targetRoot, 'rules'), + matcher: fileName => fileName.endsWith('.md'), + rename: fileName => `common-${fileName}`, + }); + + for (const language of context.languages) { + if (!LANGUAGE_NAME_PATTERN.test(language)) { + warnings.push( + `Invalid language name '${language}'. 
Only alphanumeric, dash, and underscore are allowed` + ); + continue; + } + + const sourceDir = path.join(context.sourceRoot, 'rules', language); + if (!fs.existsSync(sourceDir)) { + warnings.push(`rules/${language}/ does not exist, skipping`); + continue; + } + + addMatchingRuleOperations(operations, { + moduleId: 'legacy-antigravity-install', + sourceRoot: context.sourceRoot, + sourceRelativeDir: path.join('rules', language), + destinationDir: path.join(targetRoot, 'rules'), + matcher: fileName => fileName.endsWith('.md'), + rename: fileName => `${language}-${fileName}`, + }); + } + + addRecursiveCopyOperations(operations, { + moduleId: 'legacy-antigravity-install', + sourceRoot: context.sourceRoot, + sourceRelativeDir: 'commands', + destinationDir: path.join(targetRoot, 'workflows'), + }); + addRecursiveCopyOperations(operations, { + moduleId: 'legacy-antigravity-install', + sourceRoot: context.sourceRoot, + sourceRelativeDir: 'agents', + destinationDir: path.join(targetRoot, 'skills'), + }); + addRecursiveCopyOperations(operations, { + moduleId: 'legacy-antigravity-install', + sourceRoot: context.sourceRoot, + sourceRelativeDir: 'skills', + destinationDir: path.join(targetRoot, 'skills'), + }); + + return { + mode: 'legacy', + adapter, + target: 'antigravity', + targetRoot, + installRoot: targetRoot, + installStatePath, + operations, + warnings, + selectedModules: ['legacy-antigravity-install'], + }; +} + +function createLegacyInstallPlan(options = {}) { + const sourceRoot = options.sourceRoot || getSourceRoot(); + const projectRoot = options.projectRoot || process.cwd(); + const homeDir = options.homeDir || process.env.HOME; + const target = options.target || 'claude'; + + validateLegacyTarget(target); + + const context = { + sourceRoot, + projectRoot, + homeDir, + languages: Array.isArray(options.languages) ? 
options.languages : [], + claudeRulesDir: options.claudeRulesDir || process.env.CLAUDE_RULES_DIR || null, + }; + + let plan; + if (target === 'claude') { + plan = planClaudeLegacyInstall(context); + } else if (target === 'cursor') { + plan = planCursorLegacyInstall(context); + } else { + plan = planAntigravityLegacyInstall(context); + } + + const source = { + repoVersion: getPackageVersion(sourceRoot), + repoCommit: getRepoCommit(sourceRoot), + manifestVersion: getManifestVersion(sourceRoot), + }; + + const statePreview = createInstallState({ + adapter: plan.adapter, + targetRoot: plan.targetRoot, + installStatePath: plan.installStatePath, + request: { + profile: null, + modules: [], + legacyLanguages: context.languages, + legacyMode: true, + }, + resolution: { + selectedModules: plan.selectedModules, + skippedModules: [], + }, + operations: plan.operations, + source, + }); + + return { + mode: 'legacy', + target: plan.target, + adapter: { + id: plan.adapter.id, + target: plan.adapter.target, + kind: plan.adapter.kind, + }, + targetRoot: plan.targetRoot, + installRoot: plan.installRoot, + installStatePath: plan.installStatePath, + warnings: plan.warnings, + languages: context.languages, + operations: plan.operations, + statePreview, + }; +} + +function materializeScaffoldOperation(sourceRoot, operation) { + const sourcePath = path.join(sourceRoot, operation.sourceRelativePath); + if (!fs.existsSync(sourcePath)) { + return []; + } + + if (isGeneratedRuntimeSourcePath(operation.sourceRelativePath)) { + return []; + } + + const stat = fs.statSync(sourcePath); + if (stat.isFile()) { + return [buildCopyFileOperation({ + moduleId: operation.moduleId, + sourcePath, + sourceRelativePath: operation.sourceRelativePath, + destinationPath: operation.destinationPath, + strategy: operation.strategy, + })]; + } + + const relativeFiles = listFilesRecursive(sourcePath).filter(relativeFile => { + const sourceRelativePath = path.join(operation.sourceRelativePath, relativeFile); + 
return !isGeneratedRuntimeSourcePath(sourceRelativePath); + }); + return relativeFiles.map(relativeFile => { + const sourceRelativePath = path.join(operation.sourceRelativePath, relativeFile); + return buildCopyFileOperation({ + moduleId: operation.moduleId, + sourcePath: path.join(sourcePath, relativeFile), + sourceRelativePath, + destinationPath: path.join(operation.destinationPath, relativeFile), + strategy: operation.strategy, + }); + }); +} + +function createManifestInstallPlan(options = {}) { + const sourceRoot = options.sourceRoot || getSourceRoot(); + const projectRoot = options.projectRoot || process.cwd(); + const target = options.target || 'claude'; + const plan = resolveInstallPlan({ + repoRoot: sourceRoot, + projectRoot, + homeDir: options.homeDir, + profileId: options.profileId || null, + moduleIds: options.moduleIds || [], + includeComponentIds: options.includeComponentIds || [], + excludeComponentIds: options.excludeComponentIds || [], + target, + }); + const adapter = getInstallTargetAdapter(target); + const operations = plan.operations.flatMap(operation => materializeScaffoldOperation(sourceRoot, operation)); + const source = { + repoVersion: getPackageVersion(sourceRoot), + repoCommit: getRepoCommit(sourceRoot), + manifestVersion: getManifestVersion(sourceRoot), + }; + const statePreview = createInstallState({ + adapter, + targetRoot: plan.targetRoot, + installStatePath: plan.installStatePath, + request: { + profile: plan.profileId, + modules: Array.isArray(options.moduleIds) ? [...options.moduleIds] : [], + includeComponents: Array.isArray(options.includeComponentIds) + ? [...options.includeComponentIds] + : [], + excludeComponents: Array.isArray(options.excludeComponentIds) + ? 
[...options.excludeComponentIds] + : [], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: plan.selectedModuleIds, + skippedModules: plan.skippedModuleIds, + }, + operations, + source, + }); + + return { + mode: 'manifest', + target, + adapter: { + id: adapter.id, + target: adapter.target, + kind: adapter.kind, + }, + targetRoot: plan.targetRoot, + installRoot: plan.targetRoot, + installStatePath: plan.installStatePath, + warnings: [], + languages: [], + profileId: plan.profileId, + requestedModuleIds: plan.requestedModuleIds, + explicitModuleIds: plan.explicitModuleIds, + includedComponentIds: plan.includedComponentIds, + excludedComponentIds: plan.excludedComponentIds, + selectedModuleIds: plan.selectedModuleIds, + skippedModuleIds: plan.skippedModuleIds, + excludedModuleIds: plan.excludedModuleIds, + operations, + statePreview, + }; +} + +module.exports = { + SUPPORTED_INSTALL_TARGETS, + LEGACY_INSTALL_TARGETS, + applyInstallPlan, + createManifestInstallPlan, + createLegacyInstallPlan, + getSourceRoot, + listAvailableLanguages, + parseInstallArgs, +}; diff --git a/scripts/lib/install-lifecycle.js b/scripts/lib/install-lifecycle.js new file mode 100644 index 00000000..306c4825 --- /dev/null +++ b/scripts/lib/install-lifecycle.js @@ -0,0 +1,763 @@ +const fs = require('fs'); +const path = require('path'); + +const { resolveInstallPlan, loadInstallManifests } = require('./install-manifests'); +const { readInstallState, writeInstallState } = require('./install-state'); +const { + applyInstallPlan, + createLegacyInstallPlan, + createManifestInstallPlan, +} = require('./install-executor'); +const { + getInstallTargetAdapter, + listInstallTargetAdapters, +} = require('./install-targets/registry'); + +const DEFAULT_REPO_ROOT = path.join(__dirname, '../..'); + +function readPackageVersion(repoRoot) { + try { + const packageJson = JSON.parse(fs.readFileSync(path.join(repoRoot, 'package.json'), 'utf8')); + return packageJson.version || 
// ----- scripts/lib/install-lifecycle.js (discovery, doctor, repair planning) -----

// Normalize a requested target list to unique adapter targets; an empty or
// non-array request means "all registered targets".
function normalizeTargets(targets) {
  if (!Array.isArray(targets) || targets.length === 0) {
    return listInstallTargetAdapters().map(adapter => adapter.target);
  }

  const uniqueTargets = [];
  for (const target of targets) {
    const adapter = getInstallTargetAdapter(target);
    if (!uniqueTargets.includes(adapter.target)) {
      uniqueTargets.push(adapter.target);
    }
  }
  return uniqueTargets;
}

// Element-wise string-array equality; non-arrays are treated as empty.
function compareStringArrays(left, right) {
  const leftValues = Array.isArray(left) ? left : [];
  const rightValues = Array.isArray(right) ? right : [];
  return leftValues.length === rightValues.length
    && leftValues.every((value, index) => value === rightValues[index]);
}

// Operations from install-state owned by the installer (ownership 'managed').
function getManagedOperations(state) {
  const operations = state && state.operations;
  if (!Array.isArray(operations)) {
    return [];
  }
  return operations.filter(operation => operation.ownership === 'managed');
}

// Absolute source path for an operation: repo-relative when recorded that
// way, the recorded absolute path otherwise, null when neither exists.
function resolveOperationSourcePath(repoRoot, operation) {
  if (operation.sourceRelativePath) {
    return path.join(repoRoot, operation.sourceRelativePath);
  }
  return operation.sourcePath || null;
}

// Byte-for-byte file comparison; any stat/read failure yields false.
function areFilesEqual(leftPath, rightPath) {
  try {
    if (!fs.statSync(leftPath).isFile() || !fs.statSync(rightPath).isFile()) {
      return false;
    }
    return fs.readFileSync(leftPath).equals(fs.readFileSync(rightPath));
  } catch (_error) {
    return false;
  }
}

// Classify one managed operation's on-disk health:
// invalid-destination | missing | unverified | missing-source | drifted | ok.
function inspectManagedOperation(repoRoot, operation) {
  const destinationPath = operation.destinationPath;
  if (!destinationPath) {
    return { status: 'invalid-destination', operation };
  }

  if (!fs.existsSync(destinationPath)) {
    return { status: 'missing', operation, destinationPath };
  }

  // Only copy-file operations can be verified by content comparison.
  if (operation.kind !== 'copy-file') {
    return { status: 'unverified', operation, destinationPath };
  }

  const sourcePath = resolveOperationSourcePath(repoRoot, operation);
  if (!sourcePath || !fs.existsSync(sourcePath)) {
    return { status: 'missing-source', operation, destinationPath, sourcePath };
  }

  if (!areFilesEqual(sourcePath, destinationPath)) {
    return { status: 'drifted', operation, destinationPath, sourcePath };
  }

  return { status: 'ok', operation, destinationPath, sourcePath };
}

// Bucket the inspections of all managed operations by health status.
function summarizeManagedOperationHealth(repoRoot, operations) {
  const summary = {
    missing: [],
    drifted: [],
    missingSource: [],
    unverified: [],
  };

  for (const operation of operations) {
    const inspection = inspectManagedOperation(repoRoot, operation);
    switch (inspection.status) {
      case 'missing':
        summary.missing.push(inspection);
        break;
      case 'drifted':
        summary.drifted.push(inspection);
        break;
      case 'missing-source':
        summary.missingSource.push(inspection);
        break;
      case 'unverified':
      case 'invalid-destination':
        summary.unverified.push(inspection);
        break;
      default:
        break; // 'ok' inspections are not collected
    }
  }

  return summary;
}

// Probe one adapter's install location and read its install-state when
// present; read failures are captured as error strings, not thrown.
function buildDiscoveryRecord(adapter, context) {
  const installTargetInput = {
    homeDir: context.homeDir,
    projectRoot: context.projectRoot,
    repoRoot: context.projectRoot,
  };
  const adapterSummary = {
    id: adapter.id,
    target: adapter.target,
    kind: adapter.kind,
  };
  const targetRoot = adapter.resolveRoot(installTargetInput);
  const installStatePath = adapter.getInstallStatePath(installTargetInput);

  if (!fs.existsSync(installStatePath)) {
    return {
      adapter: adapterSummary,
      targetRoot,
      installStatePath,
      exists: false,
      state: null,
      error: null,
    };
  }

  try {
    const state = readInstallState(installStatePath);
    return {
      adapter: adapterSummary,
      targetRoot,
      installStatePath,
      exists: true,
      state,
      error: null,
    };
  } catch (error) {
    return {
      adapter: adapterSummary,
      targetRoot,
      installStatePath,
      exists: true,
      state: null,
      error: error.message,
    };
  }
}

// Discovery records for every requested (or every registered) target.
function discoverInstalledStates(options = {}) {
  const context = {
    homeDir: options.homeDir || process.env.HOME,
    projectRoot: options.projectRoot || process.cwd(),
  };

  return normalizeTargets(options.targets)
    .map(target => buildDiscoveryRecord(getInstallTargetAdapter(target), context));
}

// A doctor issue record; extra fields are merged in flat.
function buildIssue(severity, code, message, extra = {}) {
  return { severity, code, message, ...extra };
}

// Overall status from a list of issues: worst severity wins.
function determineStatus(issues) {
  if (issues.some(issue => issue.severity === 'error')) {
    return 'error';
  }
  if (issues.some(issue => issue.severity === 'warning')) {
    return 'warning';
  }
  return 'ok';
}

// Run all doctor checks against one discovery record and attach the
// resulting issues and overall status.
function analyzeRecord(record, context) {
  const issues = [];

  if (record.error) {
    issues.push(buildIssue('error', 'invalid-install-state', record.error));
    return { ...record, status: determineStatus(issues), issues };
  }

  const state = record.state;
  if (!state) {
    return { ...record, status: 'missing', issues };
  }

  if (!fs.existsSync(state.target.root)) {
    issues.push(buildIssue(
      'error',
      'missing-target-root',
      `Target root does not exist: ${state.target.root}`
    ));
  }

  if (state.target.root !== record.targetRoot) {
    issues.push(buildIssue(
      'warning',
      'target-root-mismatch',
      `Recorded target root differs from current target root (${record.targetRoot})`,
      {
        recordedTargetRoot: state.target.root,
        currentTargetRoot: record.targetRoot,
      }
    ));
  }

  if (state.target.installStatePath !== record.installStatePath) {
    issues.push(buildIssue(
      'warning',
      'install-state-path-mismatch',
      `Recorded install-state path differs from current path (${record.installStatePath})`,
      {
        recordedInstallStatePath: state.target.installStatePath,
        currentInstallStatePath: record.installStatePath,
      }
    ));
  }

  const health = summarizeManagedOperationHealth(context.repoRoot, getManagedOperations(state));

  if (health.missing.length > 0) {
    issues.push(buildIssue(
      'error',
      'missing-managed-files',
      `${health.missing.length} managed file(s) are missing`,
      { paths: health.missing.map(entry => entry.destinationPath) }
    ));
  }

  if (health.drifted.length > 0) {
    issues.push(buildIssue(
      'warning',
      'drifted-managed-files',
      `${health.drifted.length} managed file(s) differ from the source repo`,
      { paths: health.drifted.map(entry => entry.destinationPath) }
    ));
  }

  if (health.missingSource.length > 0) {
    issues.push(buildIssue(
      'error',
      'missing-source-files',
      `${health.missingSource.length} source file(s) referenced by install-state are missing`,
      { paths: health.missingSource.map(entry => entry.sourcePath).filter(Boolean) }
    ));
  }

  if (health.unverified.length > 0) {
    issues.push(buildIssue(
      'warning',
      'unverified-managed-operations',
      `${health.unverified.length} managed operation(s) could not be content-verified`,
      { paths: health.unverified.map(entry => entry.destinationPath).filter(Boolean) }
    ));
  }

  if (state.source.manifestVersion !== context.manifestVersion) {
    issues.push(buildIssue(
      'warning',
      'manifest-version-mismatch',
      `Recorded manifest version ${state.source.manifestVersion} differs from current manifest version ${context.manifestVersion}`
    ));
  }

  if (
    context.packageVersion
    && state.source.repoVersion
    && state.source.repoVersion !== context.packageVersion
  ) {
    issues.push(buildIssue(
      'warning',
      'repo-version-mismatch',
      `Recorded repo version ${state.source.repoVersion} differs from current repo version ${context.packageVersion}`
    ));
  }

  // Manifest-mode installs: re-resolve and compare module selections.
  if (!state.request.legacyMode) {
    try {
      const desiredPlan = resolveInstallPlan({
        repoRoot: context.repoRoot,
        projectRoot: context.projectRoot,
        homeDir: context.homeDir,
        target: record.adapter.target,
        profileId: state.request.profile || null,
        moduleIds: state.request.modules || [],
        includeComponentIds: state.request.includeComponents || [],
        excludeComponentIds: state.request.excludeComponents || [],
      });

      const selectionMatches = compareStringArrays(
        desiredPlan.selectedModuleIds,
        state.resolution.selectedModules
      );
      const skipMatches = compareStringArrays(
        desiredPlan.skippedModuleIds,
        state.resolution.skippedModules
      );

      if (!selectionMatches || !skipMatches) {
        issues.push(buildIssue(
          'warning',
          'resolution-drift',
          'Current manifest resolution differs from recorded install-state',
          {
            expectedSelectedModules: desiredPlan.selectedModuleIds,
            recordedSelectedModules: state.resolution.selectedModules,
            expectedSkippedModules: desiredPlan.skippedModuleIds,
            recordedSkippedModules: state.resolution.skippedModules,
          }
        ));
      }
    } catch (error) {
      issues.push(buildIssue('error', 'resolution-unavailable', error.message));
    }
  }

  return { ...record, status: determineStatus(issues), issues };
}

// Analyze every discovered install and aggregate an overall summary.
function buildDoctorReport(options = {}) {
  const repoRoot = options.repoRoot || DEFAULT_REPO_ROOT;
  const manifests = loadInstallManifests({ repoRoot });
  const records = discoverInstalledStates({
    homeDir: options.homeDir,
    projectRoot: options.projectRoot,
    targets: options.targets,
  }).filter(record => record.exists);
  const context = {
    repoRoot,
    homeDir: options.homeDir || process.env.HOME,
    projectRoot: options.projectRoot || process.cwd(),
    manifestVersion: manifests.modulesVersion,
    packageVersion: readPackageVersion(repoRoot),
  };
  const results = records.map(record => analyzeRecord(record, context));

  const summary = {
    checkedCount: 0,
    okCount: 0,
    errorCount: 0,
    warningCount: 0,
  };
  for (const result of results) {
    summary.checkedCount += 1;
    if (result.status === 'ok') {
      summary.okCount += 1;
    }
    summary.errorCount += result.issues.filter(issue => issue.severity === 'error').length;
    summary.warningCount += result.issues.filter(issue => issue.severity === 'warning').length;
  }

  return {
    generatedAt: new Date().toISOString(),
    packageVersion: context.packageVersion,
    manifestVersion: context.manifestVersion,
    results,
    summary,
  };
}

// Derive the plan that would restore a recorded install: legacy installs are
// replayed from their managed operations, manifest installs re-resolved.
function createRepairPlanFromRecord(record, context) {
  const state = record.state;
  if (!state) {
    throw new Error('No install-state available for repair');
  }

  if (state.request.legacyMode) {
    const operations = getManagedOperations(state).map(operation => ({
      ...operation,
      sourcePath: resolveOperationSourcePath(context.repoRoot, operation),
    }));

    const statePreview = {
      ...state,
      operations: operations.map(operation => ({ ...operation })),
      source: {
        ...state.source,
        repoVersion: context.packageVersion,
        manifestVersion: context.manifestVersion,
      },
      lastValidatedAt: new Date().toISOString(),
    };

    return {
      mode: 'legacy',
      target: record.adapter.target,
      adapter: record.adapter,
      targetRoot: state.target.root,
      installRoot: state.target.root,
      installStatePath: state.target.installStatePath,
      warnings: [],
      languages: Array.isArray(state.request.legacyLanguages)
        ? [...state.request.legacyLanguages]
        : [],
      operations,
      statePreview,
    };
  }

  const desiredPlan = createManifestInstallPlan({
    sourceRoot: context.repoRoot,
    target: record.adapter.target,
    profileId: state.request.profile || null,
    moduleIds: state.request.modules || [],
    includeComponentIds: state.request.includeComponents || [],
    excludeComponentIds: state.request.excludeComponents || [],
    projectRoot: context.projectRoot,
    homeDir: context.homeDir,
  });

  return {
    ...desiredPlan,
    statePreview: {
      ...desiredPlan.statePreview,
      installedAt: state.installedAt,
      lastValidatedAt: new Date().toISOString(),
    },
  };
}
[...state.request.legacyLanguages] + : [], + operations, + statePreview, + }; + } + + const desiredPlan = createManifestInstallPlan({ + sourceRoot: context.repoRoot, + target: record.adapter.target, + profileId: state.request.profile || null, + moduleIds: state.request.modules || [], + includeComponentIds: state.request.includeComponents || [], + excludeComponentIds: state.request.excludeComponents || [], + projectRoot: context.projectRoot, + homeDir: context.homeDir, + }); + + return { + ...desiredPlan, + statePreview: { + ...desiredPlan.statePreview, + installedAt: state.installedAt, + lastValidatedAt: new Date().toISOString(), + }, + }; +} + +function repairInstalledStates(options = {}) { + const repoRoot = options.repoRoot || DEFAULT_REPO_ROOT; + const manifests = loadInstallManifests({ repoRoot }); + const context = { + repoRoot, + homeDir: options.homeDir || process.env.HOME, + projectRoot: options.projectRoot || process.cwd(), + manifestVersion: manifests.modulesVersion, + packageVersion: readPackageVersion(repoRoot), + }; + const records = discoverInstalledStates({ + homeDir: context.homeDir, + projectRoot: context.projectRoot, + targets: options.targets, + }).filter(record => record.exists); + + const results = records.map(record => { + if (record.error) { + return { + adapter: record.adapter, + status: 'error', + installStatePath: record.installStatePath, + repairedPaths: [], + plannedRepairs: [], + error: record.error, + }; + } + + try { + const desiredPlan = createRepairPlanFromRecord(record, context); + const operationHealth = summarizeManagedOperationHealth(context.repoRoot, desiredPlan.operations); + + if (operationHealth.missingSource.length > 0) { + return { + adapter: record.adapter, + status: 'error', + installStatePath: record.installStatePath, + repairedPaths: [], + plannedRepairs: [], + error: `Missing source file(s): ${operationHealth.missingSource.map(entry => entry.sourcePath).join(', ')}`, + }; + } + + const repairOperations = [ + 
...operationHealth.missing.map(entry => ({ ...entry.operation })), + ...operationHealth.drifted.map(entry => ({ ...entry.operation })), + ]; + const plannedRepairs = repairOperations.map(operation => operation.destinationPath); + + if (options.dryRun) { + return { + adapter: record.adapter, + status: plannedRepairs.length > 0 ? 'planned' : 'ok', + installStatePath: record.installStatePath, + repairedPaths: [], + plannedRepairs, + stateRefreshed: plannedRepairs.length === 0, + error: null, + }; + } + + if (repairOperations.length > 0) { + applyInstallPlan({ + ...desiredPlan, + operations: repairOperations, + statePreview: desiredPlan.statePreview, + }); + } else { + writeInstallState(desiredPlan.installStatePath, desiredPlan.statePreview); + } + + return { + adapter: record.adapter, + status: repairOperations.length > 0 ? 'repaired' : 'ok', + installStatePath: record.installStatePath, + repairedPaths: plannedRepairs, + plannedRepairs: [], + stateRefreshed: true, + error: null, + }; + } catch (error) { + return { + adapter: record.adapter, + status: 'error', + installStatePath: record.installStatePath, + repairedPaths: [], + plannedRepairs: [], + error: error.message, + }; + } + }); + + const summary = results.reduce((accumulator, result) => ({ + checkedCount: accumulator.checkedCount + 1, + repairedCount: accumulator.repairedCount + (result.status === 'repaired' ? 1 : 0), + plannedRepairCount: accumulator.plannedRepairCount + (result.status === 'planned' ? 1 : 0), + errorCount: accumulator.errorCount + (result.status === 'error' ? 
1 : 0), + }), { + checkedCount: 0, + repairedCount: 0, + plannedRepairCount: 0, + errorCount: 0, + }); + + return { + dryRun: Boolean(options.dryRun), + generatedAt: new Date().toISOString(), + results, + summary, + }; +} + +function cleanupEmptyParentDirs(filePath, stopAt) { + let currentPath = path.dirname(filePath); + const normalizedStopAt = path.resolve(stopAt); + + while ( + currentPath + && path.resolve(currentPath).startsWith(normalizedStopAt) + && path.resolve(currentPath) !== normalizedStopAt + ) { + if (!fs.existsSync(currentPath)) { + currentPath = path.dirname(currentPath); + continue; + } + + const stat = fs.lstatSync(currentPath); + if (!stat.isDirectory() || fs.readdirSync(currentPath).length > 0) { + break; + } + + fs.rmdirSync(currentPath); + currentPath = path.dirname(currentPath); + } +} + +function uninstallInstalledStates(options = {}) { + const records = discoverInstalledStates({ + homeDir: options.homeDir, + projectRoot: options.projectRoot, + targets: options.targets, + }).filter(record => record.exists); + + const results = records.map(record => { + if (record.error || !record.state) { + return { + adapter: record.adapter, + status: 'error', + installStatePath: record.installStatePath, + removedPaths: [], + plannedRemovals: [], + error: record.error || 'No valid install-state available', + }; + } + + const state = record.state; + const plannedRemovals = Array.from(new Set([ + ...getManagedOperations(state).map(operation => operation.destinationPath), + state.target.installStatePath, + ])); + + if (options.dryRun) { + return { + adapter: record.adapter, + status: 'planned', + installStatePath: record.installStatePath, + removedPaths: [], + plannedRemovals, + error: null, + }; + } + + try { + const removedPaths = []; + const cleanupTargets = []; + const filePaths = Array.from(new Set( + getManagedOperations(state).map(operation => operation.destinationPath) + )).sort((left, right) => right.length - left.length); + + for (const filePath of 
filePaths) { + if (!fs.existsSync(filePath)) { + continue; + } + + const stat = fs.lstatSync(filePath); + if (stat.isDirectory()) { + throw new Error(`Refusing to remove managed directory path without explicit support: ${filePath}`); + } + + fs.rmSync(filePath, { force: true }); + removedPaths.push(filePath); + cleanupTargets.push(filePath); + } + + if (fs.existsSync(state.target.installStatePath)) { + fs.rmSync(state.target.installStatePath, { force: true }); + removedPaths.push(state.target.installStatePath); + cleanupTargets.push(state.target.installStatePath); + } + + for (const cleanupTarget of cleanupTargets) { + cleanupEmptyParentDirs(cleanupTarget, state.target.root); + } + + return { + adapter: record.adapter, + status: 'uninstalled', + installStatePath: record.installStatePath, + removedPaths, + plannedRemovals: [], + error: null, + }; + } catch (error) { + return { + adapter: record.adapter, + status: 'error', + installStatePath: record.installStatePath, + removedPaths: [], + plannedRemovals, + error: error.message, + }; + } + }); + + const summary = results.reduce((accumulator, result) => ({ + checkedCount: accumulator.checkedCount + 1, + uninstalledCount: accumulator.uninstalledCount + (result.status === 'uninstalled' ? 1 : 0), + plannedRemovalCount: accumulator.plannedRemovalCount + (result.status === 'planned' ? 1 : 0), + errorCount: accumulator.errorCount + (result.status === 'error' ? 
1 : 0), + }), { + checkedCount: 0, + uninstalledCount: 0, + plannedRemovalCount: 0, + errorCount: 0, + }); + + return { + dryRun: Boolean(options.dryRun), + generatedAt: new Date().toISOString(), + results, + summary, + }; +} + +module.exports = { + DEFAULT_REPO_ROOT, + buildDoctorReport, + discoverInstalledStates, + normalizeTargets, + repairInstalledStates, + uninstallInstalledStates, +}; diff --git a/scripts/lib/install-manifests.js b/scripts/lib/install-manifests.js new file mode 100644 index 00000000..63274a37 --- /dev/null +++ b/scripts/lib/install-manifests.js @@ -0,0 +1,305 @@ +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { planInstallTargetScaffold } = require('./install-targets/registry'); + +const DEFAULT_REPO_ROOT = path.join(__dirname, '../..'); +const SUPPORTED_INSTALL_TARGETS = ['claude', 'cursor', 'antigravity', 'codex', 'opencode']; +const COMPONENT_FAMILY_PREFIXES = { + baseline: 'baseline:', + language: 'lang:', + framework: 'framework:', + capability: 'capability:', +}; + +function readJson(filePath, label) { + try { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); + } catch (error) { + throw new Error(`Failed to read ${label}: ${error.message}`); + } +} + +function dedupeStrings(values) { + return [...new Set((Array.isArray(values) ? 
values : []).map(value => String(value).trim()).filter(Boolean))]; +} + +function intersectTargets(modules) { + if (!Array.isArray(modules) || modules.length === 0) { + return []; + } + + return SUPPORTED_INSTALL_TARGETS.filter(target => ( + modules.every(module => Array.isArray(module.targets) && module.targets.includes(target)) + )); +} + +function getManifestPaths(repoRoot = DEFAULT_REPO_ROOT) { + return { + modulesPath: path.join(repoRoot, 'manifests', 'install-modules.json'), + profilesPath: path.join(repoRoot, 'manifests', 'install-profiles.json'), + componentsPath: path.join(repoRoot, 'manifests', 'install-components.json'), + }; +} + +function loadInstallManifests(options = {}) { + const repoRoot = options.repoRoot || DEFAULT_REPO_ROOT; + const { modulesPath, profilesPath, componentsPath } = getManifestPaths(repoRoot); + + if (!fs.existsSync(modulesPath) || !fs.existsSync(profilesPath)) { + throw new Error(`Install manifests not found under ${repoRoot}`); + } + + const modulesData = readJson(modulesPath, 'install-modules.json'); + const profilesData = readJson(profilesPath, 'install-profiles.json'); + const componentsData = fs.existsSync(componentsPath) + ? readJson(componentsPath, 'install-components.json') + : { version: null, components: [] }; + const modules = Array.isArray(modulesData.modules) ? modulesData.modules : []; + const profiles = profilesData && typeof profilesData.profiles === 'object' + ? profilesData.profiles + : {}; + const components = Array.isArray(componentsData.components) ? 
componentsData.components : []; + const modulesById = new Map(modules.map(module => [module.id, module])); + const componentsById = new Map(components.map(component => [component.id, component])); + + return { + repoRoot, + modulesPath, + profilesPath, + componentsPath, + modules, + profiles, + components, + modulesById, + componentsById, + modulesVersion: modulesData.version, + profilesVersion: profilesData.version, + componentsVersion: componentsData.version, + }; +} + +function listInstallProfiles(options = {}) { + const manifests = loadInstallManifests(options); + return Object.entries(manifests.profiles).map(([id, profile]) => ({ + id, + description: profile.description, + moduleCount: Array.isArray(profile.modules) ? profile.modules.length : 0, + })); +} + +function listInstallModules(options = {}) { + const manifests = loadInstallManifests(options); + return manifests.modules.map(module => ({ + id: module.id, + kind: module.kind, + description: module.description, + targets: module.targets, + defaultInstall: module.defaultInstall, + cost: module.cost, + stability: module.stability, + dependencyCount: Array.isArray(module.dependencies) ? module.dependencies.length : 0, + })); +} + +function listInstallComponents(options = {}) { + const manifests = loadInstallManifests(options); + const family = options.family || null; + const target = options.target || null; + + if (family && !Object.hasOwn(COMPONENT_FAMILY_PREFIXES, family)) { + throw new Error( + `Unknown component family: ${family}. Expected one of ${Object.keys(COMPONENT_FAMILY_PREFIXES).join(', ')}` + ); + } + + if (target && !SUPPORTED_INSTALL_TARGETS.includes(target)) { + throw new Error( + `Unknown install target: ${target}. 
Expected one of ${SUPPORTED_INSTALL_TARGETS.join(', ')}` + ); + } + + return manifests.components + .filter(component => !family || component.family === family) + .map(component => { + const moduleIds = dedupeStrings(component.modules); + const modules = moduleIds + .map(moduleId => manifests.modulesById.get(moduleId)) + .filter(Boolean); + const targets = intersectTargets(modules); + + return { + id: component.id, + family: component.family, + description: component.description, + moduleIds, + moduleCount: moduleIds.length, + targets, + }; + }) + .filter(component => !target || component.targets.includes(target)); +} + +function expandComponentIdsToModuleIds(componentIds, manifests) { + const expandedModuleIds = []; + + for (const componentId of dedupeStrings(componentIds)) { + const component = manifests.componentsById.get(componentId); + if (!component) { + throw new Error(`Unknown install component: ${componentId}`); + } + expandedModuleIds.push(...component.modules); + } + + return dedupeStrings(expandedModuleIds); +} + +function resolveInstallPlan(options = {}) { + const manifests = loadInstallManifests(options); + const profileId = options.profileId || null; + const explicitModuleIds = dedupeStrings(options.moduleIds); + const includedComponentIds = dedupeStrings(options.includeComponentIds); + const excludedComponentIds = dedupeStrings(options.excludeComponentIds); + const requestedModuleIds = []; + + if (profileId) { + const profile = manifests.profiles[profileId]; + if (!profile) { + throw new Error(`Unknown install profile: ${profileId}`); + } + requestedModuleIds.push(...profile.modules); + } + + requestedModuleIds.push(...explicitModuleIds); + requestedModuleIds.push(...expandComponentIdsToModuleIds(includedComponentIds, manifests)); + + const excludedModuleIds = expandComponentIdsToModuleIds(excludedComponentIds, manifests); + const excludedModuleOwners = new Map(); + for (const componentId of excludedComponentIds) { + const component = 
manifests.componentsById.get(componentId); + if (!component) { + throw new Error(`Unknown install component: ${componentId}`); + } + for (const moduleId of component.modules) { + const owners = excludedModuleOwners.get(moduleId) || []; + owners.push(componentId); + excludedModuleOwners.set(moduleId, owners); + } + } + + const target = options.target || null; + if (target && !SUPPORTED_INSTALL_TARGETS.includes(target)) { + throw new Error( + `Unknown install target: ${target}. Expected one of ${SUPPORTED_INSTALL_TARGETS.join(', ')}` + ); + } + + const effectiveRequestedIds = dedupeStrings( + requestedModuleIds.filter(moduleId => !excludedModuleOwners.has(moduleId)) + ); + + if (requestedModuleIds.length === 0) { + throw new Error('No install profile, module IDs, or included component IDs were provided'); + } + + if (effectiveRequestedIds.length === 0) { + throw new Error('Selection excludes every requested install module'); + } + + const selectedIds = new Set(); + const skippedTargetIds = new Set(); + const excludedIds = new Set(excludedModuleIds); + const visitingIds = new Set(); + const resolvedIds = new Set(); + + function resolveModule(moduleId, dependencyOf) { + const module = manifests.modulesById.get(moduleId); + if (!module) { + throw new Error(`Unknown install module: ${moduleId}`); + } + + if (excludedModuleOwners.has(moduleId)) { + if (dependencyOf) { + const owners = excludedModuleOwners.get(moduleId) || []; + throw new Error( + `Module ${dependencyOf} depends on excluded module ${moduleId}${owners.length > 0 ? 
` (excluded by ${owners.join(', ')})` : ''}` + ); + } + return; + } + + if (target && !module.targets.includes(target)) { + if (dependencyOf) { + throw new Error( + `Module ${dependencyOf} depends on ${moduleId}, which does not support target ${target}` + ); + } + skippedTargetIds.add(moduleId); + return; + } + + if (resolvedIds.has(moduleId)) { + return; + } + + if (visitingIds.has(moduleId)) { + throw new Error(`Circular install dependency detected at ${moduleId}`); + } + + visitingIds.add(moduleId); + for (const dependencyId of module.dependencies) { + resolveModule(dependencyId, moduleId); + } + visitingIds.delete(moduleId); + resolvedIds.add(moduleId); + selectedIds.add(moduleId); + } + + for (const moduleId of effectiveRequestedIds) { + resolveModule(moduleId, null); + } + + const selectedModules = manifests.modules.filter(module => selectedIds.has(module.id)); + const skippedModules = manifests.modules.filter(module => skippedTargetIds.has(module.id)); + const excludedModules = manifests.modules.filter(module => excludedIds.has(module.id)); + const scaffoldPlan = target + ? planInstallTargetScaffold({ + target, + repoRoot: manifests.repoRoot, + projectRoot: options.projectRoot || manifests.repoRoot, + homeDir: options.homeDir || os.homedir(), + modules: selectedModules, + }) + : null; + + return { + repoRoot: manifests.repoRoot, + profileId, + target, + requestedModuleIds: effectiveRequestedIds, + explicitModuleIds, + includedComponentIds, + excludedComponentIds, + selectedModuleIds: selectedModules.map(module => module.id), + skippedModuleIds: skippedModules.map(module => module.id), + excludedModuleIds: excludedModules.map(module => module.id), + selectedModules, + skippedModules, + excludedModules, + targetAdapterId: scaffoldPlan ? scaffoldPlan.adapter.id : null, + targetRoot: scaffoldPlan ? scaffoldPlan.targetRoot : null, + installStatePath: scaffoldPlan ? scaffoldPlan.installStatePath : null, + operations: scaffoldPlan ? 
scaffoldPlan.operations : [], + }; +} + +module.exports = { + DEFAULT_REPO_ROOT, + SUPPORTED_INSTALL_TARGETS, + getManifestPaths, + loadInstallManifests, + listInstallComponents, + listInstallModules, + listInstallProfiles, + resolveInstallPlan, +}; diff --git a/scripts/lib/install-state.js b/scripts/lib/install-state.js new file mode 100644 index 00000000..69208f92 --- /dev/null +++ b/scripts/lib/install-state.js @@ -0,0 +1,120 @@ +const fs = require('fs'); +const path = require('path'); +const Ajv = require('ajv'); + +const SCHEMA_PATH = path.join(__dirname, '..', '..', 'schemas', 'install-state.schema.json'); + +let cachedValidator = null; + +function readJson(filePath, label) { + try { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); + } catch (error) { + throw new Error(`Failed to read ${label}: ${error.message}`); + } +} + +function getValidator() { + if (cachedValidator) { + return cachedValidator; + } + + const schema = readJson(SCHEMA_PATH, 'install-state schema'); + const ajv = new Ajv({ allErrors: true }); + cachedValidator = ajv.compile(schema); + return cachedValidator; +} + +function formatValidationErrors(errors = []) { + return errors + .map(error => `${error.instancePath || '/'} ${error.message}`) + .join('; '); +} + +function validateInstallState(state) { + const validator = getValidator(); + const valid = validator(state); + return { + valid, + errors: validator.errors || [], + }; +} + +function assertValidInstallState(state, label) { + const result = validateInstallState(state); + if (!result.valid) { + throw new Error(`Invalid install-state${label ? 
` (${label})` : ''}: ${formatValidationErrors(result.errors)}`); + } +} + +function createInstallState(options) { + const installedAt = options.installedAt || new Date().toISOString(); + const state = { + schemaVersion: 'ecc.install.v1', + installedAt, + target: { + id: options.adapter.id, + target: options.adapter.target || undefined, + kind: options.adapter.kind || undefined, + root: options.targetRoot, + installStatePath: options.installStatePath, + }, + request: { + profile: options.request.profile || null, + modules: Array.isArray(options.request.modules) ? [...options.request.modules] : [], + includeComponents: Array.isArray(options.request.includeComponents) + ? [...options.request.includeComponents] + : [], + excludeComponents: Array.isArray(options.request.excludeComponents) + ? [...options.request.excludeComponents] + : [], + legacyLanguages: Array.isArray(options.request.legacyLanguages) + ? [...options.request.legacyLanguages] + : [], + legacyMode: Boolean(options.request.legacyMode), + }, + resolution: { + selectedModules: Array.isArray(options.resolution.selectedModules) + ? [...options.resolution.selectedModules] + : [], + skippedModules: Array.isArray(options.resolution.skippedModules) + ? [...options.resolution.skippedModules] + : [], + }, + source: { + repoVersion: options.source.repoVersion || null, + repoCommit: options.source.repoCommit || null, + manifestVersion: options.source.manifestVersion, + }, + operations: Array.isArray(options.operations) + ? 
options.operations.map(operation => ({ ...operation })) + : [], + }; + + if (options.lastValidatedAt) { + state.lastValidatedAt = options.lastValidatedAt; + } + + assertValidInstallState(state, 'create'); + return state; +} + +function readInstallState(filePath) { + const state = readJson(filePath, 'install-state'); + assertValidInstallState(state, filePath); + return state; +} + +function writeInstallState(filePath, state) { + assertValidInstallState(state, filePath); + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, `${JSON.stringify(state, null, 2)}\n`); + return state; +} + +module.exports = { + createInstallState, + readInstallState, + validateInstallState, + writeInstallState, +}; diff --git a/scripts/lib/install-targets/antigravity-project.js b/scripts/lib/install-targets/antigravity-project.js new file mode 100644 index 00000000..0d156405 --- /dev/null +++ b/scripts/lib/install-targets/antigravity-project.js @@ -0,0 +1,9 @@ +const { createInstallTargetAdapter } = require('./helpers'); + +module.exports = createInstallTargetAdapter({ + id: 'antigravity-project', + target: 'antigravity', + kind: 'project', + rootSegments: ['.agent'], + installStatePathSegments: ['ecc-install-state.json'], +}); diff --git a/scripts/lib/install-targets/claude-home.js b/scripts/lib/install-targets/claude-home.js new file mode 100644 index 00000000..03e0b4ef --- /dev/null +++ b/scripts/lib/install-targets/claude-home.js @@ -0,0 +1,10 @@ +const { createInstallTargetAdapter } = require('./helpers'); + +module.exports = createInstallTargetAdapter({ + id: 'claude-home', + target: 'claude', + kind: 'home', + rootSegments: ['.claude'], + installStatePathSegments: ['ecc', 'install-state.json'], + nativeRootRelativePath: '.claude-plugin', +}); diff --git a/scripts/lib/install-targets/codex-home.js b/scripts/lib/install-targets/codex-home.js new file mode 100644 index 00000000..ae29b41a --- /dev/null +++ b/scripts/lib/install-targets/codex-home.js 
@@ -0,0 +1,10 @@ +const { createInstallTargetAdapter } = require('./helpers'); + +module.exports = createInstallTargetAdapter({ + id: 'codex-home', + target: 'codex', + kind: 'home', + rootSegments: ['.codex'], + installStatePathSegments: ['ecc-install-state.json'], + nativeRootRelativePath: '.codex', +}); diff --git a/scripts/lib/install-targets/cursor-project.js b/scripts/lib/install-targets/cursor-project.js new file mode 100644 index 00000000..0af26fb3 --- /dev/null +++ b/scripts/lib/install-targets/cursor-project.js @@ -0,0 +1,10 @@ +const { createInstallTargetAdapter } = require('./helpers'); + +module.exports = createInstallTargetAdapter({ + id: 'cursor-project', + target: 'cursor', + kind: 'project', + rootSegments: ['.cursor'], + installStatePathSegments: ['ecc-install-state.json'], + nativeRootRelativePath: '.cursor', +}); diff --git a/scripts/lib/install-targets/helpers.js b/scripts/lib/install-targets/helpers.js new file mode 100644 index 00000000..411b3468 --- /dev/null +++ b/scripts/lib/install-targets/helpers.js @@ -0,0 +1,89 @@ +const os = require('os'); +const path = require('path'); + +function normalizeRelativePath(relativePath) { + return String(relativePath || '') + .replace(/\\/g, '/') + .replace(/^\.\/+/, '') + .replace(/\/+$/, ''); +} + +function resolveBaseRoot(scope, input = {}) { + if (scope === 'home') { + return input.homeDir || os.homedir(); + } + + if (scope === 'project') { + const projectRoot = input.projectRoot || input.repoRoot; + if (!projectRoot) { + throw new Error('projectRoot or repoRoot is required for project install targets'); + } + return projectRoot; + } + + throw new Error(`Unsupported install target scope: ${scope}`); +} + +function createInstallTargetAdapter(config) { + const adapter = { + id: config.id, + target: config.target, + kind: config.kind, + nativeRootRelativePath: config.nativeRootRelativePath || null, + supports(target) { + return target === config.target || target === config.id; + }, + resolveRoot(input = 
{}) { + const baseRoot = resolveBaseRoot(config.kind, input); + return path.join(baseRoot, ...config.rootSegments); + }, + getInstallStatePath(input = {}) { + const root = adapter.resolveRoot(input); + return path.join(root, ...config.installStatePathSegments); + }, + resolveDestinationPath(sourceRelativePath, input = {}) { + const normalizedSourcePath = normalizeRelativePath(sourceRelativePath); + const targetRoot = adapter.resolveRoot(input); + + if ( + config.nativeRootRelativePath + && normalizedSourcePath === normalizeRelativePath(config.nativeRootRelativePath) + ) { + return targetRoot; + } + + return path.join(targetRoot, normalizedSourcePath); + }, + determineStrategy(sourceRelativePath) { + const normalizedSourcePath = normalizeRelativePath(sourceRelativePath); + + if ( + config.nativeRootRelativePath + && normalizedSourcePath === normalizeRelativePath(config.nativeRootRelativePath) + ) { + return 'sync-root-children'; + } + + return 'preserve-relative-path'; + }, + createScaffoldOperation(moduleId, sourceRelativePath, input = {}) { + const normalizedSourcePath = normalizeRelativePath(sourceRelativePath); + return { + kind: 'copy-path', + moduleId, + sourceRelativePath: normalizedSourcePath, + destinationPath: adapter.resolveDestinationPath(normalizedSourcePath, input), + strategy: adapter.determineStrategy(normalizedSourcePath), + ownership: 'managed', + scaffoldOnly: true, + }; + }, + }; + + return Object.freeze(adapter); +} + +module.exports = { + createInstallTargetAdapter, + normalizeRelativePath, +}; diff --git a/scripts/lib/install-targets/opencode-home.js b/scripts/lib/install-targets/opencode-home.js new file mode 100644 index 00000000..c8e629e4 --- /dev/null +++ b/scripts/lib/install-targets/opencode-home.js @@ -0,0 +1,10 @@ +const { createInstallTargetAdapter } = require('./helpers'); + +module.exports = createInstallTargetAdapter({ + id: 'opencode-home', + target: 'opencode', + kind: 'home', + rootSegments: ['.opencode'], + 
installStatePathSegments: ['ecc-install-state.json'], + nativeRootRelativePath: '.opencode', +}); diff --git a/scripts/lib/install-targets/registry.js b/scripts/lib/install-targets/registry.js new file mode 100644 index 00000000..96260bc2 --- /dev/null +++ b/scripts/lib/install-targets/registry.js @@ -0,0 +1,64 @@ +const antigravityProject = require('./antigravity-project'); +const claudeHome = require('./claude-home'); +const codexHome = require('./codex-home'); +const cursorProject = require('./cursor-project'); +const opencodeHome = require('./opencode-home'); + +const ADAPTERS = Object.freeze([ + claudeHome, + cursorProject, + antigravityProject, + codexHome, + opencodeHome, +]); + +function listInstallTargetAdapters() { + return ADAPTERS.slice(); +} + +function getInstallTargetAdapter(targetOrAdapterId) { + const adapter = ADAPTERS.find(candidate => candidate.supports(targetOrAdapterId)); + + if (!adapter) { + throw new Error(`Unknown install target adapter: ${targetOrAdapterId}`); + } + + return adapter; +} + +function planInstallTargetScaffold(options = {}) { + const adapter = getInstallTargetAdapter(options.target); + const modules = Array.isArray(options.modules) ? options.modules : []; + const planningInput = { + repoRoot: options.repoRoot, + projectRoot: options.projectRoot || options.repoRoot, + homeDir: options.homeDir, + }; + const targetRoot = adapter.resolveRoot(planningInput); + const installStatePath = adapter.getInstallStatePath(planningInput); + const operations = modules.flatMap(module => { + const paths = Array.isArray(module.paths) ? 
module.paths : []; + return paths.map(sourceRelativePath => adapter.createScaffoldOperation( + module.id, + sourceRelativePath, + planningInput + )); + }); + + return { + adapter: { + id: adapter.id, + target: adapter.target, + kind: adapter.kind, + }, + targetRoot, + installStatePath, + operations, + }; +} + +module.exports = { + getInstallTargetAdapter, + listInstallTargetAdapters, + planInstallTargetScaffold, +}; diff --git a/scripts/lib/install/apply.js b/scripts/lib/install/apply.js new file mode 100644 index 00000000..567d4a78 --- /dev/null +++ b/scripts/lib/install/apply.js @@ -0,0 +1,23 @@ +'use strict'; + +const fs = require('fs'); + +const { writeInstallState } = require('../install-state'); + +function applyInstallPlan(plan) { + for (const operation of plan.operations) { + fs.mkdirSync(require('path').dirname(operation.destinationPath), { recursive: true }); + fs.copyFileSync(operation.sourcePath, operation.destinationPath); + } + + writeInstallState(plan.installStatePath, plan.statePreview); + + return { + ...plan, + applied: true, + }; +} + +module.exports = { + applyInstallPlan, +}; diff --git a/scripts/lib/install/config.js b/scripts/lib/install/config.js new file mode 100644 index 00000000..3858a646 --- /dev/null +++ b/scripts/lib/install/config.js @@ -0,0 +1,82 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const Ajv = require('ajv'); + +const DEFAULT_INSTALL_CONFIG = 'ecc-install.json'; +const CONFIG_SCHEMA_PATH = path.join(__dirname, '..', '..', '..', 'schemas', 'ecc-install-config.schema.json'); + +let cachedValidator = null; + +function readJson(filePath, label) { + try { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); + } catch (error) { + throw new Error(`Invalid JSON in ${label}: ${error.message}`); + } +} + +function getValidator() { + if (cachedValidator) { + return cachedValidator; + } + + const schema = readJson(CONFIG_SCHEMA_PATH, 'ecc-install-config.schema.json'); + const ajv = new Ajv({ 
allErrors: true }); + cachedValidator = ajv.compile(schema); + return cachedValidator; +} + +function dedupeStrings(values) { + return [...new Set((Array.isArray(values) ? values : []).map(value => String(value).trim()).filter(Boolean))]; +} + +function formatValidationErrors(errors = []) { + return errors.map(error => `${error.instancePath || '/'} ${error.message}`).join('; '); +} + +function resolveInstallConfigPath(configPath, options = {}) { + if (!configPath) { + throw new Error('An install config path is required'); + } + + const cwd = options.cwd || process.cwd(); + return path.isAbsolute(configPath) + ? configPath + : path.resolve(cwd, configPath); +} + +function loadInstallConfig(configPath, options = {}) { + const resolvedPath = resolveInstallConfigPath(configPath, options); + + if (!fs.existsSync(resolvedPath)) { + throw new Error(`Install config not found: ${resolvedPath}`); + } + + const raw = readJson(resolvedPath, path.basename(resolvedPath)); + const validator = getValidator(); + + if (!validator(raw)) { + throw new Error( + `Invalid install config ${resolvedPath}: ${formatValidationErrors(validator.errors)}` + ); + } + + return { + path: resolvedPath, + version: raw.version, + target: raw.target || null, + profileId: raw.profile || null, + moduleIds: dedupeStrings(raw.modules), + includeComponentIds: dedupeStrings(raw.include), + excludeComponentIds: dedupeStrings(raw.exclude), + options: raw.options && typeof raw.options === 'object' ? { ...raw.options } : {}, + }; +} + +module.exports = { + DEFAULT_INSTALL_CONFIG, + loadInstallConfig, + resolveInstallConfigPath, +}; diff --git a/scripts/lib/install/request.js b/scripts/lib/install/request.js new file mode 100644 index 00000000..29dffe0d --- /dev/null +++ b/scripts/lib/install/request.js @@ -0,0 +1,113 @@ +'use strict'; + +const LEGACY_INSTALL_TARGETS = ['claude', 'cursor', 'antigravity']; + +function dedupeStrings(values) { + return [...new Set((Array.isArray(values) ? 
values : []).map(value => String(value).trim()).filter(Boolean))]; +} + +function parseInstallArgs(argv) { + const args = argv.slice(2); + const parsed = { + target: null, + dryRun: false, + json: false, + help: false, + configPath: null, + profileId: null, + moduleIds: [], + includeComponentIds: [], + excludeComponentIds: [], + languages: [], + }; + + for (let index = 0; index < args.length; index += 1) { + const arg = args[index]; + + if (arg === '--target') { + parsed.target = args[index + 1] || null; + index += 1; + } else if (arg === '--config') { + parsed.configPath = args[index + 1] || null; + index += 1; + } else if (arg === '--profile') { + parsed.profileId = args[index + 1] || null; + index += 1; + } else if (arg === '--modules') { + const raw = args[index + 1] || ''; + parsed.moduleIds = raw.split(',').map(value => value.trim()).filter(Boolean); + index += 1; + } else if (arg === '--with') { + const componentId = args[index + 1] || ''; + if (componentId.trim()) { + parsed.includeComponentIds.push(componentId.trim()); + } + index += 1; + } else if (arg === '--without') { + const componentId = args[index + 1] || ''; + if (componentId.trim()) { + parsed.excludeComponentIds.push(componentId.trim()); + } + index += 1; + } else if (arg === '--dry-run') { + parsed.dryRun = true; + } else if (arg === '--json') { + parsed.json = true; + } else if (arg === '--help' || arg === '-h') { + parsed.help = true; + } else if (arg.startsWith('--')) { + throw new Error(`Unknown argument: ${arg}`); + } else { + parsed.languages.push(arg); + } + } + + return parsed; +} + +function normalizeInstallRequest(options = {}) { + const config = options.config && typeof options.config === 'object' + ? 
options.config + : null; + const profileId = options.profileId || config?.profileId || null; + const moduleIds = dedupeStrings([...(config?.moduleIds || []), ...(options.moduleIds || [])]); + const includeComponentIds = dedupeStrings([ + ...(config?.includeComponentIds || []), + ...(options.includeComponentIds || []), + ]); + const excludeComponentIds = dedupeStrings([ + ...(config?.excludeComponentIds || []), + ...(options.excludeComponentIds || []), + ]); + const languages = Array.isArray(options.languages) ? [...options.languages] : []; + const target = options.target || config?.target || 'claude'; + const hasManifestBaseSelection = Boolean(profileId) || moduleIds.length > 0 || includeComponentIds.length > 0; + const usingManifestMode = hasManifestBaseSelection || excludeComponentIds.length > 0; + + if (usingManifestMode && languages.length > 0) { + throw new Error( + 'Legacy language arguments cannot be combined with --profile, --modules, --with, --without, or manifest config selections' + ); + } + + if (!options.help && !hasManifestBaseSelection && languages.length === 0) { + throw new Error('No install profile, module IDs, included components, or legacy languages were provided'); + } + + return { + mode: usingManifestMode ? 
'manifest' : 'legacy', + target, + profileId, + moduleIds, + includeComponentIds, + excludeComponentIds, + languages, + configPath: config?.path || options.configPath || null, + }; +} + +module.exports = { + LEGACY_INSTALL_TARGETS, + normalizeInstallRequest, + parseInstallArgs, +}; diff --git a/scripts/lib/install/runtime.js b/scripts/lib/install/runtime.js new file mode 100644 index 00000000..4770b2f5 --- /dev/null +++ b/scripts/lib/install/runtime.js @@ -0,0 +1,42 @@ +'use strict'; + +const { + createLegacyInstallPlan, + createManifestInstallPlan, +} = require('../install-executor'); + +function createInstallPlanFromRequest(request, options = {}) { + if (!request || typeof request !== 'object') { + throw new Error('A normalized install request is required'); + } + + if (request.mode === 'manifest') { + return createManifestInstallPlan({ + target: request.target, + profileId: request.profileId, + moduleIds: request.moduleIds, + includeComponentIds: request.includeComponentIds, + excludeComponentIds: request.excludeComponentIds, + projectRoot: options.projectRoot, + homeDir: options.homeDir, + sourceRoot: options.sourceRoot, + }); + } + + if (request.mode === 'legacy') { + return createLegacyInstallPlan({ + target: request.target, + languages: request.languages, + projectRoot: options.projectRoot, + homeDir: options.homeDir, + claudeRulesDir: options.claudeRulesDir, + sourceRoot: options.sourceRoot, + }); + } + + throw new Error(`Unsupported install request mode: ${request.mode}`); +} + +module.exports = { + createInstallPlanFromRequest, +}; diff --git a/scripts/lib/orchestration-session.js b/scripts/lib/orchestration-session.js index f544714d..9449020f 100644 --- a/scripts/lib/orchestration-session.js +++ b/scripts/lib/orchestration-session.js @@ -154,7 +154,8 @@ function loadWorkerSnapshots(coordinationDir) { }); } -function listTmuxPanes(sessionName) { +function listTmuxPanes(sessionName, options = {}) { + const { spawnSyncImpl = spawnSync } = options; const 
format = [ '#{pane_id}', '#{window_index}', @@ -167,12 +168,15 @@ function listTmuxPanes(sessionName) { '#{pane_pid}' ].join('\t'); - const result = spawnSync('tmux', ['list-panes', '-t', sessionName, '-F', format], { + const result = spawnSyncImpl('tmux', ['list-panes', '-t', sessionName, '-F', format], { encoding: 'utf8', stdio: ['ignore', 'pipe', 'pipe'] }); if (result.error) { + if (result.error.code === 'ENOENT') { + return []; + } throw result.error; } diff --git a/scripts/lib/session-adapters/canonical-session.js b/scripts/lib/session-adapters/canonical-session.js new file mode 100644 index 00000000..76cbcdea --- /dev/null +++ b/scripts/lib/session-adapters/canonical-session.js @@ -0,0 +1,138 @@ +'use strict'; + +const path = require('path'); + +const SESSION_SCHEMA_VERSION = 'ecc.session.v1'; + +function buildAggregates(workers) { + const states = workers.reduce((accumulator, worker) => { + const state = worker.state || 'unknown'; + accumulator[state] = (accumulator[state] || 0) + 1; + return accumulator; + }, {}); + + return { + workerCount: workers.length, + states + }; +} + +function deriveDmuxSessionState(snapshot) { + if (snapshot.sessionActive) { + return 'active'; + } + + if (snapshot.workerCount > 0) { + return 'idle'; + } + + return 'missing'; +} + +function normalizeDmuxSnapshot(snapshot, sourceTarget) { + const workers = (snapshot.workers || []).map(worker => ({ + id: worker.workerSlug, + label: worker.workerSlug, + state: worker.status.state || 'unknown', + branch: worker.status.branch || null, + worktree: worker.status.worktree || null, + runtime: { + kind: 'tmux-pane', + command: worker.pane ? worker.pane.currentCommand || null : null, + pid: worker.pane ? worker.pane.pid || null : null, + active: worker.pane ? Boolean(worker.pane.active) : false, + dead: worker.pane ? Boolean(worker.pane.dead) : false, + }, + intent: { + objective: worker.task.objective || '', + seedPaths: Array.isArray(worker.task.seedPaths) ? 
worker.task.seedPaths : [] + }, + outputs: { + summary: Array.isArray(worker.handoff.summary) ? worker.handoff.summary : [], + validation: Array.isArray(worker.handoff.validation) ? worker.handoff.validation : [], + remainingRisks: Array.isArray(worker.handoff.remainingRisks) ? worker.handoff.remainingRisks : [] + }, + artifacts: { + statusFile: worker.files.status, + taskFile: worker.files.task, + handoffFile: worker.files.handoff + } + })); + + return { + schemaVersion: SESSION_SCHEMA_VERSION, + adapterId: 'dmux-tmux', + session: { + id: snapshot.sessionName, + kind: 'orchestrated', + state: deriveDmuxSessionState(snapshot), + repoRoot: snapshot.repoRoot || null, + sourceTarget + }, + workers, + aggregates: buildAggregates(workers) + }; +} + +function deriveClaudeWorkerId(session) { + if (session.shortId && session.shortId !== 'no-id') { + return session.shortId; + } + + return path.basename(session.filename || session.sessionPath || 'session', '.tmp'); +} + +function normalizeClaudeHistorySession(session, sourceTarget) { + const metadata = session.metadata || {}; + const workerId = deriveClaudeWorkerId(session); + const worker = { + id: workerId, + label: metadata.title || session.filename || workerId, + state: 'recorded', + branch: metadata.branch || null, + worktree: metadata.worktree || null, + runtime: { + kind: 'claude-session', + command: 'claude', + pid: null, + active: false, + dead: true, + }, + intent: { + objective: metadata.inProgress && metadata.inProgress.length > 0 + ? metadata.inProgress[0] + : (metadata.title || ''), + seedPaths: [] + }, + outputs: { + summary: Array.isArray(metadata.completed) ? metadata.completed : [], + validation: [], + remainingRisks: metadata.notes ? 
[metadata.notes] : [] + }, + artifacts: { + sessionFile: session.sessionPath, + context: metadata.context || null + } + }; + + return { + schemaVersion: SESSION_SCHEMA_VERSION, + adapterId: 'claude-history', + session: { + id: workerId, + kind: 'history', + state: 'recorded', + repoRoot: metadata.worktree || null, + sourceTarget + }, + workers: [worker], + aggregates: buildAggregates([worker]) + }; +} + +module.exports = { + SESSION_SCHEMA_VERSION, + buildAggregates, + normalizeClaudeHistorySession, + normalizeDmuxSnapshot +}; diff --git a/scripts/lib/session-adapters/claude-history.js b/scripts/lib/session-adapters/claude-history.js new file mode 100644 index 00000000..60641df3 --- /dev/null +++ b/scripts/lib/session-adapters/claude-history.js @@ -0,0 +1,147 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const sessionManager = require('../session-manager'); +const sessionAliases = require('../session-aliases'); +const { normalizeClaudeHistorySession } = require('./canonical-session'); + +function parseClaudeTarget(target) { + if (typeof target !== 'string') { + return null; + } + + for (const prefix of ['claude-history:', 'claude:', 'history:']) { + if (target.startsWith(prefix)) { + return target.slice(prefix.length).trim(); + } + } + + return null; +} + +function isSessionFileTarget(target, cwd) { + if (typeof target !== 'string' || target.length === 0) { + return false; + } + + const absoluteTarget = path.resolve(cwd, target); + return fs.existsSync(absoluteTarget) + && fs.statSync(absoluteTarget).isFile() + && absoluteTarget.endsWith('.tmp'); +} + +function hydrateSessionFromPath(sessionPath) { + const filename = path.basename(sessionPath); + const parsed = sessionManager.parseSessionFilename(filename); + if (!parsed) { + throw new Error(`Unsupported session file: ${sessionPath}`); + } + + const content = sessionManager.getSessionContent(sessionPath); + const stats = fs.statSync(sessionPath); + + return { + ...parsed, + 
sessionPath, + content, + metadata: sessionManager.parseSessionMetadata(content), + stats: sessionManager.getSessionStats(content || ''), + size: stats.size, + modifiedTime: stats.mtime, + createdTime: stats.birthtime || stats.ctime + }; +} + +function resolveSessionRecord(target, cwd) { + const explicitTarget = parseClaudeTarget(target); + + if (explicitTarget) { + if (explicitTarget === 'latest') { + const [latest] = sessionManager.getAllSessions({ limit: 1 }).sessions; + if (!latest) { + throw new Error('No Claude session history found'); + } + + return { + session: sessionManager.getSessionById(latest.filename, true), + sourceTarget: { + type: 'claude-history', + value: 'latest' + } + }; + } + + const alias = sessionAliases.resolveAlias(explicitTarget); + if (alias) { + return { + session: hydrateSessionFromPath(alias.sessionPath), + sourceTarget: { + type: 'claude-alias', + value: explicitTarget + } + }; + } + + const session = sessionManager.getSessionById(explicitTarget, true); + if (!session) { + throw new Error(`Claude session not found: ${explicitTarget}`); + } + + return { + session, + sourceTarget: { + type: 'claude-history', + value: explicitTarget + } + }; + } + + if (isSessionFileTarget(target, cwd)) { + return { + session: hydrateSessionFromPath(path.resolve(cwd, target)), + sourceTarget: { + type: 'session-file', + value: path.resolve(cwd, target) + } + }; + } + + throw new Error(`Unsupported Claude session target: ${target}`); +} + +function createClaudeHistoryAdapter() { + return { + id: 'claude-history', + canOpen(target, context = {}) { + if (context.adapterId && context.adapterId !== 'claude-history') { + return false; + } + + if (context.adapterId === 'claude-history') { + return true; + } + + const cwd = context.cwd || process.cwd(); + return parseClaudeTarget(target) !== null || isSessionFileTarget(target, cwd); + }, + open(target, context = {}) { + const cwd = context.cwd || process.cwd(); + + return { + adapterId: 'claude-history', + 
getSnapshot() { + const { session, sourceTarget } = resolveSessionRecord(target, cwd); + return normalizeClaudeHistorySession(session, sourceTarget); + } + }; + } + }; +} + +module.exports = { + createClaudeHistoryAdapter, + isSessionFileTarget, + parseClaudeTarget +}; diff --git a/scripts/lib/session-adapters/dmux-tmux.js b/scripts/lib/session-adapters/dmux-tmux.js new file mode 100644 index 00000000..43b5be95 --- /dev/null +++ b/scripts/lib/session-adapters/dmux-tmux.js @@ -0,0 +1,78 @@ +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const { collectSessionSnapshot } = require('../orchestration-session'); +const { normalizeDmuxSnapshot } = require('./canonical-session'); + +function isPlanFileTarget(target, cwd) { + if (typeof target !== 'string' || target.length === 0) { + return false; + } + + const absoluteTarget = path.resolve(cwd, target); + return fs.existsSync(absoluteTarget) + && fs.statSync(absoluteTarget).isFile() + && path.extname(absoluteTarget) === '.json'; +} + +function isSessionNameTarget(target, cwd) { + if (typeof target !== 'string' || target.length === 0) { + return false; + } + + const coordinationDir = path.resolve(cwd, '.claude', 'orchestration', target); + return fs.existsSync(coordinationDir) && fs.statSync(coordinationDir).isDirectory(); +} + +function buildSourceTarget(target, cwd) { + if (isPlanFileTarget(target, cwd)) { + return { + type: 'plan', + value: path.resolve(cwd, target) + }; + } + + return { + type: 'session', + value: target + }; +} + +function createDmuxTmuxAdapter(options = {}) { + const collectSessionSnapshotImpl = options.collectSessionSnapshotImpl || collectSessionSnapshot; + + return { + id: 'dmux-tmux', + canOpen(target, context = {}) { + if (context.adapterId && context.adapterId !== 'dmux-tmux') { + return false; + } + + if (context.adapterId === 'dmux-tmux') { + return true; + } + + const cwd = context.cwd || process.cwd(); + return isPlanFileTarget(target, cwd) || 
isSessionNameTarget(target, cwd); + }, + open(target, context = {}) { + const cwd = context.cwd || process.cwd(); + + return { + adapterId: 'dmux-tmux', + getSnapshot() { + const snapshot = collectSessionSnapshotImpl(target, cwd); + return normalizeDmuxSnapshot(snapshot, buildSourceTarget(target, cwd)); + } + }; + } + }; +} + +module.exports = { + createDmuxTmuxAdapter, + isPlanFileTarget, + isSessionNameTarget +}; diff --git a/scripts/lib/session-adapters/registry.js b/scripts/lib/session-adapters/registry.js new file mode 100644 index 00000000..d3a2fc4f --- /dev/null +++ b/scripts/lib/session-adapters/registry.js @@ -0,0 +1,42 @@ +'use strict'; + +const { createClaudeHistoryAdapter } = require('./claude-history'); +const { createDmuxTmuxAdapter } = require('./dmux-tmux'); + +function createDefaultAdapters() { + return [ + createClaudeHistoryAdapter(), + createDmuxTmuxAdapter() + ]; +} + +function createAdapterRegistry(options = {}) { + const adapters = options.adapters || createDefaultAdapters(); + + return { + adapters, + select(target, context = {}) { + const adapter = adapters.find(candidate => candidate.canOpen(target, context)); + if (!adapter) { + throw new Error(`No session adapter matched target: ${target}`); + } + + return adapter; + }, + open(target, context = {}) { + const adapter = this.select(target, context); + return adapter.open(target, context); + } + }; +} + +function inspectSessionTarget(target, options = {}) { + const registry = createAdapterRegistry(options); + return registry.open(target, options).getSnapshot(); +} + +module.exports = { + createAdapterRegistry, + createDefaultAdapters, + inspectSessionTarget +}; diff --git a/scripts/list-installed.js b/scripts/list-installed.js new file mode 100644 index 00000000..4803f965 --- /dev/null +++ b/scripts/list-installed.js @@ -0,0 +1,90 @@ +#!/usr/bin/env node + +const { discoverInstalledStates } = require('./lib/install-lifecycle'); +const { SUPPORTED_INSTALL_TARGETS } = 
require('./lib/install-manifests'); + +function showHelp(exitCode = 0) { + console.log(` +Usage: node scripts/list-installed.js [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--json] + +Inspect ECC install-state files for the current home/project context. +`); + process.exit(exitCode); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const parsed = { + targets: [], + json: false, + help: false, + }; + + for (let index = 0; index < args.length; index += 1) { + const arg = args[index]; + + if (arg === '--target') { + parsed.targets.push(args[index + 1] || null); + index += 1; + } else if (arg === '--json') { + parsed.json = true; + } else if (arg === '--help' || arg === '-h') { + parsed.help = true; + } else { + throw new Error(`Unknown argument: ${arg}`); + } + } + + return parsed; +} + +function printHuman(records) { + if (records.length === 0) { + console.log('No ECC install-state files found for the current home/project context.'); + return; + } + + console.log('Installed ECC targets:\n'); + for (const record of records) { + if (record.error) { + console.log(`- ${record.adapter.id}: INVALID (${record.error})`); + continue; + } + + const state = record.state; + console.log(`- ${record.adapter.id}`); + console.log(` Root: ${state.target.root}`); + console.log(` Installed: ${state.installedAt}`); + console.log(` Profile: ${state.request.profile || '(legacy/custom)'}`); + console.log(` Modules: ${(state.resolution.selectedModules || []).join(', ') || '(none)'}`); + console.log(` Legacy languages: ${(state.request.legacyLanguages || []).join(', ') || '(none)'}`); + console.log(` Source version: ${state.source.repoVersion || '(unknown)'}`); + } +} + +function main() { + try { + const options = parseArgs(process.argv); + if (options.help) { + showHelp(0); + } + + const records = discoverInstalledStates({ + homeDir: process.env.HOME, + projectRoot: process.cwd(), + targets: options.targets, + }).filter(record => record.exists); + + if (options.json) 
{ + console.log(JSON.stringify({ records }, null, 2)); + return; + } + + printHuman(records); + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/orchestration-status.js b/scripts/orchestration-status.js index 309a9f3d..33aaa688 100644 --- a/scripts/orchestration-status.js +++ b/scripts/orchestration-status.js @@ -4,7 +4,7 @@ const fs = require('fs'); const path = require('path'); -const { collectSessionSnapshot } = require('./lib/orchestration-session'); +const { inspectSessionTarget } = require('./lib/session-adapters/registry'); function usage() { console.log([ @@ -35,7 +35,10 @@ function main() { process.exit(1); } - const snapshot = collectSessionSnapshot(target, process.cwd()); + const snapshot = inspectSessionTarget(target, { + cwd: process.cwd(), + adapterId: 'dmux-tmux' + }); const json = JSON.stringify(snapshot, null, 2); if (writePath) { diff --git a/scripts/repair.js b/scripts/repair.js new file mode 100644 index 00000000..382cfa4d --- /dev/null +++ b/scripts/repair.js @@ -0,0 +1,97 @@ +#!/usr/bin/env node + +const { repairInstalledStates } = require('./lib/install-lifecycle'); +const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests'); + +function showHelp(exitCode = 0) { + console.log(` +Usage: node scripts/repair.js [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] + +Rebuild ECC-managed files recorded in install-state for the current context. 
+`); + process.exit(exitCode); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const parsed = { + targets: [], + dryRun: false, + json: false, + help: false, + }; + + for (let index = 0; index < args.length; index += 1) { + const arg = args[index]; + + if (arg === '--target') { + parsed.targets.push(args[index + 1] || null); + index += 1; + } else if (arg === '--dry-run') { + parsed.dryRun = true; + } else if (arg === '--json') { + parsed.json = true; + } else if (arg === '--help' || arg === '-h') { + parsed.help = true; + } else { + throw new Error(`Unknown argument: ${arg}`); + } + } + + return parsed; +} + +function printHuman(result) { + if (result.results.length === 0) { + console.log('No ECC install-state files found for the current home/project context.'); + return; + } + + console.log('Repair summary:\n'); + for (const entry of result.results) { + console.log(`- ${entry.adapter.id}`); + console.log(` Status: ${entry.status.toUpperCase()}`); + console.log(` Install-state: ${entry.installStatePath}`); + + if (entry.error) { + console.log(` Error: ${entry.error}`); + continue; + } + + const paths = result.dryRun ? entry.plannedRepairs : entry.repairedPaths; + console.log(` ${result.dryRun ? 'Planned repairs' : 'Repaired paths'}: ${paths.length}`); + } + + console.log(`\nSummary: checked=${result.summary.checkedCount}, ${result.dryRun ? 'planned' : 'repaired'}=${result.dryRun ? 
result.summary.plannedRepairCount : result.summary.repairedCount}, errors=${result.summary.errorCount}`); +} + +function main() { + try { + const options = parseArgs(process.argv); + if (options.help) { + showHelp(0); + } + + const result = repairInstalledStates({ + repoRoot: require('path').join(__dirname, '..'), + homeDir: process.env.HOME, + projectRoot: process.cwd(), + targets: options.targets, + dryRun: options.dryRun, + }); + const hasErrors = result.summary.errorCount > 0; + + if (options.json) { + console.log(JSON.stringify(result, null, 2)); + } else { + printHuman(result); + } + + process.exitCode = hasErrors ? 1 : 0; + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/scripts/session-inspect.js b/scripts/session-inspect.js new file mode 100644 index 00000000..9e658998 --- /dev/null +++ b/scripts/session-inspect.js @@ -0,0 +1,77 @@ +#!/usr/bin/env node +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const { inspectSessionTarget } = require('./lib/session-adapters/registry'); + +function usage() { + console.log([ + 'Usage:', + ' node scripts/session-inspect.js [--adapter ] [--write ]', + '', + 'Targets:', + ' Dmux/orchestration plan file', + ' Dmux session name when the coordination directory exists', + ' claude:latest Most recent Claude session history entry', + ' claude: Specific Claude session or alias', + ' Direct path to a Claude session file', + '', + 'Examples:', + ' node scripts/session-inspect.js .claude/plan/workflow.json', + ' node scripts/session-inspect.js workflow-visual-proof', + ' node scripts/session-inspect.js claude:latest', + ' node scripts/session-inspect.js claude:a1b2c3d4 --write /tmp/session.json' + ].join('\n')); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const target = args.find(argument => !argument.startsWith('--')); + + const adapterIndex = args.indexOf('--adapter'); + const adapterId = adapterIndex >= 0 ? 
args[adapterIndex + 1] : null; + + const writeIndex = args.indexOf('--write'); + const writePath = writeIndex >= 0 ? args[writeIndex + 1] : null; + + return { target, adapterId, writePath }; +} + +function main() { + const { target, adapterId, writePath } = parseArgs(process.argv); + + if (!target) { + usage(); + process.exit(1); + } + + const snapshot = inspectSessionTarget(target, { + cwd: process.cwd(), + adapterId + }); + const payload = JSON.stringify(snapshot, null, 2); + + if (writePath) { + const absoluteWritePath = path.resolve(writePath); + fs.mkdirSync(path.dirname(absoluteWritePath), { recursive: true }); + fs.writeFileSync(absoluteWritePath, payload + '\n', 'utf8'); + } + + console.log(payload); +} + +if (require.main === module) { + try { + main(); + } catch (error) { + console.error(`[session-inspect] ${error.message}`); + process.exit(1); + } +} + +module.exports = { + main, + parseArgs +}; diff --git a/scripts/uninstall.js b/scripts/uninstall.js new file mode 100644 index 00000000..6456c406 --- /dev/null +++ b/scripts/uninstall.js @@ -0,0 +1,96 @@ +#!/usr/bin/env node + +const { uninstallInstalledStates } = require('./lib/install-lifecycle'); +const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests'); + +function showHelp(exitCode = 0) { + console.log(` +Usage: node scripts/uninstall.js [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] + +Remove ECC-managed files recorded in install-state for the current context. 
+`); + process.exit(exitCode); +} + +function parseArgs(argv) { + const args = argv.slice(2); + const parsed = { + targets: [], + dryRun: false, + json: false, + help: false, + }; + + for (let index = 0; index < args.length; index += 1) { + const arg = args[index]; + + if (arg === '--target') { + parsed.targets.push(args[index + 1] || null); + index += 1; + } else if (arg === '--dry-run') { + parsed.dryRun = true; + } else if (arg === '--json') { + parsed.json = true; + } else if (arg === '--help' || arg === '-h') { + parsed.help = true; + } else { + throw new Error(`Unknown argument: ${arg}`); + } + } + + return parsed; +} + +function printHuman(result) { + if (result.results.length === 0) { + console.log('No ECC install-state files found for the current home/project context.'); + return; + } + + console.log('Uninstall summary:\n'); + for (const entry of result.results) { + console.log(`- ${entry.adapter.id}`); + console.log(` Status: ${entry.status.toUpperCase()}`); + console.log(` Install-state: ${entry.installStatePath}`); + + if (entry.error) { + console.log(` Error: ${entry.error}`); + continue; + } + + const paths = result.dryRun ? entry.plannedRemovals : entry.removedPaths; + console.log(` ${result.dryRun ? 'Planned removals' : 'Removed paths'}: ${paths.length}`); + } + + console.log(`\nSummary: checked=${result.summary.checkedCount}, ${result.dryRun ? 'planned' : 'uninstalled'}=${result.dryRun ? 
result.summary.plannedRemovalCount : result.summary.uninstalledCount}, errors=${result.summary.errorCount}`); +} + +function main() { + try { + const options = parseArgs(process.argv); + if (options.help) { + showHelp(0); + } + + const result = uninstallInstalledStates({ + homeDir: process.env.HOME, + projectRoot: process.cwd(), + targets: options.targets, + dryRun: options.dryRun, + }); + const hasErrors = result.summary.errorCount > 0; + + if (options.json) { + console.log(JSON.stringify(result, null, 2)); + } else { + printHuman(result); + } + + process.exitCode = hasErrors ? 1 : 0; + } catch (error) { + console.error(`Error: ${error.message}`); + process.exit(1); + } +} + +main(); diff --git a/skills/configure-ecc/SKILL.md b/skills/configure-ecc/SKILL.md index a762868b..53fe74a9 100644 --- a/skills/configure-ecc/SKILL.md +++ b/skills/configure-ecc/SKILL.md @@ -82,7 +82,7 @@ If the user chooses niche or core + niche, continue to category selection below ### 2b: Choose Skill Categories -There are 35 skills organized into 7 categories. Use `AskUserQuestion` with `multiSelect: true`: +There are 7 selectable category groups below. The detailed confirmation lists that follow cover 41 skills across 8 categories, plus 1 standalone template. Use `AskUserQuestion` with `multiSelect: true`: ``` Question: "Which skill categories do you want to install?" 
diff --git a/skills/continuous-learning-v2/agents/observer-loop.sh b/skills/continuous-learning-v2/agents/observer-loop.sh index f4aca82b..a0f655dd 100755 --- a/skills/continuous-learning-v2/agents/observer-loop.sh +++ b/skills/continuous-learning-v2/agents/observer-loop.sh @@ -91,7 +91,8 @@ PROMPT max_turns=10 fi - claude --model haiku --max-turns "$max_turns" --print < "$prompt_file" >> "$LOG_FILE" 2>&1 & + # Prevent observe.sh from recording this automated Haiku session as observations + ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print < "$prompt_file" >> "$LOG_FILE" 2>&1 & claude_pid=$! ( diff --git a/skills/continuous-learning-v2/hooks/observe.sh b/skills/continuous-learning-v2/hooks/observe.sh index 33ec6f04..3e1a2b93 100755 --- a/skills/continuous-learning-v2/hooks/observe.sh +++ b/skills/continuous-learning-v2/hooks/observe.sh @@ -73,6 +73,50 @@ if [ -n "$STDIN_CWD" ] && [ -d "$STDIN_CWD" ]; then export CLAUDE_PROJECT_DIR="$STDIN_CWD" fi +# ───────────────────────────────────────────── +# Lightweight config and automated session guards +# ───────────────────────────────────────────── + +CONFIG_DIR="${HOME}/.claude/homunculus" + +# Skip if disabled +if [ -f "$CONFIG_DIR/disabled" ]; then + exit 0 +fi + +# Prevent observe.sh from firing on non-human sessions to avoid: +# - ECC observing its own Haiku observer sessions (self-loop) +# - ECC observing other tools' automated sessions +# - automated sessions creating project-scoped homunculus metadata + +# Layer 1: entrypoint. Only interactive terminal sessions should continue. +case "${CLAUDE_CODE_ENTRYPOINT:-cli}" in + cli) ;; + *) exit 0 ;; +esac + +# Layer 2: minimal hook profile suppresses non-essential hooks. +[ "${ECC_HOOK_PROFILE:-standard}" = "minimal" ] && exit 0 + +# Layer 3: cooperative skip env var for automated sessions. +[ "${ECC_SKIP_OBSERVE:-0}" = "1" ] && exit 0 + +# Layer 4: subagent sessions are automated by definition. 
+_ECC_AGENT_ID=$(echo "$INPUT_JSON" | "$PYTHON_CMD" -c "import json,sys; print(json.load(sys.stdin).get('agent_id',''))" 2>/dev/null || true) +[ -n "$_ECC_AGENT_ID" ] && exit 0 + +# Layer 5: known observer-session path exclusions. +_ECC_SKIP_PATHS="${ECC_OBSERVE_SKIP_PATHS:-observer-sessions,.claude-mem}" +if [ -n "$STDIN_CWD" ]; then + IFS=',' read -ra _ECC_SKIP_ARRAY <<< "$_ECC_SKIP_PATHS" + for _pattern in "${_ECC_SKIP_ARRAY[@]}"; do + _pattern="${_pattern#"${_pattern%%[![:space:]]*}"}" + _pattern="${_pattern%"${_pattern##*[![:space:]]}"}" + [ -z "$_pattern" ] && continue + case "$STDIN_CWD" in *"$_pattern"*) exit 0 ;; esac + done +fi + # ───────────────────────────────────────────── # Project detection # ───────────────────────────────────────────── @@ -89,15 +133,9 @@ PYTHON_CMD="${CLV2_PYTHON_CMD:-$PYTHON_CMD}" # Configuration # ───────────────────────────────────────────── -CONFIG_DIR="${HOME}/.claude/homunculus" OBSERVATIONS_FILE="${PROJECT_DIR}/observations.jsonl" MAX_FILE_SIZE_MB=10 -# Skip if disabled -if [ -f "$CONFIG_DIR/disabled" ]; then - exit 0 -fi - # Auto-purge observation files older than 30 days (runs once per session) PURGE_MARKER="${PROJECT_DIR}/.last-purge" if [ ! -f "$PURGE_MARKER" ] || [ "$(find "$PURGE_MARKER" -mtime +1 2>/dev/null)" ]; then diff --git a/skills/crosspost/SKILL.md b/skills/crosspost/SKILL.md index 81e2a29d..da07ec80 100644 --- a/skills/crosspost/SKILL.md +++ b/skills/crosspost/SKILL.md @@ -161,8 +161,10 @@ resp = requests.post( "linkedin": {"text": linkedin_version}, "threads": {"text": threads_version} } - } + }, + timeout=30, ) +resp.raise_for_status() ``` ### Manual Posting diff --git a/skills/exa-search/SKILL.md b/skills/exa-search/SKILL.md index f3cffb8d..567dc944 100644 --- a/skills/exa-search/SKILL.md +++ b/skills/exa-search/SKILL.md @@ -24,17 +24,14 @@ Exa MCP server must be configured. 
Add to `~/.claude.json`: ```json "exa-web-search": { "command": "npx", - "args": [ - "-y", - "exa-mcp-server", - "tools=web_search_exa,get_code_context_exa,crawling_exa,company_research_exa,linkedin_search_exa,deep_researcher_start,deep_researcher_check" - ], + "args": ["-y", "exa-mcp-server"], "env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" } } ``` Get an API key at [exa.ai](https://exa.ai). -If you omit the `tools=...` argument, only a smaller default tool set may be enabled. +This repo's current Exa setup documents the tool surface exposed here: `web_search_exa` and `get_code_context_exa`. +If your Exa server exposes additional tools, verify their exact names before depending on them in docs or prompts. ## Core Tools @@ -51,29 +48,9 @@ web_search_exa(query: "latest AI developments 2026", numResults: 5) |-------|------|---------|-------| | `query` | string | required | Search query | | `numResults` | number | 8 | Number of results | - -### web_search_advanced_exa -Filtered search with domain and date constraints. - -``` -web_search_advanced_exa( - query: "React Server Components best practices", - numResults: 5, - includeDomains: ["github.com", "react.dev"], - startPublishedDate: "2025-01-01" -) -``` - -**Parameters:** - -| Param | Type | Default | Notes | -|-------|------|---------|-------| -| `query` | string | required | Search query | -| `numResults` | number | 8 | Number of results | -| `includeDomains` | string[] | none | Limit to specific domains | -| `excludeDomains` | string[] | none | Exclude specific domains | -| `startPublishedDate` | string | none | ISO date filter (start) | -| `endPublishedDate` | string | none | ISO date filter (end) | +| `type` | string | `auto` | Search mode | +| `livecrawl` | string | `fallback` | Prefer live crawling when needed | +| `category` | string | none | Optional focus such as `company` or `research paper` | ### get_code_context_exa Find code examples and documentation from GitHub, Stack Overflow, and docs sites. 
@@ -89,52 +66,6 @@ get_code_context_exa(query: "Python asyncio patterns", tokensNum: 3000) | `query` | string | required | Code or API search query | | `tokensNum` | number | 5000 | Content tokens (1000-50000) | -### company_research_exa -Research companies for business intelligence and news. - -``` -company_research_exa(companyName: "Anthropic", numResults: 5) -``` - -**Parameters:** - -| Param | Type | Default | Notes | -|-------|------|---------|-------| -| `companyName` | string | required | Company name | -| `numResults` | number | 5 | Number of results | - -### linkedin_search_exa -Find professional profiles and company-adjacent people research. - -``` -linkedin_search_exa(query: "AI safety researchers at Anthropic", numResults: 5) -``` - -### crawling_exa -Extract full page content from a URL. - -``` -crawling_exa(url: "https://example.com/article", tokensNum: 5000) -``` - -**Parameters:** - -| Param | Type | Default | Notes | -|-------|------|---------|-------| -| `url` | string | required | URL to extract | -| `tokensNum` | number | 5000 | Content tokens | - -### deep_researcher_start / deep_researcher_check -Start an AI research agent that runs asynchronously. 
- -``` -# Start research -deep_researcher_start(query: "comprehensive analysis of AI code editors in 2026") - -# Check status (returns results when complete) -deep_researcher_check(researchId: "") -``` - ## Usage Patterns ### Quick Lookup @@ -147,27 +78,24 @@ web_search_exa(query: "Node.js 22 new features", numResults: 3) get_code_context_exa(query: "Rust error handling patterns Result type", tokensNum: 3000) ``` -### Company Due Diligence +### Company or People Research ``` -company_research_exa(companyName: "Vercel", numResults: 5) -web_search_advanced_exa(query: "Vercel funding valuation 2026", numResults: 3) +web_search_exa(query: "Vercel funding valuation 2026", numResults: 3, category: "company") +web_search_exa(query: "site:linkedin.com/in AI safety researchers Anthropic", numResults: 5) ``` ### Technical Deep Dive ``` -# Start async research -deep_researcher_start(query: "WebAssembly component model status and adoption") -# ... do other work ... -deep_researcher_check(researchId: "") +web_search_exa(query: "WebAssembly component model status and adoption", numResults: 5) +get_code_context_exa(query: "WebAssembly component model examples", tokensNum: 4000) ``` ## Tips -- Use `web_search_exa` for broad queries, `web_search_advanced_exa` for filtered results +- Use `web_search_exa` for current information, company lookups, and broad discovery +- Use search operators like `site:`, quoted phrases, and `intitle:` to narrow results - Lower `tokensNum` (1000-2000) for focused code snippets, higher (5000+) for comprehensive context -- Combine `company_research_exa` with `web_search_advanced_exa` for thorough company analysis -- Use `crawling_exa` to get full content from specific URLs found in search results -- `deep_researcher_start` is best for comprehensive topics that benefit from AI synthesis +- Use `get_code_context_exa` when you need API usage or code examples rather than general web pages ## Related Skills diff --git a/skills/fal-ai-media/SKILL.md 
b/skills/fal-ai-media/SKILL.md index 254be99e..ffe701d1 100644 --- a/skills/fal-ai-media/SKILL.md +++ b/skills/fal-ai-media/SKILL.md @@ -253,7 +253,7 @@ estimate_cost( estimate_type: "unit_price", endpoints: { "fal-ai/nano-banana-pro": { - "num_images": 1 + "unit_quantity": 1 } } ) diff --git a/tests/ci/validators.test.js b/tests/ci/validators.test.js index f016a538..ca52941b 100644 --- a/tests/ci/validators.test.js +++ b/tests/ci/validators.test.js @@ -14,6 +14,10 @@ const os = require('os'); const { execFileSync } = require('child_process'); const validatorsDir = path.join(__dirname, '..', '..', 'scripts', 'ci'); +const repoRoot = path.join(__dirname, '..', '..'); +const modulesSchemaPath = path.join(repoRoot, 'schemas', 'install-modules.schema.json'); +const profilesSchemaPath = path.join(repoRoot, 'schemas', 'install-profiles.schema.json'); +const componentsSchemaPath = path.join(repoRoot, 'schemas', 'install-components.schema.json'); // Test helpers function test(name, fn) { @@ -36,6 +40,18 @@ function cleanupTestDir(testDir) { fs.rmSync(testDir, { recursive: true, force: true }); } +function writeJson(filePath, value) { + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, JSON.stringify(value, null, 2)); +} + +function writeInstallComponentsManifest(testDir, components) { + writeJson(path.join(testDir, 'manifests', 'install-components.json'), { + version: 1, + components, + }); +} + /** * Run a validator script via a wrapper that overrides its directory constant. * This allows testing error cases without modifying real project files. 
@@ -2164,6 +2180,369 @@ function runTests() { cleanupTestDir(testDir); })) passed++; else failed++; + // ========================================== + // validate-install-manifests.js + // ========================================== + console.log('\nvalidate-install-manifests.js:'); + + if (test('passes on real project install manifests', () => { + const result = runValidator('validate-install-manifests'); + assert.strictEqual(result.code, 0, `Should pass, got stderr: ${result.stderr}`); + assert.ok(result.stdout.includes('Validated'), 'Should output validation count'); + })) passed++; else failed++; + + if (test('exits 0 when install manifests do not exist', () => { + const testDir = createTestDir(); + const result = runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json') + }); + assert.strictEqual(result.code, 0, 'Should skip when manifests are missing'); + assert.ok(result.stdout.includes('skipping'), 'Should say skipping'); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (test('fails on invalid install manifest JSON', () => { + const testDir = createTestDir(); + const manifestsDir = path.join(testDir, 'manifests'); + fs.mkdirSync(manifestsDir, { recursive: true }); + fs.writeFileSync(path.join(manifestsDir, 'install-modules.json'), '{ invalid json'); + writeJson(path.join(manifestsDir, 'install-profiles.json'), { + version: 1, + profiles: {} + }); + + const result = runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: path.join(manifestsDir, 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(manifestsDir, 'install-profiles.json'), + COMPONENTS_MANIFEST_PATH: path.join(manifestsDir, 'install-components.json'), + MODULES_SCHEMA_PATH: modulesSchemaPath, + PROFILES_SCHEMA_PATH: profilesSchemaPath, + 
COMPONENTS_SCHEMA_PATH: componentsSchemaPath + }); + assert.strictEqual(result.code, 1, 'Should fail on invalid JSON'); + assert.ok(result.stderr.includes('Invalid JSON'), 'Should report invalid JSON'); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (test('fails when install module references a missing path', () => { + const testDir = createTestDir(); + writeJson(path.join(testDir, 'manifests', 'install-modules.json'), { + version: 1, + modules: [ + { + id: 'rules-core', + kind: 'rules', + description: 'Rules', + paths: ['rules'], + targets: ['claude'], + dependencies: [], + defaultInstall: true, + cost: 'light', + stability: 'stable' + }, + { + id: 'security', + kind: 'skills', + description: 'Security', + paths: ['skills/security-review'], + targets: ['codex'], + dependencies: [], + defaultInstall: false, + cost: 'medium', + stability: 'stable' + } + ] + }); + writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), { + version: 1, + profiles: { + core: { description: 'Core', modules: ['rules-core'] }, + developer: { description: 'Developer', modules: ['rules-core'] }, + security: { description: 'Security', modules: ['rules-core', 'security'] }, + research: { description: 'Research', modules: ['rules-core'] }, + full: { description: 'Full', modules: ['rules-core', 'security'] } + } + }); + writeInstallComponentsManifest(testDir, [ + { + id: 'baseline:rules', + family: 'baseline', + description: 'Rules', + modules: ['rules-core'] + }, + { + id: 'capability:security', + family: 'capability', + description: 'Security', + modules: ['security'] + } + ]); + fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true }); + + const result = runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'), + COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 
'install-components.json'), + MODULES_SCHEMA_PATH: modulesSchemaPath, + PROFILES_SCHEMA_PATH: profilesSchemaPath, + COMPONENTS_SCHEMA_PATH: componentsSchemaPath + }); + assert.strictEqual(result.code, 1, 'Should fail when a referenced path is missing'); + assert.ok(result.stderr.includes('references missing path'), 'Should report missing path'); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (test('fails when two install modules claim the same path', () => { + const testDir = createTestDir(); + writeJson(path.join(testDir, 'manifests', 'install-modules.json'), { + version: 1, + modules: [ + { + id: 'agents-core', + kind: 'agents', + description: 'Agents', + paths: ['agents'], + targets: ['codex'], + dependencies: [], + defaultInstall: true, + cost: 'light', + stability: 'stable' + }, + { + id: 'commands-core', + kind: 'commands', + description: 'Commands', + paths: ['agents'], + targets: ['codex'], + dependencies: [], + defaultInstall: true, + cost: 'light', + stability: 'stable' + } + ] + }); + writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), { + version: 1, + profiles: { + core: { description: 'Core', modules: ['agents-core', 'commands-core'] }, + developer: { description: 'Developer', modules: ['agents-core', 'commands-core'] }, + security: { description: 'Security', modules: ['agents-core', 'commands-core'] }, + research: { description: 'Research', modules: ['agents-core', 'commands-core'] }, + full: { description: 'Full', modules: ['agents-core', 'commands-core'] } + } + }); + writeInstallComponentsManifest(testDir, [ + { + id: 'baseline:agents', + family: 'baseline', + description: 'Agents', + modules: ['agents-core'] + }, + { + id: 'baseline:commands', + family: 'baseline', + description: 'Commands', + modules: ['commands-core'] + } + ]); + fs.mkdirSync(path.join(testDir, 'agents'), { recursive: true }); + + const result = runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: 
path.join(testDir, 'manifests', 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'), + COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'), + MODULES_SCHEMA_PATH: modulesSchemaPath, + PROFILES_SCHEMA_PATH: profilesSchemaPath, + COMPONENTS_SCHEMA_PATH: componentsSchemaPath + }); + assert.strictEqual(result.code, 1, 'Should fail on duplicate claimed paths'); + assert.ok(result.stderr.includes('claimed by both'), 'Should report duplicate path claims'); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (test('fails when an install profile references an unknown module', () => { + const testDir = createTestDir(); + writeJson(path.join(testDir, 'manifests', 'install-modules.json'), { + version: 1, + modules: [ + { + id: 'rules-core', + kind: 'rules', + description: 'Rules', + paths: ['rules'], + targets: ['claude'], + dependencies: [], + defaultInstall: true, + cost: 'light', + stability: 'stable' + } + ] + }); + writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), { + version: 1, + profiles: { + core: { description: 'Core', modules: ['rules-core'] }, + developer: { description: 'Developer', modules: ['rules-core'] }, + security: { description: 'Security', modules: ['rules-core'] }, + research: { description: 'Research', modules: ['rules-core'] }, + full: { description: 'Full', modules: ['rules-core', 'ghost-module'] } + } + }); + writeInstallComponentsManifest(testDir, [ + { + id: 'baseline:rules', + family: 'baseline', + description: 'Rules', + modules: ['rules-core'] + } + ]); + fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true }); + + const result = runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'), + COMPONENTS_MANIFEST_PATH: path.join(testDir, 
'manifests', 'install-components.json'), + MODULES_SCHEMA_PATH: modulesSchemaPath, + PROFILES_SCHEMA_PATH: profilesSchemaPath, + COMPONENTS_SCHEMA_PATH: componentsSchemaPath + }); + assert.strictEqual(result.code, 1, 'Should fail on unknown profile module'); + assert.ok(result.stderr.includes('references unknown module ghost-module'), + 'Should report unknown module reference'); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (test('passes on a valid standalone install manifest fixture', () => { + const testDir = createTestDir(); + writeJson(path.join(testDir, 'manifests', 'install-modules.json'), { + version: 1, + modules: [ + { + id: 'rules-core', + kind: 'rules', + description: 'Rules', + paths: ['rules'], + targets: ['claude'], + dependencies: [], + defaultInstall: true, + cost: 'light', + stability: 'stable' + }, + { + id: 'orchestration', + kind: 'orchestration', + description: 'Orchestration', + paths: ['scripts/orchestrate-worktrees.js'], + targets: ['codex'], + dependencies: ['rules-core'], + defaultInstall: false, + cost: 'medium', + stability: 'beta' + } + ] + }); + writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), { + version: 1, + profiles: { + core: { description: 'Core', modules: ['rules-core'] }, + developer: { description: 'Developer', modules: ['rules-core', 'orchestration'] }, + security: { description: 'Security', modules: ['rules-core'] }, + research: { description: 'Research', modules: ['rules-core'] }, + full: { description: 'Full', modules: ['rules-core', 'orchestration'] } + } + }); + writeInstallComponentsManifest(testDir, [ + { + id: 'baseline:rules', + family: 'baseline', + description: 'Rules', + modules: ['rules-core'] + }, + { + id: 'capability:orchestration', + family: 'capability', + description: 'Orchestration', + modules: ['orchestration'] + } + ]); + fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true }); + fs.mkdirSync(path.join(testDir, 'scripts'), { recursive: true }); + 
fs.writeFileSync(path.join(testDir, 'scripts', 'orchestrate-worktrees.js'), '#!/usr/bin/env node\n'); + + const result = runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'), + COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'), + MODULES_SCHEMA_PATH: modulesSchemaPath, + PROFILES_SCHEMA_PATH: profilesSchemaPath, + COMPONENTS_SCHEMA_PATH: componentsSchemaPath + }); + assert.strictEqual(result.code, 0, `Should pass valid fixture, got stderr: ${result.stderr}`); + assert.ok(result.stdout.includes('Validated 2 install modules, 2 install components, and 5 profiles'), + 'Should report validated install manifest counts'); + cleanupTestDir(testDir); + })) passed++; else failed++; + + if (test('fails when an install component references an unknown module', () => { + const testDir = createTestDir(); + writeJson(path.join(testDir, 'manifests', 'install-modules.json'), { + version: 1, + modules: [ + { + id: 'rules-core', + kind: 'rules', + description: 'Rules', + paths: ['rules'], + targets: ['claude'], + dependencies: [], + defaultInstall: true, + cost: 'light', + stability: 'stable' + } + ] + }); + writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), { + version: 1, + profiles: { + core: { description: 'Core', modules: ['rules-core'] }, + developer: { description: 'Developer', modules: ['rules-core'] }, + security: { description: 'Security', modules: ['rules-core'] }, + research: { description: 'Research', modules: ['rules-core'] }, + full: { description: 'Full', modules: ['rules-core'] } + } + }); + writeInstallComponentsManifest(testDir, [ + { + id: 'capability:security', + family: 'capability', + description: 'Security', + modules: ['ghost-module'] + } + ]); + fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true }); + + const result 
= runValidatorWithDirs('validate-install-manifests', { + REPO_ROOT: testDir, + MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'), + PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'), + COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'), + MODULES_SCHEMA_PATH: modulesSchemaPath, + PROFILES_SCHEMA_PATH: profilesSchemaPath, + COMPONENTS_SCHEMA_PATH: componentsSchemaPath + }); + assert.strictEqual(result.code, 1, 'Should fail on unknown component module'); + assert.ok(result.stderr.includes('references unknown module ghost-module'), + 'Should report unknown component module'); + cleanupTestDir(testDir); + })) passed++; else failed++; + // Summary console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); process.exit(failed > 0 ? 1 : 0); diff --git a/tests/hooks/hooks.test.js b/tests/hooks/hooks.test.js index 6e814bd3..a5c9acd8 100644 --- a/tests/hooks/hooks.test.js +++ b/tests/hooks/hooks.test.js @@ -88,9 +88,7 @@ function runShellScript(scriptPath, args = [], input = '', env = {}, cwd = proce // Create a temporary test directory function createTestDir() { - const testDir = path.join(os.tmpdir(), `hooks-test-${Date.now()}`); - fs.mkdirSync(testDir, { recursive: true }); - return testDir; + return fs.mkdtempSync(path.join(os.tmpdir(), 'hooks-test-')); } // Clean up test directory @@ -2159,6 +2157,8 @@ async function runTests() { assert.ok(observerLoopSource.includes('ECC_OBSERVER_MAX_TURNS'), 'observer-loop should allow max-turn overrides'); assert.ok(observerLoopSource.includes('max_turns="${ECC_OBSERVER_MAX_TURNS:-10}"'), 'observer-loop should default to 10 turns'); assert.ok(!observerLoopSource.includes('--max-turns 3'), 'observer-loop should not hardcode a 3-turn limit'); + assert.ok(observerLoopSource.includes('ECC_SKIP_OBSERVE=1'), 'observer-loop should suppress observe.sh for automated sessions'); + assert.ok(observerLoopSource.includes('ECC_HOOK_PROFILE=minimal'), 
'observer-loop should run automated analysis with the minimal hook profile'); }) ) passed++; @@ -2292,6 +2292,77 @@ async function runTests() { } })) passed++; else failed++; + if (await asyncTest('observe.sh skips automated sessions before project detection side effects', async () => { + const observePath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh'); + const cases = [ + { + name: 'non-cli entrypoint', + env: { CLAUDE_CODE_ENTRYPOINT: 'mcp' } + }, + { + name: 'minimal hook profile', + env: { CLAUDE_CODE_ENTRYPOINT: 'cli', ECC_HOOK_PROFILE: 'minimal' } + }, + { + name: 'cooperative skip env', + env: { CLAUDE_CODE_ENTRYPOINT: 'cli', ECC_SKIP_OBSERVE: '1' } + }, + { + name: 'subagent payload', + env: { CLAUDE_CODE_ENTRYPOINT: 'cli' }, + payload: { agent_id: 'agent-123' } + }, + { + name: 'cwd skip path', + env: { + CLAUDE_CODE_ENTRYPOINT: 'cli', + ECC_OBSERVE_SKIP_PATHS: ' observer-sessions , .claude-mem ' + }, + cwdSuffix: path.join('observer-sessions', 'worker') + } + ]; + + for (const testCase of cases) { + const homeDir = createTestDir(); + const projectDir = createTestDir(); + + try { + const cwd = testCase.cwdSuffix ? 
path.join(projectDir, testCase.cwdSuffix) : projectDir; + fs.mkdirSync(cwd, { recursive: true }); + + const payload = JSON.stringify({ + tool_name: 'Bash', + tool_input: { command: 'echo hello' }, + tool_response: 'ok', + session_id: `session-${testCase.name.replace(/[^a-z0-9]+/gi, '-')}`, + cwd, + ...(testCase.payload || {}) + }); + + const result = await runShellScript(observePath, ['post'], payload, { + HOME: homeDir, + ...testCase.env + }, projectDir); + + assert.strictEqual(result.code, 0, `${testCase.name} should exit successfully, stderr: ${result.stderr}`); + + const homunculusDir = path.join(homeDir, '.claude', 'homunculus'); + const registryPath = path.join(homunculusDir, 'projects.json'); + const projectsDir = path.join(homunculusDir, 'projects'); + + assert.ok(!fs.existsSync(registryPath), `${testCase.name} should not create projects.json`); + + const projectEntries = fs.existsSync(projectsDir) + ? fs.readdirSync(projectsDir).filter(entry => fs.statSync(path.join(projectsDir, entry)).isDirectory()) + : []; + assert.strictEqual(projectEntries.length, 0, `${testCase.name} should not create project directories`); + } finally { + cleanupTestDir(homeDir); + cleanupTestDir(projectDir); + } + } + })) passed++; else failed++; + if (await asyncTest('matches .tsx extension for type checking', async () => { const testDir = createTestDir(); const testFile = path.join(testDir, 'component.tsx'); @@ -4528,8 +4599,11 @@ async function runTests() { const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8'); // The real string message should appear assert.ok(content.includes('Real user message'), 'Should include the string content user message'); - // Numeric/boolean/object content should NOT appear as text - assert.ok(!content.includes('42'), 'Numeric content should be skipped (else branch → empty string → filtered)'); + // Numeric/boolean/object content should NOT appear as task bullets. 
+ // The full file may legitimately contain "42" in timestamps like 03:42. + assert.ok(!content.includes('\n- 42\n'), 'Numeric content should not be rendered as a task bullet'); + assert.ok(!content.includes('\n- true\n'), 'Boolean content should not be rendered as a task bullet'); + assert.ok(!content.includes('\n- [object Object]\n'), 'Object content should not be stringified into a task bullet'); } finally { fs.rmSync(isoHome, { recursive: true, force: true }); } diff --git a/tests/lib/install-config.test.js b/tests/lib/install-config.test.js new file mode 100644 index 00000000..9dc1b5c8 --- /dev/null +++ b/tests/lib/install-config.test.js @@ -0,0 +1,117 @@ +/** + * Tests for scripts/lib/install/config.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { + loadInstallConfig, + resolveInstallConfigPath, +} = require('../../scripts/lib/install/config'); + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function cleanup(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function writeJson(filePath, value) { + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, JSON.stringify(value, null, 2)); +} + +function runTests() { + console.log('\n=== Testing install/config.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('resolves relative config paths from the provided cwd', () => { + const cwd = '/workspace/app'; + const resolved = resolveInstallConfigPath('configs/ecc-install.json', { cwd }); + assert.strictEqual(resolved, path.join(cwd, 'configs', 'ecc-install.json')); + })) passed++; else failed++; + + if (test('loads and normalizes a valid install 
config', () => { + const cwd = createTempDir('install-config-'); + + try { + const configPath = path.join(cwd, 'ecc-install.json'); + writeJson(configPath, { + version: 1, + target: 'cursor', + profile: 'developer', + modules: ['platform-configs', 'platform-configs'], + include: ['lang:typescript', 'framework:nextjs', 'lang:typescript'], + exclude: ['capability:media'], + options: { + includeExamples: false, + }, + }); + + const config = loadInstallConfig('ecc-install.json', { cwd }); + assert.strictEqual(config.path, configPath); + assert.strictEqual(config.target, 'cursor'); + assert.strictEqual(config.profileId, 'developer'); + assert.deepStrictEqual(config.moduleIds, ['platform-configs']); + assert.deepStrictEqual(config.includeComponentIds, ['lang:typescript', 'framework:nextjs']); + assert.deepStrictEqual(config.excludeComponentIds, ['capability:media']); + assert.deepStrictEqual(config.options, { includeExamples: false }); + } finally { + cleanup(cwd); + } + })) passed++; else failed++; + + if (test('rejects invalid config schema values', () => { + const cwd = createTempDir('install-config-'); + + try { + writeJson(path.join(cwd, 'ecc-install.json'), { + version: 2, + target: 'ghost-target', + }); + + assert.throws( + () => loadInstallConfig('ecc-install.json', { cwd }), + /Invalid install config/ + ); + } finally { + cleanup(cwd); + } + })) passed++; else failed++; + + if (test('fails when the install config does not exist', () => { + const cwd = createTempDir('install-config-'); + + try { + assert.throws( + () => loadInstallConfig('ecc-install.json', { cwd }), + /Install config not found/ + ); + } finally { + cleanup(cwd); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 
1 : 0); +} + +runTests(); diff --git a/tests/lib/install-lifecycle.test.js b/tests/lib/install-lifecycle.test.js new file mode 100644 index 00000000..ae7754db --- /dev/null +++ b/tests/lib/install-lifecycle.test.js @@ -0,0 +1,357 @@ +/** + * Tests for scripts/lib/install-lifecycle.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { + buildDoctorReport, + discoverInstalledStates, +} = require('../../scripts/lib/install-lifecycle'); +const { + createInstallState, + writeInstallState, +} = require('../../scripts/lib/install-state'); + +const REPO_ROOT = path.join(__dirname, '..', '..'); +const CURRENT_PACKAGE_VERSION = JSON.parse( + fs.readFileSync(path.join(REPO_ROOT, 'package.json'), 'utf8') +).version; +const CURRENT_MANIFEST_VERSION = JSON.parse( + fs.readFileSync(path.join(REPO_ROOT, 'manifests', 'install-modules.json'), 'utf8') +).version; + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function cleanup(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function writeState(filePath, options) { + const state = createInstallState(options); + writeInstallState(filePath, state); + return state; +} + +function runTests() { + console.log('\n=== Testing install-lifecycle.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('discovers installed states for multiple targets in the current context', () => { + const homeDir = createTempDir('install-lifecycle-home-'); + const projectRoot = createTempDir('install-lifecycle-project-'); + + try { + const claudeStatePath = path.join(homeDir, '.claude', 'ecc', 'install-state.json'); + const cursorStatePath = path.join(projectRoot, '.cursor', 
'ecc-install-state.json'); + + writeState(claudeStatePath, { + adapter: { id: 'claude-home', target: 'claude', kind: 'home' }, + targetRoot: path.join(homeDir, '.claude'), + installStatePath: claudeStatePath, + request: { + profile: null, + modules: [], + legacyLanguages: ['typescript'], + legacyMode: true, + }, + resolution: { + selectedModules: ['legacy-claude-rules'], + skippedModules: [], + }, + operations: [], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + writeState(cursorStatePath, { + adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' }, + targetRoot: path.join(projectRoot, '.cursor'), + installStatePath: cursorStatePath, + request: { + profile: 'core', + modules: [], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['rules-core', 'platform-configs'], + skippedModules: [], + }, + operations: [], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'def456', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const records = discoverInstalledStates({ + homeDir, + projectRoot, + targets: ['claude', 'cursor'], + }); + + assert.strictEqual(records.length, 2); + assert.strictEqual(records[0].exists, true); + assert.strictEqual(records[1].exists, true); + assert.strictEqual(records[0].state.target.id, 'claude-home'); + assert.strictEqual(records[1].state.target.id, 'cursor-project'); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + if (test('doctor reports missing managed files as an error', () => { + const homeDir = createTempDir('install-lifecycle-home-'); + const projectRoot = createTempDir('install-lifecycle-project-'); + + try { + const targetRoot = path.join(projectRoot, '.cursor'); + const statePath = path.join(targetRoot, 'ecc-install-state.json'); + fs.mkdirSync(targetRoot, { recursive: true }); + + writeState(statePath, { + adapter: { id: 
'cursor-project', target: 'cursor', kind: 'project' }, + targetRoot, + installStatePath: statePath, + request: { + profile: null, + modules: ['platform-configs'], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['platform-configs'], + skippedModules: [], + }, + operations: [ + { + kind: 'copy-file', + moduleId: 'platform-configs', + sourceRelativePath: '.cursor/hooks.json', + destinationPath: path.join(targetRoot, 'hooks.json'), + strategy: 'sync-root-children', + ownership: 'managed', + scaffoldOnly: false, + }, + ], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const report = buildDoctorReport({ + repoRoot: REPO_ROOT, + homeDir, + projectRoot, + targets: ['cursor'], + }); + + assert.strictEqual(report.results.length, 1); + assert.strictEqual(report.results[0].status, 'error'); + assert.ok(report.results[0].issues.some(issue => issue.code === 'missing-managed-files')); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + if (test('doctor reports a healthy legacy install when managed files are present', () => { + const homeDir = createTempDir('install-lifecycle-home-'); + const projectRoot = createTempDir('install-lifecycle-project-'); + + try { + const targetRoot = path.join(homeDir, '.claude'); + const statePath = path.join(targetRoot, 'ecc', 'install-state.json'); + const managedFile = path.join(targetRoot, 'rules', 'common', 'coding-style.md'); + const sourceContent = fs.readFileSync(path.join(REPO_ROOT, 'rules', 'common', 'coding-style.md'), 'utf8'); + fs.mkdirSync(path.dirname(managedFile), { recursive: true }); + fs.writeFileSync(managedFile, sourceContent); + + writeState(statePath, { + adapter: { id: 'claude-home', target: 'claude', kind: 'home' }, + targetRoot, + installStatePath: statePath, + request: { + profile: null, + modules: [], + legacyLanguages: ['typescript'], + legacyMode: 
true, + }, + resolution: { + selectedModules: ['legacy-claude-rules'], + skippedModules: [], + }, + operations: [ + { + kind: 'copy-file', + moduleId: 'legacy-claude-rules', + sourceRelativePath: 'rules/common/coding-style.md', + destinationPath: managedFile, + strategy: 'preserve-relative-path', + ownership: 'managed', + scaffoldOnly: false, + }, + ], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const report = buildDoctorReport({ + repoRoot: REPO_ROOT, + homeDir, + projectRoot, + targets: ['claude'], + }); + + assert.strictEqual(report.results.length, 1); + assert.strictEqual(report.results[0].status, 'ok'); + assert.strictEqual(report.results[0].issues.length, 0); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + if (test('doctor reports drifted managed files as a warning', () => { + const homeDir = createTempDir('install-lifecycle-home-'); + const projectRoot = createTempDir('install-lifecycle-project-'); + + try { + const targetRoot = path.join(projectRoot, '.cursor'); + const statePath = path.join(targetRoot, 'ecc-install-state.json'); + const sourcePath = path.join(REPO_ROOT, '.cursor', 'hooks.json'); + const destinationPath = path.join(targetRoot, 'hooks.json'); + fs.mkdirSync(path.dirname(destinationPath), { recursive: true }); + fs.writeFileSync(destinationPath, '{"drifted":true}\n'); + + writeState(statePath, { + adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' }, + targetRoot, + installStatePath: statePath, + request: { + profile: null, + modules: ['platform-configs'], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['platform-configs'], + skippedModules: [], + }, + operations: [ + { + kind: 'copy-file', + moduleId: 'platform-configs', + sourcePath, + sourceRelativePath: '.cursor/hooks.json', + destinationPath, + strategy: 'sync-root-children', + ownership: 
'managed', + scaffoldOnly: false, + }, + ], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const report = buildDoctorReport({ + repoRoot: REPO_ROOT, + homeDir, + projectRoot, + targets: ['cursor'], + }); + + assert.strictEqual(report.results.length, 1); + assert.strictEqual(report.results[0].status, 'warning'); + assert.ok(report.results[0].issues.some(issue => issue.code === 'drifted-managed-files')); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + if (test('doctor reports manifest resolution drift for non-legacy installs', () => { + const homeDir = createTempDir('install-lifecycle-home-'); + const projectRoot = createTempDir('install-lifecycle-project-'); + + try { + const targetRoot = path.join(projectRoot, '.cursor'); + const statePath = path.join(targetRoot, 'ecc-install-state.json'); + fs.mkdirSync(targetRoot, { recursive: true }); + + writeState(statePath, { + adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' }, + targetRoot, + installStatePath: statePath, + request: { + profile: 'core', + modules: [], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['rules-core'], + skippedModules: [], + }, + operations: [], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const report = buildDoctorReport({ + repoRoot: REPO_ROOT, + homeDir, + projectRoot, + targets: ['cursor'], + }); + + assert.strictEqual(report.results.length, 1); + assert.strictEqual(report.results[0].status, 'warning'); + assert.ok(report.results[0].issues.some(issue => issue.code === 'resolution-drift')); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 
1 : 0); +} + +runTests(); diff --git a/tests/lib/install-manifests.test.js b/tests/lib/install-manifests.test.js new file mode 100644 index 00000000..cfdcdcf7 --- /dev/null +++ b/tests/lib/install-manifests.test.js @@ -0,0 +1,196 @@ +/** + * Tests for scripts/lib/install-manifests.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { + loadInstallManifests, + listInstallComponents, + listInstallModules, + listInstallProfiles, + resolveInstallPlan, +} = require('../../scripts/lib/install-manifests'); + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function createTestRepo() { + const root = fs.mkdtempSync(path.join(os.tmpdir(), 'install-manifests-')); + fs.mkdirSync(path.join(root, 'manifests'), { recursive: true }); + return root; +} + +function cleanupTestRepo(root) { + fs.rmSync(root, { recursive: true, force: true }); +} + +function writeJson(filePath, value) { + fs.mkdirSync(path.dirname(filePath), { recursive: true }); + fs.writeFileSync(filePath, JSON.stringify(value, null, 2)); +} + +function runTests() { + console.log('\n=== Testing install-manifests.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('loads real project install manifests', () => { + const manifests = loadInstallManifests(); + assert.ok(manifests.modules.length >= 1, 'Should load modules'); + assert.ok(Object.keys(manifests.profiles).length >= 1, 'Should load profiles'); + assert.ok(manifests.components.length >= 1, 'Should load components'); + })) passed++; else failed++; + + if (test('lists install profiles from the real project', () => { + const profiles = listInstallProfiles(); + assert.ok(profiles.some(profile => profile.id === 'core'), 'Should include core profile'); + assert.ok(profiles.some(profile => profile.id === 
'full'), 'Should include full profile'); + })) passed++; else failed++; + + if (test('lists install modules from the real project', () => { + const modules = listInstallModules(); + assert.ok(modules.some(module => module.id === 'rules-core'), 'Should include rules-core'); + assert.ok(modules.some(module => module.id === 'orchestration'), 'Should include orchestration'); + })) passed++; else failed++; + + if (test('lists install components from the real project', () => { + const components = listInstallComponents(); + assert.ok(components.some(component => component.id === 'lang:typescript'), + 'Should include lang:typescript'); + assert.ok(components.some(component => component.id === 'capability:security'), + 'Should include capability:security'); + })) passed++; else failed++; + + if (test('resolves a real project profile with target-specific skips', () => { + const projectRoot = '/workspace/app'; + const plan = resolveInstallPlan({ profileId: 'developer', target: 'cursor', projectRoot }); + assert.ok(plan.selectedModuleIds.includes('rules-core'), 'Should keep rules-core'); + assert.ok(plan.selectedModuleIds.includes('commands-core'), 'Should keep commands-core'); + assert.ok(!plan.selectedModuleIds.includes('orchestration'), + 'Should not select unsupported orchestration module for cursor'); + assert.ok(plan.skippedModuleIds.includes('orchestration'), + 'Should report unsupported orchestration module as skipped'); + assert.strictEqual(plan.targetAdapterId, 'cursor-project'); + assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.cursor')); + assert.strictEqual(plan.installStatePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json')); + assert.ok(plan.operations.length > 0, 'Should include scaffold operations'); + assert.ok( + plan.operations.some(operation => ( + operation.sourceRelativePath === '.cursor' + && operation.strategy === 'sync-root-children' + )), + 'Should flatten the native cursor root' + ); + })) passed++; else failed++; + + if 
(test('resolves explicit modules with dependency expansion', () => { + const plan = resolveInstallPlan({ moduleIds: ['security'] }); + assert.ok(plan.selectedModuleIds.includes('security'), 'Should include requested module'); + assert.ok(plan.selectedModuleIds.includes('workflow-quality'), + 'Should include transitive dependency'); + assert.ok(plan.selectedModuleIds.includes('platform-configs'), + 'Should include nested dependency'); + })) passed++; else failed++; + + if (test('resolves included and excluded user-facing components', () => { + const plan = resolveInstallPlan({ + profileId: 'core', + includeComponentIds: ['capability:security'], + excludeComponentIds: ['capability:orchestration'], + target: 'claude', + }); + + assert.deepStrictEqual(plan.includedComponentIds, ['capability:security']); + assert.deepStrictEqual(plan.excludedComponentIds, ['capability:orchestration']); + assert.ok(plan.selectedModuleIds.includes('security'), 'Should include modules from selected components'); + assert.ok(!plan.selectedModuleIds.includes('orchestration'), 'Should exclude modules from excluded components'); + assert.ok(plan.excludedModuleIds.includes('orchestration'), + 'Should report modules removed by excluded components'); + })) passed++; else failed++; + + if (test('fails when a selected component depends on an excluded component module', () => { + assert.throws( + () => resolveInstallPlan({ + includeComponentIds: ['capability:social'], + excludeComponentIds: ['capability:content'], + }), + /depends on excluded module business-content/ + ); + })) passed++; else failed++; + + if (test('throws on unknown install profile', () => { + assert.throws( + () => resolveInstallPlan({ profileId: 'ghost-profile' }), + /Unknown install profile/ + ); + })) passed++; else failed++; + + if (test('throws on unknown install target', () => { + assert.throws( + () => resolveInstallPlan({ profileId: 'core', target: 'not-a-target' }), + /Unknown install target/ + ); + })) passed++; else 
failed++; + + if (test('throws when a dependency does not support the requested target', () => { + const repoRoot = createTestRepo(); + writeJson(path.join(repoRoot, 'manifests', 'install-modules.json'), { + version: 1, + modules: [ + { + id: 'parent', + kind: 'skills', + description: 'Parent', + paths: ['parent'], + targets: ['claude'], + dependencies: ['child'], + defaultInstall: false, + cost: 'light', + stability: 'stable' + }, + { + id: 'child', + kind: 'skills', + description: 'Child', + paths: ['child'], + targets: ['cursor'], + dependencies: [], + defaultInstall: false, + cost: 'light', + stability: 'stable' + } + ] + }); + writeJson(path.join(repoRoot, 'manifests', 'install-profiles.json'), { + version: 1, + profiles: { + core: { description: 'Core', modules: ['parent'] } + } + }); + + assert.throws( + () => resolveInstallPlan({ repoRoot, profileId: 'core', target: 'claude' }), + /does not support target claude/ + ); + cleanupTestRepo(repoRoot); + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 
1 : 0); +} + +runTests(); diff --git a/tests/lib/install-request.test.js b/tests/lib/install-request.test.js new file mode 100644 index 00000000..95e8e333 --- /dev/null +++ b/tests/lib/install-request.test.js @@ -0,0 +1,147 @@ +/** + * Tests for scripts/lib/install/request.js + */ + +const assert = require('assert'); + +const { + normalizeInstallRequest, + parseInstallArgs, +} = require('../../scripts/lib/install/request'); + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing install/request.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('parses manifest-mode CLI arguments', () => { + const parsed = parseInstallArgs([ + 'node', + 'scripts/install-apply.js', + '--target', 'cursor', + '--profile', 'developer', + '--with', 'lang:typescript', + '--without', 'capability:media', + '--config', 'ecc-install.json', + '--dry-run', + '--json' + ]); + + assert.strictEqual(parsed.target, 'cursor'); + assert.strictEqual(parsed.profileId, 'developer'); + assert.strictEqual(parsed.configPath, 'ecc-install.json'); + assert.deepStrictEqual(parsed.includeComponentIds, ['lang:typescript']); + assert.deepStrictEqual(parsed.excludeComponentIds, ['capability:media']); + assert.strictEqual(parsed.dryRun, true); + assert.strictEqual(parsed.json, true); + assert.deepStrictEqual(parsed.languages, []); + })) passed++; else failed++; + + if (test('normalizes legacy language installs into a canonical request', () => { + const request = normalizeInstallRequest({ + target: 'claude', + profileId: null, + moduleIds: [], + languages: ['typescript', 'python'] + }); + + assert.strictEqual(request.mode, 'legacy'); + assert.strictEqual(request.target, 'claude'); + assert.deepStrictEqual(request.languages, ['typescript', 'python']); + assert.deepStrictEqual(request.moduleIds, 
[]); + assert.strictEqual(request.profileId, null); + })) passed++; else failed++; + + if (test('normalizes manifest installs into a canonical request', () => { + const request = normalizeInstallRequest({ + target: 'cursor', + profileId: 'developer', + moduleIds: [], + includeComponentIds: ['lang:typescript'], + excludeComponentIds: ['capability:media'], + languages: [] + }); + + assert.strictEqual(request.mode, 'manifest'); + assert.strictEqual(request.target, 'cursor'); + assert.strictEqual(request.profileId, 'developer'); + assert.deepStrictEqual(request.includeComponentIds, ['lang:typescript']); + assert.deepStrictEqual(request.excludeComponentIds, ['capability:media']); + assert.deepStrictEqual(request.languages, []); + })) passed++; else failed++; + + if (test('merges config-backed component selections with CLI overrides', () => { + const request = normalizeInstallRequest({ + target: 'cursor', + profileId: null, + moduleIds: ['platform-configs'], + includeComponentIds: ['framework:nextjs'], + excludeComponentIds: ['capability:media'], + languages: [], + configPath: '/workspace/app/ecc-install.json', + config: { + path: '/workspace/app/ecc-install.json', + target: 'claude', + profileId: 'developer', + moduleIds: ['workflow-quality'], + includeComponentIds: ['lang:typescript'], + excludeComponentIds: ['capability:orchestration'], + }, + }); + + assert.strictEqual(request.mode, 'manifest'); + assert.strictEqual(request.target, 'cursor'); + assert.strictEqual(request.profileId, 'developer'); + assert.deepStrictEqual(request.moduleIds, ['workflow-quality', 'platform-configs']); + assert.deepStrictEqual(request.includeComponentIds, ['lang:typescript', 'framework:nextjs']); + assert.deepStrictEqual(request.excludeComponentIds, ['capability:orchestration', 'capability:media']); + assert.strictEqual(request.configPath, '/workspace/app/ecc-install.json'); + })) passed++; else failed++; + + if (test('rejects mixing legacy languages with manifest flags', () => { + 
assert.throws( + () => normalizeInstallRequest({ + target: 'claude', + profileId: 'core', + moduleIds: [], + includeComponentIds: [], + excludeComponentIds: [], + languages: ['typescript'] + }), + /cannot be combined/ + ); + })) passed++; else failed++; + + if (test('rejects empty install requests when not asking for help', () => { + assert.throws( + () => normalizeInstallRequest({ + target: 'claude', + profileId: null, + moduleIds: [], + includeComponentIds: [], + excludeComponentIds: [], + languages: [], + help: false + }), + /No install profile, module IDs, included components, or legacy languages/ + ); + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/lib/install-state.test.js b/tests/lib/install-state.test.js new file mode 100644 index 00000000..e3a28370 --- /dev/null +++ b/tests/lib/install-state.test.js @@ -0,0 +1,139 @@ +/** + * Tests for scripts/lib/install-state.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { + createInstallState, + readInstallState, + writeInstallState, +} = require('../../scripts/lib/install-state'); + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function createTestDir() { + return fs.mkdtempSync(path.join(os.tmpdir(), 'install-state-')); +} + +function cleanupTestDir(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function runTests() { + console.log('\n=== Testing install-state.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('creates a valid install-state payload', () => { + const state = createInstallState({ + adapter: { id: 'cursor-project' }, + targetRoot: '/repo/.cursor', + installStatePath: 
'/repo/.cursor/ecc-install-state.json', + request: { + profile: 'developer', + modules: ['orchestration'], + legacyLanguages: ['typescript'], + legacyMode: true, + }, + resolution: { + selectedModules: ['rules-core', 'orchestration'], + skippedModules: [], + }, + operations: [ + { + kind: 'copy-path', + moduleId: 'rules-core', + sourceRelativePath: 'rules', + destinationPath: '/repo/.cursor/rules', + strategy: 'preserve-relative-path', + ownership: 'managed', + scaffoldOnly: true, + }, + ], + source: { + repoVersion: '1.9.0', + repoCommit: 'abc123', + manifestVersion: 1, + }, + installedAt: '2026-03-13T00:00:00Z', + }); + + assert.strictEqual(state.schemaVersion, 'ecc.install.v1'); + assert.strictEqual(state.target.id, 'cursor-project'); + assert.strictEqual(state.request.profile, 'developer'); + assert.strictEqual(state.operations.length, 1); + })) passed++; else failed++; + + if (test('writes and reads install-state from disk', () => { + const testDir = createTestDir(); + const statePath = path.join(testDir, 'ecc-install-state.json'); + + try { + const state = createInstallState({ + adapter: { id: 'claude-home' }, + targetRoot: path.join(testDir, '.claude'), + installStatePath: statePath, + request: { + profile: 'core', + modules: [], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['rules-core'], + skippedModules: [], + }, + operations: [], + source: { + repoVersion: '1.9.0', + repoCommit: 'abc123', + manifestVersion: 1, + }, + }); + + writeInstallState(statePath, state); + const loaded = readInstallState(statePath); + + assert.strictEqual(loaded.target.id, 'claude-home'); + assert.strictEqual(loaded.request.profile, 'core'); + assert.deepStrictEqual(loaded.resolution.selectedModules, ['rules-core']); + } finally { + cleanupTestDir(testDir); + } + })) passed++; else failed++; + + if (test('rejects invalid install-state payloads on read', () => { + const testDir = createTestDir(); + const statePath = path.join(testDir, 
'ecc-install-state.json'); + + try { + fs.writeFileSync(statePath, JSON.stringify({ schemaVersion: 'ecc.install.v1' }, null, 2)); + assert.throws( + () => readInstallState(statePath), + /Invalid install-state/ + ); + } finally { + cleanupTestDir(testDir); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/lib/install-targets.test.js b/tests/lib/install-targets.test.js new file mode 100644 index 00000000..2d67b0e6 --- /dev/null +++ b/tests/lib/install-targets.test.js @@ -0,0 +1,110 @@ +/** + * Tests for scripts/lib/install-targets/registry.js + */ + +const assert = require('assert'); +const path = require('path'); + +const { + getInstallTargetAdapter, + listInstallTargetAdapters, + planInstallTargetScaffold, +} = require('../../scripts/lib/install-targets/registry'); + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing install-target adapters ===\n'); + + let passed = 0; + let failed = 0; + + if (test('lists supported target adapters', () => { + const adapters = listInstallTargetAdapters(); + const targets = adapters.map(adapter => adapter.target); + assert.ok(targets.includes('claude'), 'Should include claude target'); + assert.ok(targets.includes('cursor'), 'Should include cursor target'); + assert.ok(targets.includes('antigravity'), 'Should include antigravity target'); + assert.ok(targets.includes('codex'), 'Should include codex target'); + assert.ok(targets.includes('opencode'), 'Should include opencode target'); + })) passed++; else failed++; + + if (test('resolves cursor adapter root and install-state path from project root', () => { + const adapter = getInstallTargetAdapter('cursor'); + const projectRoot = '/workspace/app'; 
+ const root = adapter.resolveRoot({ projectRoot }); + const statePath = adapter.getInstallStatePath({ projectRoot }); + + assert.strictEqual(root, path.join(projectRoot, '.cursor')); + assert.strictEqual(statePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json')); + })) passed++; else failed++; + + if (test('resolves claude adapter root and install-state path from home dir', () => { + const adapter = getInstallTargetAdapter('claude'); + const homeDir = '/Users/example'; + const root = adapter.resolveRoot({ homeDir, repoRoot: '/repo/ecc' }); + const statePath = adapter.getInstallStatePath({ homeDir, repoRoot: '/repo/ecc' }); + + assert.strictEqual(root, path.join(homeDir, '.claude')); + assert.strictEqual(statePath, path.join(homeDir, '.claude', 'ecc', 'install-state.json')); + })) passed++; else failed++; + + if (test('plans scaffold operations and flattens native target roots', () => { + const repoRoot = '/repo/ecc'; + const projectRoot = '/workspace/app'; + const modules = [ + { + id: 'platform-configs', + paths: ['.cursor', 'mcp-configs'], + }, + { + id: 'rules-core', + paths: ['rules'], + }, + ]; + + const plan = planInstallTargetScaffold({ + target: 'cursor', + repoRoot, + projectRoot, + modules, + }); + + assert.strictEqual(plan.adapter.id, 'cursor-project'); + assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.cursor')); + assert.strictEqual(plan.installStatePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json')); + + const flattened = plan.operations.find(operation => operation.sourceRelativePath === '.cursor'); + const preserved = plan.operations.find(operation => operation.sourceRelativePath === 'rules'); + + assert.ok(flattened, 'Should include .cursor scaffold operation'); + assert.strictEqual(flattened.strategy, 'sync-root-children'); + assert.strictEqual(flattened.destinationPath, path.join(projectRoot, '.cursor')); + + assert.ok(preserved, 'Should include rules scaffold operation'); + 
assert.strictEqual(preserved.strategy, 'preserve-relative-path'); + assert.strictEqual(preserved.destinationPath, path.join(projectRoot, '.cursor', 'rules')); + })) passed++; else failed++; + + if (test('throws on unknown target adapter', () => { + assert.throws( + () => getInstallTargetAdapter('ghost-target'), + /Unknown install target adapter/ + ); + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/lib/orchestration-session.test.js b/tests/lib/orchestration-session.test.js index 21e7bfae..e107ca31 100644 --- a/tests/lib/orchestration-session.test.js +++ b/tests/lib/orchestration-session.test.js @@ -7,6 +7,7 @@ const path = require('path'); const { buildSessionSnapshot, + listTmuxPanes, loadWorkerSnapshots, parseWorkerHandoff, parseWorkerStatus, @@ -186,6 +187,16 @@ test('buildSessionSnapshot merges tmux panes with worker metadata', () => { } }); +test('listTmuxPanes returns an empty array when tmux is unavailable', () => { + const panes = listTmuxPanes('workflow-visual-proof', { + spawnSyncImpl: () => ({ + error: Object.assign(new Error('tmux not found'), { code: 'ENOENT' }) + }) + }); + + assert.deepStrictEqual(panes, []); +}); + test('resolveSnapshotTarget handles plan files and direct session names', () => { const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-target-')); const repoRoot = path.join(tempRoot, 'repo'); diff --git a/tests/lib/session-adapters.test.js b/tests/lib/session-adapters.test.js new file mode 100644 index 00000000..64f7e4e2 --- /dev/null +++ b/tests/lib/session-adapters.test.js @@ -0,0 +1,214 @@ +'use strict'; + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); + +const { createClaudeHistoryAdapter } = require('../../scripts/lib/session-adapters/claude-history'); +const { createDmuxTmuxAdapter } = 
require('../../scripts/lib/session-adapters/dmux-tmux'); +const { createAdapterRegistry } = require('../../scripts/lib/session-adapters/registry'); + +console.log('=== Testing session-adapters ===\n'); + +let passed = 0; +let failed = 0; + +function test(name, fn) { + try { + fn(); + console.log(` ✓ ${name}`); + passed += 1; + } catch (error) { + console.log(` ✗ ${name}: ${error.message}`); + failed += 1; + } +} + +function withHome(homeDir, fn) { + const previousHome = process.env.HOME; + process.env.HOME = homeDir; + + try { + fn(); + } finally { + if (typeof previousHome === 'string') { + process.env.HOME = previousHome; + } else { + delete process.env.HOME; + } + } +} + +test('dmux adapter normalizes orchestration snapshots into canonical form', () => { + const adapter = createDmuxTmuxAdapter({ + collectSessionSnapshotImpl: () => ({ + sessionName: 'workflow-visual-proof', + coordinationDir: '/tmp/.claude/orchestration/workflow-visual-proof', + repoRoot: '/tmp/repo', + targetType: 'plan', + sessionActive: true, + paneCount: 1, + workerCount: 1, + workerStates: { running: 1 }, + panes: [{ + paneId: '%95', + windowIndex: 1, + paneIndex: 0, + title: 'seed-check', + currentCommand: 'codex', + currentPath: '/tmp/worktree', + active: false, + dead: false, + pid: 1234 + }], + workers: [{ + workerSlug: 'seed-check', + workerDir: '/tmp/.claude/orchestration/workflow-visual-proof/seed-check', + status: { + state: 'running', + updated: '2026-03-13T00:00:00Z', + branch: 'feature/seed-check', + worktree: '/tmp/worktree', + taskFile: '/tmp/task.md', + handoffFile: '/tmp/handoff.md' + }, + task: { + objective: 'Inspect seeded files.', + seedPaths: ['scripts/orchestrate-worktrees.js'] + }, + handoff: { + summary: ['Pending'], + validation: [], + remainingRisks: ['No screenshot yet'] + }, + files: { + status: '/tmp/status.md', + task: '/tmp/task.md', + handoff: '/tmp/handoff.md' + }, + pane: { + paneId: '%95', + title: 'seed-check' + } + }] + }) + }); + + const snapshot = 
adapter.open('workflow-visual-proof').getSnapshot(); + + assert.strictEqual(snapshot.schemaVersion, 'ecc.session.v1'); + assert.strictEqual(snapshot.adapterId, 'dmux-tmux'); + assert.strictEqual(snapshot.session.id, 'workflow-visual-proof'); + assert.strictEqual(snapshot.session.kind, 'orchestrated'); + assert.strictEqual(snapshot.session.sourceTarget.type, 'session'); + assert.strictEqual(snapshot.aggregates.workerCount, 1); + assert.strictEqual(snapshot.workers[0].runtime.kind, 'tmux-pane'); + assert.strictEqual(snapshot.workers[0].outputs.remainingRisks[0], 'No screenshot yet'); +}); + +test('claude-history adapter loads the latest recorded session', () => { + const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-adapter-home-')); + const sessionsDir = path.join(homeDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + + const sessionPath = path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp'); + fs.writeFileSync(sessionPath, [ + '# Session Review', + '', + '**Date:** 2026-03-13', + '**Started:** 09:00', + '**Last Updated:** 11:30', + '**Project:** everything-claude-code', + '**Branch:** feat/session-adapter', + '**Worktree:** /tmp/ecc-worktree', + '', + '### Completed', + '- [x] Build snapshot prototype', + '', + '### In Progress', + '- [ ] Add CLI wrapper', + '', + '### Notes for Next Session', + 'Need a second adapter.', + '', + '### Context to Load', + '```', + 'scripts/lib/orchestration-session.js', + '```' + ].join('\n')); + + try { + withHome(homeDir, () => { + const adapter = createClaudeHistoryAdapter(); + const snapshot = adapter.open('claude:latest').getSnapshot(); + + assert.strictEqual(snapshot.schemaVersion, 'ecc.session.v1'); + assert.strictEqual(snapshot.adapterId, 'claude-history'); + assert.strictEqual(snapshot.session.kind, 'history'); + assert.strictEqual(snapshot.session.state, 'recorded'); + assert.strictEqual(snapshot.workers.length, 1); + assert.strictEqual(snapshot.workers[0].branch, 
'feat/session-adapter'); + assert.strictEqual(snapshot.workers[0].worktree, '/tmp/ecc-worktree'); + assert.strictEqual(snapshot.workers[0].runtime.kind, 'claude-session'); + assert.strictEqual(snapshot.workers[0].artifacts.sessionFile, sessionPath); + assert.ok(snapshot.workers[0].outputs.summary.includes('Build snapshot prototype')); + }); + } finally { + fs.rmSync(homeDir, { recursive: true, force: true }); + } +}); + +test('adapter registry routes plan files to dmux and explicit claude targets to history', () => { + const repoRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-registry-repo-')); + const planPath = path.join(repoRoot, 'workflow.json'); + fs.writeFileSync(planPath, JSON.stringify({ + sessionName: 'workflow-visual-proof', + repoRoot, + coordinationRoot: path.join(repoRoot, '.claude', 'orchestration') + })); + + const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-registry-home-')); + const sessionsDir = path.join(homeDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.writeFileSync( + path.join(sessionsDir, '2026-03-13-z9y8x7w6-session.tmp'), + '# History Session\n\n**Branch:** feat/history\n' + ); + + try { + withHome(homeDir, () => { + const registry = createAdapterRegistry({ + adapters: [ + createDmuxTmuxAdapter({ + collectSessionSnapshotImpl: () => ({ + sessionName: 'workflow-visual-proof', + coordinationDir: path.join(repoRoot, '.claude', 'orchestration', 'workflow-visual-proof'), + repoRoot, + targetType: 'plan', + sessionActive: false, + paneCount: 0, + workerCount: 0, + workerStates: {}, + panes: [], + workers: [] + }) + }), + createClaudeHistoryAdapter() + ] + }); + + const dmuxSnapshot = registry.open(planPath, { cwd: repoRoot }).getSnapshot(); + const claudeSnapshot = registry.open('claude:latest', { cwd: repoRoot }).getSnapshot(); + + assert.strictEqual(dmuxSnapshot.adapterId, 'dmux-tmux'); + assert.strictEqual(claudeSnapshot.adapterId, 'claude-history'); + }); + } finally { + 
fs.rmSync(repoRoot, { recursive: true, force: true }); + fs.rmSync(homeDir, { recursive: true, force: true }); + } +}); + +console.log(`\n=== Results: ${passed} passed, ${failed} failed ===`); +if (failed > 0) process.exit(1); diff --git a/tests/scripts/doctor.test.js b/tests/scripts/doctor.test.js new file mode 100644 index 00000000..7cb540da --- /dev/null +++ b/tests/scripts/doctor.test.js @@ -0,0 +1,190 @@ +/** + * Tests for scripts/doctor.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'doctor.js'); +const REPO_ROOT = path.join(__dirname, '..', '..'); +const CURRENT_PACKAGE_VERSION = JSON.parse( + fs.readFileSync(path.join(REPO_ROOT, 'package.json'), 'utf8') +).version; +const CURRENT_MANIFEST_VERSION = JSON.parse( + fs.readFileSync(path.join(REPO_ROOT, 'manifests', 'install-modules.json'), 'utf8') +).version; +const { + createInstallState, + writeInstallState, +} = require('../../scripts/lib/install-state'); + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function cleanup(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function writeState(filePath, options) { + const state = createInstallState(options); + writeInstallState(filePath, state); +} + +function run(args = [], options = {}) { + const env = { + ...process.env, + HOME: options.homeDir || process.env.HOME, + }; + + try { + const stdout = execFileSync('node', [SCRIPT, ...args], { + cwd: options.cwd, + env, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000, + }); + + return { code: 0, stdout, stderr: '' }; + } catch (error) { + return { + code: error.status || 1, + stdout: error.stdout || '', + stderr: error.stderr || '', + }; + } +} + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + 
return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing doctor.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('reports a healthy install with exit code 0', () => { + const homeDir = createTempDir('doctor-home-'); + const projectRoot = createTempDir('doctor-project-'); + + try { + const targetRoot = path.join(homeDir, '.claude'); + const statePath = path.join(targetRoot, 'ecc', 'install-state.json'); + const managedFile = path.join(targetRoot, 'rules', 'common', 'coding-style.md'); + const sourceContent = fs.readFileSync(path.join(REPO_ROOT, 'rules', 'common', 'coding-style.md'), 'utf8'); + fs.mkdirSync(path.dirname(managedFile), { recursive: true }); + fs.writeFileSync(managedFile, sourceContent); + + writeState(statePath, { + adapter: { id: 'claude-home', target: 'claude', kind: 'home' }, + targetRoot, + installStatePath: statePath, + request: { + profile: null, + modules: [], + legacyLanguages: ['typescript'], + legacyMode: true, + }, + resolution: { + selectedModules: ['legacy-claude-rules'], + skippedModules: [], + }, + operations: [ + { + kind: 'copy-file', + moduleId: 'legacy-claude-rules', + sourceRelativePath: 'rules/common/coding-style.md', + destinationPath: managedFile, + strategy: 'preserve-relative-path', + ownership: 'managed', + scaffoldOnly: false, + }, + ], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const result = run(['--target', 'claude'], { cwd: projectRoot, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + assert.ok(result.stdout.includes('Doctor report')); + assert.ok(result.stdout.includes('Status: OK')); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + if (test('reports issues and exits 1 for unhealthy installs', () => { + const homeDir = 
createTempDir('doctor-home-'); + const projectRoot = createTempDir('doctor-project-'); + + try { + const targetRoot = path.join(projectRoot, '.cursor'); + const statePath = path.join(targetRoot, 'ecc-install-state.json'); + fs.mkdirSync(targetRoot, { recursive: true }); + + writeState(statePath, { + adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' }, + targetRoot, + installStatePath: statePath, + request: { + profile: null, + modules: ['platform-configs'], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['platform-configs'], + skippedModules: [], + }, + operations: [ + { + kind: 'copy-file', + moduleId: 'platform-configs', + sourceRelativePath: '.cursor/hooks.json', + destinationPath: path.join(targetRoot, 'hooks.json'), + strategy: 'sync-root-children', + ownership: 'managed', + scaffoldOnly: false, + }, + ], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const result = run(['--target', 'cursor', '--json'], { cwd: projectRoot, homeDir }); + assert.strictEqual(result.code, 1); + const parsed = JSON.parse(result.stdout); + assert.strictEqual(parsed.summary.errorCount, 1); + assert.ok(parsed.results[0].issues.some(issue => issue.code === 'missing-managed-files')); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 
1 : 0); +} + +runTests(); diff --git a/tests/scripts/ecc.test.js b/tests/scripts/ecc.test.js new file mode 100644 index 00000000..b3641b69 --- /dev/null +++ b/tests/scripts/ecc.test.js @@ -0,0 +1,139 @@ +/** + * Tests for scripts/ecc.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { spawnSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'ecc.js'); + +function runCli(args, options = {}) { + return spawnSync('node', [SCRIPT, ...args], { + encoding: 'utf8', + cwd: options.cwd || process.cwd(), + env: { + ...process.env, + ...(options.env || {}), + }, + }); +} + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function parseJson(stdout) { + return JSON.parse(stdout.trim()); +} + +function runTest(name, fn) { + try { + fn(); + console.log(` ✓ ${name}`); + return true; + } catch (error) { + console.log(` ✗ ${name}`); + console.error(` ${error.message}`); + return false; + } +} + +function main() { + console.log('\n=== Testing ecc.js ===\n'); + + let passed = 0; + let failed = 0; + + const tests = [ + ['shows top-level help', () => { + const result = runCli(['--help']); + assert.strictEqual(result.status, 0); + assert.match(result.stdout, /ECC selective-install CLI/); + assert.match(result.stdout, /list-installed/); + assert.match(result.stdout, /doctor/); + }], + ['delegates explicit install command', () => { + const result = runCli(['install', '--dry-run', '--json', 'typescript']); + assert.strictEqual(result.status, 0, result.stderr); + const payload = parseJson(result.stdout); + assert.strictEqual(payload.dryRun, true); + assert.strictEqual(payload.plan.mode, 'legacy'); + assert.deepStrictEqual(payload.plan.languages, ['typescript']); + }], + ['routes implicit top-level args to install', () => { + const result = runCli(['--dry-run', '--json', 'typescript']); + 
assert.strictEqual(result.status, 0, result.stderr); + const payload = parseJson(result.stdout); + assert.strictEqual(payload.dryRun, true); + assert.strictEqual(payload.plan.mode, 'legacy'); + assert.deepStrictEqual(payload.plan.languages, ['typescript']); + }], + ['delegates plan command', () => { + const result = runCli(['plan', '--list-profiles', '--json']); + assert.strictEqual(result.status, 0, result.stderr); + const payload = parseJson(result.stdout); + assert.ok(Array.isArray(payload.profiles)); + assert.ok(payload.profiles.length > 0); + }], + ['delegates lifecycle commands', () => { + const homeDir = createTempDir('ecc-cli-home-'); + const projectRoot = createTempDir('ecc-cli-project-'); + const result = runCli(['list-installed', '--json'], { + cwd: projectRoot, + env: { HOME: homeDir }, + }); + assert.strictEqual(result.status, 0, result.stderr); + const payload = parseJson(result.stdout); + assert.deepStrictEqual(payload.records, []); + }], + ['delegates session-inspect command', () => { + const homeDir = createTempDir('ecc-cli-home-'); + const sessionsDir = path.join(homeDir, '.claude', 'sessions'); + fs.mkdirSync(sessionsDir, { recursive: true }); + fs.writeFileSync( + path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp'), + '# ECC Session\n\n**Branch:** feat/ecc-cli\n' + ); + + const result = runCli(['session-inspect', 'claude:latest'], { + env: { HOME: homeDir }, + }); + + assert.strictEqual(result.status, 0, result.stderr); + const payload = parseJson(result.stdout); + assert.strictEqual(payload.adapterId, 'claude-history'); + assert.strictEqual(payload.workers[0].branch, 'feat/ecc-cli'); + }], + ['supports help for a subcommand', () => { + const result = runCli(['help', 'repair']); + assert.strictEqual(result.status, 0, result.stderr); + assert.match(result.stdout, /Usage: node scripts\/repair\.js/); + }], + ['fails on unknown commands instead of treating them as installs', () => { + const result = runCli(['bogus']); + 
assert.strictEqual(result.status, 1); + assert.match(result.stderr, /Unknown command: bogus/); + }], + ['fails on unknown help subcommands', () => { + const result = runCli(['help', 'bogus']); + assert.strictEqual(result.status, 1); + assert.match(result.stderr, /Unknown command: bogus/); + }], + ]; + + for (const [name, fn] of tests) { + if (runTest(name, fn)) { + passed += 1; + } else { + failed += 1; + } + } + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +main(); diff --git a/tests/scripts/install-apply.test.js b/tests/scripts/install-apply.test.js new file mode 100644 index 00000000..9f66db15 --- /dev/null +++ b/tests/scripts/install-apply.test.js @@ -0,0 +1,309 @@ +/** + * Tests for scripts/install-apply.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-apply.js'); + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function cleanup(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function readJson(filePath) { + return JSON.parse(fs.readFileSync(filePath, 'utf8')); +} + +function run(args = [], options = {}) { + const env = { + ...process.env, + HOME: options.homeDir || process.env.HOME, + ...(options.env || {}), + }; + + try { + const stdout = execFileSync('node', [SCRIPT, ...args], { + cwd: options.cwd, + env, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000, + }); + + return { code: 0, stdout, stderr: '' }; + } catch (error) { + return { + code: error.status || 1, + stdout: error.stdout || '', + stderr: error.stderr || '', + }; + } +} + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: 
${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing install-apply.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('shows help with --help', () => { + const result = run(['--help']); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Usage:')); + assert.ok(result.stdout.includes('--dry-run')); + assert.ok(result.stdout.includes('--profile ')); + assert.ok(result.stdout.includes('--modules ')); + })) passed++; else failed++; + + if (test('rejects mixing legacy languages with manifest profile flags', () => { + const result = run(['--profile', 'core', 'typescript']); + assert.strictEqual(result.code, 1); + assert.ok(result.stderr.includes('cannot be combined')); + })) passed++; else failed++; + + if (test('installs Claude rules and writes install-state', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['typescript'], { cwd: projectDir, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + + const rulesDir = path.join(homeDir, '.claude', 'rules'); + assert.ok(fs.existsSync(path.join(rulesDir, 'common', 'coding-style.md'))); + assert.ok(fs.existsSync(path.join(rulesDir, 'typescript', 'testing.md'))); + + const statePath = path.join(homeDir, '.claude', 'ecc', 'install-state.json'); + const state = readJson(statePath); + assert.strictEqual(state.target.id, 'claude-home'); + assert.deepStrictEqual(state.request.legacyLanguages, ['typescript']); + assert.strictEqual(state.request.legacyMode, true); + assert.ok( + state.operations.some(operation => ( + operation.destinationPath === path.join(rulesDir, 'common', 'coding-style.md') + )), + 'Should record common rule file operation' + ); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('installs Cursor configs and writes install-state', () => { + const homeDir = 
createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['--target', 'cursor', 'typescript'], { cwd: projectDir, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-coding-style.md'))); + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'typescript-testing.md'))); + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json'))); + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks', 'session-start.js'))); + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'skills', 'article-writing', 'SKILL.md'))); + + const statePath = path.join(projectDir, '.cursor', 'ecc-install-state.json'); + const state = readJson(statePath); + const normalizedProjectDir = fs.realpathSync(projectDir); + assert.strictEqual(state.target.id, 'cursor-project'); + assert.strictEqual(state.target.root, path.join(normalizedProjectDir, '.cursor')); + assert.ok( + state.operations.some(operation => ( + operation.destinationPath === path.join(normalizedProjectDir, '.cursor', 'hooks', 'session-start.js') + )), + 'Should record hook file copy operation' + ); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('installs Antigravity configs and writes install-state', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['--target', 'antigravity', 'typescript'], { cwd: projectDir, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + + assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'rules', 'common-coding-style.md'))); + assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'rules', 'typescript-testing.md'))); + assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'workflows', 'code-review.md'))); + 
assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'skills', 'architect.md'))); + assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'skills', 'article-writing', 'SKILL.md'))); + + const statePath = path.join(projectDir, '.agent', 'ecc-install-state.json'); + const state = readJson(statePath); + assert.strictEqual(state.target.id, 'antigravity-project'); + assert.ok( + state.operations.some(operation => ( + operation.destinationPath.endsWith(path.join('.agent', 'workflows', 'code-review.md')) + )), + 'Should record workflow file copy operation' + ); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('supports dry-run without mutating the target project', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['--target', 'cursor', '--dry-run', 'typescript'], { + cwd: projectDir, + homeDir, + }); + assert.strictEqual(result.code, 0, result.stderr); + assert.ok(result.stdout.includes('Dry-run install plan')); + assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json'))); + assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'ecc-install-state.json'))); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('supports manifest profile dry-runs through the installer', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['--profile', 'core', '--dry-run'], { cwd: projectDir, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + assert.ok(result.stdout.includes('Mode: manifest')); + assert.ok(result.stdout.includes('Profile: core')); + assert.ok(result.stdout.includes('Included components: (none)')); + assert.ok(result.stdout.includes('Selected modules: rules-core, agents-core, commands-core, hooks-runtime, platform-configs, 
workflow-quality')); + assert.ok(!fs.existsSync(path.join(homeDir, '.claude', 'ecc', 'install-state.json'))); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('installs manifest profiles and writes non-legacy install-state', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['--profile', 'core'], { cwd: projectDir, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + + const claudeRoot = path.join(homeDir, '.claude'); + assert.ok(fs.existsSync(path.join(claudeRoot, 'rules', 'common', 'coding-style.md'))); + assert.ok(fs.existsSync(path.join(claudeRoot, 'agents', 'architect.md'))); + assert.ok(fs.existsSync(path.join(claudeRoot, 'commands', 'plan.md'))); + assert.ok(fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json'))); + assert.ok(fs.existsSync(path.join(claudeRoot, 'scripts', 'hooks', 'session-end.js'))); + assert.ok(fs.existsSync(path.join(claudeRoot, 'plugin.json'))); + + const state = readJson(path.join(claudeRoot, 'ecc', 'install-state.json')); + assert.strictEqual(state.request.profile, 'core'); + assert.strictEqual(state.request.legacyMode, false); + assert.deepStrictEqual(state.request.legacyLanguages, []); + assert.ok(state.resolution.selectedModules.includes('platform-configs')); + assert.ok( + state.operations.some(operation => ( + operation.destinationPath === path.join(claudeRoot, 'commands', 'plan.md') + )), + 'Should record manifest-driven command file copy' + ); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('installs explicit modules for cursor using manifest operations', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + + try { + const result = run(['--target', 'cursor', '--modules', 'platform-configs'], { + cwd: projectDir, + 
homeDir, + }); + assert.strictEqual(result.code, 0, result.stderr); + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json'))); + assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.md'))); + + const state = readJson(path.join(projectDir, '.cursor', 'ecc-install-state.json')); + assert.strictEqual(state.request.profile, null); + assert.deepStrictEqual(state.request.modules, ['platform-configs']); + assert.deepStrictEqual(state.request.includeComponents, []); + assert.deepStrictEqual(state.request.excludeComponents, []); + assert.strictEqual(state.request.legacyMode, false); + assert.ok(state.resolution.selectedModules.includes('platform-configs')); + assert.ok( + !state.operations.some(operation => operation.destinationPath.endsWith('ecc-install-state.json')), + 'Manifest copy operations should not include generated install-state files' + ); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + if (test('installs from ecc-install.json and persists component selections', () => { + const homeDir = createTempDir('install-apply-home-'); + const projectDir = createTempDir('install-apply-project-'); + const configPath = path.join(projectDir, 'ecc-install.json'); + + try { + fs.writeFileSync(configPath, JSON.stringify({ + version: 1, + target: 'claude', + profile: 'developer', + include: ['capability:security'], + exclude: ['capability:orchestration'], + }, null, 2)); + + const result = run(['--config', configPath], { cwd: projectDir, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + + assert.ok(fs.existsSync(path.join(homeDir, '.claude', 'skills', 'security-review', 'SKILL.md'))); + assert.ok(!fs.existsSync(path.join(homeDir, '.claude', 'skills', 'dmux-workflows', 'SKILL.md'))); + + const state = readJson(path.join(homeDir, '.claude', 'ecc', 'install-state.json')); + assert.strictEqual(state.request.profile, 'developer'); + 
assert.deepStrictEqual(state.request.includeComponents, ['capability:security']); + assert.deepStrictEqual(state.request.excludeComponents, ['capability:orchestration']); + assert.ok(state.resolution.selectedModules.includes('security')); + assert.ok(!state.resolution.selectedModules.includes('orchestration')); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/scripts/install-plan.test.js b/tests/scripts/install-plan.test.js new file mode 100644 index 00000000..ad9341a5 --- /dev/null +++ b/tests/scripts/install-plan.test.js @@ -0,0 +1,154 @@ +/** + * Tests for scripts/install-plan.js + */ + +const assert = require('assert'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-plan.js'); + +function run(args = []) { + try { + const stdout = execFileSync('node', [SCRIPT, ...args], { + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000, + }); + return { code: 0, stdout, stderr: '' }; + } catch (error) { + return { + code: error.status || 1, + stdout: error.stdout || '', + stderr: error.stderr || '', + }; + } +} + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing install-plan.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('shows help with no arguments', () => { + const result = run(); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Inspect ECC selective-install manifests')); + })) passed++; else failed++; + + if (test('lists install profiles', () => { + const result = run(['--list-profiles']); + 
assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Install profiles')); + assert.ok(result.stdout.includes('core')); + })) passed++; else failed++; + + if (test('lists install modules', () => { + const result = run(['--list-modules']); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Install modules')); + assert.ok(result.stdout.includes('rules-core')); + })) passed++; else failed++; + + if (test('lists install components', () => { + const result = run(['--list-components', '--family', 'language']); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Install components')); + assert.ok(result.stdout.includes('lang:typescript')); + assert.ok(!result.stdout.includes('capability:security')); + })) passed++; else failed++; + + if (test('prints a filtered install plan for a profile and target', () => { + const result = run([ + '--profile', 'developer', + '--with', 'capability:security', + '--without', 'capability:orchestration', + '--target', 'cursor' + ]); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('Install plan')); + assert.ok(result.stdout.includes('Included components: capability:security')); + assert.ok(result.stdout.includes('Excluded components: capability:orchestration')); + assert.ok(result.stdout.includes('Adapter: cursor-project')); + assert.ok(result.stdout.includes('Target root:')); + assert.ok(result.stdout.includes('Install-state:')); + assert.ok(result.stdout.includes('Operation plan')); + assert.ok(result.stdout.includes('Excluded by selection')); + assert.ok(result.stdout.includes('security')); + })) passed++; else failed++; + + if (test('emits JSON for explicit module resolution', () => { + const result = run([ + '--modules', 'security', + '--with', 'capability:research', + '--target', 'cursor', + '--json' + ]); + assert.strictEqual(result.code, 0); + const parsed = JSON.parse(result.stdout); + assert.ok(parsed.selectedModuleIds.includes('security')); + 
assert.ok(parsed.selectedModuleIds.includes('research-apis')); + assert.ok(parsed.selectedModuleIds.includes('workflow-quality')); + assert.deepStrictEqual(parsed.includedComponentIds, ['capability:research']); + assert.strictEqual(parsed.targetAdapterId, 'cursor-project'); + assert.ok(Array.isArray(parsed.operations)); + assert.ok(parsed.operations.length > 0); + })) passed++; else failed++; + + if (test('loads planning intent from ecc-install.json', () => { + const configDir = path.join(__dirname, '..', 'fixtures', 'tmp-install-plan-config'); + const configPath = path.join(configDir, 'ecc-install.json'); + + try { + require('fs').mkdirSync(configDir, { recursive: true }); + require('fs').writeFileSync(configPath, JSON.stringify({ + version: 1, + target: 'cursor', + profile: 'core', + include: ['capability:security'], + exclude: ['capability:orchestration'], + }, null, 2)); + + const result = run(['--config', configPath, '--json']); + assert.strictEqual(result.code, 0); + const parsed = JSON.parse(result.stdout); + assert.strictEqual(parsed.target, 'cursor'); + assert.deepStrictEqual(parsed.includedComponentIds, ['capability:security']); + assert.deepStrictEqual(parsed.excludedComponentIds, ['capability:orchestration']); + assert.ok(parsed.selectedModuleIds.includes('security')); + assert.ok(!parsed.selectedModuleIds.includes('orchestration')); + } finally { + require('fs').rmSync(configDir, { recursive: true, force: true }); + } + })) passed++; else failed++; + + if (test('fails on unknown arguments', () => { + const result = run(['--unknown-flag']); + assert.strictEqual(result.code, 1); + assert.ok(result.stderr.includes('Unknown argument')); + })) passed++; else failed++; + + if (test('fails on invalid install target', () => { + const result = run(['--profile', 'core', '--target', 'not-a-target']); + assert.strictEqual(result.code, 1); + assert.ok(result.stderr.includes('Unknown install target')); + })) passed++; else failed++; + + console.log(`\nResults: 
Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/scripts/install-sh.test.js b/tests/scripts/install-sh.test.js new file mode 100644 index 00000000..56e4333a --- /dev/null +++ b/tests/scripts/install-sh.test.js @@ -0,0 +1,87 @@ +/** + * Tests for install.sh wrapper delegation + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'install.sh'); + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function cleanup(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function run(args = [], options = {}) { + const env = { + ...process.env, + HOME: options.homeDir || process.env.HOME, + }; + + try { + const stdout = execFileSync('bash', [SCRIPT, ...args], { + cwd: options.cwd, + env, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000, + }); + + return { code: 0, stdout, stderr: '' }; + } catch (error) { + return { + code: error.status || 1, + stdout: error.stdout || '', + stderr: error.stderr || '', + }; + } +} + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing install.sh ===\n'); + + let passed = 0; + let failed = 0; + + if (test('delegates to the Node installer and preserves dry-run output', () => { + const homeDir = createTempDir('install-sh-home-'); + const projectDir = createTempDir('install-sh-project-'); + + try { + const result = run(['--target', 'cursor', '--dry-run', 'typescript'], { + cwd: projectDir, + homeDir, + }); + + assert.strictEqual(result.code, 0, result.stderr); + assert.ok(result.stdout.includes('Dry-run install 
plan')); + assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json'))); + } finally { + cleanup(homeDir); + cleanup(projectDir); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/scripts/list-installed.test.js b/tests/scripts/list-installed.test.js new file mode 100644 index 00000000..a8c75c2d --- /dev/null +++ b/tests/scripts/list-installed.test.js @@ -0,0 +1,139 @@ +/** + * Tests for scripts/list-installed.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'list-installed.js'); +const REPO_ROOT = path.join(__dirname, '..', '..'); +const CURRENT_PACKAGE_VERSION = JSON.parse( + fs.readFileSync(path.join(REPO_ROOT, 'package.json'), 'utf8') +).version; +const CURRENT_MANIFEST_VERSION = JSON.parse( + fs.readFileSync(path.join(REPO_ROOT, 'manifests', 'install-modules.json'), 'utf8') +).version; +const { + createInstallState, + writeInstallState, +} = require('../../scripts/lib/install-state'); + +function createTempDir(prefix) { + return fs.mkdtempSync(path.join(os.tmpdir(), prefix)); +} + +function cleanup(dirPath) { + fs.rmSync(dirPath, { recursive: true, force: true }); +} + +function writeState(filePath, options) { + const state = createInstallState(options); + writeInstallState(filePath, state); +} + +function run(args = [], options = {}) { + const env = { + ...process.env, + HOME: options.homeDir || process.env.HOME, + }; + + try { + const stdout = execFileSync('node', [SCRIPT, ...args], { + cwd: options.cwd, + env, + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000, + }); + + return { code: 0, stdout, stderr: '' }; + } catch (error) { + return { + code: error.status || 1, + stdout: error.stdout || '', + stderr: 
error.stderr || '', + }; + } +} + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing list-installed.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('reports when no install-state files are present', () => { + const homeDir = createTempDir('list-installed-home-'); + const projectRoot = createTempDir('list-installed-project-'); + + try { + const result = run([], { cwd: projectRoot, homeDir }); + assert.strictEqual(result.code, 0); + assert.ok(result.stdout.includes('No ECC install-state files found')); + } finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + if (test('emits JSON for discovered install-state records', () => { + const homeDir = createTempDir('list-installed-home-'); + const projectRoot = createTempDir('list-installed-project-'); + + try { + const statePath = path.join(projectRoot, '.cursor', 'ecc-install-state.json'); + writeState(statePath, { + adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' }, + targetRoot: path.join(projectRoot, '.cursor'), + installStatePath: statePath, + request: { + profile: 'core', + modules: [], + legacyLanguages: [], + legacyMode: false, + }, + resolution: { + selectedModules: ['rules-core', 'platform-configs'], + skippedModules: [], + }, + operations: [], + source: { + repoVersion: CURRENT_PACKAGE_VERSION, + repoCommit: 'abc123', + manifestVersion: CURRENT_MANIFEST_VERSION, + }, + }); + + const result = run(['--json'], { cwd: projectRoot, homeDir }); + assert.strictEqual(result.code, 0, result.stderr); + + const parsed = JSON.parse(result.stdout); + assert.strictEqual(parsed.records.length, 1); + assert.strictEqual(parsed.records[0].state.target.id, 'cursor-project'); + assert.strictEqual(parsed.records[0].state.request.profile, 'core'); + } 
finally { + cleanup(homeDir); + cleanup(projectRoot); + } + })) passed++; else failed++; + + console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`); + process.exit(failed > 0 ? 1 : 0); +} + +runTests(); diff --git a/tests/scripts/orchestration-status.test.js b/tests/scripts/orchestration-status.test.js new file mode 100644 index 00000000..c034eccd --- /dev/null +++ b/tests/scripts/orchestration-status.test.js @@ -0,0 +1,76 @@ +/** + * Tests for scripts/orchestration-status.js + */ + +const assert = require('assert'); +const fs = require('fs'); +const os = require('os'); +const path = require('path'); +const { execFileSync } = require('child_process'); + +const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'orchestration-status.js'); + +function run(args = [], options = {}) { + try { + const stdout = execFileSync('node', [SCRIPT, ...args], { + encoding: 'utf8', + stdio: ['pipe', 'pipe', 'pipe'], + timeout: 10000, + cwd: options.cwd || process.cwd(), + }); + return { code: 0, stdout, stderr: '' }; + } catch (error) { + return { + code: error.status || 1, + stdout: error.stdout || '', + stderr: error.stderr || '', + }; + } +} + +function test(name, fn) { + try { + fn(); + console.log(` \u2713 ${name}`); + return true; + } catch (error) { + console.log(` \u2717 ${name}`); + console.log(` Error: ${error.message}`); + return false; + } +} + +function runTests() { + console.log('\n=== Testing orchestration-status.js ===\n'); + + let passed = 0; + let failed = 0; + + if (test('emits canonical dmux snapshots for plan files', () => { + const repoRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-status-repo-')); + + try { + const planPath = path.join(repoRoot, 'workflow.json'); + fs.writeFileSync(planPath, JSON.stringify({ + sessionName: 'workflow-visual-proof', + repoRoot, + coordinationRoot: path.join(repoRoot, '.claude', 'orchestration') + })); + + const result = run([planPath], { cwd: repoRoot }); + assert.strictEqual(result.code, 0, result.stderr); + 
/**
 * Tests for scripts/repair.js
 *
 * Strategy: install the "cursor" target into a throwaway project,
 * deliberately drift a managed file, then verify that repair.js restores
 * it (and that --dry-run only reports what it would change).
 */

const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');

const REPO_ROOT = path.join(__dirname, '..', '..');
const INSTALL_SCRIPT = path.join(REPO_ROOT, 'scripts', 'install-apply.js');
const DOCTOR_SCRIPT = path.join(REPO_ROOT, 'scripts', 'doctor.js');
const REPAIR_SCRIPT = path.join(REPO_ROOT, 'scripts', 'repair.js');

// Create a unique scratch directory under the OS temp root.
function createTempDir(prefix) {
  return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
}

// Remove a scratch directory, ignoring missing paths.
function cleanup(dirPath) {
  fs.rmSync(dirPath, { recursive: true, force: true });
}

/**
 * Run a repo script under `node`, optionally overriding HOME and cwd.
 * Never throws: non-zero exits are folded into the returned shape.
 *
 * @param {string} scriptPath - absolute path to the script to execute
 * @param {string[]} args - CLI arguments for the script
 * @param {{homeDir?: string, cwd?: string}} options
 * @returns {{code: number, stdout: string, stderr: string}}
 */
function runNode(scriptPath, args = [], options = {}) {
  const env = {
    ...process.env,
    HOME: options.homeDir || process.env.HOME,
  };

  try {
    const stdout = execFileSync('node', [scriptPath, ...args], {
      cwd: options.cwd,
      env,
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe'],
      timeout: 10000,
    });
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    return {
      code: error.status || 1,
      stdout: error.stdout || '',
      stderr: error.stderr || '',
    };
  }
}

// Execute one test case; report pass/fail on stdout and return the outcome.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
    return false;
  }
}

function runTests() {
  console.log('\n=== Testing repair.js ===\n');

  const cases = [
    ['repairs drifted managed files and refreshes install-state', () => {
      const home = createTempDir('repair-home-');
      const project = createTempDir('repair-project-');

      try {
        const install = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(install.code, 0, install.stderr);

        const cursorRoot = path.join(project, '.cursor');
        const managedPath = path.join(cursorRoot, 'hooks.json');
        const statePath = path.join(cursorRoot, 'ecc-install-state.json');
        // Resolve symlinks (e.g. /tmp on macOS) so path comparisons match
        // the canonical paths the scripts report.
        const expectedManagedPath = path.join(fs.realpathSync(cursorRoot), 'hooks.json');
        const expectedContent = fs.readFileSync(path.join(REPO_ROOT, '.cursor', 'hooks.json'), 'utf8');
        const installedAtBefore = JSON.parse(fs.readFileSync(statePath, 'utf8')).installedAt;

        // Introduce drift in a managed file.
        fs.writeFileSync(managedPath, '{"drifted":true}\n');

        const doctorBefore = runNode(DOCTOR_SCRIPT, ['--target', 'cursor', '--json'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(doctorBefore.code, 1);
        const issues = JSON.parse(doctorBefore.stdout).results[0].issues;
        assert.ok(issues.some((issue) => issue.code === 'drifted-managed-files'));

        const repair = runNode(REPAIR_SCRIPT, ['--target', 'cursor', '--json'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(repair.code, 0, repair.stderr);

        const parsed = JSON.parse(repair.stdout);
        assert.strictEqual(parsed.results[0].status, 'repaired');
        assert.ok(parsed.results[0].repairedPaths.includes(expectedManagedPath));
        assert.strictEqual(fs.readFileSync(managedPath, 'utf8'), expectedContent);

        // Repair must keep the original install timestamp while recording
        // a fresh validation pass.
        const repairedState = JSON.parse(fs.readFileSync(statePath, 'utf8'));
        assert.strictEqual(repairedState.installedAt, installedAtBefore);
        assert.ok(repairedState.lastValidatedAt);

        const doctorAfter = runNode(DOCTOR_SCRIPT, ['--target', 'cursor'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(doctorAfter.code, 0, doctorAfter.stderr);
        assert.ok(doctorAfter.stdout.includes('Status: OK'));
      } finally {
        cleanup(home);
        cleanup(project);
      }
    }],
    ['supports dry-run without mutating drifted files', () => {
      const home = createTempDir('repair-home-');
      const project = createTempDir('repair-project-');

      try {
        const install = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(install.code, 0, install.stderr);

        const cursorRoot = path.join(project, '.cursor');
        const managedPath = path.join(cursorRoot, 'hooks.json');
        const expectedManagedPath = path.join(fs.realpathSync(cursorRoot), 'hooks.json');
        const driftedContent = '{"drifted":true}\n';
        fs.writeFileSync(managedPath, driftedContent);

        const repair = runNode(REPAIR_SCRIPT, ['--target', 'cursor', '--dry-run', '--json'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(repair.code, 0, repair.stderr);

        const parsed = JSON.parse(repair.stdout);
        assert.strictEqual(parsed.dryRun, true);
        assert.ok(parsed.results[0].plannedRepairs.includes(expectedManagedPath));
        // Dry-run must leave the drifted file untouched.
        assert.strictEqual(fs.readFileSync(managedPath, 'utf8'), driftedContent);
      } finally {
        cleanup(home);
        cleanup(project);
      }
    }],
  ];

  let passed = 0;
  let failed = 0;
  for (const [name, fn] of cases) {
    if (test(name, fn)) {
      passed++;
    } else {
      failed++;
    }
  }

  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}

runTests();
/**
 * Tests for scripts/session-inspect.js
 *
 * Seeds a fake Claude session history under a temp HOME, then checks the
 * inspector's usage output, its canonical JSON payload, and --write.
 */

const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');

const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'session-inspect.js');

/**
 * Invoke session-inspect.js under `node` and capture its outcome.
 * Never throws: non-zero exits are folded into the returned shape.
 *
 * @param {string[]} args - CLI arguments
 * @param {{cwd?: string, env?: Object}} options - optional cwd / env overrides
 * @returns {{code: number, stdout: string, stderr: string}}
 */
function run(args = [], options = {}) {
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], {
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe'],
      timeout: 10000,
      cwd: options.cwd || process.cwd(),
      env: {
        ...process.env,
        ...(options.env || {}),
      },
    });
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    return {
      code: error.status || 1,
      stdout: error.stdout || '',
      stderr: error.stderr || '',
    };
  }
}

// Execute one test case; report pass/fail on stdout and return the outcome.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
    return false;
  }
}

// Write a minimal Claude session history file under the given HOME so the
// claude-history adapter has something to inspect.
function seedSession(homeDir) {
  const sessionsDir = path.join(homeDir, '.claude', 'sessions');
  fs.mkdirSync(sessionsDir, { recursive: true });
  fs.writeFileSync(
    path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp'),
    '# Inspect Session\n\n**Branch:** feat/session-inspect\n'
  );
}

function runTests() {
  console.log('\n=== Testing session-inspect.js ===\n');

  const cases = [
    ['shows usage when no target is provided', () => {
      const result = run();
      assert.strictEqual(result.code, 1);
      assert.ok(result.stdout.includes('Usage:'));
    }],
    ['prints canonical JSON for claude history targets', () => {
      const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-inspect-home-'));

      try {
        seedSession(homeDir);

        const result = run(['claude:latest'], {
          env: { HOME: homeDir },
        });

        assert.strictEqual(result.code, 0, result.stderr);
        const payload = JSON.parse(result.stdout);
        assert.strictEqual(payload.adapterId, 'claude-history');
        assert.strictEqual(payload.session.kind, 'history');
        assert.strictEqual(payload.workers[0].branch, 'feat/session-inspect');
      } finally {
        fs.rmSync(homeDir, { recursive: true, force: true });
      }
    }],
    ['writes snapshot JSON to disk when --write is provided', () => {
      const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-inspect-home-'));
      const outputDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-inspect-out-'));
      const outputPath = path.join(outputDir, 'snapshot.json');

      try {
        seedSession(homeDir);

        const result = run(['claude:latest', '--write', outputPath], {
          env: { HOME: homeDir },
        });

        assert.strictEqual(result.code, 0, result.stderr);
        assert.ok(fs.existsSync(outputPath));
        const written = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
        assert.strictEqual(written.adapterId, 'claude-history');
      } finally {
        fs.rmSync(homeDir, { recursive: true, force: true });
        fs.rmSync(outputDir, { recursive: true, force: true });
      }
    }],
  ];

  let passed = 0;
  let failed = 0;
  for (const [name, fn] of cases) {
    if (test(name, fn)) {
      passed++;
    } else {
      failed++;
    }
  }

  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}

runTests();
/**
 * Tests for scripts/uninstall.js
 *
 * Installs the "cursor" target into a throwaway project, then verifies
 * that uninstall.js removes only the managed files (and that --dry-run
 * removes nothing).
 */

const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');

const INSTALL_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-apply.js');
const UNINSTALL_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'uninstall.js');

// Create a unique scratch directory under the OS temp root.
function createTempDir(prefix) {
  return fs.mkdtempSync(path.join(os.tmpdir(), prefix));
}

// Remove a scratch directory, ignoring missing paths.
function cleanup(dirPath) {
  fs.rmSync(dirPath, { recursive: true, force: true });
}

/**
 * Run a repo script under `node`, optionally overriding HOME and cwd.
 * Never throws: non-zero exits are folded into the returned shape.
 *
 * @param {string} scriptPath - absolute path to the script to execute
 * @param {string[]} args - CLI arguments for the script
 * @param {{homeDir?: string, cwd?: string}} options
 * @returns {{code: number, stdout: string, stderr: string}}
 */
function runNode(scriptPath, args = [], options = {}) {
  const env = {
    ...process.env,
    HOME: options.homeDir || process.env.HOME,
  };

  try {
    const stdout = execFileSync('node', [scriptPath, ...args], {
      cwd: options.cwd,
      env,
      encoding: 'utf8',
      stdio: ['pipe', 'pipe', 'pipe'],
      timeout: 10000,
    });
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    return {
      code: error.status || 1,
      stdout: error.stdout || '',
      stderr: error.stderr || '',
    };
  }
}

// Execute one test case; report pass/fail on stdout and return the outcome.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
    return false;
  }
}

function runTests() {
  console.log('\n=== Testing uninstall.js ===\n');

  const cases = [
    ['removes managed files and keeps unrelated files', () => {
      const home = createTempDir('uninstall-home-');
      const project = createTempDir('uninstall-project-');

      try {
        const install = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(install.code, 0, install.stderr);

        const cursorRoot = path.join(project, '.cursor');
        const managedPath = path.join(cursorRoot, 'hooks.json');
        const statePath = path.join(cursorRoot, 'ecc-install-state.json');
        // A user-created file that the uninstaller must not touch.
        const unrelatedPath = path.join(cursorRoot, 'custom-user-note.txt');
        fs.writeFileSync(unrelatedPath, 'leave me alone');

        const uninstall = runNode(UNINSTALL_SCRIPT, ['--target', 'cursor'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(uninstall.code, 0, uninstall.stderr);
        assert.ok(uninstall.stdout.includes('Uninstall summary'));
        assert.ok(!fs.existsSync(managedPath));
        assert.ok(!fs.existsSync(statePath));
        assert.ok(fs.existsSync(unrelatedPath));
      } finally {
        cleanup(home);
        cleanup(project);
      }
    }],
    ['supports dry-run without removing files', () => {
      const home = createTempDir('uninstall-home-');
      const project = createTempDir('uninstall-project-');

      try {
        const install = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(install.code, 0, install.stderr);

        const cursorRoot = path.join(project, '.cursor');
        const managedPath = path.join(cursorRoot, 'hooks.json');
        const statePath = path.join(cursorRoot, 'ecc-install-state.json');

        const uninstall = runNode(UNINSTALL_SCRIPT, ['--target', 'cursor', '--dry-run', '--json'], {
          cwd: project,
          homeDir: home,
        });
        assert.strictEqual(uninstall.code, 0, uninstall.stderr);

        const parsed = JSON.parse(uninstall.stdout);
        assert.strictEqual(parsed.dryRun, true);
        assert.ok(parsed.results[0].plannedRemovals.length > 0);
        // Dry-run must leave every installed file in place.
        assert.ok(fs.existsSync(managedPath));
        assert.ok(fs.existsSync(statePath));
      } finally {
        cleanup(home);
        cleanup(project);
      }
    }],
  ];

  let passed = 0;
  let failed = 0;
  for (const [name, fn] of cases) {
    if (test(name, fn)) {
      passed++;
    } else {
      failed++;
    }
  }

  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}

runTests();