feat: orchestration harness, selective install, observer improvements

This commit is contained in:
Affaan Mustafa
2026-03-14 12:55:25 -07:00
parent 424f3b3729
commit 4e028bd2d2
76 changed files with 11050 additions and 340 deletions

View File

@@ -230,6 +230,8 @@ print(f"Cache creation: {message.usage.cache_creation_input_tokens}")
Process large volumes asynchronously at 50% cost reduction:
```python
import time
batch = client.messages.batches.create(
requests=[
{
@@ -306,6 +308,8 @@ while True:
## Error Handling
```python
import time
from anthropic import APIError, RateLimitError, APIConnectionError
try:

View File

@@ -148,6 +148,7 @@ A pattern I've been using that's made a real difference:
If using a crossposting service (e.g., Postbridge, Buffer, or a custom API), the pattern looks like:
```python
import os
import requests
resp = requests.post(

View File

@@ -92,6 +92,7 @@ def post_thread(oauth, tweets: list[str]) -> list[str]:
if reply_to:
payload["reply"] = {"in_reply_to_tweet_id": reply_to}
resp = oauth.post("https://api.x.com/2/tweets", json=payload)
resp.raise_for_status()
tweet_id = resp.json()["data"]["id"]
ids.append(tweet_id)
reply_to = tweet_id
@@ -167,6 +168,8 @@ resp = oauth.post(
Always check `x-rate-limit-remaining` and `x-rate-limit-reset` headers.
```python
import time
remaining = int(resp.headers.get("x-rate-limit-remaining", 0))
if remaining < 5:
reset = int(resp.headers.get("x-rate-limit-reset", 0))

View File

@@ -0,0 +1,322 @@
# ECC 2.0 Session Adapter Discovery
## Purpose
This document turns the March 11 ECC 2.0 control-plane direction into a
concrete adapter and snapshot design grounded in the orchestration code that
already exists in this repo.
## Current Implemented Substrate
The repo already has a real first-pass orchestration substrate:
- `scripts/lib/tmux-worktree-orchestrator.js`
provisions tmux panes plus isolated git worktrees
- `scripts/orchestrate-worktrees.js`
is the current session launcher
- `scripts/lib/orchestration-session.js`
collects machine-readable session snapshots
- `scripts/orchestration-status.js`
exports those snapshots from a session name or plan file
- `commands/sessions.md`
already exposes adjacent session-history concepts from Claude's local store
- `scripts/lib/session-adapters/canonical-session.js`
defines the canonical `ecc.session.v1` normalization layer
- `scripts/lib/session-adapters/dmux-tmux.js`
wraps the current orchestration snapshot collector as adapter `dmux-tmux`
- `scripts/lib/session-adapters/claude-history.js`
normalizes Claude local session history as a second adapter
- `scripts/lib/session-adapters/registry.js`
selects adapters from explicit targets and target types
- `scripts/session-inspect.js`
emits canonical read-only session snapshots through the adapter registry
In practice, ECC can already answer:
- what workers exist in a tmux-orchestrated session
- what pane each worker is attached to
- what task, status, and handoff files exist for each worker
- whether the session is active and how many panes/workers exist
- what the most recent Claude local session looked like in the same canonical
snapshot shape as orchestration sessions
That is enough to prove the substrate. It is not yet enough to qualify as a
general ECC 2.0 control plane.
## What The Current Snapshot Actually Models
The current snapshot model coming out of `scripts/lib/orchestration-session.js`
has these effective fields:
```json
{
"sessionName": "workflow-visual-proof",
"coordinationDir": ".../.claude/orchestration/workflow-visual-proof",
"repoRoot": "...",
"targetType": "plan",
"sessionActive": true,
"paneCount": 2,
"workerCount": 2,
"workerStates": {
"running": 1,
"completed": 1
},
"panes": [
{
"paneId": "%95",
"windowIndex": 1,
"paneIndex": 0,
"title": "seed-check",
"currentCommand": "codex",
"currentPath": "/tmp/worktree",
"active": false,
"dead": false,
"pid": 1234
}
],
"workers": [
{
"workerSlug": "seed-check",
"workerDir": ".../seed-check",
"status": {
"state": "running",
"updated": "...",
"branch": "...",
"worktree": "...",
"taskFile": "...",
"handoffFile": "..."
},
"task": {
"objective": "...",
"seedPaths": ["scripts/orchestrate-worktrees.js"]
},
"handoff": {
"summary": [],
"validation": [],
"remainingRisks": []
},
"files": {
"status": ".../status.md",
"task": ".../task.md",
"handoff": ".../handoff.md"
},
"pane": {
"paneId": "%95",
"title": "seed-check"
}
}
]
}
```
This is already a useful operator payload. The main limitation is that it is
implicitly tied to one execution style:
- tmux pane identity
- worker slug equals pane title
- markdown coordination files
- plan-file or session-name lookup rules
## Gap Between ECC 1.x And ECC 2.0
ECC 1.x currently has two different "session" surfaces:
1. Claude local session history
2. Orchestration runtime/session snapshots
Those surfaces are adjacent but not unified.
The missing ECC 2.0 layer is a harness-neutral session adapter boundary that
can normalize:
- tmux-orchestrated workers
- plain Claude sessions
- Codex worktree sessions
- OpenCode sessions
- future GitHub/App or remote-control sessions
Without that adapter layer, any future operator UI would be forced to read
tmux-specific details and coordination markdown directly.
## Adapter Boundary
ECC 2.0 should introduce a canonical session adapter contract.
Suggested minimal interface:
```ts
type SessionAdapter = {
id: string;
canOpen(target: SessionTarget): boolean;
open(target: SessionTarget): Promise<AdapterHandle>;
};
type AdapterHandle = {
getSnapshot(): Promise<CanonicalSessionSnapshot>;
streamEvents?(onEvent: (event: SessionEvent) => void): Promise<() => void>;
runAction?(action: SessionAction): Promise<ActionResult>;
};
```
### Canonical Snapshot Shape
Suggested first-pass canonical payload:
```json
{
"schemaVersion": "ecc.session.v1",
"adapterId": "dmux-tmux",
"session": {
"id": "workflow-visual-proof",
"kind": "orchestrated",
"state": "active",
"repoRoot": "...",
"sourceTarget": {
"type": "plan",
"value": ".claude/plan/workflow-visual-proof.json"
}
},
"workers": [
{
"id": "seed-check",
"label": "seed-check",
"state": "running",
"branch": "...",
"worktree": "...",
"runtime": {
"kind": "tmux-pane",
"command": "codex",
"pid": 1234,
"active": false,
"dead": false
},
"intent": {
"objective": "...",
"seedPaths": ["scripts/orchestrate-worktrees.js"]
},
"outputs": {
"summary": [],
"validation": [],
"remainingRisks": []
},
"artifacts": {
"statusFile": "...",
"taskFile": "...",
"handoffFile": "..."
}
}
],
"aggregates": {
"workerCount": 2,
"states": {
"running": 1,
"completed": 1
}
}
}
```
This preserves the useful signal already present while removing tmux-specific
details from the control-plane contract.
## First Adapters To Support
### 1. `dmux-tmux`
Wrap the logic already living in
`scripts/lib/orchestration-session.js`.
This is the easiest first adapter because the substrate is already real.
### 2. `claude-history`
Normalize the data that
`commands/sessions.md`
and the existing session-manager utilities already expose:
- session id / alias
- branch
- worktree
- project path
- recency / file size / item counts
This provides a non-orchestrated baseline for ECC 2.0.
### 3. `codex-worktree`
Use the same canonical shape, but back it with Codex-native execution metadata
instead of tmux assumptions where available.
### 4. `opencode`
Use the same adapter boundary once OpenCode session metadata is stable enough to
normalize.
## What Should Stay Out Of The Adapter Layer
The adapter layer should not own:
- business logic for merge sequencing
- operator UI layout
- pricing or monetization decisions
- install profile selection
- tmux lifecycle orchestration itself
Its job is narrower:
- detect session targets
- load normalized snapshots
- optionally stream runtime events
- optionally expose safe actions
## Current File Layout
The adapter layer now lives in:
```text
scripts/lib/session-adapters/
canonical-session.js
dmux-tmux.js
claude-history.js
registry.js
scripts/session-inspect.js
tests/lib/session-adapters.test.js
tests/scripts/session-inspect.test.js
```
The current orchestration snapshot parser is now being consumed as an adapter
implementation rather than remaining the only product contract.
## Immediate Next Steps
1. Add a third adapter, likely `codex-worktree`, so the abstraction moves
beyond tmux plus Claude-history.
2. Decide whether canonical snapshots need separate `state` and `health`
fields before UI work starts.
3. Decide whether event streaming belongs in v1 or stays out until after the
snapshot layer proves itself.
4. Build operator-facing panels only on top of the adapter registry, not by
reading orchestration internals directly.
## Open Questions
1. Should worker identity be keyed by worker slug, branch, or stable UUID?
2. Do we need separate `state` and `health` fields at the canonical layer?
3. Should event streaming be part of v1, or should ECC 2.0 ship snapshot-only
first?
4. How much path information should be redacted before snapshots leave the local
machine?
5. Should the adapter registry live inside this repo long-term, or move into the
eventual ECC 2.0 control-plane app once the interface stabilizes?
## Recommendation
Treat the current tmux/worktree implementation as adapter `0`, not as the final
product surface.
The shortest path to ECC 2.0 is:
1. preserve the current orchestration substrate
2. wrap it in a canonical session adapter contract
3. add one non-tmux adapter
4. only then start building operator panels on top

View File

@@ -0,0 +1,286 @@
# Mega Plan Repo Prompt List — March 12, 2026
## Purpose
Use these prompts to split the remaining March 11 mega-plan work by repo.
They are written for parallel agents and assume the March 12 orchestration and
Windows CI lane is already merged via `#417`.
## Current Snapshot
- `everything-claude-code` has finished the orchestration, Codex baseline, and
Windows CI recovery lane.
- The next open ECC Phase 1 items are:
- review `#399`
- convert recurring discussion pressure into tracked issues
- define selective-install architecture
- write the ECC 2.0 discovery doc
- `agentshield`, `ECC-website`, and `skill-creator-app` all have dirty
`main` worktrees and should not be edited directly on `main`.
- `applications/` is not a standalone git repo. It lives inside the parent
workspace repo at `<ECC_ROOT>`.
## Repo: `everything-claude-code`
### Prompt A — PR `#399` Review and Merge Readiness
```text
Work in: <ECC_ROOT>/everything-claude-code
Goal:
Review PR #399 ("fix(observe): 5-layer automated session guard to prevent
self-loop observations") against the actual loop problem described in issue
#398 and the March 11 mega plan. Do not assume the old failing CI on the PR is
still meaningful, because the Windows baseline was repaired later in #417.
Tasks:
1. Read issue #398 and PR #399 in full.
2. Inspect the observe hook implementation and tests locally.
3. Determine whether the PR really prevents observer self-observation,
automated-session observation, and runaway recursive loops.
4. Identify any missing env-based bypass, idle gating, or session exclusion
behavior.
5. Produce a merge recommendation with findings ordered by severity.
Constraints:
- Do not merge automatically.
- Do not rewrite unrelated hook behavior.
- If you make code changes, keep them tightly scoped to observe behavior and
tests.
Deliverables:
- review summary
- exact findings with file references
- recommended merge / rework decision
- test commands run
```
### Prompt B — Roadmap Issues Extraction
```text
Work in: <ECC_ROOT>/everything-claude-code
Goal:
Convert recurring discussion pressure from the mega plan into concrete GitHub
issues. Focus on high-signal roadmap items that unblock ECC 1.x and ECC 2.0.
Create issue drafts or a ready-to-post issue bundle for:
1. selective install profiles
2. uninstall / doctor / repair lifecycle
3. generated skill placement and provenance policy
4. governance past the tool call
5. ECC 2.0 discovery doc / adapter contracts
Tasks:
1. Read the March 11 mega plan and March 12 handoff.
2. Deduplicate against already-open issues.
3. Draft issue titles, problem statements, scope, non-goals, acceptance
criteria, and file/system areas affected.
Constraints:
- Do not create filler issues.
- Prefer 4-6 high-value issues over a large backlog dump.
- Keep each issue scoped so it could plausibly land in one focused PR series.
Deliverables:
- issue shortlist
- ready-to-post issue bodies
- duplication notes against existing issues
```
### Prompt C — ECC 2.0 Discovery and Adapter Spec
```text
Work in: <ECC_ROOT>/everything-claude-code
Goal:
Turn the existing ECC 2.0 vision into a first concrete discovery doc focused on
adapter contracts, session/task state, token accounting, and security/policy
events.
Tasks:
1. Use the current orchestration/session snapshot code as the baseline.
2. Define a normalized adapter contract for Claude Code, Codex, OpenCode, and
later Cursor / GitHub App integration.
3. Define the initial SQLite-backed data model for sessions, tasks, worktrees,
events, findings, and approvals.
4. Define what stays in ECC 1.x versus what belongs in ECC 2.0.
5. Call out unresolved product decisions separately from implementation
requirements.
Constraints:
- Treat the current tmux/worktree/session snapshot substrate as the starting
point, not a blank slate.
- Keep the doc implementation-oriented.
Deliverables:
- discovery doc
- adapter contract sketch
- event model sketch
- unresolved questions list
```
## Repo: `agentshield`
### Prompt — False Positive Audit and Regression Plan
```text
Work in: <ECC_ROOT>/agentshield
Goal:
Advance the AgentShield Phase 2 workstream from the mega plan: reduce false
positives, especially where declarative deny rules, block hooks, docs examples,
or config snippets are misclassified as executable risk.
Important repo state:
- branch is currently main
- dirty files exist in CLAUDE.md and README.md
- classify or park existing edits before broader changes
Tasks:
1. Inspect the current false-positive behavior around:
- .claude hook configs
- AGENTS.md / CLAUDE.md
- .cursor rules
- .opencode plugin configs
- sample deny-list patterns
2. Separate parser behavior for declarative patterns vs executable commands.
3. Propose regression coverage additions and the exact fixture set needed.
4. If safe after branch setup, implement the first pass of the classifier fix.
Constraints:
- do not work directly on dirty main
- keep fixes parser/classifier-scoped
- document any remaining ambiguity explicitly
Deliverables:
- branch recommendation
- false-positive taxonomy
- proposed or landed regression tests
- remaining edge cases
```
## Repo: `ECC-website`
### Prompt — Landing Rewrite and Product Framing
```text
Work in: <ECC_ROOT>/ECC-website
Goal:
Execute the website lane from the mega plan by rewriting the landing/product
framing away from "config repo" and toward "open agent harness system" plus
future control-plane direction.
Important repo state:
- branch is currently main
- dirty files exist in favicon assets and multiple page/component files
- branch before meaningful work and preserve existing edits unless explicitly
classified as stale
Tasks:
1. Classify the dirty main worktree state.
2. Rewrite the landing page narrative around:
- open agent harness system
- runtime guardrails
- cross-harness parity
- operator visibility and security
3. Define or update the next key pages:
- /skills
- /security
- /platforms
- /system or /dashboard
4. Keep the page visually intentional and product-forward, not generic SaaS.
Constraints:
- do not silently overwrite existing dirty work
- preserve existing design system where it is coherent
- distinguish ECC 1.x toolkit from ECC 2.0 control plane clearly
Deliverables:
- branch recommendation
- landing-page rewrite diff or content spec
- follow-up page map
- deployment readiness notes
```
## Repo: `skill-creator-app`
### Prompt — Skill Import Pipeline and Product Fit
```text
Work in: <ECC_ROOT>/skill-creator-app
Goal:
Align skill-creator-app with the mega-plan external skill sourcing and audited
import pipeline workstream.
Important repo state:
- branch is currently main
- dirty files exist in README.md and src/lib/github.ts
- classify or park existing changes before broader work
Tasks:
1. Assess whether the app should support:
- inventorying external skills
- provenance tagging
- dependency/risk audit fields
- ECC convention adaptation workflows
2. Review the existing GitHub integration surface in src/lib/github.ts.
3. Produce a concrete product/technical scope for an audited import pipeline.
4. If safe after branching, land the smallest enabling changes for metadata
capture or GitHub ingestion.
Constraints:
- do not turn this into a generic prompt-builder
- keep the focus on audited skill ingestion and ECC-compatible output
Deliverables:
- product-fit summary
- recommended scope for v1
- data fields / workflow steps for the import pipeline
- code changes if they are small and clearly justified
```
## Repo: `ECC` Workspace (`applications/`, `knowledge/`, `tasks/`)
### Prompt — Example Apps and Workflow Reliability Proofs
```text
Work in: <ECC_ROOT>
Goal:
Use the parent ECC workspace to support the mega-plan hosted/workflow lanes.
This is not a standalone applications repo; it is the umbrella workspace that
contains applications/, knowledge/, tasks/, and related planning assets.
Tasks:
1. Inventory what in applications/ is real product code vs placeholder.
2. Identify where example repos or demo apps should live for:
- GitHub App workflow proofs
- ECC 2.0 prototype spikes
- example install / setup reliability checks
3. Propose a clean workspace structure so product code, research, and planning
stop bleeding into each other.
4. Recommend which proof-of-concept should be built first.
Constraints:
- do not move large directories blindly
- distinguish repo structure recommendations from immediate code changes
- keep recommendations compatible with the current multi-repo ECC setup
Deliverables:
- workspace inventory
- proposed structure
- first demo/app recommendation
- follow-up branch/worktree plan
```
## Local Continuation
The current worktree should stay on ECC-native Phase 1 work that does not touch
the existing dirty skill-file changes here. The best next local tasks are:
1. selective-install architecture
2. ECC 2.0 discovery doc
3. PR `#399` review

View File

@@ -0,0 +1,272 @@
# Phase 1 Issue Bundle — March 12, 2026
## Status
These issue drafts were prepared from the March 11 mega plan plus the March 12
handoff. I attempted to open them directly in GitHub, but issue creation was
blocked by missing GitHub authentication in the MCP session.
## GitHub Status
These drafts were later posted via `gh`:
- `#423` Implement manifest-driven selective install profiles for ECC
- `#421` Add ECC install-state plus uninstall / doctor / repair lifecycle
- `#424` Define canonical session adapter contract for ECC 2.0 control plane
- `#422` Define generated skill placement and provenance policy
- `#425` Define governance and visibility past the tool call
The bodies below are preserved as the local source bundle used to create the
issues.
## Issue 1
### Title
Implement manifest-driven selective install profiles for ECC
### Labels
- `enhancement`
### Body
```md
## Problem
ECC still installs primarily by target and language. The repo now has first-pass
selective-install manifests and a non-mutating plan resolver, but the installer
itself does not yet consume those profiles.
Current groundwork already landed in-repo:
- `manifests/install-modules.json`
- `manifests/install-profiles.json`
- `scripts/ci/validate-install-manifests.js`
- `scripts/lib/install-manifests.js`
- `scripts/install-plan.js`
That means the missing step is no longer design discovery. The missing step is
execution: wire profile/module resolution into the actual install flow while
preserving backward compatibility.
## Scope
Implement manifest-driven install execution for current ECC targets:
- `claude`
- `cursor`
- `antigravity`
Add first-pass support for:
- `ecc-install --profile <name>`
- `ecc-install --modules <id,id,...>`
- target-aware filtering based on module target support
- backward-compatible legacy language installs during rollout
## Non-Goals
- Full uninstall/doctor/repair lifecycle in the same issue
- Codex/OpenCode install targets in the first pass if that blocks rollout
- Reorganizing the repository into separate published packages
## Acceptance Criteria
- `install.sh` can resolve and install a named profile
- `install.sh` can resolve explicit module IDs
- Unsupported modules for a target are skipped or rejected deterministically
- Legacy language-based install mode still works
- Tests cover profile resolution and installer behavior
- Docs explain the new preferred profile/module install path
```
## Issue 2
### Title
Add ECC install-state plus uninstall / doctor / repair lifecycle
### Labels
- `enhancement`
### Body
```md
## Problem
ECC has no canonical installed-state record. That makes uninstall, repair, and
post-install inspection nondeterministic.
Today the repo can classify installable content, but it still cannot reliably
answer:
- what profile/modules were installed
- what target they were installed into
- what paths ECC owns
- how to remove or repair only ECC-managed files
Without install-state, lifecycle commands are guesswork.
## Scope
Introduce a durable install-state contract and the first lifecycle commands:
- `ecc list-installed`
- `ecc uninstall`
- `ecc doctor`
- `ecc repair`
Suggested state locations:
- Claude: `~/.claude/ecc/install-state.json`
- Cursor: `./.cursor/ecc-install-state.json`
- Antigravity: `./.agent/ecc-install-state.json`
The state file should capture at minimum:
- installed version
- timestamp
- target
- profile
- resolved modules
- copied/managed paths
- source repo version or package version
## Non-Goals
- Rebuilding the installer architecture from scratch
- Full remote/cloud control-plane functionality
- Target support expansion beyond the current local installers unless it falls
out naturally
## Acceptance Criteria
- Successful installs write install-state deterministically
- `list-installed` reports target/profile/modules/version cleanly
- `doctor` reports missing or drifted managed paths
- `repair` restores missing managed files from recorded install-state
- `uninstall` removes only ECC-managed files and leaves unrelated local files
alone
- Tests cover install-state creation and lifecycle behavior
```
## Issue 3
### Title
Define canonical session adapter contract for ECC 2.0 control plane
### Labels
- `enhancement`
### Body
```md
## Problem
ECC now has real orchestration/session substrate, but it is still
implementation-specific.
Current state:
- tmux/worktree orchestration exists
- machine-readable session snapshots exist
- Claude local session-history commands exist
What does not exist yet is a harness-neutral adapter boundary that can normalize
session/task state across:
- tmux-orchestrated workers
- plain Claude sessions
- Codex worktrees
- OpenCode sessions
- later remote or GitHub-integrated operator surfaces
Without that adapter contract, any future ECC 2.0 operator shell will be forced
to read tmux-specific and markdown-coordination details directly.
## Scope
Define and implement the first-pass canonical session adapter layer.
Suggested deliverables:
- adapter registry
- canonical session snapshot schema
- `dmux-tmux` adapter backed by current orchestration code
- `claude-history` adapter backed by current session history utilities
- read-only inspection CLI for canonical session snapshots
## Non-Goals
- Full ECC 2.0 UI in the same issue
- Monetization/GitHub App implementation
- Remote multi-user control plane
## Acceptance Criteria
- There is a documented canonical snapshot contract
- Current tmux orchestration snapshot code is wrapped as an adapter rather than
the top-level product contract
- A second non-tmux adapter exists to prove the abstraction is real
- Tests cover adapter selection and normalized snapshot output
- The design clearly separates adapter concerns from orchestration and UI
concerns
```
## Issue 4
### Title
Define generated skill placement and provenance policy
### Labels
- `enhancement`
### Body
```md
## Problem
ECC now has a large and growing skill surface, but generated/imported/learned
skills do not yet have a clear long-term placement and provenance policy.
This creates several problems:
- unclear separation between curated skills and generated/learned skills
- validator noise around directories that may or may not exist locally
- weak provenance for imported or machine-generated skill content
- uncertainty about where future automated learning outputs should live
As ECC grows, the repo needs explicit rules for where generated skill artifacts
belong and how they are identified.
## Scope
Define a repo-wide policy for:
- curated vs generated vs imported skill placement
- provenance metadata requirements
- validator behavior for optional/generated skill directories
- whether generated skills are shipped, ignored, or materialized during
install/build steps
## Non-Goals
- Building a full external skill marketplace
- Rewriting all existing skill content in one pass
- Solving every content-quality issue in the same issue
## Acceptance Criteria
- A documented placement policy exists for generated/imported skills
- Provenance requirements are explicit
- Validators no longer produce ambiguous behavior around optional/generated
skill locations
- The policy clearly states what is publishable vs local-only
- Follow-on implementation work is split into concrete, bounded PR-sized steps
```

View File

@@ -0,0 +1,59 @@
# PR 399 Review — March 12, 2026
## Scope
Reviewed `#399`:
- title: `fix(observe): 5-layer automated session guard to prevent self-loop observations`
- head: `e7df0e588ceecfcd1072ef616034ccd33bb0f251`
- files changed:
- `skills/continuous-learning-v2/hooks/observe.sh`
- `skills/continuous-learning-v2/agents/observer-loop.sh`
## Findings
### Medium
1. `skills/continuous-learning-v2/hooks/observe.sh`
The new `CLAUDE_CODE_ENTRYPOINT` guard uses a finite allowlist of known
non-`cli` values (`sdk-ts`, `sdk-py`, `sdk-cli`, `mcp`, `remote`).
That leaves a forward-compatibility hole: any future non-`cli` entrypoint value
will fall through and be treated as interactive. That reintroduces the exact
class of automated-session observation the PR is trying to prevent.
The safer rule is:
- allow only `cli`
- treat every other explicit entrypoint as automated
- keep the default fallback as `cli` when the variable is unset
Suggested shape:
```bash
case "${CLAUDE_CODE_ENTRYPOINT:-cli}" in
cli) ;;
*) exit 0 ;;
esac
```
## Merge Recommendation
`Needs one follow-up change before merge.`
The PR direction is correct:
- it closes the ECC self-observation loop in `observer-loop.sh`
- it adds multiple guard layers in the right area of `observe.sh`
- it already addressed the cheaper-first ordering and skip-path trimming issues
But the entrypoint guard should be generalized before merge so the automation
filter does not silently age out when Claude Code introduces additional
non-interactive entrypoints.
## Residual Risk
- There is still no dedicated regression test coverage around the new shell
guard behavior, so the final merge should include at least one executable
verification pass for the entrypoint and skip-path cases.

View File

@@ -0,0 +1,355 @@
# PR Review And Queue Triage — March 13, 2026
## Snapshot
This document records a live GitHub triage snapshot for the
`everything-claude-code` pull-request queue as of `2026-03-13T08:33:31Z`.
Sources used:
- `gh pr view`
- `gh pr checks`
- `gh pr diff --name-only`
- targeted local verification against the merged `#399` head
Stale threshold used for this pass:
- `last updated before 2026-02-11` (`>30` days before March 13, 2026)
## PR `#399` Retrospective Review
PR:
- `#399` — `fix(observe): 5-layer automated session guard to prevent self-loop observations`
- state: `MERGED`
- merged at: `2026-03-13T06:40:03Z`
- merge commit: `c52a28ace9e7e84c00309fc7b629955dfc46ecf9`
Files changed:
- `skills/continuous-learning-v2/hooks/observe.sh`
- `skills/continuous-learning-v2/agents/observer-loop.sh`
Validation performed against merged head `546628182200c16cc222b97673ddd79e942eacce`:
- `bash -n` on both changed shell scripts
- `node tests/hooks/hooks.test.js` (`204` passed, `0` failed)
- targeted hook invocations for:
- interactive CLI session
- `CLAUDE_CODE_ENTRYPOINT=mcp`
- `ECC_HOOK_PROFILE=minimal`
- `ECC_SKIP_OBSERVE=1`
- `agent_id` payload
- trimmed `ECC_OBSERVE_SKIP_PATHS`
Behavioral result:
- the core self-loop fix works
- automated-session guard branches suppress observation writes as intended
- the final `non-cli => exit` entrypoint logic is the correct fail-closed shape
Remaining findings:
1. Medium: skipped automated sessions still create homunculus project state
before the new guards exit.
`observe.sh` resolves `cwd` and sources project detection before reaching the
automated-session guard block, so `detect-project.sh` still creates
`projects/<id>/...` directories and updates `projects.json` for sessions that
later exit early.
2. Low: the new guard matrix shipped without direct regression coverage.
The hook test suite still validates adjacent behavior, but it does not
directly assert the new `CLAUDE_CODE_ENTRYPOINT`, `ECC_HOOK_PROFILE`,
`ECC_SKIP_OBSERVE`, `agent_id`, or trimmed skip-path branches.
Verdict:
- `#399` is technically correct for its primary goal and was safe to merge as
the urgent loop-stop fix.
- It still warrants a follow-up issue or patch to move automated-session guards
ahead of project-registration side effects and to add explicit guard-path
tests.
## Open PR Inventory
There are currently `4` open PRs.
### Queue Table
| PR | Title | Draft | Mergeable | Merge State | Updated | Stale | Current Verdict |
| --- | --- | --- | --- | --- | --- | --- | --- |
| `#292` | `chore(config): governance and config foundation (PR #272 split 1/6)` | `false` | `MERGEABLE` | `UNSTABLE` | `2026-03-13T07:26:55Z` | `No` | `Best current merge candidate` |
| `#298` | `feat(agents,skills,rules): add Rust, Java, mobile, DevOps, and performance content` | `false` | `CONFLICTING` | `DIRTY` | `2026-03-11T04:29:07Z` | `No` | `Needs changes before review can finish` |
| `#336` | `Customisation for Codex CLI - Features from Claude Code and OpenCode` | `true` | `MERGEABLE` | `UNSTABLE` | `2026-03-13T07:26:12Z` | `No` | `Needs manual review and draft exit` |
| `#420` | `feat: add laravel skills` | `true` | `MERGEABLE` | `UNSTABLE` | `2026-03-12T22:57:36Z` | `No` | `Low-risk draft, review after draft exit` |
No currently open PR is stale by the `>30 days since last update` rule.
## Per-PR Assessment
### `#292` — Governance / Config Foundation
Live state:
- open
- non-draft
- `MERGEABLE`
- merge state `UNSTABLE`
- visible checks:
- `CodeRabbit` passed
- `GitGuardian Security Checks` passed
Scope:
- `.env.example`
- `.github/ISSUE_TEMPLATE/copilot-task.md`
- `.github/PULL_REQUEST_TEMPLATE.md`
- `.gitignore`
- `.markdownlint.json`
- `.tool-versions`
- `VERSION`
Assessment:
- This is the cleanest merge candidate in the current queue.
- The branch was already refreshed onto current `main`.
- The currently visible bot feedback is minor/nit-level rather than obviously
merge-blocking.
- The main caution is that only external bot checks are visible right now; no
GitHub Actions matrix run appears in the current PR checks output.
Current recommendation:
- `Mergeable after one final owner pass.`
- If you want a conservative path, do one quick human review of the remaining
`.env.example`, PR-template, and `.tool-versions` nitpicks before merge.
### `#298` — Large Multi-Domain Content Expansion
Live state:
- open
- non-draft
- `CONFLICTING`
- merge state `DIRTY`
- visible checks:
- `CodeRabbit` passed
- `GitGuardian Security Checks` passed
- `cubic · AI code reviewer` passed
Scope:
- `35` files
- large documentation and skill/rule expansion across Java, Rust, mobile,
DevOps, performance, data, and MLOps
Assessment:
- This PR is not ready for merge.
- It conflicts with current `main`, so it is not even mergeable at the branch
level yet.
- cubic identified `34` issues across `35` files in the current review.
Those findings are substantive and technical, not just style cleanup, and
they cover broken or misleading examples across several new skills.
- Even without the conflict, the scope is large enough that it needs a deliberate
content-fix pass rather than a quick merge decision.
Current recommendation:
- `Needs changes.`
- Rebase or restack first, then resolve the substantive example-quality issues.
- If momentum matters, split by domain rather than carrying one very large PR.
### `#336` — Codex CLI Customization
Live state:
- open
- draft
- `MERGEABLE`
- merge state `UNSTABLE`
- visible checks:
- `CodeRabbit` passed
- `GitGuardian Security Checks` passed
Scope:
- `scripts/codex-git-hooks/pre-commit`
- `scripts/codex-git-hooks/pre-push`
- `scripts/codex/check-codex-global-state.sh`
- `scripts/codex/install-global-git-hooks.sh`
- `scripts/sync-ecc-to-codex.sh`
Assessment:
- This PR is no longer conflicting, but it is still draft-only and has not had
a meaningful first-party review pass.
- It modifies user-global Codex setup behavior and git-hook installation, so the
operational blast radius is higher than a docs-only PR.
- The visible checks are only external bots; there is no full GitHub Actions run
shown in the current check set.
- Because the branch comes from the `main` branch of a contributor fork, it also
  deserves an extra sanity pass on what exactly is being proposed before changing status.
Current recommendation:
- `Needs changes before merge readiness`, where the required changes are process
and review oriented rather than an already-proven code defect:
- finish manual review
- run or confirm validation on the global-state scripts
- take it out of draft only after that review is complete
### `#420` — Laravel Skills
Live state:
- open
- draft
- `MERGEABLE`
- merge state `UNSTABLE`
- visible checks:
- `CodeRabbit` passed
- `GitGuardian Security Checks` passed
Scope:
- `README.md`
- `examples/laravel-api-CLAUDE.md`
- `rules/php/patterns.md`
- `rules/php/security.md`
- `rules/php/testing.md`
- `skills/configure-ecc/SKILL.md`
- `skills/laravel-patterns/SKILL.md`
- `skills/laravel-security/SKILL.md`
- `skills/laravel-tdd/SKILL.md`
- `skills/laravel-verification/SKILL.md`
Assessment:
- This is content-heavy and operationally lower risk than `#336`.
- It is still draft and has not had a substantive human review pass yet.
- The visible checks are external bots only.
- Nothing in the live PR state suggests a merge blocker yet, but it is not ready
to be merged simply because it is still draft and under-reviewed.
Current recommendation:
- `Review next after the highest-priority non-draft work.`
- Likely a good review candidate once the author is ready to exit draft.
## Mergeability Buckets
### Mergeable Now Or After A Final Owner Pass
- `#292`
### Needs Changes Before Merge
- `#298`
- `#336`
### Draft / Needs Review Before Any Merge Decision
- `#420`
### Stale `>30 Days`
- none
## Recommended Order
1. `#292`
This is the cleanest live merge candidate.
2. `#420`
Low runtime risk, but wait for draft exit and a real review pass.
3. `#336`
Review carefully because it changes global Codex sync and hook behavior.
4. `#298`
Rebase and fix the substantive content issues before spending more review time
on it.
## Bottom Line
- `#399`: safe bugfix merge with one follow-up cleanup still warranted
- `#292`: highest-priority merge candidate in the current open queue
- `#298`: not mergeable; conflicts plus substantive content defects
- `#336`: no longer conflicting, but not ready while still draft and lightly
validated
- `#420`: draft, low-risk content lane, review after the non-draft queue
## Live Refresh
Refreshed at `2026-03-13T22:11:40Z`.
### Main Branch
- `origin/main` is green right now, including the Windows test matrix.
- Mainline CI repair is not the current bottleneck.
### Updated Queue Read
#### `#292` — Governance / Config Foundation
- open
- non-draft
- `MERGEABLE`
- visible checks:
- `CodeRabbit` passed
- `GitGuardian Security Checks` passed
- highest-signal remaining work is not CI repair; it is the small correctness
pass on `.env.example` and PR-template alignment before merge
Current recommendation:
- `Next actionable PR.`
- Either patch the remaining doc/config correctness issues, or do one final
owner pass and merge if you accept the current tradeoffs.
#### `#420` — Laravel Skills
- open
- draft
- `MERGEABLE`
- visible checks:
- `CodeRabbit` skipped because the PR is draft
- `GitGuardian Security Checks` passed
- no substantive human review is visible yet
Current recommendation:
- `Review after the non-draft queue.`
- Low implementation risk, but not merge-ready while still draft and
under-reviewed.
#### `#336` — Codex CLI Customization
- open
- draft
- `MERGEABLE`
- visible checks:
- `CodeRabbit` passed
- `GitGuardian Security Checks` passed
- still needs a deliberate manual review because it touches global Codex sync
and git-hook installation behavior
Current recommendation:
- `Manual-review lane, not immediate merge lane.`
#### `#298` — Large Content Expansion
- open
- non-draft
- `CONFLICTING`
- still the hardest remaining PR in the queue
Current recommendation:
- `Last priority among current open PRs.`
- Rebase first, then handle the substantive content/example corrections.
### Current Order
1. `#292`
2. `#420`
3. `#336`
4. `#298`

View File

@@ -0,0 +1,933 @@
# ECC 2.0 Selective Install Discovery
## Purpose
This document turns the March 11 mega-plan selective-install requirement into a
concrete ECC 2.0 discovery design.
The goal is not just "fewer files copied during install." The actual target is
an install system that can answer, deterministically:
- what was requested
- what was resolved
- what was copied or generated
- what target-specific transforms were applied
- what ECC owns and may safely remove or repair later
That is the missing contract between ECC 1.x installation and an ECC 2.0
control plane.
## Current Implemented Foundation
The first selective-install substrate already exists in-repo:
- `manifests/install-modules.json`
- `manifests/install-profiles.json`
- `schemas/install-modules.schema.json`
- `schemas/install-profiles.schema.json`
- `schemas/install-state.schema.json`
- `scripts/ci/validate-install-manifests.js`
- `scripts/lib/install-manifests.js`
- `scripts/lib/install/request.js`
- `scripts/lib/install/runtime.js`
- `scripts/lib/install/apply.js`
- `scripts/lib/install-targets/`
- `scripts/lib/install-state.js`
- `scripts/lib/install-executor.js`
- `scripts/lib/install-lifecycle.js`
- `scripts/ecc.js`
- `scripts/install-apply.js`
- `scripts/install-plan.js`
- `scripts/list-installed.js`
- `scripts/doctor.js`
Current capabilities:
- machine-readable module and profile catalogs
- CI validation that manifest entries point at real repo paths
- dependency expansion and target filtering
- adapter-aware operation planning
- canonical request normalization for legacy and manifest install modes
- explicit runtime dispatch from normalized requests into plan creation
- legacy and manifest installs both write durable install-state
- read-only inspection of install plans before any mutation
- unified `ecc` CLI routing install, planning, and lifecycle commands
- lifecycle inspection and mutation via `list-installed`, `doctor`, `repair`,
and `uninstall`
Current limitation:
- target-specific merge/remove semantics are still scaffold-level for some modules
- legacy `ecc-install` compatibility still points at `install.sh`
- publish surface is still broad in `package.json`
## Current Code Review
The current installer stack is already much healthier than the original
language-first shell installer, but it still concentrates too much
responsibility in a few files.
### Current Runtime Path
The runtime flow today is:
1. `install.sh`
thin shell wrapper that resolves the real package root
2. `scripts/install-apply.js`
user-facing installer CLI for legacy and manifest modes
3. `scripts/lib/install/request.js`
CLI parsing plus canonical request normalization
4. `scripts/lib/install/runtime.js`
runtime dispatch from normalized requests into install plans
5. `scripts/lib/install-executor.js`
argument translation, legacy compatibility, operation materialization,
filesystem mutation, and install-state write
6. `scripts/lib/install-manifests.js`
module/profile catalog loading plus dependency expansion
7. `scripts/lib/install-targets/`
target root and destination-path scaffolding
8. `scripts/lib/install-state.js`
schema-backed install-state read/write
9. `scripts/lib/install-lifecycle.js`
doctor/repair/uninstall behavior derived from stored operations
That is enough to prove the selective-install substrate, but not enough to make
the installer architecture feel settled.
### Current Strengths
- install intent is now explicit through `--profile` and `--modules`
- request parsing and request normalization are now split from the CLI shell
- target root resolution is already adapterized
- lifecycle commands now use durable install-state instead of guessing
- the repo already has a unified Node entrypoint through `ecc` and
`install-apply.js`
### Current Coupling Still Present
1. `install-executor.js` is smaller than before, but still carrying too many
planning and materialization layers at once.
The request boundary is now extracted, but legacy request translation,
manifest-plan expansion, and operation materialization still live together.
2. target adapters are still too thin.
Today they mostly resolve roots and scaffold destination paths. The real
install semantics still live in executor branches and path heuristics.
3. the planner/executor boundary is not clean enough yet.
`install-manifests.js` resolves modules, but the final install operation set
is still partly constructed in executor-specific logic.
4. lifecycle behavior depends on low-level recorded operations more than on
stable module semantics.
That works for plain file copy, but becomes brittle for merge/generate/remove
behaviors.
5. compatibility mode is mixed directly into the main installer runtime.
Legacy language installs should behave like a request adapter, not as a
parallel installer architecture.
## Proposed Modular Architecture Changes
The next architectural step is to separate the installer into explicit layers,
with each layer returning stable data instead of immediately mutating files.
### Target State
The desired install pipeline is:
1. CLI surface
2. request normalization
3. module resolution
4. target planning
5. operation planning
6. execution
7. install-state persistence
8. lifecycle services built on the same operation contract
The main idea is simple:
- manifests describe content
- adapters describe target-specific landing semantics
- planners describe what should happen
- executors apply those plans
- lifecycle commands reuse the same plan/state model instead of reinventing it
### Proposed Runtime Layers
#### 1. CLI Surface
Responsibility:
- parse user intent only
- route to install, plan, doctor, repair, uninstall
- render human or JSON output
Should not own:
- legacy language translation
- target-specific install rules
- operation construction
Suggested files:
```text
scripts/ecc.js
scripts/install-apply.js
scripts/install-plan.js
scripts/doctor.js
scripts/repair.js
scripts/uninstall.js
```
These stay as entrypoints, but become thin wrappers around library modules.
#### 2. Request Normalizer
Responsibility:
- translate raw CLI flags into a canonical install request
- convert legacy language installs into a compatibility request shape
- reject mixed or ambiguous inputs early
Suggested canonical request:
```json
{
"mode": "manifest",
"target": "cursor",
"profile": "developer",
"modules": [],
"legacyLanguages": [],
"dryRun": false
}
```
or, in compatibility mode:
```json
{
"mode": "legacy-compat",
"target": "claude",
"profile": null,
"modules": [],
"legacyLanguages": ["typescript", "python"],
"dryRun": false
}
```
This lets the rest of the pipeline ignore whether the request came from old or
new CLI syntax.
#### 3. Module Resolver
Responsibility:
- load manifest catalogs
- expand dependencies
- reject conflicts
- filter unsupported modules per target
- return a canonical resolution object
This layer should stay pure and read-only.
It should not know:
- destination filesystem paths
- merge semantics
- copy strategies
Current nearest file:
- `scripts/lib/install-manifests.js`
Suggested split:
```text
scripts/lib/install/catalog.js
scripts/lib/install/resolve-request.js
scripts/lib/install/resolve-modules.js
```
#### 4. Target Planner
Responsibility:
- select the install target adapter
- resolve target root
- resolve install-state path
- expand module-to-target mapping rules
- emit target-aware operation intents
This is where target-specific meaning should live.
Examples:
- Claude may preserve native hierarchy under `~/.claude`
- Cursor may sync bundled `.cursor` root children differently from rules
- generated configs may require merge or replace semantics depending on target
Current nearest files:
- `scripts/lib/install-targets/helpers.js`
- `scripts/lib/install-targets/registry.js`
Suggested evolution:
```text
scripts/lib/install/targets/registry.js
scripts/lib/install/targets/claude-home.js
scripts/lib/install/targets/cursor-project.js
scripts/lib/install/targets/antigravity-project.js
```
Each adapter should eventually expose more than `resolveRoot`.
It should own path and strategy mapping for its target family.
#### 5. Operation Planner
Responsibility:
- turn module resolution plus adapter rules into a typed operation graph
- emit first-class operations such as:
- `copy-file`
- `copy-tree`
- `merge-json`
- `render-template`
- `remove`
- attach ownership and validation metadata
This is the missing architectural seam in the current installer.
Today, operations are partly scaffold-level and partly executor-specific.
ECC 2.0 should make operation planning a standalone phase so that:
- `plan` becomes a true preview of execution
- `doctor` can validate intended behavior, not just current files
- `repair` can rebuild exact missing work safely
- `uninstall` can reverse only managed operations
#### 6. Execution Engine
Responsibility:
- apply a typed operation graph
- enforce overwrite and ownership rules
- stage writes safely
- collect final applied-operation results
This layer should not decide *what* to do.
It should only decide *how* to apply a provided operation kind safely.
Current nearest file:
- `scripts/lib/install-executor.js`
Recommended refactor:
```text
scripts/lib/install/executor/apply-plan.js
scripts/lib/install/executor/apply-copy.js
scripts/lib/install/executor/apply-merge-json.js
scripts/lib/install/executor/apply-remove.js
```
That turns executor logic from one large branching runtime into a set of small
operation handlers.
#### 7. Install-State Store
Responsibility:
- validate and persist install-state
- record canonical request, resolution, and applied operations
- support lifecycle commands without forcing them to reverse-engineer installs
Current nearest file:
- `scripts/lib/install-state.js`
This layer is already close to the right shape. The main remaining change is to
store richer operation metadata once merge/generate semantics are real.
#### 8. Lifecycle Services
Responsibility:
- `list-installed`: inspect state only
- `doctor`: compare desired/install-state view against current filesystem
- `repair`: regenerate a plan from state and reapply safe operations
- `uninstall`: remove only ECC-owned outputs
Current nearest file:
- `scripts/lib/install-lifecycle.js`
This layer should eventually operate on operation kinds and ownership policies,
not just on raw `copy-file` records.
## Proposed File Layout
The clean modular end state should look roughly like this:
```text
scripts/lib/install/
catalog.js
request.js
resolve-modules.js
plan-operations.js
state-store.js
targets/
registry.js
claude-home.js
cursor-project.js
antigravity-project.js
codex-home.js
opencode-home.js
executor/
apply-plan.js
apply-copy.js
apply-merge-json.js
apply-render-template.js
apply-remove.js
lifecycle/
discover.js
doctor.js
repair.js
uninstall.js
```
This is not a packaging split.
It is a code-ownership split inside the current repo so each layer has one job.
## Migration Map From Current Files
The lowest-risk migration path is evolutionary, not a rewrite.
### Keep
- `install.sh` as the public compatibility shim
- `scripts/ecc.js` as the unified CLI
- `scripts/lib/install-state.js` as the starting point for the state store
- current target adapter IDs and state locations
### Extract
- request parsing and compatibility translation out of
`scripts/lib/install-executor.js`
- target-aware operation planning out of executor branches and into target
adapters plus planner modules
- lifecycle-specific analysis out of the shared lifecycle monolith into smaller
services
### Replace Gradually
- broad path-copy heuristics with typed operations
- scaffold-only adapter planning with adapter-owned semantics
- legacy language install branches with legacy request translation into the same
planner/executor pipeline
## Immediate Architecture Changes To Make Next
If the goal is ECC 2.0 and not just “working enough,” the next modularization
steps should be:
1. split `install-executor.js` into request normalization, operation planning,
and execution modules
2. move target-specific strategy decisions into adapter-owned planning methods
3. make `repair` and `uninstall` operate on typed operation handlers rather than
only plain `copy-file` records
4. teach manifests about install strategy and ownership so the planner no
longer depends on path heuristics
5. narrow the npm publish surface only after the internal module boundaries are
stable
## Why The Current Model Is Not Enough
Today ECC still behaves like a broad payload copier:
- `install.sh` is language-first and target-branch-heavy
- targets are partly implicit in directory layout
- uninstall, repair, and doctor now exist but are still early lifecycle commands
- the repo cannot prove what a prior install actually wrote
- publish surface is still broad in `package.json`
That creates the problems already called out in the mega plan:
- users pull more content than their harness or workflow needs
- support and upgrades are harder because installs are not recorded
- target behavior drifts because install logic is duplicated in shell branches
- future targets like Codex or OpenCode require more special-case logic instead
of reusing a stable install contract
## ECC 2.0 Design Thesis
Selective install should be modeled as:
1. resolve requested intent into a canonical module graph
2. translate that graph through a target adapter
3. execute a deterministic install operation set
4. write install-state as the durable source of truth
That means ECC 2.0 needs two contracts, not one:
- a content contract
what modules exist and how they depend on each other
- a target contract
how those modules land inside Claude, Cursor, Antigravity, Codex, or OpenCode
The current repo originally had only the first half, in early form; it now has
the first full vertical slice, but not yet the full target-specific semantics.
## Design Constraints
1. Keep `everything-claude-code` as the canonical source repo.
2. Preserve existing `install.sh` flows during migration.
3. Support home-scoped and project-scoped targets from the same planner.
4. Make uninstall/repair/doctor possible without guessing.
5. Avoid per-target copy logic leaking back into module definitions.
6. Keep future Codex and OpenCode support additive, not a rewrite.
## Canonical Artifacts
### 1. Module Catalog
The module catalog is the canonical content graph.
Current fields already implemented:
- `id`
- `kind`
- `description`
- `paths`
- `targets`
- `dependencies`
- `defaultInstall`
- `cost`
- `stability`
Fields still needed for ECC 2.0:
- `installStrategy`
for example `copy`, `flatten-rules`, `generate`, `merge-config`
- `ownership`
whether ECC fully owns the target path or only generated files under it
- `pathMode`
for example `preserve`, `flatten`, `target-template`
- `conflicts`
modules or path families that cannot coexist on one target
- `publish`
whether the module is packaged by default, optional, or generated post-install
Suggested future shape:
```json
{
"id": "hooks-runtime",
"kind": "hooks",
"paths": ["hooks", "scripts/hooks"],
"targets": ["claude", "cursor", "opencode"],
"dependencies": [],
"installStrategy": "copy",
"pathMode": "preserve",
"ownership": "managed",
"defaultInstall": true,
"cost": "medium",
"stability": "stable"
}
```
### 2. Profile Catalog
Profiles stay thin.
They should express user intent, not duplicate target logic.
Current examples already implemented:
- `core`
- `developer`
- `security`
- `research`
- `full`
Fields still needed:
- `defaultTargets`
- `recommendedFor`
- `excludes`
- `requiresConfirmation`
That lets ECC 2.0 say things like:
- `developer` is the recommended default for Claude and Cursor
- `research` may be heavy for narrow local installs
- `full` is allowed but not default
### 3. Target Adapters
This is the main missing layer.
The module graph should not know:
- where Claude home lives
- how Cursor flattens or remaps content
- which config files need merge semantics instead of blind copy
That belongs to a target adapter.
Suggested interface:
```ts
type InstallTargetAdapter = {
id: string;
kind: "home" | "project";
supports(target: string): boolean;
resolveRoot(input?: string): Promise<string>;
planOperations(input: InstallOperationInput): Promise<InstallOperation[]>;
validate?(input: InstallOperationInput): Promise<ValidationIssue[]>;
};
```
Suggested first adapters:
1. `claude-home`
writes into `~/.claude/...`
2. `cursor-project`
writes into `./.cursor/...`
3. `antigravity-project`
writes into `./.agent/...`
4. `codex-home`
later
5. `opencode-home`
later
This matches the same pattern already proposed in the session-adapter discovery
doc: canonical contract first, harness-specific adapter second.
## Install Planning Model
The current `scripts/install-plan.js` CLI proves the repo can resolve requested
modules into a filtered module set.
ECC 2.0 needs the next layer: operation planning.
Suggested phases:
1. input normalization
- parse `--target`
- parse `--profile`
- parse `--modules`
- optionally translate legacy language args
2. module resolution
- expand dependencies
- reject conflicts
- filter by supported targets
3. adapter planning
- resolve target root
- derive exact copy or generation operations
- identify config merges and target remaps
4. dry-run output
- show selected modules
- show skipped modules
- show exact file operations
5. mutation
- execute the operation plan
6. state write
- persist install-state only after successful completion
Suggested operation shape:
```json
{
"kind": "copy",
"moduleId": "rules-core",
"source": "rules/common/coding-style.md",
"destination": "/Users/example/.claude/rules/common/coding-style.md",
"ownership": "managed",
"overwritePolicy": "replace"
}
```
Other operation kinds:
- `copy`
- `copy-tree`
- `flatten-copy`
- `render-template`
- `merge-json`
- `merge-jsonc`
- `mkdir`
- `remove`
## Install-State Contract
Install-state is the durable contract that ECC 1.x is missing.
Suggested path conventions:
- Claude target:
`~/.claude/ecc/install-state.json`
- Cursor target:
`./.cursor/ecc-install-state.json`
- Antigravity target:
`./.agent/ecc-install-state.json`
- future Codex target:
`~/.codex/ecc-install-state.json`
Suggested payload:
```json
{
"schemaVersion": "ecc.install.v1",
"installedAt": "2026-03-13T00:00:00Z",
"lastValidatedAt": "2026-03-13T00:00:00Z",
"target": {
"id": "claude-home",
"root": "/Users/example/.claude"
},
"request": {
"profile": "developer",
"modules": ["orchestration"],
"legacyLanguages": ["typescript", "python"]
},
"resolution": {
"selectedModules": [
"rules-core",
"agents-core",
"commands-core",
"hooks-runtime",
"platform-configs",
"workflow-quality",
"framework-language",
"database",
"orchestration"
],
"skippedModules": []
},
"source": {
"repoVersion": "1.9.0",
"repoCommit": "git-sha",
"manifestVersion": 1
},
"operations": [
{
"kind": "copy",
"moduleId": "rules-core",
"destination": "/Users/example/.claude/rules/common/coding-style.md",
"digest": "sha256:..."
}
]
}
```
State requirements:
- enough detail for uninstall to remove only ECC-managed outputs
- enough detail for repair to compare desired versus actual installed files
- enough detail for doctor to explain drift instead of guessing
## Lifecycle Commands
The following commands are the lifecycle surface for install-state:
1. `ecc list-installed`
2. `ecc uninstall`
3. `ecc doctor`
4. `ecc repair`
Current implementation status:
- `ecc list-installed` routes to `node scripts/list-installed.js`
- `ecc uninstall` routes to `node scripts/uninstall.js`
- `ecc doctor` routes to `node scripts/doctor.js`
- `ecc repair` routes to `node scripts/repair.js`
- legacy script entrypoints remain available during migration
### `list-installed`
Responsibilities:
- show target id and root
- show requested profile/modules
- show resolved modules
- show source version and install time
### `uninstall`
Responsibilities:
- load install-state
- remove only ECC-managed destinations recorded in state
- leave user-authored unrelated files untouched
- delete install-state only after successful cleanup
### `doctor`
Responsibilities:
- detect missing managed files
- detect unexpected config drift
- detect target roots that no longer exist
- detect manifest/version mismatch
### `repair`
Responsibilities:
- rebuild the desired operation plan from install-state
- re-copy missing or drifted managed files
- refuse repair if requested modules no longer exist in the current manifest
unless a compatibility map exists
## Legacy Compatibility Layer
Current `install.sh` accepts:
- `--target <claude|cursor|antigravity>`
- a list of language names
That behavior cannot disappear in one cut because users already depend on it.
ECC 2.0 should translate legacy language arguments into a compatibility request.
Suggested approach:
1. keep existing CLI shape for legacy mode
2. map language names to module requests such as:
- `rules-core`
- target-compatible rule subsets
3. write install-state even for legacy installs
4. label the request as `legacyMode: true`
Example:
```json
{
"request": {
"legacyMode": true,
"legacyLanguages": ["typescript", "python"]
}
}
```
This keeps old behavior available while moving all installs onto the same state
contract.
## Publish Boundary
The current npm package still publishes a broad payload through `package.json`.
ECC 2.0 should improve this carefully.
Recommended sequence:
1. keep one canonical npm package first
2. use manifests to drive install-time selection before changing publish shape
3. only later consider reducing packaged surface where safe
Why:
- selective install can ship before aggressive package surgery
- uninstall and repair depend on install-state more than publish changes
- Codex/OpenCode support is easier if the package source remains unified
Possible later directions:
- generated slim bundles per profile
- generated target-specific tarballs
- optional remote fetch of heavy modules
Those are Phase 3 or later, not prerequisites for profile-aware installs.
## File Layout Recommendation
Suggested next files:
```text
scripts/lib/install-targets/
claude-home.js
cursor-project.js
antigravity-project.js
registry.js
scripts/lib/install-state.js
scripts/ecc.js
scripts/install-apply.js
scripts/list-installed.js
scripts/uninstall.js
scripts/doctor.js
scripts/repair.js
tests/lib/install-targets.test.js
tests/lib/install-state.test.js
tests/lib/install-lifecycle.test.js
```
`install.sh` can remain the user-facing entry point during migration, but it
should become a thin shell around a Node-based planner and executor rather than
keep growing per-target shell branches.
## Implementation Sequence
### Phase 1: Planner To Contract
1. keep current manifest schema and resolver
2. add operation planning on top of resolved modules
3. define `ecc.install.v1` state schema
4. write install-state on successful install
### Phase 2: Target Adapters
1. extract Claude install behavior into `claude-home` adapter
2. extract Cursor install behavior into `cursor-project` adapter
3. extract Antigravity install behavior into `antigravity-project` adapter
4. reduce `install.sh` to argument parsing plus adapter invocation
### Phase 3: Lifecycle
1. add stronger target-specific merge/remove semantics
2. extend repair/uninstall coverage for non-copy operations
3. reduce package shipping surface to the module graph instead of broad folders
4. decide when `ecc-install` should become a thin alias for `ecc install`
### Phase 4: Publish And Future Targets
1. evaluate safe reduction of `package.json` publish surface
2. add `codex-home`
3. add `opencode-home`
4. consider generated profile bundles if packaging pressure remains high
## Immediate Repo-Local Next Steps
The highest-signal next implementation moves in this repo are:
1. add target-specific merge/remove semantics for config-like modules
2. extend repair and uninstall beyond simple copy-file operations
3. reduce package shipping surface to the module graph instead of broad folders
4. decide whether `ecc-install` remains separate or becomes `ecc install`
5. add tests that lock down:
- target-specific merge/remove behavior
- repair and uninstall safety for non-copy operations
- unified `ecc` CLI routing and compatibility guarantees
## Open Questions
1. Should rules stay language-addressable in legacy mode forever, or only during
the migration window?
2. Should `platform-configs` always install with `core`, or be split into
smaller target-specific modules?
3. Do we want config merge semantics recorded at the operation level or only in
adapter logic?
4. Should heavy skill families eventually move to fetch-on-demand rather than
package-time inclusion?
5. Should Codex and OpenCode target adapters ship only after the Claude/Cursor
lifecycle commands are stable?
## Recommendation
Treat the current manifest resolver as adapter `0` for installs:
1. preserve the current install surface
2. move real copy behavior behind target adapters
3. write install-state for every successful install
4. make uninstall, doctor, and repair depend only on install-state
5. only then shrink packaging or add more targets
That is the shortest path from ECC 1.x installer sprawl to an ECC 2.0
install/control contract that is deterministic, supportable, and extensible.

View File

@@ -0,0 +1,489 @@
# ECC Selective Install Design
## Purpose
This document defines the user-facing selective-install design for ECC.
It complements
`docs/SELECTIVE-INSTALL-ARCHITECTURE.md`, which focuses on internal runtime
architecture and code boundaries.
This document answers the product and operator questions first:
- how users choose ECC components
- what the CLI should feel like
- what config file should exist
- how installation should behave across harness targets
- how the design maps onto the current ECC codebase without requiring a rewrite
## Problem
Today ECC still feels like a large payload installer even though the repo now
has first-pass manifest and lifecycle support.
Users need a simpler mental model:
- install the baseline
- add the language packs they actually use
- add the framework configs they actually want
- add optional capability packs like security, research, or orchestration
The selective-install system should make ECC feel composable instead of
all-or-nothing.
In the current substrate, user-facing components are still an alias layer over
coarser internal install modules. That means include/exclude is already useful
at the module-selection level, but some file-level boundaries remain imperfect
until the underlying module graph is split more finely.
## Goals
1. Let users install a small default ECC footprint quickly.
2. Let users compose installs from reusable component families:
- core rules
- language packs
- framework packs
- capability packs
- target/platform configs
3. Keep one consistent UX across Claude, Cursor, Antigravity, Codex, and
OpenCode.
4. Keep installs inspectable, repairable, and uninstallable.
5. Preserve backward compatibility with the current `ecc-install typescript`
style during rollout.
## Non-Goals
- packaging ECC into multiple npm packages in the first phase
- building a remote marketplace
- full control-plane UI in the same phase
- solving every skill-classification problem before selective install ships
## User Experience Principles
### 1. Start Small
A user should be able to get a useful ECC install with one command:
```bash
ecc install --target claude --profile core
```
The default experience should not assume the user wants every skill family and
every framework.
### 2. Build Up By Intent
The user should think in terms of:
- "I want the developer baseline"
- "I need TypeScript and Python"
- "I want Next.js and Django"
- "I want the security pack"
The user should not have to know raw internal repo paths.
### 3. Preview Before Mutation
Every install path should support dry-run planning:
```bash
ecc install --target cursor --profile developer --with lang:typescript --with framework:nextjs --dry-run
```
The plan should clearly show:
- selected components
- skipped components
- target root
- managed paths
- expected install-state location
### 4. Local Configuration Should Be First-Class
Teams should be able to commit a project-level install config and use:
```bash
ecc install --config ecc-install.json
```
That allows deterministic installs across contributors and CI.
## Component Model
The current manifest already uses install modules and profiles. The user-facing
design should keep that internal structure, but present it as four main
component families.
Near-term implementation note: some user-facing component IDs still resolve to
shared internal modules, especially in the language/framework layer. The
catalog improves UX immediately while preserving a clean path toward finer
module granularity in later phases.
### 1. Baseline
These are the default ECC building blocks:
- core rules
- baseline agents
- core commands
- runtime hooks
- platform configs
- workflow quality primitives
Examples of current internal modules:
- `rules-core`
- `agents-core`
- `commands-core`
- `hooks-runtime`
- `platform-configs`
- `workflow-quality`
### 2. Language Packs
Language packs group rules, guidance, and workflows for a language ecosystem.
Examples:
- `lang:typescript`
- `lang:python`
- `lang:go`
- `lang:java`
- `lang:rust`
Each language pack should resolve to one or more internal modules plus
target-specific assets.
### 3. Framework Packs
Framework packs sit above language packs and pull in framework-specific rules,
skills, and optional setup.
Examples:
- `framework:react`
- `framework:nextjs`
- `framework:django`
- `framework:springboot`
- `framework:laravel`
Framework packs should depend on the correct language pack or baseline
primitives where appropriate.
### 4. Capability Packs
Capability packs are cross-cutting ECC feature bundles.
Examples:
- `capability:security`
- `capability:research`
- `capability:orchestration`
- `capability:media`
- `capability:content`
These should map onto the current module families already being introduced in
the manifests.
## Profiles
Profiles remain the fastest on-ramp.
Recommended user-facing profiles:
- `core`
minimal baseline, safe default for most users trying ECC
- `developer`
best default for active software engineering work
- `security`
baseline plus security-heavy guidance
- `research`
baseline plus research/content/investigation tools
- `full`
everything classified and currently supported
Profiles should be composable with additional `--with` and `--without` flags.
Example:
```bash
ecc install --target claude --profile developer --with lang:typescript --with framework:nextjs --without capability:orchestration
```
## Proposed CLI Design
### Primary Commands
```bash
ecc install
ecc plan
ecc list-installed
ecc doctor
ecc repair
ecc uninstall
ecc catalog
```
### Install CLI
Recommended shape:
```bash
ecc install [--target <target>] [--profile <name>] [--with <component>]... [--without <component>]... [--config <path>] [--dry-run] [--json]
```
Examples:
```bash
ecc install --target claude --profile core
ecc install --target cursor --profile developer --with lang:typescript --with framework:nextjs
ecc install --target antigravity --with capability:security --with lang:python
ecc install --config ecc-install.json
```
### Plan CLI
Recommended shape:
```bash
ecc plan [same selection flags as install]
```
Purpose:
- produce a preview without mutation
- act as the canonical debugging surface for selective install
### Catalog CLI
Recommended shape:
```bash
ecc catalog profiles
ecc catalog components
ecc catalog components --family language
ecc catalog show framework:nextjs
```
Purpose:
- let users discover valid component names without reading docs
- keep config authoring approachable
### Compatibility CLI
These legacy flows should still work during migration:
```bash
ecc-install typescript
ecc-install --target cursor typescript
ecc typescript
```
Internally these should normalize into the new request model and write
install-state the same way as modern installs.
## Proposed Config File
### Filename
Recommended default:
- `ecc-install.json`
Optional future support:
- `.ecc/install.json`
### Config Shape
```json
{
"$schema": "./schemas/ecc-install-config.schema.json",
"version": 1,
"target": "cursor",
"profile": "developer",
"include": [
"lang:typescript",
"lang:python",
"framework:nextjs",
"capability:security"
],
"exclude": [
"capability:media"
],
"options": {
"hooksProfile": "standard",
"mcpCatalog": "baseline",
"includeExamples": false
}
}
```
### Field Semantics
- `target`
  selected harness target: one of `claude`, `cursor`, `antigravity`, `codex`, or `opencode`
- `profile`
baseline profile to start from
- `include`
additional components to add
- `exclude`
components to subtract from the profile result
- `options`
target/runtime tuning flags that do not change component identity
### Precedence Rules
1. CLI arguments override config file values.
2. config file overrides profile defaults.
3. profile defaults override internal module defaults.
This keeps the behavior predictable and easy to explain.
## Modular Installation Flow
The user-facing flow should be:
1. load config file if provided or auto-detected
2. merge CLI intent on top of config intent
3. normalize the request into a canonical selection
4. expand profile into baseline components
5. add `include` components
6. subtract `exclude` components
7. resolve dependencies and target compatibility
8. render a plan
9. apply operations if not in dry-run mode
10. write install-state
The important UX property is that the exact same flow powers:
- `install`
- `plan`
- `repair`
- `uninstall`
The commands differ in action, not in how ECC understands the selected install.
## Target Behavior
Selective install should preserve the same conceptual component graph across all
targets, while letting target adapters decide how content lands.
### Claude
Best fit for:
- home-scoped ECC baseline
- commands, agents, rules, hooks, platform config, orchestration
### Cursor
Best fit for:
- project-scoped installs
- rules plus project-local automation and config
### Antigravity
Best fit for:
- project-scoped agent/rule/workflow installs
### Codex / OpenCode
These should remain additive targets rather than special forks of the installer.
The selective-install design should make these just new adapters plus new
target-specific mapping rules, not new installer architectures.
## Technical Feasibility
This design is feasible because the repo already has:
- install module and profile manifests
- target adapters with install-state paths
- plan inspection
- install-state recording
- lifecycle commands
- a unified `ecc` CLI surface
The missing work is not conceptual invention. The missing work is productizing
the current substrate into a cleaner user-facing component model.
### Feasible In Phase 1
- profile + include/exclude selection
- `ecc-install.json` config file parsing
- catalog/discovery command
- alias mapping from user-facing component IDs to internal module sets
- dry-run and JSON planning
### Feasible In Phase 2
- richer target adapter semantics
- merge-aware operations for config-like assets
- stronger repair/uninstall behavior for non-copy operations
### Later
- reduced publish surface
- generated slim bundles
- remote component fetch
## Mapping To Current ECC Manifests
The current manifests do not yet expose a true user-facing `lang:*` /
`framework:*` / `capability:*` taxonomy. That should be introduced as a
presentation layer on top of the existing modules, not as a second installer
engine.
Recommended approach:
- keep `install-modules.json` as the internal resolution catalog
- add a user-facing component catalog that maps friendly component IDs to one or
more internal modules
- let profiles reference either internal modules or user-facing component IDs
during the migration window
That avoids breaking the current selective-install substrate while improving UX.
## Suggested Rollout
### Phase 1: Design And Discovery
- finalize the user-facing component taxonomy
- add the config schema
- add CLI design and precedence rules
### Phase 2: User-Facing Resolution Layer
- implement component aliases
- implement config-file parsing
- implement `include` / `exclude`
- implement `catalog`
### Phase 3: Stronger Target Semantics
- move more logic into target-owned planning
- support merge/generate operations cleanly
- improve repair/uninstall fidelity
### Phase 4: Packaging Optimization
- narrow published surface
- evaluate generated bundles
## Recommendation
The next implementation move should not be "rewrite the installer."
It should be:
1. keep the current manifest/runtime substrate
2. add a user-facing component catalog and config file
3. add `include` / `exclude` selection and catalog discovery
4. let the existing planner and lifecycle stack consume that model
That is the shortest path from the current ECC codebase to a real selective
install experience that feels like ECC 2.0 instead of a large legacy installer.

View File

@@ -1,246 +1,17 @@
#!/usr/bin/env bash
# install.sh — Legacy shell entrypoint for the ECC installer.
#
# Usage:
#   ./install.sh [--target <claude|cursor|antigravity>] <language> [<language> ...]
#
# Examples:
#   ./install.sh typescript
#   ./install.sh typescript python golang
#   ./install.sh --target cursor typescript
#
# Targets:
#   claude (default) — Install rules to ~/.claude/rules/
#   cursor — Install rules, agents, skills, commands, and MCP to ./.cursor/
#   antigravity — Install configs to .agent/
#
# This wrapper resolves the real repo/package root when invoked through a
# symlinked npm bin, then performs the legacy copy-based install and delegates
# to the Node-based installer runtime.

set -euo pipefail

# Resolve symlinks — needed when invoked as `ecc-install` via npm/bun bin symlink.
SCRIPT_PATH="$0"
while [ -L "$SCRIPT_PATH" ]; do
  link_dir="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)"
  SCRIPT_PATH="$(readlink "$SCRIPT_PATH")"
  # readlink may return a relative path; resolve it against the link's directory.
  [[ "$SCRIPT_PATH" != /* ]] && SCRIPT_PATH="$link_dir/$SCRIPT_PATH"
done
SCRIPT_DIR="$(cd "$(dirname "$SCRIPT_PATH")" && pwd)"
RULES_DIR="$SCRIPT_DIR/rules"

# --- Parse --target flag ---
# Only recognized as the FIRST argument (legacy behavior preserved).
TARGET="claude"
if [[ "${1:-}" == "--target" ]]; then
  if [[ -z "${2:-}" ]]; then
    # Bug fix: the message previously listed only claude/cursor even though
    # antigravity is accepted by the validation below.
    echo "Error: --target requires a value (claude, cursor, or antigravity)" >&2
    exit 1
  fi
  TARGET="$2"
  shift 2
fi

if [[ "$TARGET" != "claude" && "$TARGET" != "cursor" && "$TARGET" != "antigravity" ]]; then
  echo "Error: unknown target '$TARGET'. Must be 'claude', 'cursor', or 'antigravity'." >&2
  exit 1
fi
# --- Usage ---
# No languages were requested: print usage, the supported targets, and the
# discoverable language packs (every rules/ subdirectory except common/),
# then exit non-zero.
if [[ $# -eq 0 ]]; then
  printf '%s\n' "Usage: $0 [--target <claude|cursor|antigravity>] <language> [<language> ...]"
  printf '%s\n' ""
  printf '%s\n' "Targets:"
  printf '%s\n' "  claude (default) — Install rules to ~/.claude/rules/"
  printf '%s\n' "  cursor — Install rules, agents, skills, commands, and MCP to ./.cursor/"
  printf '%s\n' "  antigravity — Install configs to .agent/"
  printf '%s\n' ""
  printf '%s\n' "Available languages:"
  for pack_path in "$RULES_DIR"/*/; do
    pack_name="$(basename "$pack_path")"
    [[ "$pack_name" == "common" ]] && continue
    printf '%s\n' "  - $pack_name"
  done
  exit 1
fi
# --- Claude target (existing behavior) ---
# Home-scoped install: copies rules/common plus each requested language pack
# into $CLAUDE_RULES_DIR (default ~/.claude/rules), overwriting existing files.
if [[ "$TARGET" == "claude" ]]; then
  DEST_DIR="${CLAUDE_RULES_DIR:-$HOME/.claude/rules}"

  # Warn if destination already exists (user may have local customizations).
  if [[ -d "$DEST_DIR" && -n "$(ls -A "$DEST_DIR" 2>/dev/null)" ]]; then
    echo "Note: $DEST_DIR/ already exists. Existing files will be overwritten."
    echo " Back up any local customizations before proceeding."
  fi

  # The common/ rules are always installed regardless of requested languages.
  echo "Installing common rules -> $DEST_DIR/common/"
  mkdir -p "$DEST_DIR/common"
  cp -r "$RULES_DIR/common/." "$DEST_DIR/common/"

  # Install each requested language pack.
  for language in "$@"; do
    # Reject names that could escape the rules tree (path-traversal guard).
    if [[ ! "$language" =~ ^[a-zA-Z0-9_-]+$ ]]; then
      echo "Error: invalid language name '$language'. Only alphanumeric, dash, and underscore allowed." >&2
      continue
    fi
    source_dir="$RULES_DIR/$language"
    if [[ ! -d "$source_dir" ]]; then
      echo "Warning: rules/$language/ does not exist, skipping." >&2
      continue
    fi
    echo "Installing $language rules -> $DEST_DIR/$language/"
    mkdir -p "$DEST_DIR/$language"
    cp -r "$source_dir/." "$DEST_DIR/$language/"
  done

  echo "Done. Rules installed to $DEST_DIR/"
fi
# --- Cursor target ---
# Project-scoped install: copies rules, agents, skills, commands, hooks, and
# MCP config from the packaged .cursor/ payload into ./.cursor/ in the CWD.
if [[ "$TARGET" == "cursor" ]]; then
DEST_DIR=".cursor"
CURSOR_SRC="$SCRIPT_DIR/.cursor"
echo "Installing Cursor configs to $DEST_DIR/"
# --- Rules ---
echo "Installing common rules -> $DEST_DIR/rules/"
mkdir -p "$DEST_DIR/rules"
# Copy common rules (flattened names like common-coding-style.md)
if [[ -d "$CURSOR_SRC/rules" ]]; then
# Without nullglob an unmatched pattern stays literal; the -f guard turns
# that case into a no-op instead of a cp failure.
for f in "$CURSOR_SRC/rules"/common-*.md; do
[[ -f "$f" ]] && cp "$f" "$DEST_DIR/rules/"
done
fi
# Install language-specific rules
for lang in "$@"; do
# Validate language name to prevent path traversal
if [[ ! "$lang" =~ ^[a-zA-Z0-9_-]+$ ]]; then
echo "Error: invalid language name '$lang'. Only alphanumeric, dash, and underscore allowed." >&2
continue
fi
if [[ -d "$CURSOR_SRC/rules" ]]; then
# Track whether any rule file matched so the success/skip message reflects
# reality; note the "Installing" line intentionally prints AFTER copying.
found=false
for f in "$CURSOR_SRC/rules"/${lang}-*.md; do
if [[ -f "$f" ]]; then
cp "$f" "$DEST_DIR/rules/"
found=true
fi
done
if $found; then
echo "Installing $lang rules -> $DEST_DIR/rules/"
else
echo "Warning: no Cursor rules for '$lang' found, skipping." >&2
fi
fi
done
# --- Agents ---
# The remaining sections are best-effort: each payload directory/file is
# copied only when it exists in the package.
if [[ -d "$CURSOR_SRC/agents" ]]; then
echo "Installing agents -> $DEST_DIR/agents/"
mkdir -p "$DEST_DIR/agents"
cp -r "$CURSOR_SRC/agents/." "$DEST_DIR/agents/"
fi
# --- Skills ---
if [[ -d "$CURSOR_SRC/skills" ]]; then
echo "Installing skills -> $DEST_DIR/skills/"
mkdir -p "$DEST_DIR/skills"
cp -r "$CURSOR_SRC/skills/." "$DEST_DIR/skills/"
fi
# --- Commands ---
if [[ -d "$CURSOR_SRC/commands" ]]; then
echo "Installing commands -> $DEST_DIR/commands/"
mkdir -p "$DEST_DIR/commands"
cp -r "$CURSOR_SRC/commands/." "$DEST_DIR/commands/"
fi
# --- Hooks ---
if [[ -f "$CURSOR_SRC/hooks.json" ]]; then
echo "Installing hooks config -> $DEST_DIR/hooks.json"
cp "$CURSOR_SRC/hooks.json" "$DEST_DIR/hooks.json"
fi
if [[ -d "$CURSOR_SRC/hooks" ]]; then
echo "Installing hook scripts -> $DEST_DIR/hooks/"
mkdir -p "$DEST_DIR/hooks"
cp -r "$CURSOR_SRC/hooks/." "$DEST_DIR/hooks/"
fi
# --- MCP Config ---
if [[ -f "$CURSOR_SRC/mcp.json" ]]; then
echo "Installing MCP config -> $DEST_DIR/mcp.json"
cp "$CURSOR_SRC/mcp.json" "$DEST_DIR/mcp.json"
fi
echo "Done. Cursor configs installed to $DEST_DIR/"
fi
# --- Antigravity target ---
# Project-scoped install into .agent/: rules are flattened into prefixed
# filenames (common-*.md, <lang>-*.md) instead of subdirectories, commands
# become workflows, and agents plus skills are merged into .agent/skills/.
if [[ "$TARGET" == "antigravity" ]]; then
DEST_DIR=".agent"
if [[ -d "$DEST_DIR/rules" ]] && [[ "$(ls -A "$DEST_DIR/rules" 2>/dev/null)" ]]; then
echo "Note: $DEST_DIR/rules/ already exists. Existing files will be overwritten."
echo " Back up any local customizations before proceeding."
fi
# --- Rules ---
echo "Installing common rules -> $DEST_DIR/rules/"
mkdir -p "$DEST_DIR/rules"
if [[ -d "$RULES_DIR/common" ]]; then
# Flatten: rules/common/foo.md -> .agent/rules/common-foo.md
for f in "$RULES_DIR/common"/*.md; do
if [[ -f "$f" ]]; then
cp "$f" "$DEST_DIR/rules/common-$(basename "$f")"
fi
done
fi
for lang in "$@"; do
# Validate language name to prevent path traversal
if [[ ! "$lang" =~ ^[a-zA-Z0-9_-]+$ ]]; then
echo "Error: invalid language name '$lang'. Only alphanumeric, dash, and underscore allowed." >&2
continue
fi
lang_dir="$RULES_DIR/$lang"
if [[ ! -d "$lang_dir" ]]; then
echo "Warning: rules/$lang/ does not exist, skipping." >&2
continue
fi
echo "Installing $lang rules -> $DEST_DIR/rules/"
# Flatten: rules/<lang>/foo.md -> .agent/rules/<lang>-foo.md
for f in "$lang_dir"/*.md; do
if [[ -f "$f" ]]; then
cp "$f" "$DEST_DIR/rules/${lang}-$(basename "$f")"
fi
done
done
# --- Workflows (Commands) ---
if [[ -d "$SCRIPT_DIR/commands" ]]; then
echo "Installing commands -> $DEST_DIR/workflows/"
mkdir -p "$DEST_DIR/workflows"
cp -r "$SCRIPT_DIR/commands/." "$DEST_DIR/workflows/"
fi
# --- Skills and Agents ---
# NOTE(review): agents are copied first, then skills into the same directory,
# so a skill with the same filename as an agent overwrites it — presumably
# intentional; confirm before relying on agent files surviving.
mkdir -p "$DEST_DIR/skills"
if [[ -d "$SCRIPT_DIR/agents" ]]; then
echo "Installing agents -> $DEST_DIR/skills/"
cp -r "$SCRIPT_DIR/agents/." "$DEST_DIR/skills/"
fi
if [[ -d "$SCRIPT_DIR/skills" ]]; then
echo "Installing skills -> $DEST_DIR/skills/"
cp -r "$SCRIPT_DIR/skills/." "$DEST_DIR/skills/"
fi
echo "Done. Antigravity configs installed to $DEST_DIR/"
fi
# Delegate to the Node installer runtime; exec replaces this shell, so the
# Node process's exit status becomes the script's exit status.
exec node "$SCRIPT_DIR/scripts/install-apply.js" "$@"

View File

@@ -0,0 +1,173 @@
{
"version": 1,
"components": [
{
"id": "baseline:rules",
"family": "baseline",
"description": "Core shared rules and supported language rule packs.",
"modules": [
"rules-core"
]
},
{
"id": "baseline:agents",
"family": "baseline",
"description": "Baseline agent definitions and shared AGENTS guidance.",
"modules": [
"agents-core"
]
},
{
"id": "baseline:commands",
"family": "baseline",
"description": "Core command library and workflow command docs.",
"modules": [
"commands-core"
]
},
{
"id": "baseline:hooks",
"family": "baseline",
"description": "Hook runtime configs and hook helper scripts.",
"modules": [
"hooks-runtime"
]
},
{
"id": "baseline:platform",
"family": "baseline",
"description": "Platform configs, package-manager setup, and MCP catalog defaults.",
"modules": [
"platform-configs"
]
},
{
"id": "baseline:workflow",
"family": "baseline",
"description": "Evaluation, TDD, verification, and compaction workflow support.",
"modules": [
"workflow-quality"
]
},
{
"id": "lang:typescript",
"family": "language",
"description": "TypeScript and frontend/backend application-engineering guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "lang:python",
"family": "language",
"description": "Python and Django-oriented engineering guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "lang:go",
"family": "language",
"description": "Go-focused coding and testing guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "lang:java",
"family": "language",
"description": "Java and Spring application guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "framework:react",
"family": "framework",
"description": "React-focused engineering guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "framework:nextjs",
"family": "framework",
"description": "Next.js-focused engineering guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "framework:django",
"family": "framework",
"description": "Django-focused engineering guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "framework:springboot",
"family": "framework",
"description": "Spring Boot-focused engineering guidance. Currently resolves through the shared framework-language module.",
"modules": [
"framework-language"
]
},
{
"id": "capability:database",
"family": "capability",
"description": "Database and persistence-oriented skills.",
"modules": [
"database"
]
},
{
"id": "capability:security",
"family": "capability",
"description": "Security review and security-focused framework guidance.",
"modules": [
"security"
]
},
{
"id": "capability:research",
"family": "capability",
"description": "Research and API-integration skills for deep investigations and external tooling.",
"modules": [
"research-apis"
]
},
{
"id": "capability:content",
"family": "capability",
"description": "Business, writing, market, and investor communication skills.",
"modules": [
"business-content"
]
},
{
"id": "capability:social",
"family": "capability",
"description": "Social publishing and distribution skills.",
"modules": [
"social-distribution"
]
},
{
"id": "capability:media",
"family": "capability",
"description": "Media generation and AI-assisted editing skills.",
"modules": [
"media-generation"
]
},
{
"id": "capability:orchestration",
"family": "capability",
"description": "Worktree and tmux orchestration runtime and workflow docs.",
"modules": [
"orchestration"
]
}
]
}

View File

@@ -0,0 +1,335 @@
{
"version": 1,
"modules": [
{
"id": "rules-core",
"kind": "rules",
"description": "Shared and language rules for supported harness targets.",
"paths": [
"rules"
],
"targets": [
"claude",
"cursor",
"antigravity"
],
"dependencies": [],
"defaultInstall": true,
"cost": "light",
"stability": "stable"
},
{
"id": "agents-core",
"kind": "agents",
"description": "Agent definitions and project-level agent guidance.",
"paths": [
".agents",
"agents",
"AGENTS.md"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [],
"defaultInstall": true,
"cost": "light",
"stability": "stable"
},
{
"id": "commands-core",
"kind": "commands",
"description": "Core slash-command library and command docs.",
"paths": [
"commands"
],
"targets": [
"claude",
"cursor",
"antigravity",
"opencode"
],
"dependencies": [],
"defaultInstall": true,
"cost": "medium",
"stability": "stable"
},
{
"id": "hooks-runtime",
"kind": "hooks",
"description": "Runtime hook configs and hook script helpers.",
"paths": [
"hooks",
"scripts/hooks"
],
"targets": [
"claude",
"cursor",
"opencode"
],
"dependencies": [],
"defaultInstall": true,
"cost": "medium",
"stability": "stable"
},
{
"id": "platform-configs",
"kind": "platform",
"description": "Baseline platform configs, package-manager setup, and MCP catalog.",
"paths": [
".claude-plugin",
".codex",
".cursor",
".opencode",
"mcp-configs",
"scripts/setup-package-manager.js"
],
"targets": [
"claude",
"cursor",
"codex",
"opencode"
],
"dependencies": [],
"defaultInstall": true,
"cost": "light",
"stability": "stable"
},
{
"id": "framework-language",
"kind": "skills",
"description": "Core framework, language, and application-engineering skills.",
"paths": [
"skills/backend-patterns",
"skills/coding-standards",
"skills/frontend-patterns",
"skills/frontend-slides",
"skills/golang-patterns",
"skills/golang-testing",
"skills/python-patterns",
"skills/python-testing",
"skills/django-patterns",
"skills/django-tdd",
"skills/django-verification",
"skills/java-coding-standards",
"skills/springboot-patterns",
"skills/springboot-tdd",
"skills/springboot-verification"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"rules-core",
"agents-core",
"commands-core",
"platform-configs"
],
"defaultInstall": false,
"cost": "medium",
"stability": "stable"
},
{
"id": "database",
"kind": "skills",
"description": "Database and persistence-focused skills.",
"paths": [
"skills/clickhouse-io",
"skills/jpa-patterns",
"skills/postgres-patterns"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"platform-configs"
],
"defaultInstall": false,
"cost": "medium",
"stability": "stable"
},
{
"id": "workflow-quality",
"kind": "skills",
"description": "Evaluation, TDD, verification, learning, and compaction skills.",
"paths": [
"skills/continuous-learning",
"skills/continuous-learning-v2",
"skills/eval-harness",
"skills/iterative-retrieval",
"skills/strategic-compact",
"skills/tdd-workflow",
"skills/verification-loop"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"platform-configs"
],
"defaultInstall": true,
"cost": "medium",
"stability": "stable"
},
{
"id": "security",
"kind": "skills",
"description": "Security review and security-focused framework guidance.",
"paths": [
"skills/security-review",
"skills/security-scan",
"skills/django-security",
"skills/springboot-security",
"the-security-guide.md"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"workflow-quality"
],
"defaultInstall": false,
"cost": "medium",
"stability": "stable"
},
{
"id": "research-apis",
"kind": "skills",
"description": "Research and API integration skills for deep investigations and model integrations.",
"paths": [
"skills/claude-api",
"skills/deep-research",
"skills/exa-search"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"platform-configs"
],
"defaultInstall": false,
"cost": "medium",
"stability": "stable"
},
{
"id": "business-content",
"kind": "skills",
"description": "Business, writing, market, and investor communication skills.",
"paths": [
"skills/article-writing",
"skills/content-engine",
"skills/investor-materials",
"skills/investor-outreach",
"skills/market-research"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"platform-configs"
],
"defaultInstall": false,
"cost": "heavy",
"stability": "stable"
},
{
"id": "social-distribution",
"kind": "skills",
"description": "Social publishing and distribution skills.",
"paths": [
"skills/crosspost",
"skills/x-api"
],
"targets": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
],
"dependencies": [
"business-content"
],
"defaultInstall": false,
"cost": "medium",
"stability": "stable"
},
{
"id": "media-generation",
"kind": "skills",
"description": "Media generation and AI-assisted editing skills.",
"paths": [
"skills/fal-ai-media",
"skills/video-editing",
"skills/videodb"
],
"targets": [
"claude",
"cursor",
"codex",
"opencode"
],
"dependencies": [
"platform-configs"
],
"defaultInstall": false,
"cost": "heavy",
"stability": "beta"
},
{
"id": "orchestration",
"kind": "orchestration",
"description": "Worktree/tmux orchestration runtime and workflow docs.",
"paths": [
"commands/multi-workflow.md",
"commands/orchestrate.md",
"commands/sessions.md",
"scripts/lib/orchestration-session.js",
"scripts/lib/tmux-worktree-orchestrator.js",
"scripts/orchestrate-codex-worker.sh",
"scripts/orchestrate-worktrees.js",
"scripts/orchestration-status.js",
"skills/dmux-workflows"
],
"targets": [
"claude",
"codex",
"opencode"
],
"dependencies": [
"commands-core",
"platform-configs"
],
"defaultInstall": false,
"cost": "medium",
"stability": "beta"
}
]
}

View File

@@ -0,0 +1,75 @@
{
"version": 1,
"profiles": {
"core": {
"description": "Minimal harness baseline with commands, hooks, platform configs, and quality workflow support.",
"modules": [
"rules-core",
"agents-core",
"commands-core",
"hooks-runtime",
"platform-configs",
"workflow-quality"
]
},
"developer": {
"description": "Default engineering profile for most ECC users working across app codebases.",
"modules": [
"rules-core",
"agents-core",
"commands-core",
"hooks-runtime",
"platform-configs",
"workflow-quality",
"framework-language",
"database",
"orchestration"
]
},
"security": {
"description": "Security-heavy setup with baseline runtime support and security-specific guidance.",
"modules": [
"rules-core",
"agents-core",
"commands-core",
"hooks-runtime",
"platform-configs",
"workflow-quality",
"security"
]
},
"research": {
"description": "Research and content-oriented setup for investigation, synthesis, and publishing workflows.",
"modules": [
"rules-core",
"agents-core",
"commands-core",
"hooks-runtime",
"platform-configs",
"workflow-quality",
"research-apis",
"business-content",
"social-distribution"
]
},
"full": {
"description": "Complete ECC install with all currently classified modules.",
"modules": [
"rules-core",
"agents-core",
"commands-core",
"hooks-runtime",
"platform-configs",
"framework-language",
"database",
"workflow-quality",
"security",
"research-apis",
"business-content",
"social-distribution",
"media-generation",
"orchestration"
]
}
}
}

View File

@@ -59,19 +59,28 @@
"examples/user-CLAUDE.md",
"examples/statusline.json",
"hooks/",
"manifests/",
"mcp-configs/",
"plugins/",
"rules/",
"schemas/",
"scripts/ci/",
"scripts/ecc.js",
"scripts/hooks/",
"scripts/lib/",
"scripts/claw.js",
"scripts/doctor.js",
"scripts/install-apply.js",
"scripts/install-plan.js",
"scripts/list-installed.js",
"scripts/orchestration-status.js",
"scripts/orchestrate-codex-worker.sh",
"scripts/orchestrate-worktrees.js",
"scripts/setup-package-manager.js",
"scripts/skill-create-output.js",
"scripts/repair.js",
"scripts/session-inspect.js",
"scripts/uninstall.js",
"skills/",
"AGENTS.md",
".claude-plugin/plugin.json",
@@ -80,16 +89,17 @@
"llms.txt"
],
"bin": {
"ecc": "scripts/ecc.js",
"ecc-install": "install.sh"
},
"scripts": {
"postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'",
"postinstall": "echo '\\n ecc-universal installed!\\n Run: npx ecc typescript\\n Compat: npx ecc-install typescript\\n Docs: https://github.com/affaan-m/everything-claude-code\\n'",
"lint": "eslint . && markdownlint '**/*.md' --ignore node_modules",
"claw": "node scripts/claw.js",
"orchestrate:status": "node scripts/orchestration-status.js",
"orchestrate:worker": "bash scripts/orchestrate-codex-worker.sh",
"orchestrate:tmux": "node scripts/orchestrate-worktrees.js",
"test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js",
"test": "node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-install-manifests.js && node scripts/ci/validate-no-personal-paths.js && node tests/run-all.js",
"coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js"
},
"devDependencies": {

View File

@@ -0,0 +1,58 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "ECC Install Config",
"type": "object",
"additionalProperties": false,
"required": [
"version"
],
"properties": {
"$schema": {
"type": "string",
"minLength": 1
},
"version": {
"type": "integer",
"const": 1
},
"target": {
"type": "string",
"enum": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
]
},
"profile": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
},
"modules": {
"type": "array",
"items": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
}
},
"include": {
"type": "array",
"items": {
"type": "string",
"pattern": "^(baseline|lang|framework|capability):[a-z0-9-]+$"
}
},
"exclude": {
"type": "array",
"items": {
"type": "string",
"pattern": "^(baseline|lang|framework|capability):[a-z0-9-]+$"
}
},
"options": {
"type": "object",
"additionalProperties": true
}
}
}

View File

@@ -0,0 +1,56 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "ECC Install Components",
"type": "object",
"additionalProperties": false,
"required": [
"version",
"components"
],
"properties": {
"version": {
"type": "integer",
"minimum": 1
},
"components": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": false,
"required": [
"id",
"family",
"description",
"modules"
],
"properties": {
"id": {
"type": "string",
"pattern": "^(baseline|lang|framework|capability):[a-z0-9-]+$"
},
"family": {
"type": "string",
"enum": [
"baseline",
"language",
"framework",
"capability"
]
},
"description": {
"type": "string",
"minLength": 1
},
"modules": {
"type": "array",
"minItems": 1,
"items": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
}
}
}
}
}
}
}

View File

@@ -0,0 +1,105 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "ECC Install Modules",
"type": "object",
"properties": {
"version": {
"type": "integer",
"minimum": 1
},
"modules": {
"type": "array",
"minItems": 1,
"items": {
"type": "object",
"properties": {
"id": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
},
"kind": {
"type": "string",
"enum": [
"rules",
"agents",
"commands",
"hooks",
"platform",
"orchestration",
"skills"
]
},
"description": {
"type": "string",
"minLength": 1
},
"paths": {
"type": "array",
"minItems": 1,
"items": {
"type": "string",
"minLength": 1
}
},
"targets": {
"type": "array",
"minItems": 1,
"items": {
"type": "string",
"enum": [
"claude",
"cursor",
"antigravity",
"codex",
"opencode"
]
}
},
"dependencies": {
"type": "array",
"items": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
}
},
"defaultInstall": {
"type": "boolean"
},
"cost": {
"type": "string",
"enum": [
"light",
"medium",
"heavy"
]
},
"stability": {
"type": "string",
"enum": [
"experimental",
"beta",
"stable"
]
}
},
"required": [
"id",
"kind",
"description",
"paths",
"targets",
"dependencies",
"defaultInstall",
"cost",
"stability"
],
"additionalProperties": false
}
}
},
"required": [
"version",
"modules"
],
"additionalProperties": false
}

View File

@@ -0,0 +1,45 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "ECC Install Profiles",
"type": "object",
"properties": {
"version": {
"type": "integer",
"minimum": 1
},
"profiles": {
"type": "object",
"minProperties": 1,
"propertyNames": {
"pattern": "^[a-z0-9-]+$"
},
"additionalProperties": {
"type": "object",
"properties": {
"description": {
"type": "string",
"minLength": 1
},
"modules": {
"type": "array",
"minItems": 1,
"items": {
"type": "string",
"pattern": "^[a-z0-9-]+$"
}
}
},
"required": [
"description",
"modules"
],
"additionalProperties": false
}
}
},
"required": [
"version",
"profiles"
],
"additionalProperties": false
}

View File

@@ -0,0 +1,210 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "ECC install state",
"type": "object",
"additionalProperties": false,
"required": [
"schemaVersion",
"installedAt",
"target",
"request",
"resolution",
"source",
"operations"
],
"properties": {
"schemaVersion": {
"type": "string",
"const": "ecc.install.v1"
},
"installedAt": {
"type": "string",
"minLength": 1
},
"lastValidatedAt": {
"type": "string",
"minLength": 1
},
"target": {
"type": "object",
"additionalProperties": false,
"required": [
"id",
"root",
"installStatePath"
],
"properties": {
"id": {
"type": "string",
"minLength": 1
},
"target": {
"type": "string",
"minLength": 1
},
"kind": {
"type": "string",
"enum": [
"home",
"project"
]
},
"root": {
"type": "string",
"minLength": 1
},
"installStatePath": {
"type": "string",
"minLength": 1
}
}
},
"request": {
"type": "object",
"additionalProperties": false,
"required": [
"profile",
"modules",
"includeComponents",
"excludeComponents",
"legacyLanguages",
"legacyMode"
],
"properties": {
"profile": {
"type": [
"string",
"null"
]
},
"modules": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
}
},
"includeComponents": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
}
},
"excludeComponents": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
}
},
"legacyLanguages": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
}
},
"legacyMode": {
"type": "boolean"
}
}
},
"resolution": {
"type": "object",
"additionalProperties": false,
"required": [
"selectedModules",
"skippedModules"
],
"properties": {
"selectedModules": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
}
},
"skippedModules": {
"type": "array",
"items": {
"type": "string",
"minLength": 1
}
}
}
},
"source": {
"type": "object",
"additionalProperties": false,
"required": [
"repoVersion",
"repoCommit",
"manifestVersion"
],
"properties": {
"repoVersion": {
"type": [
"string",
"null"
]
},
"repoCommit": {
"type": [
"string",
"null"
]
},
"manifestVersion": {
"type": "integer",
"minimum": 1
}
}
},
"operations": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": true,
"required": [
"kind",
"moduleId",
"sourceRelativePath",
"destinationPath",
"strategy",
"ownership",
"scaffoldOnly"
],
"properties": {
"kind": {
"type": "string",
"minLength": 1
},
"moduleId": {
"type": "string",
"minLength": 1
},
"sourceRelativePath": {
"type": "string",
"minLength": 1
},
"destinationPath": {
"type": "string",
"minLength": 1
},
"strategy": {
"type": "string",
"minLength": 1
},
"ownership": {
"type": "string",
"minLength": 1
},
"scaffoldOnly": {
"type": "boolean"
}
}
}
}
}
}

View File

@@ -0,0 +1,211 @@
#!/usr/bin/env node
/**
* Validate selective-install manifests and profile/module relationships.
*/
const fs = require('fs');
const path = require('path');
const Ajv = require('ajv');
const REPO_ROOT = path.join(__dirname, '../..');
const MODULES_MANIFEST_PATH = path.join(REPO_ROOT, 'manifests/install-modules.json');
const PROFILES_MANIFEST_PATH = path.join(REPO_ROOT, 'manifests/install-profiles.json');
const COMPONENTS_MANIFEST_PATH = path.join(REPO_ROOT, 'manifests/install-components.json');
const MODULES_SCHEMA_PATH = path.join(REPO_ROOT, 'schemas/install-modules.schema.json');
const PROFILES_SCHEMA_PATH = path.join(REPO_ROOT, 'schemas/install-profiles.schema.json');
const COMPONENTS_SCHEMA_PATH = path.join(REPO_ROOT, 'schemas/install-components.schema.json');
// Maps a component "family" to the id prefix its ids must carry
// (e.g. family "language" components must be named "lang:<name>").
const COMPONENT_FAMILY_PREFIXES = {
  baseline: 'baseline:',
  language: 'lang:',
  framework: 'framework:',
  capability: 'capability:',
};
// Read and parse a JSON file. Both read failures (missing file) and parse
// failures surface as one labeled error so callers can report which
// manifest broke without inspecting the underlying exception type.
function readJson(filePath, label) {
  try {
    const raw = fs.readFileSync(filePath, 'utf8');
    return JSON.parse(raw);
  } catch (error) {
    throw new Error(`Invalid JSON in ${label}: ${error.message}`);
  }
}
// Canonicalize a manifest-relative path: forward slashes only, and no
// trailing slash, so path-claim comparisons are platform-independent.
function normalizeRelativePath(relativePath) {
  const forwardSlashed = String(relativePath).split('\\').join('/');
  return forwardSlashed.replace(/\/+$/, '');
}
// Compile the schema at `schemaPath` and validate `data` against it,
// printing one ERROR line per violation.
// NOTE: returns true when validation FAILED — callers accumulate it into
// an error flag (`hasErrors = validateSchema(...) || hasErrors`).
function validateSchema(ajv, schemaPath, data, label) {
  const schema = readJson(schemaPath, `${label} schema`);
  const check = ajv.compile(schema);
  if (check(data)) {
    return false;
  }
  for (const error of check.errors) {
    console.error(
      `ERROR: ${label} schema: ${error.instancePath || '/'} ${error.message}`
    );
  }
  return true;
}
// Cross-validate the selective-install manifests:
//   1. schema-validate modules/profiles/(optional) components JSON,
//   2. check module id uniqueness, dependency resolution, and path claims,
//   3. check profiles (required set present; "full" covers every module),
//   4. check component ids, family prefixes, and module references.
// Exits 0 when the manifests are absent (validation is opt-in for older
// checkouts) and 1 on any accumulated error.
function validateInstallManifests() {
  if (!fs.existsSync(MODULES_MANIFEST_PATH) || !fs.existsSync(PROFILES_MANIFEST_PATH)) {
    console.log('Install manifests not found, skipping validation');
    process.exit(0);
  }
  let hasErrors = false;
  let modulesData;
  let profilesData;
  // Components manifest is optional; this default keeps later loops no-ops.
  let componentsData = { version: null, components: [] };
  try {
    modulesData = readJson(MODULES_MANIFEST_PATH, 'install-modules.json');
    profilesData = readJson(PROFILES_MANIFEST_PATH, 'install-profiles.json');
    if (fs.existsSync(COMPONENTS_MANIFEST_PATH)) {
      componentsData = readJson(COMPONENTS_MANIFEST_PATH, 'install-components.json');
    }
  } catch (error) {
    console.error(`ERROR: ${error.message}`);
    process.exit(1);
  }
  const ajv = new Ajv({ allErrors: true });
  // validateSchema returns true when validation FAILED.
  hasErrors = validateSchema(ajv, MODULES_SCHEMA_PATH, modulesData, 'install-modules.json') || hasErrors;
  hasErrors = validateSchema(ajv, PROFILES_SCHEMA_PATH, profilesData, 'install-profiles.json') || hasErrors;
  if (fs.existsSync(COMPONENTS_MANIFEST_PATH)) {
    hasErrors = validateSchema(ajv, COMPONENTS_SCHEMA_PATH, componentsData, 'install-components.json') || hasErrors;
  }
  // Stop before relational checks if the shapes are wrong — the loops below
  // assume schema-valid data.
  if (hasErrors) {
    process.exit(1);
  }
  const modules = Array.isArray(modulesData.modules) ? modulesData.modules : [];
  const moduleIds = new Set();
  // Each repo path may be claimed by at most one module: path -> module id.
  const claimedPaths = new Map();
  for (const module of modules) {
    if (moduleIds.has(module.id)) {
      console.error(`ERROR: Duplicate install module id: ${module.id}`);
      hasErrors = true;
    }
    moduleIds.add(module.id);
    for (const dependency of module.dependencies) {
      // moduleIds only contains ids seen so far; the modules.some() fallback
      // resolves forward references to modules declared later in the file.
      if (!moduleIds.has(dependency) && !modules.some(candidate => candidate.id === dependency)) {
        console.error(`ERROR: Module ${module.id} depends on unknown module ${dependency}`);
        hasErrors = true;
      }
      if (dependency === module.id) {
        console.error(`ERROR: Module ${module.id} cannot depend on itself`);
        hasErrors = true;
      }
    }
    for (const relativePath of module.paths) {
      const normalizedPath = normalizeRelativePath(relativePath);
      const absolutePath = path.join(REPO_ROOT, normalizedPath);
      if (!fs.existsSync(absolutePath)) {
        console.error(
          `ERROR: Module ${module.id} references missing path: ${normalizedPath}`
        );
        hasErrors = true;
      }
      if (claimedPaths.has(normalizedPath)) {
        console.error(
          `ERROR: Install path ${normalizedPath} is claimed by both ${claimedPaths.get(normalizedPath)} and ${module.id}`
        );
        hasErrors = true;
      } else {
        claimedPaths.set(normalizedPath, module.id);
      }
    }
  }
  const profiles = profilesData.profiles || {};
  const components = Array.isArray(componentsData.components) ? componentsData.components : [];
  // These profiles must always exist so documented install commands work.
  const expectedProfileIds = ['core', 'developer', 'security', 'research', 'full'];
  for (const profileId of expectedProfileIds) {
    if (!profiles[profileId]) {
      console.error(`ERROR: Missing required install profile: ${profileId}`);
      hasErrors = true;
    }
  }
  for (const [profileId, profile] of Object.entries(profiles)) {
    const seenModules = new Set();
    for (const moduleId of profile.modules) {
      if (!moduleIds.has(moduleId)) {
        console.error(
          `ERROR: Profile ${profileId} references unknown module ${moduleId}`
        );
        hasErrors = true;
      }
      if (seenModules.has(moduleId)) {
        console.error(
          `ERROR: Profile ${profileId} contains duplicate module ${moduleId}`
        );
        hasErrors = true;
      }
      seenModules.add(moduleId);
    }
  }
  // "full" is defined as the profile that installs everything, so it must
  // enumerate every known module id.
  if (profiles.full) {
    const fullModules = new Set(profiles.full.modules);
    for (const moduleId of moduleIds) {
      if (!fullModules.has(moduleId)) {
        console.error(`ERROR: full profile is missing module ${moduleId}`);
        hasErrors = true;
      }
    }
  }
  const componentIds = new Set();
  for (const component of components) {
    if (componentIds.has(component.id)) {
      console.error(`ERROR: Duplicate install component id: ${component.id}`);
      hasErrors = true;
    }
    componentIds.add(component.id);
    // Component ids must carry their family's prefix (see
    // COMPONENT_FAMILY_PREFIXES above).
    const expectedPrefix = COMPONENT_FAMILY_PREFIXES[component.family];
    if (expectedPrefix && !component.id.startsWith(expectedPrefix)) {
      console.error(
        `ERROR: Component ${component.id} does not match expected ${component.family} prefix ${expectedPrefix}`
      );
      hasErrors = true;
    }
    const seenModules = new Set();
    for (const moduleId of component.modules) {
      if (!moduleIds.has(moduleId)) {
        console.error(`ERROR: Component ${component.id} references unknown module ${moduleId}`);
        hasErrors = true;
      }
      if (seenModules.has(moduleId)) {
        console.error(`ERROR: Component ${component.id} contains duplicate module ${moduleId}`);
        hasErrors = true;
      }
      seenModules.add(moduleId);
    }
  }
  if (hasErrors) {
    process.exit(1);
  }
  console.log(
    `Validated ${modules.length} install modules, ${components.length} install components, and ${Object.keys(profiles).length} profiles`
  );
}
validateInstallManifests();

110
scripts/doctor.js Normal file
View File

@@ -0,0 +1,110 @@
#!/usr/bin/env node
const { buildDoctorReport } = require('./lib/install-lifecycle');
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
// Print usage text for doctor.js, then terminate with the given exit code.
function showHelp(exitCode = 0) {
  console.log(`
Usage: node scripts/doctor.js [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--json]
Diagnose drift and missing managed files for ECC install-state in the current context.
`);
  process.exit(exitCode);
}
// Parse doctor.js CLI arguments.
//
// Fix over previous version: a trailing `--target` with no value used to
// silently push `null` into `targets` (args[index + 1] || null), which then
// flowed into the doctor report as a bogus target. It now throws a clear
// error instead. Unknown arguments still throw.
//
// Returns { targets: string[], json: boolean, help: boolean }.
function parseArgs(argv) {
  const args = argv.slice(2);
  const parsed = {
    targets: [],
    json: false,
    help: false,
  };
  for (let index = 0; index < args.length; index += 1) {
    const arg = args[index];
    if (arg === '--target') {
      const value = args[index + 1];
      if (value === undefined) {
        throw new Error('Missing value for --target');
      }
      parsed.targets.push(value);
      index += 1;
    } else if (arg === '--json') {
      parsed.json = true;
    } else if (arg === '--help' || arg === '-h') {
      parsed.help = true;
    } else {
      throw new Error(`Unknown argument: ${arg}`);
    }
  }
  return parsed;
}
// Map a doctor result status to its display label; unknown statuses are
// simply upper-cased.
function statusLabel(status) {
  const labels = new Map([
    ['ok', 'OK'],
    ['warning', 'WARNING'],
    ['error', 'ERROR'],
  ]);
  return labels.get(status) ?? status.toUpperCase();
}
// Render a doctor report for humans: one section per adapter result plus a
// roll-up summary line. `report` is expected to come from buildDoctorReport.
function printHuman(report) {
  if (report.results.length === 0) {
    console.log('No ECC install-state files found for the current home/project context.');
    return;
  }
  console.log('Doctor report:\n');
  for (const result of report.results) {
    console.log(`- ${result.adapter.id}`);
    console.log(`  Status: ${statusLabel(result.status)}`);
    console.log(`  Install-state: ${result.installStatePath}`);
    if (result.issues.length === 0) {
      console.log('  Issues: none');
      continue;
    }
    for (const issue of result.issues) {
      console.log(`  - [${issue.severity}] ${issue.code}: ${issue.message}`);
    }
  }
  console.log(`\nSummary: checked=${report.summary.checkedCount}, ok=${report.summary.okCount}, warnings=${report.summary.warningCount}, errors=${report.summary.errorCount}`);
}
// CLI entrypoint: parse args, build the doctor report for the current
// home/project context, and exit non-zero when anything needs attention.
function main() {
  try {
    const options = parseArgs(process.argv);
    if (options.help) {
      showHelp(0);
    }
    const report = buildDoctorReport({
      repoRoot: require('path').join(__dirname, '..'),
      homeDir: process.env.HOME,
      projectRoot: process.cwd(),
      targets: options.targets,
    });
    // Warnings count as failures too, so CI surfaces drift early.
    const hasIssues = report.summary.errorCount > 0 || report.summary.warningCount > 0;
    if (options.json) {
      console.log(JSON.stringify(report, null, 2));
    } else {
      printHuman(report);
    }
    process.exitCode = hasIssues ? 1 : 0;
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

194
scripts/ecc.js Normal file
View File

@@ -0,0 +1,194 @@
#!/usr/bin/env node
const { spawnSync } = require('child_process');
const path = require('path');
const { listAvailableLanguages } = require('./lib/install-executor');
// Registry of ecc subcommands. Each entry maps a command name to the sibling
// script (same directory) that implements it, plus a one-line description
// reused verbatim by the help output.
const COMMANDS = {
  install: {
    script: 'install-apply.js',
    description: 'Install ECC content into a supported target',
  },
  plan: {
    script: 'install-plan.js',
    description: 'Inspect selective-install manifests and resolved plans',
  },
  'install-plan': {
    script: 'install-plan.js',
    description: 'Alias for plan',
  },
  'list-installed': {
    script: 'list-installed.js',
    description: 'Inspect install-state files for the current context',
  },
  doctor: {
    script: 'doctor.js',
    description: 'Diagnose missing or drifted ECC-managed files',
  },
  repair: {
    script: 'repair.js',
    description: 'Restore drifted or missing ECC-managed files',
  },
  'session-inspect': {
    script: 'session-inspect.js',
    description: 'Emit canonical ECC session snapshots from dmux or Claude history targets',
  },
  uninstall: {
    script: 'uninstall.js',
    description: 'Remove ECC-managed files recorded in install-state',
  },
};
// Commands listed in top-level help, in display order. Aliases such as
// "install-plan" are intentionally omitted here.
const PRIMARY_COMMANDS = [
  'install',
  'plan',
  'list-installed',
  'doctor',
  'repair',
  'session-inspect',
  'uninstall',
];
// Print the top-level ecc CLI help (commands come from PRIMARY_COMMANDS /
// COMMANDS) and terminate with the given exit code.
function showHelp(exitCode = 0) {
  console.log(`
ECC selective-install CLI
Usage:
  ecc <command> [args...]
  ecc [install args...]
Commands:
${PRIMARY_COMMANDS.map(command => `  ${command.padEnd(15)} ${COMMANDS[command].description}`).join('\n')}
Compatibility:
  ecc-install        Legacy install entrypoint retained for existing flows
  ecc [args...]      Without a command, args are routed to "install"
  ecc help <command> Show help for a specific command
Examples:
  ecc typescript
  ecc install --profile developer --target claude
  ecc plan --profile core --target cursor
  ecc list-installed --json
  ecc doctor --target cursor
  ecc repair --dry-run
  ecc session-inspect claude:latest
  ecc uninstall --target antigravity --dry-run
`);
  process.exit(exitCode);
}
// Map raw process argv onto a dispatch decision:
//   { mode: 'help' }                          -- no args / --help / -h
//   { mode: 'help-command', command }         -- "ecc help <command>"
//   { mode: 'command', command, args }        -- explicit or implicit command
// A first argument that is neither a known command, a flag, nor a legacy
// language name is rejected.
function resolveCommand(argv) {
  const [firstArg, ...restArgs] = argv.slice(2);
  if (firstArg === undefined || firstArg === '--help' || firstArg === '-h') {
    return { mode: 'help' };
  }
  if (firstArg === 'help') {
    return {
      mode: 'help-command',
      command: restArgs[0] || null,
    };
  }
  if (COMMANDS[firstArg]) {
    return {
      mode: 'command',
      command: firstArg,
      args: restArgs,
    };
  }
  // Flags and legacy language names route to the implicit "install" command.
  const isFlag = firstArg.startsWith('-');
  const isLegacyLanguage = listAvailableLanguages().includes(firstArg);
  if (!isFlag && !isLegacyLanguage) {
    throw new Error(`Unknown command: ${firstArg}`);
  }
  return {
    mode: 'command',
    command: 'install',
    args: [firstArg, ...restArgs],
  };
}
// Run a registered subcommand's script synchronously and return its exit code.
//
// Fix over previous version: the child's output was captured with
// `encoding: 'utf8'` and replayed after exit, which destroyed stdout/stderr
// interleaving, disabled TTY detection (colors), and made interactive
// subcommands unusable. Inheriting stdio streams the child's output directly.
//
// Throws on an unknown command name, on spawn failure, or when the child is
// killed by a signal.
function runCommand(commandName, args) {
  const command = COMMANDS[commandName];
  if (!command) {
    throw new Error(`Unknown command: ${commandName}`);
  }
  const result = spawnSync(
    process.execPath,
    [path.join(__dirname, command.script), ...args],
    {
      cwd: process.cwd(),
      env: process.env,
      // Stream child output directly instead of buffer-and-replay.
      stdio: 'inherit',
    }
  );
  if (result.error) {
    throw result.error;
  }
  if (typeof result.status === 'number') {
    return result.status;
  }
  if (result.signal) {
    throw new Error(`Command "${commandName}" terminated by signal ${result.signal}`);
  }
  return 1;
}
// CLI entrypoint: resolve the requested command and dispatch it via
// runCommand. "ecc help <command>" is rewritten to "<command> --help".
// Any thrown error prints and exits 1.
function main() {
  try {
    const resolution = resolveCommand(process.argv);
    if (resolution.mode === 'help') {
      showHelp(0);
    }
    if (resolution.mode === 'help-command') {
      if (!resolution.command) {
        showHelp(0);
      }
      if (!COMMANDS[resolution.command]) {
        throw new Error(`Unknown command: ${resolution.command}`);
      }
      process.exitCode = runCommand(resolution.command, ['--help']);
      return;
    }
    process.exitCode = runCommand(resolution.command, resolution.args);
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

137
scripts/install-apply.js Normal file
View File

@@ -0,0 +1,137 @@
#!/usr/bin/env node
/**
* Refactored ECC installer runtime.
*
* Keeps the legacy language-based install entrypoint intact while moving
* target-specific mutation logic into testable Node code.
*/
const {
SUPPORTED_INSTALL_TARGETS,
listAvailableLanguages,
} = require('./lib/install-executor');
const {
LEGACY_INSTALL_TARGETS,
normalizeInstallRequest,
parseInstallArgs,
} = require('./lib/install/request');
const { loadInstallConfig } = require('./lib/install/config');
const { applyInstallPlan } = require('./lib/install/apply');
const { createInstallPlanFromRequest } = require('./lib/install/runtime');
// Print installer usage (legacy language mode plus the newer profile/module/
// config modes) and terminate with the given exit code.
function showHelp(exitCode = 0) {
  const languages = listAvailableLanguages();
  console.log(`
Usage: install.sh [--target <${LEGACY_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] <language> [<language> ...]
       install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --profile <name> [--with <component>]... [--without <component>]...
       install.sh [--target <${SUPPORTED_INSTALL_TARGETS.join('|')}>] [--dry-run] [--json] --modules <id,id,...> [--with <component>]... [--without <component>]...
       install.sh [--dry-run] [--json] --config <path>
Targets:
  claude (default)  - Install rules to ~/.claude/rules/
  cursor            - Install rules, hooks, and bundled Cursor configs to ./.cursor/
  antigravity       - Install rules, workflows, skills, and agents to ./.agent/
Options:
  --profile <name>     Resolve and install a manifest profile
  --modules <ids>      Resolve and install explicit module IDs
  --with <component>   Include a user-facing install component
  --without <component>
                       Exclude a user-facing install component
  --config <path>      Load install intent from ecc-install.json
  --dry-run            Show the install plan without copying files
  --json               Emit machine-readable plan/result JSON
  --help               Show this help text
Available languages:
${languages.map(language => `  - ${language}`).join('\n')}
`);
  process.exit(exitCode);
}
// Render an install plan (or the applied result — both carry the same
// fields) for humans. `dryRun` only changes the heading and the trailing
// confirmation line.
function printHumanPlan(plan, dryRun) {
  console.log(`${dryRun ? 'Dry-run install plan' : 'Applying install plan'}:\n`);
  console.log(`Mode: ${plan.mode}`);
  console.log(`Target: ${plan.target}`);
  console.log(`Adapter: ${plan.adapter.id}`);
  console.log(`Install root: ${plan.installRoot}`);
  console.log(`Install-state: ${plan.installStatePath}`);
  if (plan.mode === 'legacy') {
    console.log(`Languages: ${plan.languages.join(', ')}`);
  } else {
    // Manifest-driven mode: show the full component/module resolution.
    console.log(`Profile: ${plan.profileId || '(custom modules)'}`);
    console.log(`Included components: ${plan.includedComponentIds.join(', ') || '(none)'}`);
    console.log(`Excluded components: ${plan.excludedComponentIds.join(', ') || '(none)'}`);
    console.log(`Requested modules: ${plan.requestedModuleIds.join(', ') || '(none)'}`);
    console.log(`Selected modules: ${plan.selectedModuleIds.join(', ') || '(none)'}`);
    if (plan.skippedModuleIds.length > 0) {
      console.log(`Skipped modules: ${plan.skippedModuleIds.join(', ')}`);
    }
    if (plan.excludedModuleIds.length > 0) {
      console.log(`Excluded modules: ${plan.excludedModuleIds.join(', ')}`);
    }
  }
  console.log(`Operations: ${plan.operations.length}`);
  if (plan.warnings.length > 0) {
    console.log('\nWarnings:');
    for (const warning of plan.warnings) {
      console.log(`- ${warning}`);
    }
  }
  console.log('\nPlanned file operations:');
  for (const operation of plan.operations) {
    console.log(`- ${operation.sourceRelativePath} -> ${operation.destinationPath}`);
  }
  if (!dryRun) {
    console.log(`\nDone. Install-state written to ${plan.installStatePath}`);
  }
}
// CLI entrypoint: parse args, optionally load an ecc-install.json config,
// normalize everything into a single install request, resolve it into a
// concrete plan, then either print the plan (--dry-run) or apply it.
// Any thrown error prints and exits 1.
function main() {
  try {
    const options = parseInstallArgs(process.argv);
    if (options.help) {
      showHelp(0);
    }
    const config = options.configPath
      ? loadInstallConfig(options.configPath, { cwd: process.cwd() })
      : null;
    const request = normalizeInstallRequest({
      ...options,
      config,
    });
    const plan = createInstallPlanFromRequest(request, {
      projectRoot: process.cwd(),
      homeDir: process.env.HOME,
      // Override hook used by tests/CI to redirect the Claude rules dir.
      claudeRulesDir: process.env.CLAUDE_RULES_DIR || null,
    });
    if (options.dryRun) {
      if (options.json) {
        console.log(JSON.stringify({ dryRun: true, plan }, null, 2));
      } else {
        printHumanPlan(plan, true);
      }
      return;
    }
    const result = applyInstallPlan(plan);
    if (options.json) {
      console.log(JSON.stringify({ dryRun: false, result }, null, 2));
    } else {
      printHumanPlan(result, false);
    }
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

254
scripts/install-plan.js Normal file
View File

@@ -0,0 +1,254 @@
#!/usr/bin/env node
/**
* Inspect selective-install profiles and module plans without mutating targets.
*/
const {
listInstallComponents,
listInstallModules,
listInstallProfiles,
resolveInstallPlan,
} = require('./lib/install-manifests');
const { loadInstallConfig } = require('./lib/install/config');
const { normalizeInstallRequest } = require('./lib/install/request');
// Print usage for the plan-inspection CLI. Unlike the sibling scripts this
// helper does not exit; the caller decides the exit code.
function showHelp() {
  console.log(`
Inspect ECC selective-install manifests
Usage:
  node scripts/install-plan.js --list-profiles
  node scripts/install-plan.js --list-modules
  node scripts/install-plan.js --list-components [--family <family>] [--target <target>] [--json]
  node scripts/install-plan.js --profile <name> [--with <component>]... [--without <component>]... [--target <target>] [--json]
  node scripts/install-plan.js --modules <id,id,...> [--with <component>]... [--without <component>]... [--target <target>] [--json]
  node scripts/install-plan.js --config <path> [--json]
Options:
  --list-profiles      List available install profiles
  --list-modules       List install modules
  --list-components    List user-facing install components
  --family <family>    Filter listed components by family
  --profile <name>     Resolve an install profile
  --modules <ids>      Resolve explicit module IDs (comma-separated)
  --with <component>   Include a user-facing install component
  --without <component>
                       Exclude a user-facing install component
  --config <path>      Load install intent from ecc-install.json
  --target <target>    Filter plan for a specific target
  --json               Emit machine-readable JSON
  --help               Show this help text
`);
}
// Parse install-plan CLI arguments into a flat options object.
// Value-taking flags consume the next token; a missing value degrades to
// null/empty exactly as before. Unknown arguments throw.
function parseArgs(argv) {
  const remaining = argv.slice(2);
  const parsed = {
    json: false,
    help: false,
    profileId: null,
    moduleIds: [],
    includeComponentIds: [],
    excludeComponentIds: [],
    configPath: null,
    target: null,
    family: null,
    listProfiles: false,
    listModules: false,
    listComponents: false,
  };
  // Consume and return the next token (undefined when exhausted).
  const takeValue = () => remaining.shift();
  while (remaining.length > 0) {
    const arg = remaining.shift();
    switch (arg) {
      case '--help':
      case '-h':
        parsed.help = true;
        break;
      case '--json':
        parsed.json = true;
        break;
      case '--list-profiles':
        parsed.listProfiles = true;
        break;
      case '--list-modules':
        parsed.listModules = true;
        break;
      case '--list-components':
        parsed.listComponents = true;
        break;
      case '--family':
        parsed.family = takeValue() || null;
        break;
      case '--profile':
        parsed.profileId = takeValue() || null;
        break;
      case '--modules': {
        const raw = takeValue() || '';
        parsed.moduleIds = raw.split(',').map(value => value.trim()).filter(Boolean);
        break;
      }
      case '--with': {
        const componentId = (takeValue() || '').trim();
        if (componentId) {
          parsed.includeComponentIds.push(componentId);
        }
        break;
      }
      case '--without': {
        const componentId = (takeValue() || '').trim();
        if (componentId) {
          parsed.excludeComponentIds.push(componentId);
        }
        break;
      }
      case '--config':
        parsed.configPath = takeValue() || null;
        break;
      case '--target':
        parsed.target = takeValue() || null;
        break;
      default:
        throw new Error(`Unknown argument: ${arg}`);
    }
  }
  return parsed;
}
// Print the available install profiles in human-readable form.
function printProfiles(profiles) {
  console.log('Install profiles:\n');
  profiles.forEach((profile) => {
    console.log(`- ${profile.id} (${profile.moduleCount} modules)`);
    console.log(`  ${profile.description}`);
  });
}
// Print every install module with its targeting/cost/stability metadata.
function printModules(modules) {
  console.log('Install modules:\n');
  modules.forEach((module) => {
    const meta = `  targets=${module.targets.join(', ')} default=${module.defaultInstall} cost=${module.cost} stability=${module.stability}`;
    console.log(`- ${module.id} [${module.kind}]`);
    console.log(meta);
    console.log(`  ${module.description}`);
  });
}
// Print user-facing install components with their targets and member modules.
function printComponents(components) {
  console.log('Install components:\n');
  components.forEach((component) => {
    console.log(`- ${component.id} [${component.family}]`);
    console.log(`  targets=${component.targets.join(', ')} modules=${component.moduleIds.join(', ')}`);
    console.log(`  ${component.description}`);
  });
}
// Render a resolved (but not applied) install plan: selection summary,
// optional target-adapter details, then module buckets and the operation
// list when present.
function printPlan(plan) {
  console.log('Install plan:\n');
  console.log(
    'Note: target filtering and operation output currently reflect scaffold-level adapter planning, not a byte-for-byte mirror of legacy install.sh copy paths.\n'
  );
  console.log(`Profile: ${plan.profileId || '(custom modules)'}`);
  console.log(`Target: ${plan.target || '(all targets)'}`);
  console.log(`Included components: ${plan.includedComponentIds.join(', ') || '(none)'}`);
  console.log(`Excluded components: ${plan.excludedComponentIds.join(', ') || '(none)'}`);
  console.log(`Requested: ${plan.requestedModuleIds.join(', ')}`);
  // Adapter details only exist when the plan was filtered to one target.
  if (plan.targetAdapterId) {
    console.log(`Adapter: ${plan.targetAdapterId}`);
    console.log(`Target root: ${plan.targetRoot}`);
    console.log(`Install-state: ${plan.installStatePath}`);
  }
  console.log('');
  console.log(`Selected modules (${plan.selectedModuleIds.length}):`);
  for (const module of plan.selectedModules) {
    console.log(`- ${module.id} [${module.kind}]`);
  }
  if (plan.skippedModuleIds.length > 0) {
    console.log('');
    console.log(`Skipped for target ${plan.target} (${plan.skippedModuleIds.length}):`);
    for (const module of plan.skippedModules) {
      console.log(`- ${module.id} [${module.kind}]`);
    }
  }
  if (plan.excludedModuleIds.length > 0) {
    console.log('');
    console.log(`Excluded by selection (${plan.excludedModuleIds.length}):`);
    for (const module of plan.excludedModules) {
      console.log(`- ${module.id} [${module.kind}]`);
    }
  }
  if (plan.operations.length > 0) {
    console.log('');
    console.log(`Operation plan (${plan.operations.length}):`);
    for (const operation of plan.operations) {
      console.log(
        `- ${operation.moduleId}: ${operation.sourceRelativePath} -> ${operation.destinationPath} [${operation.strategy}]`
      );
    }
  }
}
// CLI entrypoint: handle list modes first, otherwise resolve a plan from
// the requested profile/modules/components (or a config file) and print it.
// Any thrown error prints and exits 1.
function main() {
  try {
    const options = parseArgs(process.argv);
    // With no arguments at all, behave like --help.
    if (options.help || process.argv.length <= 2) {
      showHelp();
      process.exit(0);
    }
    if (options.listProfiles) {
      const profiles = listInstallProfiles();
      if (options.json) {
        console.log(JSON.stringify({ profiles }, null, 2));
      } else {
        printProfiles(profiles);
      }
      return;
    }
    if (options.listModules) {
      const modules = listInstallModules();
      if (options.json) {
        console.log(JSON.stringify({ modules }, null, 2));
      } else {
        printModules(modules);
      }
      return;
    }
    if (options.listComponents) {
      const components = listInstallComponents({
        family: options.family,
        target: options.target,
      });
      if (options.json) {
        console.log(JSON.stringify({ components }, null, 2));
      } else {
        printComponents(components);
      }
      return;
    }
    const config = options.configPath
      ? loadInstallConfig(options.configPath, { cwd: process.cwd() })
      : null;
    // Legacy language arguments are not supported by this script, so the
    // request is normalized with an empty languages list on purpose.
    const request = normalizeInstallRequest({
      ...options,
      languages: [],
      config,
    });
    const plan = resolveInstallPlan({
      profileId: request.profileId,
      moduleIds: request.moduleIds,
      includeComponentIds: request.includeComponentIds,
      excludeComponentIds: request.excludeComponentIds,
      target: request.target,
    });
    if (options.json) {
      console.log(JSON.stringify(plan, null, 2));
    } else {
      printPlan(plan);
    }
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

View File

@@ -0,0 +1,605 @@
const fs = require('fs');
const path = require('path');
const { execFileSync } = require('child_process');
const { applyInstallPlan } = require('./install/apply');
const { LEGACY_INSTALL_TARGETS, parseInstallArgs } = require('./install/request');
const {
SUPPORTED_INSTALL_TARGETS,
resolveInstallPlan,
} = require('./install-manifests');
const { getInstallTargetAdapter } = require('./install-targets/registry');
const { createInstallState } = require('./install-state');
// Shape of a valid legacy language-name argument (alphanumerics, dash,
// underscore only).
const LANGUAGE_NAME_PATTERN = /^[a-zA-Z0-9_-]+$/;
// Runtime-generated install-state files; source paths ending with one of
// these suffixes are filtered out of copy planning (see
// isGeneratedRuntimeSourcePath below).
const EXCLUDED_GENERATED_SOURCE_SUFFIXES = [
  '/ecc-install-state.json',
  '/ecc/install-state.json',
];
function getSourceRoot() {
return path.join(__dirname, '../..');
}
// Read the "version" field of <sourceRoot>/package.json.
// Returns null when the file is missing, unparsable, or has no version.
function getPackageVersion(sourceRoot) {
  const packagePath = path.join(sourceRoot, 'package.json');
  try {
    const { version } = JSON.parse(fs.readFileSync(packagePath, 'utf8'));
    return version || null;
  } catch (_error) {
    return null;
  }
}
// Read the version of <sourceRoot>/manifests/install-modules.json.
// Falls back to 1 when the manifest is missing, unparsable, or unversioned.
function getManifestVersion(sourceRoot) {
  const manifestPath = path.join(sourceRoot, 'manifests', 'install-modules.json');
  try {
    const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf8'));
    return manifest.version || 1;
  } catch (_error) {
    return 1;
  }
}
// HEAD commit hash of the git checkout at sourceRoot, or null when git is
// unavailable, the lookup times out, or sourceRoot is not inside a repo.
function getRepoCommit(sourceRoot) {
  try {
    const stdout = execFileSync('git', ['rev-parse', 'HEAD'], {
      cwd: sourceRoot,
      encoding: 'utf8',
      stdio: ['ignore', 'pipe', 'ignore'],
      timeout: 5000,
    });
    return stdout.trim();
  } catch (_error) {
    return null;
  }
}
// Sorted names of the immediate subdirectories of dirPath.
// A missing directory yields an empty list rather than an error.
function readDirectoryNames(dirPath) {
  if (!fs.existsSync(dirPath)) {
    return [];
  }
  const names = [];
  for (const entry of fs.readdirSync(dirPath, { withFileTypes: true })) {
    if (entry.isDirectory()) {
      names.push(entry.name);
    }
  }
  return names.sort();
}
// Language rule directories available under rules/, excluding the shared
// 'common' directory.
function listAvailableLanguages(sourceRoot = getSourceRoot()) {
  const ruleDirs = readDirectoryNames(path.join(sourceRoot, 'rules'));
  return ruleDirs.filter(dirName => dirName !== 'common');
}
// Throws when `target` is not one of the legacy install targets.
function validateLegacyTarget(target) {
  if (LEGACY_INSTALL_TARGETS.includes(target)) {
    return;
  }
  const expected = LEGACY_INSTALL_TARGETS.join(', ');
  throw new Error(`Unknown install target: ${target}. Expected one of ${expected}`);
}
// Recursively lists every file under dirPath as paths relative to dirPath,
// sorted lexicographically. A missing directory yields [].
function listFilesRecursive(dirPath) {
  if (!fs.existsSync(dirPath)) {
    return [];
  }
  const collected = [];
  for (const entry of fs.readdirSync(dirPath, { withFileTypes: true })) {
    if (entry.isFile()) {
      collected.push(entry.name);
    } else if (entry.isDirectory()) {
      for (const nested of listFilesRecursive(path.join(dirPath, entry.name))) {
        collected.push(path.join(entry.name, nested));
      }
    }
  }
  return collected.sort();
}
// True when the (slash-normalized) source path ends with one of the
// runtime-generated install-state suffixes that must never be copied.
function isGeneratedRuntimeSourcePath(sourceRelativePath) {
  const normalized = String(sourceRelativePath || '').replace(/\\/g, '/');
  for (const suffix of EXCLUDED_GENERATED_SOURCE_SUFFIXES) {
    if (normalized.endsWith(suffix)) {
      return true;
    }
  }
  return false;
}
// Canonical managed copy-file operation record used throughout planning.
// All copies planned here are repo-managed (ownership: 'managed') and real
// (not scaffold-only placeholders).
function buildCopyFileOperation({ moduleId, sourcePath, sourceRelativePath, destinationPath, strategy }) {
  const operation = {
    kind: 'copy-file',
    ownership: 'managed',
    scaffoldOnly: false,
    moduleId,
    sourcePath,
    sourceRelativePath,
    destinationPath,
    strategy,
  };
  return operation;
}
// Appends one managed copy operation per file found (recursively) under
// options.sourceRelativeDir, preserving the relative layout beneath
// options.destinationDir. Returns the number of files planned (0 when the
// source directory does not exist).
function addRecursiveCopyOperations(operations, options) {
  const sourceDir = path.join(options.sourceRoot, options.sourceRelativeDir);
  if (!fs.existsSync(sourceDir)) {
    return 0;
  }
  const relativeFiles = listFilesRecursive(sourceDir);
  for (const relativeFile of relativeFiles) {
    const sourceRelativePath = path.join(options.sourceRelativeDir, relativeFile);
    operations.push(buildCopyFileOperation({
      moduleId: options.moduleId,
      sourcePath: path.join(options.sourceRoot, sourceRelativePath),
      sourceRelativePath,
      destinationPath: path.join(options.destinationDir, relativeFile),
      strategy: options.strategy || 'preserve-relative-path',
    }));
  }
  return relativeFiles.length;
}
// Appends a single managed copy operation when the source file exists.
// Returns true when an operation was planned, false when the source is absent.
function addFileCopyOperation(operations, options) {
  const sourcePath = path.join(options.sourceRoot, options.sourceRelativePath);
  if (!fs.existsSync(sourcePath)) {
    return false;
  }
  const operation = buildCopyFileOperation({
    moduleId: options.moduleId,
    sourcePath,
    sourceRelativePath: options.sourceRelativePath,
    destinationPath: options.destinationPath,
    strategy: options.strategy || 'preserve-relative-path',
  });
  operations.push(operation);
  return true;
}
// Appends a copy operation for every file directly inside sourceRelativeDir
// whose name satisfies options.matcher, optionally renaming the destination
// via options.rename. Returns the number of matched files (0 when the source
// directory does not exist).
function addMatchingRuleOperations(operations, options) {
  const sourceDir = path.join(options.sourceRoot, options.sourceRelativeDir);
  if (!fs.existsSync(sourceDir)) {
    return 0;
  }
  const matchedNames = fs.readdirSync(sourceDir, { withFileTypes: true })
    .filter(entry => entry.isFile() && options.matcher(entry.name))
    .map(entry => entry.name)
    .sort();
  for (const fileName of matchedNames) {
    const sourceRelativePath = path.join(options.sourceRelativeDir, fileName);
    const destinationName = options.rename ? options.rename(fileName) : fileName;
    operations.push(buildCopyFileOperation({
      moduleId: options.moduleId,
      sourcePath: path.join(options.sourceRoot, sourceRelativePath),
      sourceRelativePath,
      destinationPath: path.join(options.destinationDir, destinationName),
      strategy: options.strategy || 'flatten-copy',
    }));
  }
  return matchedNames.length;
}
// True when dirPath exists, is a directory, and contains at least one entry.
function isDirectoryNonEmpty(dirPath) {
  if (!fs.existsSync(dirPath)) {
    return false;
  }
  if (!fs.statSync(dirPath).isDirectory()) {
    return false;
  }
  return fs.readdirSync(dirPath).length > 0;
}
// Plans the legacy Claude install: copies rules/common plus each requested
// language's rules tree into the Claude rules directory. Pure planning —
// returns operations + warnings + plan metadata; nothing is written here.
function planClaudeLegacyInstall(context) {
  const adapter = getInstallTargetAdapter('claude');
  const targetRoot = adapter.resolveRoot({ homeDir: context.homeDir });
  // The rules dir can be overridden (CLAUDE_RULES_DIR / explicit option).
  const rulesDir = context.claudeRulesDir || path.join(targetRoot, 'rules');
  const installStatePath = adapter.getInstallStatePath({ homeDir: context.homeDir });
  const operations = [];
  const warnings = [];
  if (isDirectoryNonEmpty(rulesDir)) {
    warnings.push(
      `Destination ${rulesDir}/ already exists and files may be overwritten`
    );
  }
  // Shared rules are always installed.
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-claude-rules',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('rules', 'common'),
    destinationDir: path.join(rulesDir, 'common'),
  });
  for (const language of context.languages) {
    // Reject names that could smuggle path separators (traversal guard).
    if (!LANGUAGE_NAME_PATTERN.test(language)) {
      warnings.push(
        `Invalid language name '${language}'. Only alphanumeric, dash, and underscore are allowed`
      );
      continue;
    }
    const sourceDir = path.join(context.sourceRoot, 'rules', language);
    if (!fs.existsSync(sourceDir)) {
      warnings.push(`rules/${language}/ does not exist, skipping`);
      continue;
    }
    addRecursiveCopyOperations(operations, {
      moduleId: 'legacy-claude-rules',
      sourceRoot: context.sourceRoot,
      sourceRelativeDir: path.join('rules', language),
      destinationDir: path.join(rulesDir, language),
    });
  }
  return {
    mode: 'legacy',
    adapter,
    target: 'claude',
    targetRoot,
    installRoot: rulesDir,
    installStatePath,
    operations,
    warnings,
    selectedModules: ['legacy-claude-rules'],
  };
}
// Plans the legacy Cursor install: pre-flattened common + per-language rule
// files, the agents/skills/commands/hooks trees, and the hooks.json /
// mcp.json config files, all sourced from the repo's .cursor directory.
// Pure planning — returns operations + warnings; nothing is written here.
function planCursorLegacyInstall(context) {
  const adapter = getInstallTargetAdapter('cursor');
  const targetRoot = adapter.resolveRoot({ repoRoot: context.projectRoot });
  const installStatePath = adapter.getInstallStatePath({ repoRoot: context.projectRoot });
  const operations = [];
  const warnings = [];
  // Shared rules ship as already-flattened `common-*.md` files.
  addMatchingRuleOperations(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('.cursor', 'rules'),
    destinationDir: path.join(targetRoot, 'rules'),
    matcher: fileName => /^common-.*\.md$/.test(fileName),
  });
  for (const language of context.languages) {
    // Reject names that could smuggle path separators (traversal guard).
    if (!LANGUAGE_NAME_PATTERN.test(language)) {
      warnings.push(
        `Invalid language name '${language}'. Only alphanumeric, dash, and underscore are allowed`
      );
      continue;
    }
    // Per-language rules follow the `<language>-*.md` naming convention.
    const matches = addMatchingRuleOperations(operations, {
      moduleId: 'legacy-cursor-install',
      sourceRoot: context.sourceRoot,
      sourceRelativeDir: path.join('.cursor', 'rules'),
      destinationDir: path.join(targetRoot, 'rules'),
      matcher: fileName => fileName.startsWith(`${language}-`) && fileName.endsWith('.md'),
    });
    if (matches === 0) {
      warnings.push(`No Cursor rules for '${language}' found, skipping`);
    }
  }
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('.cursor', 'agents'),
    destinationDir: path.join(targetRoot, 'agents'),
  });
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('.cursor', 'skills'),
    destinationDir: path.join(targetRoot, 'skills'),
  });
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('.cursor', 'commands'),
    destinationDir: path.join(targetRoot, 'commands'),
  });
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('.cursor', 'hooks'),
    destinationDir: path.join(targetRoot, 'hooks'),
  });
  // Single config files: planned only when present in the source repo.
  addFileCopyOperation(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativePath: path.join('.cursor', 'hooks.json'),
    destinationPath: path.join(targetRoot, 'hooks.json'),
  });
  addFileCopyOperation(operations, {
    moduleId: 'legacy-cursor-install',
    sourceRoot: context.sourceRoot,
    sourceRelativePath: path.join('.cursor', 'mcp.json'),
    destinationPath: path.join(targetRoot, 'mcp.json'),
  });
  return {
    mode: 'legacy',
    adapter,
    target: 'cursor',
    targetRoot,
    installRoot: targetRoot,
    installStatePath,
    operations,
    warnings,
    selectedModules: ['legacy-cursor-install'],
  };
}
// Plans the legacy Antigravity install. Rules are flattened into a single
// rules/ directory with `common-` / `<language>-` filename prefixes;
// commands/ maps to workflows/, and both agents/ and skills/ map to skills/.
// Pure planning — returns operations + warnings; nothing is written here.
function planAntigravityLegacyInstall(context) {
  const adapter = getInstallTargetAdapter('antigravity');
  const targetRoot = adapter.resolveRoot({ repoRoot: context.projectRoot });
  const installStatePath = adapter.getInstallStatePath({ repoRoot: context.projectRoot });
  const operations = [];
  const warnings = [];
  if (isDirectoryNonEmpty(path.join(targetRoot, 'rules'))) {
    warnings.push(
      `Destination ${path.join(targetRoot, 'rules')}/ already exists and files may be overwritten`
    );
  }
  // Common rules are flattened and prefixed to avoid name collisions.
  addMatchingRuleOperations(operations, {
    moduleId: 'legacy-antigravity-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: path.join('rules', 'common'),
    destinationDir: path.join(targetRoot, 'rules'),
    matcher: fileName => fileName.endsWith('.md'),
    rename: fileName => `common-${fileName}`,
  });
  for (const language of context.languages) {
    // Reject names that could smuggle path separators (traversal guard).
    if (!LANGUAGE_NAME_PATTERN.test(language)) {
      warnings.push(
        `Invalid language name '${language}'. Only alphanumeric, dash, and underscore are allowed`
      );
      continue;
    }
    const sourceDir = path.join(context.sourceRoot, 'rules', language);
    if (!fs.existsSync(sourceDir)) {
      warnings.push(`rules/${language}/ does not exist, skipping`);
      continue;
    }
    addMatchingRuleOperations(operations, {
      moduleId: 'legacy-antigravity-install',
      sourceRoot: context.sourceRoot,
      sourceRelativeDir: path.join('rules', language),
      destinationDir: path.join(targetRoot, 'rules'),
      matcher: fileName => fileName.endsWith('.md'),
      rename: fileName => `${language}-${fileName}`,
    });
  }
  // Antigravity calls commands "workflows".
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-antigravity-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: 'commands',
    destinationDir: path.join(targetRoot, 'workflows'),
  });
  // Agents and skills both land in skills/.
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-antigravity-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: 'agents',
    destinationDir: path.join(targetRoot, 'skills'),
  });
  addRecursiveCopyOperations(operations, {
    moduleId: 'legacy-antigravity-install',
    sourceRoot: context.sourceRoot,
    sourceRelativeDir: 'skills',
    destinationDir: path.join(targetRoot, 'skills'),
  });
  return {
    mode: 'legacy',
    adapter,
    target: 'antigravity',
    targetRoot,
    installRoot: targetRoot,
    installStatePath,
    operations,
    warnings,
    selectedModules: ['legacy-antigravity-install'],
  };
}
// Builds a complete legacy install plan (claude / cursor / antigravity) from
// CLI-style options, including the install-state preview that would be
// persisted after apply. Pure planning: no filesystem writes happen here.
//
// options: sourceRoot/projectRoot/homeDir/target/languages/claudeRulesDir,
// all optional with env or cwd fallbacks.
// Throws for targets outside LEGACY_INSTALL_TARGETS.
function createLegacyInstallPlan(options = {}) {
  const sourceRoot = options.sourceRoot || getSourceRoot();
  const projectRoot = options.projectRoot || process.cwd();
  const homeDir = options.homeDir || process.env.HOME;
  const target = options.target || 'claude';
  validateLegacyTarget(target);
  const context = {
    sourceRoot,
    projectRoot,
    homeDir,
    languages: Array.isArray(options.languages) ? options.languages : [],
    // CLAUDE_RULES_DIR lets users redirect the Claude rules directory.
    claudeRulesDir: options.claudeRulesDir || process.env.CLAUDE_RULES_DIR || null,
  };
  let plan;
  if (target === 'claude') {
    plan = planClaudeLegacyInstall(context);
  } else if (target === 'cursor') {
    plan = planCursorLegacyInstall(context);
  } else {
    plan = planAntigravityLegacyInstall(context);
  }
  // Provenance recorded into install-state for later doctor/repair runs.
  const source = {
    repoVersion: getPackageVersion(sourceRoot),
    repoCommit: getRepoCommit(sourceRoot),
    manifestVersion: getManifestVersion(sourceRoot),
  };
  const statePreview = createInstallState({
    adapter: plan.adapter,
    targetRoot: plan.targetRoot,
    installStatePath: plan.installStatePath,
    request: {
      profile: null,
      modules: [],
      legacyLanguages: context.languages,
      legacyMode: true,
    },
    resolution: {
      selectedModules: plan.selectedModules,
      skippedModules: [],
    },
    operations: plan.operations,
    source,
  });
  // Flatten the adapter to its identifying fields for serialization.
  return {
    mode: 'legacy',
    target: plan.target,
    adapter: {
      id: plan.adapter.id,
      target: plan.adapter.target,
      kind: plan.adapter.kind,
    },
    targetRoot: plan.targetRoot,
    installRoot: plan.installRoot,
    installStatePath: plan.installStatePath,
    warnings: plan.warnings,
    languages: context.languages,
    operations: plan.operations,
    statePreview,
  };
}
// Expands a manifest operation into concrete copy-file operations: a file
// source becomes one operation, a directory source becomes one per contained
// file (layout preserved). Missing sources and runtime-generated
// install-state files expand to [].
function materializeScaffoldOperation(sourceRoot, operation) {
  const sourcePath = path.join(sourceRoot, operation.sourceRelativePath);
  if (!fs.existsSync(sourcePath)) {
    return [];
  }
  // Never re-distribute runtime-generated state files.
  if (isGeneratedRuntimeSourcePath(operation.sourceRelativePath)) {
    return [];
  }
  const stat = fs.statSync(sourcePath);
  if (stat.isFile()) {
    return [buildCopyFileOperation({
      moduleId: operation.moduleId,
      sourcePath,
      sourceRelativePath: operation.sourceRelativePath,
      destinationPath: operation.destinationPath,
      strategy: operation.strategy,
    })];
  }
  // Directory source: expand recursively, still excluding generated files.
  const relativeFiles = listFilesRecursive(sourcePath).filter(relativeFile => {
    const sourceRelativePath = path.join(operation.sourceRelativePath, relativeFile);
    return !isGeneratedRuntimeSourcePath(sourceRelativePath);
  });
  return relativeFiles.map(relativeFile => {
    const sourceRelativePath = path.join(operation.sourceRelativePath, relativeFile);
    return buildCopyFileOperation({
      moduleId: operation.moduleId,
      sourcePath: path.join(sourcePath, relativeFile),
      sourceRelativePath,
      destinationPath: path.join(operation.destinationPath, relativeFile),
      strategy: operation.strategy,
    });
  });
}
// Builds a manifest-driven install plan: resolves profile/module/component
// selection via resolveInstallPlan, materializes the resulting operations
// into concrete file copies, and attaches the install-state preview that
// would be persisted after apply. Pure planning: nothing is written here.
function createManifestInstallPlan(options = {}) {
  const sourceRoot = options.sourceRoot || getSourceRoot();
  const projectRoot = options.projectRoot || process.cwd();
  const target = options.target || 'claude';
  const plan = resolveInstallPlan({
    repoRoot: sourceRoot,
    projectRoot,
    homeDir: options.homeDir,
    profileId: options.profileId || null,
    moduleIds: options.moduleIds || [],
    includeComponentIds: options.includeComponentIds || [],
    excludeComponentIds: options.excludeComponentIds || [],
    target,
  });
  const adapter = getInstallTargetAdapter(target);
  // Expand abstract manifest operations into per-file copy operations.
  const operations = plan.operations.flatMap(operation => materializeScaffoldOperation(sourceRoot, operation));
  // Provenance recorded into install-state for later doctor/repair runs.
  const source = {
    repoVersion: getPackageVersion(sourceRoot),
    repoCommit: getRepoCommit(sourceRoot),
    manifestVersion: getManifestVersion(sourceRoot),
  };
  const statePreview = createInstallState({
    adapter,
    targetRoot: plan.targetRoot,
    installStatePath: plan.installStatePath,
    request: {
      profile: plan.profileId,
      // Copy caller arrays defensively so the preview cannot be mutated.
      modules: Array.isArray(options.moduleIds) ? [...options.moduleIds] : [],
      includeComponents: Array.isArray(options.includeComponentIds)
        ? [...options.includeComponentIds]
        : [],
      excludeComponents: Array.isArray(options.excludeComponentIds)
        ? [...options.excludeComponentIds]
        : [],
      legacyLanguages: [],
      legacyMode: false,
    },
    resolution: {
      selectedModules: plan.selectedModuleIds,
      skippedModules: plan.skippedModuleIds,
    },
    operations,
    source,
  });
  return {
    mode: 'manifest',
    target,
    adapter: {
      id: adapter.id,
      target: adapter.target,
      kind: adapter.kind,
    },
    targetRoot: plan.targetRoot,
    installRoot: plan.targetRoot,
    installStatePath: plan.installStatePath,
    warnings: [],
    languages: [],
    profileId: plan.profileId,
    requestedModuleIds: plan.requestedModuleIds,
    explicitModuleIds: plan.explicitModuleIds,
    includedComponentIds: plan.includedComponentIds,
    excludedComponentIds: plan.excludedComponentIds,
    selectedModuleIds: plan.selectedModuleIds,
    skippedModuleIds: plan.skippedModuleIds,
    excludedModuleIds: plan.excludedModuleIds,
    operations,
    statePreview,
  };
}
// Public install-executor API: plan builders (legacy + manifest), the apply
// step, and helpers shared with the CLI entry point.
module.exports = {
  SUPPORTED_INSTALL_TARGETS,
  LEGACY_INSTALL_TARGETS,
  applyInstallPlan,
  createManifestInstallPlan,
  createLegacyInstallPlan,
  getSourceRoot,
  listAvailableLanguages,
  parseInstallArgs,
};

View File

@@ -0,0 +1,763 @@
const fs = require('fs');
const path = require('path');
const { resolveInstallPlan, loadInstallManifests } = require('./install-manifests');
const { readInstallState, writeInstallState } = require('./install-state');
const {
applyInstallPlan,
createLegacyInstallPlan,
createManifestInstallPlan,
} = require('./install-executor');
const {
getInstallTargetAdapter,
listInstallTargetAdapters,
} = require('./install-targets/registry');
// Repository root two levels above scripts/lib/ (default when callers omit repoRoot).
const DEFAULT_REPO_ROOT = path.join(__dirname, '../..');
// Version string from <repoRoot>/package.json, or null when the file is
// missing, unreadable, invalid, or has no truthy version.
function readPackageVersion(repoRoot) {
  const packagePath = path.join(repoRoot, 'package.json');
  try {
    const manifest = JSON.parse(fs.readFileSync(packagePath, 'utf8'));
    return manifest.version || null;
  } catch (_error) {
    return null;
  }
}
// Normalizes a requested target list to canonical adapter targets,
// de-duplicated in first-seen order. An absent or empty list means
// "every registered target".
function normalizeTargets(targets) {
  if (!Array.isArray(targets) || targets.length === 0) {
    return listInstallTargetAdapters().map(adapter => adapter.target);
  }
  const seen = new Set();
  for (const target of targets) {
    seen.add(getInstallTargetAdapter(target).target);
  }
  return [...seen];
}
// Element-wise equality of two string arrays; any non-array operand is
// treated as the empty array.
function compareStringArrays(left, right) {
  const a = Array.isArray(left) ? left : [];
  const b = Array.isArray(right) ? right : [];
  if (a.length !== b.length) {
    return false;
  }
  for (let i = 0; i < a.length; i += 1) {
    if (a[i] !== b[i]) {
      return false;
    }
  }
  return true;
}
// Managed operations recorded in an install-state; a null state or a
// missing/invalid operations array yields [].
function getManagedOperations(state) {
  const operations = state && Array.isArray(state.operations) ? state.operations : [];
  return operations.filter(operation => operation.ownership === 'managed');
}
// Prefers the repo-relative source path (rebased on the current repoRoot);
// falls back to the absolute sourcePath recorded in the operation, else null.
function resolveOperationSourcePath(repoRoot, operation) {
  if (!operation.sourceRelativePath) {
    return operation.sourcePath || null;
  }
  return path.join(repoRoot, operation.sourceRelativePath);
}
// Byte-for-byte comparison of two regular files. Any stat/read failure or a
// non-file operand yields false rather than throwing.
function areFilesEqual(leftPath, rightPath) {
  try {
    if (!fs.statSync(leftPath).isFile() || !fs.statSync(rightPath).isFile()) {
      return false;
    }
    const leftBytes = fs.readFileSync(leftPath);
    return leftBytes.equals(fs.readFileSync(rightPath));
  } catch (_error) {
    return false;
  }
}
// Classifies the on-disk health of one managed operation. Possible statuses:
//   'invalid-destination' — no destinationPath recorded
//   'missing'             — destination file absent
//   'unverified'          — not a copy-file op, so no content check possible
//   'missing-source'      — repo source cannot be resolved or is gone
//   'drifted'             — destination differs from the repo source
//   'ok'                  — destination matches the repo source
function inspectManagedOperation(repoRoot, operation) {
  const result = { operation };
  const destinationPath = operation.destinationPath;
  if (!destinationPath) {
    return { ...result, status: 'invalid-destination' };
  }
  result.destinationPath = destinationPath;
  if (!fs.existsSync(destinationPath)) {
    return { ...result, status: 'missing' };
  }
  if (operation.kind !== 'copy-file') {
    // Only copy-file operations can be content-verified against the repo.
    return { ...result, status: 'unverified' };
  }
  const sourcePath = resolveOperationSourcePath(repoRoot, operation);
  if (!sourcePath || !fs.existsSync(sourcePath)) {
    return { ...result, sourcePath, status: 'missing-source' };
  }
  result.sourcePath = sourcePath;
  const status = areFilesEqual(sourcePath, destinationPath) ? 'ok' : 'drifted';
  return { ...result, status };
}
// Buckets managed operations by inspection status. 'ok' entries are dropped;
// everything else lands in missing / drifted / missingSource / unverified
// ('invalid-destination' counts as unverified).
function summarizeManagedOperationHealth(repoRoot, operations) {
  const summary = { missing: [], drifted: [], missingSource: [], unverified: [] };
  for (const operation of operations) {
    const inspection = inspectManagedOperation(repoRoot, operation);
    switch (inspection.status) {
      case 'missing':
        summary.missing.push(inspection);
        break;
      case 'drifted':
        summary.drifted.push(inspection);
        break;
      case 'missing-source':
        summary.missingSource.push(inspection);
        break;
      case 'unverified':
      case 'invalid-destination':
        summary.unverified.push(inspection);
        break;
      default:
        break;
    }
  }
  return summary;
}
// Builds one discovery record for an adapter: where its install root and
// install-state live, whether the state file exists, and the parsed state
// (or the parse error message) when it does.
function buildDiscoveryRecord(adapter, context) {
  const installTargetInput = {
    homeDir: context.homeDir,
    projectRoot: context.projectRoot,
    repoRoot: context.projectRoot,
  };
  const base = {
    adapter: {
      id: adapter.id,
      target: adapter.target,
      kind: adapter.kind,
    },
    targetRoot: adapter.resolveRoot(installTargetInput),
    installStatePath: adapter.getInstallStatePath(installTargetInput),
  };
  if (!fs.existsSync(base.installStatePath)) {
    return { ...base, exists: false, state: null, error: null };
  }
  try {
    const state = readInstallState(base.installStatePath);
    return { ...base, exists: true, state, error: null };
  } catch (error) {
    // Keep the record so doctor can surface the corrupt state file.
    return { ...base, exists: true, state: null, error: error.message };
  }
}
// Discovers install-state records for the requested targets (default: every
// registered target), resolved against the given home/project roots.
function discoverInstalledStates(options = {}) {
  const context = {
    homeDir: options.homeDir || process.env.HOME,
    projectRoot: options.projectRoot || process.cwd(),
  };
  return normalizeTargets(options.targets)
    .map(target => buildDiscoveryRecord(getInstallTargetAdapter(target), context));
}
// Builds a doctor issue record; extra fields are merged in last so callers
// can attach structured detail (paths, recorded values, ...).
function buildIssue(severity, code, message, extra = {}) {
  return Object.assign({ severity, code, message }, extra);
}
// Worst severity wins: any error -> 'error', else any warning -> 'warning',
// otherwise 'ok'.
function determineStatus(issues) {
  let sawWarning = false;
  for (const issue of issues) {
    if (issue.severity === 'error') {
      return 'error';
    }
    if (issue.severity === 'warning') {
      sawWarning = true;
    }
  }
  return sawWarning ? 'warning' : 'ok';
}
// Validates one discovered install-state record against the current repo and
// manifests, returning { ...record, status, issues }.
// Checks performed:
//   - install-state parse errors
//   - target root existence; recorded-vs-current root and state-path drift
//   - managed file health (missing / drifted / missing-source / unverified)
//   - manifest and repo version drift
//   - (manifest installs only) module resolution drift vs the recorded one
function analyzeRecord(record, context) {
  const issues = [];
  if (record.error) {
    // State file exists but could not be parsed.
    issues.push(buildIssue('error', 'invalid-install-state', record.error));
    return {
      ...record,
      status: determineStatus(issues),
      issues,
    };
  }
  const state = record.state;
  if (!state) {
    // No state at all: reported as 'missing', not as an error.
    return {
      ...record,
      status: 'missing',
      issues,
    };
  }
  if (!fs.existsSync(state.target.root)) {
    issues.push(buildIssue(
      'error',
      'missing-target-root',
      `Target root does not exist: ${state.target.root}`
    ));
  }
  // Paths recorded at install time may diverge from the current resolution
  // (e.g. moved project, changed HOME); surface both values.
  if (state.target.root !== record.targetRoot) {
    issues.push(buildIssue(
      'warning',
      'target-root-mismatch',
      `Recorded target root differs from current target root (${record.targetRoot})`,
      {
        recordedTargetRoot: state.target.root,
        currentTargetRoot: record.targetRoot,
      }
    ));
  }
  if (state.target.installStatePath !== record.installStatePath) {
    issues.push(buildIssue(
      'warning',
      'install-state-path-mismatch',
      `Recorded install-state path differs from current path (${record.installStatePath})`,
      {
        recordedInstallStatePath: state.target.installStatePath,
        currentInstallStatePath: record.installStatePath,
      }
    ));
  }
  // Content-verify every managed file against the current repo sources.
  const managedOperations = getManagedOperations(state);
  const operationHealth = summarizeManagedOperationHealth(context.repoRoot, managedOperations);
  const missingManagedOperations = operationHealth.missing;
  if (missingManagedOperations.length > 0) {
    issues.push(buildIssue(
      'error',
      'missing-managed-files',
      `${missingManagedOperations.length} managed file(s) are missing`,
      {
        paths: missingManagedOperations.map(entry => entry.destinationPath),
      }
    ));
  }
  if (operationHealth.drifted.length > 0) {
    issues.push(buildIssue(
      'warning',
      'drifted-managed-files',
      `${operationHealth.drifted.length} managed file(s) differ from the source repo`,
      {
        paths: operationHealth.drifted.map(entry => entry.destinationPath),
      }
    ));
  }
  if (operationHealth.missingSource.length > 0) {
    issues.push(buildIssue(
      'error',
      'missing-source-files',
      `${operationHealth.missingSource.length} source file(s) referenced by install-state are missing`,
      {
        paths: operationHealth.missingSource.map(entry => entry.sourcePath).filter(Boolean),
      }
    ));
  }
  if (operationHealth.unverified.length > 0) {
    issues.push(buildIssue(
      'warning',
      'unverified-managed-operations',
      `${operationHealth.unverified.length} managed operation(s) could not be content-verified`,
      {
        paths: operationHealth.unverified.map(entry => entry.destinationPath).filter(Boolean),
      }
    ));
  }
  // Version drift: the install may predate the current manifests/package.
  if (state.source.manifestVersion !== context.manifestVersion) {
    issues.push(buildIssue(
      'warning',
      'manifest-version-mismatch',
      `Recorded manifest version ${state.source.manifestVersion} differs from current manifest version ${context.manifestVersion}`
    ));
  }
  if (
    context.packageVersion
    && state.source.repoVersion
    && state.source.repoVersion !== context.packageVersion
  ) {
    issues.push(buildIssue(
      'warning',
      'repo-version-mismatch',
      `Recorded repo version ${state.source.repoVersion} differs from current repo version ${context.packageVersion}`
    ));
  }
  // For manifest installs, re-run resolution with the recorded request and
  // compare against the recorded outcome to detect manifest drift.
  if (!state.request.legacyMode) {
    try {
      const desiredPlan = resolveInstallPlan({
        repoRoot: context.repoRoot,
        projectRoot: context.projectRoot,
        homeDir: context.homeDir,
        target: record.adapter.target,
        profileId: state.request.profile || null,
        moduleIds: state.request.modules || [],
        includeComponentIds: state.request.includeComponents || [],
        excludeComponentIds: state.request.excludeComponents || [],
      });
      if (
        !compareStringArrays(desiredPlan.selectedModuleIds, state.resolution.selectedModules)
        || !compareStringArrays(desiredPlan.skippedModuleIds, state.resolution.skippedModules)
      ) {
        issues.push(buildIssue(
          'warning',
          'resolution-drift',
          'Current manifest resolution differs from recorded install-state',
          {
            expectedSelectedModules: desiredPlan.selectedModuleIds,
            recordedSelectedModules: state.resolution.selectedModules,
            expectedSkippedModules: desiredPlan.skippedModuleIds,
            recordedSkippedModules: state.resolution.skippedModules,
          }
        ));
      }
    } catch (error) {
      issues.push(buildIssue(
        'error',
        'resolution-unavailable',
        error.message
      ));
    }
  }
  return {
    ...record,
    status: determineStatus(issues),
    issues,
  };
}
// Builds the full doctor report: analyzes every existing install-state for
// the requested targets and aggregates ok/error/warning counts.
// Read-only: safe to run repeatedly.
function buildDoctorReport(options = {}) {
  const repoRoot = options.repoRoot || DEFAULT_REPO_ROOT;
  const manifests = loadInstallManifests({ repoRoot });
  // Only installs whose state file exists are analyzed.
  const records = discoverInstalledStates({
    homeDir: options.homeDir,
    projectRoot: options.projectRoot,
    targets: options.targets,
  }).filter(record => record.exists);
  const context = {
    repoRoot,
    homeDir: options.homeDir || process.env.HOME,
    projectRoot: options.projectRoot || process.cwd(),
    manifestVersion: manifests.modulesVersion,
    packageVersion: readPackageVersion(repoRoot),
  };
  const results = records.map(record => analyzeRecord(record, context));
  // Aggregate per-record issue counts into a single summary.
  const summary = results.reduce((accumulator, result) => {
    const errorCount = result.issues.filter(issue => issue.severity === 'error').length;
    const warningCount = result.issues.filter(issue => issue.severity === 'warning').length;
    return {
      checkedCount: accumulator.checkedCount + 1,
      okCount: accumulator.okCount + (result.status === 'ok' ? 1 : 0),
      errorCount: accumulator.errorCount + errorCount,
      warningCount: accumulator.warningCount + warningCount,
    };
  }, {
    checkedCount: 0,
    okCount: 0,
    errorCount: 0,
    warningCount: 0,
  });
  return {
    generatedAt: new Date().toISOString(),
    packageVersion: context.packageVersion,
    manifestVersion: context.manifestVersion,
    results,
    summary,
  };
}
// Derives the plan used to repair an existing install.
// Legacy installs are rebuilt from their recorded operations (sources
// re-resolved against the current repo); manifest installs are re-planned
// from the recorded request so repairs track the current manifests.
// The returned statePreview carries refreshed provenance; manifest repairs
// keep the original installedAt timestamp.
// Throws when the record has no parsed state.
function createRepairPlanFromRecord(record, context) {
  const state = record.state;
  if (!state) {
    throw new Error('No install-state available for repair');
  }
  if (state.request.legacyMode) {
    // Legacy: reuse the recorded operations, re-pointing sources at the
    // current repo checkout.
    const operations = getManagedOperations(state).map(operation => ({
      ...operation,
      sourcePath: resolveOperationSourcePath(context.repoRoot, operation),
    }));
    const statePreview = {
      ...state,
      operations: operations.map(operation => ({ ...operation })),
      source: {
        ...state.source,
        repoVersion: context.packageVersion,
        manifestVersion: context.manifestVersion,
      },
      lastValidatedAt: new Date().toISOString(),
    };
    return {
      mode: 'legacy',
      target: record.adapter.target,
      adapter: record.adapter,
      targetRoot: state.target.root,
      installRoot: state.target.root,
      installStatePath: state.target.installStatePath,
      warnings: [],
      languages: Array.isArray(state.request.legacyLanguages)
        ? [...state.request.legacyLanguages]
        : [],
      operations,
      statePreview,
    };
  }
  // Manifest: replay the original request against the current manifests.
  const desiredPlan = createManifestInstallPlan({
    sourceRoot: context.repoRoot,
    target: record.adapter.target,
    profileId: state.request.profile || null,
    moduleIds: state.request.modules || [],
    includeComponentIds: state.request.includeComponents || [],
    excludeComponentIds: state.request.excludeComponents || [],
    projectRoot: context.projectRoot,
    homeDir: context.homeDir,
  });
  return {
    ...desiredPlan,
    statePreview: {
      ...desiredPlan.statePreview,
      installedAt: state.installedAt,
      lastValidatedAt: new Date().toISOString(),
    },
  };
}
// Repairs every discovered install: re-copies managed files that are missing
// or drifted and rewrites install-state with refreshed provenance. With
// options.dryRun, only reports what would be repaired. Returns per-target
// results plus summary counts.
function repairInstalledStates(options = {}) {
  const repoRoot = options.repoRoot || DEFAULT_REPO_ROOT;
  const manifests = loadInstallManifests({ repoRoot });
  const context = {
    repoRoot,
    homeDir: options.homeDir || process.env.HOME,
    projectRoot: options.projectRoot || process.cwd(),
    manifestVersion: manifests.modulesVersion,
    packageVersion: readPackageVersion(repoRoot),
  };
  const records = discoverInstalledStates({
    homeDir: context.homeDir,
    projectRoot: context.projectRoot,
    targets: options.targets,
  }).filter(record => record.exists);
  const results = records.map(record => {
    if (record.error) {
      // Unparseable state cannot be repaired automatically.
      return {
        adapter: record.adapter,
        status: 'error',
        installStatePath: record.installStatePath,
        repairedPaths: [],
        plannedRepairs: [],
        error: record.error,
      };
    }
    try {
      const desiredPlan = createRepairPlanFromRecord(record, context);
      const operationHealth = summarizeManagedOperationHealth(context.repoRoot, desiredPlan.operations);
      // A missing repo source makes the install unrepairable from here.
      if (operationHealth.missingSource.length > 0) {
        return {
          adapter: record.adapter,
          status: 'error',
          installStatePath: record.installStatePath,
          repairedPaths: [],
          plannedRepairs: [],
          error: `Missing source file(s): ${operationHealth.missingSource.map(entry => entry.sourcePath).join(', ')}`,
        };
      }
      // Only missing or drifted files need to be re-copied.
      const repairOperations = [
        ...operationHealth.missing.map(entry => ({ ...entry.operation })),
        ...operationHealth.drifted.map(entry => ({ ...entry.operation })),
      ];
      const plannedRepairs = repairOperations.map(operation => operation.destinationPath);
      if (options.dryRun) {
        return {
          adapter: record.adapter,
          status: plannedRepairs.length > 0 ? 'planned' : 'ok',
          installStatePath: record.installStatePath,
          repairedPaths: [],
          plannedRepairs,
          stateRefreshed: plannedRepairs.length === 0,
          error: null,
        };
      }
      if (repairOperations.length > 0) {
        applyInstallPlan({
          ...desiredPlan,
          operations: repairOperations,
          statePreview: desiredPlan.statePreview,
        });
      } else {
        // Nothing to copy: still refresh the state file's provenance.
        writeInstallState(desiredPlan.installStatePath, desiredPlan.statePreview);
      }
      return {
        adapter: record.adapter,
        status: repairOperations.length > 0 ? 'repaired' : 'ok',
        installStatePath: record.installStatePath,
        repairedPaths: plannedRepairs,
        plannedRepairs: [],
        stateRefreshed: true,
        error: null,
      };
    } catch (error) {
      return {
        adapter: record.adapter,
        status: 'error',
        installStatePath: record.installStatePath,
        repairedPaths: [],
        plannedRepairs: [],
        error: error.message,
      };
    }
  });
  const summary = results.reduce((accumulator, result) => ({
    checkedCount: accumulator.checkedCount + 1,
    repairedCount: accumulator.repairedCount + (result.status === 'repaired' ? 1 : 0),
    plannedRepairCount: accumulator.plannedRepairCount + (result.status === 'planned' ? 1 : 0),
    errorCount: accumulator.errorCount + (result.status === 'error' ? 1 : 0),
  }), {
    checkedCount: 0,
    repairedCount: 0,
    plannedRepairCount: 0,
    errorCount: 0,
  });
  return {
    dryRun: Boolean(options.dryRun),
    generatedAt: new Date().toISOString(),
    results,
    summary,
  };
}
// Removes now-empty directories from the parent of `filePath` upward,
// stopping at (and never removing) `stopAt`. Used after uninstalling managed
// files so the install root is left without empty husks.
//
// The walk stops as soon as a directory is non-empty, is not a directory, or
// would step outside `stopAt`. Containment is checked with path.relative so a
// sibling directory that merely shares a name prefix with stopAt (e.g.
// `/a/keepme` next to stopAt `/a/keep`) is never treated as inside it — the
// previous plain startsWith() comparison wrongly matched such siblings and
// could delete empty directories outside the intended root.
function cleanupEmptyParentDirs(filePath, stopAt) {
  const normalizedStopAt = path.resolve(stopAt);
  let currentPath = path.dirname(filePath);
  while (currentPath) {
    const resolvedCurrent = path.resolve(currentPath);
    if (resolvedCurrent === normalizedStopAt) {
      break;
    }
    // Containment check: resolvedCurrent must be strictly inside stopAt.
    const relative = path.relative(normalizedStopAt, resolvedCurrent);
    if (
      !relative
      || relative === '..'
      || relative.startsWith(`..${path.sep}`)
      || path.isAbsolute(relative)
    ) {
      break;
    }
    if (!fs.existsSync(resolvedCurrent)) {
      // Already gone (e.g. removed by an earlier cleanup pass): keep walking up.
      currentPath = path.dirname(currentPath);
      continue;
    }
    const stat = fs.lstatSync(resolvedCurrent);
    if (!stat.isDirectory() || fs.readdirSync(resolvedCurrent).length > 0) {
      break;
    }
    fs.rmdirSync(resolvedCurrent);
    currentPath = path.dirname(currentPath);
  }
}
// Removes every managed file recorded in each discovered install-state, then
// the state file itself, then prunes empty parent directories up to the
// target root. With options.dryRun, reports planned removals without
// deleting. Refuses to remove a managed path that turns out to be a
// directory (managed entries are expected to be files).
function uninstallInstalledStates(options = {}) {
  const records = discoverInstalledStates({
    homeDir: options.homeDir,
    projectRoot: options.projectRoot,
    targets: options.targets,
  }).filter(record => record.exists);
  const results = records.map(record => {
    if (record.error || !record.state) {
      return {
        adapter: record.adapter,
        status: 'error',
        installStatePath: record.installStatePath,
        removedPaths: [],
        plannedRemovals: [],
        error: record.error || 'No valid install-state available',
      };
    }
    const state = record.state;
    // De-duplicated removal list: every managed file plus the state file.
    const plannedRemovals = Array.from(new Set([
      ...getManagedOperations(state).map(operation => operation.destinationPath),
      state.target.installStatePath,
    ]));
    if (options.dryRun) {
      return {
        adapter: record.adapter,
        status: 'planned',
        installStatePath: record.installStatePath,
        removedPaths: [],
        plannedRemovals,
        error: null,
      };
    }
    try {
      const removedPaths = [];
      const cleanupTargets = [];
      // Longest paths first so deeper entries are handled before parents.
      const filePaths = Array.from(new Set(
        getManagedOperations(state).map(operation => operation.destinationPath)
      )).sort((left, right) => right.length - left.length);
      for (const filePath of filePaths) {
        if (!fs.existsSync(filePath)) {
          continue;
        }
        const stat = fs.lstatSync(filePath);
        if (stat.isDirectory()) {
          throw new Error(`Refusing to remove managed directory path without explicit support: ${filePath}`);
        }
        fs.rmSync(filePath, { force: true });
        removedPaths.push(filePath);
        cleanupTargets.push(filePath);
      }
      if (fs.existsSync(state.target.installStatePath)) {
        fs.rmSync(state.target.installStatePath, { force: true });
        removedPaths.push(state.target.installStatePath);
        cleanupTargets.push(state.target.installStatePath);
      }
      // Prune any directories left empty by the removals, up to target root.
      for (const cleanupTarget of cleanupTargets) {
        cleanupEmptyParentDirs(cleanupTarget, state.target.root);
      }
      return {
        adapter: record.adapter,
        status: 'uninstalled',
        installStatePath: record.installStatePath,
        removedPaths,
        plannedRemovals: [],
        error: null,
      };
    } catch (error) {
      return {
        adapter: record.adapter,
        status: 'error',
        installStatePath: record.installStatePath,
        removedPaths: [],
        plannedRemovals,
        error: error.message,
      };
    }
  });
  const summary = results.reduce((accumulator, result) => ({
    checkedCount: accumulator.checkedCount + 1,
    uninstalledCount: accumulator.uninstalledCount + (result.status === 'uninstalled' ? 1 : 0),
    plannedRemovalCount: accumulator.plannedRemovalCount + (result.status === 'planned' ? 1 : 0),
    errorCount: accumulator.errorCount + (result.status === 'error' ? 1 : 0),
  }), {
    checkedCount: 0,
    uninstalledCount: 0,
    plannedRemovalCount: 0,
    errorCount: 0,
  });
  return {
    dryRun: Boolean(options.dryRun),
    generatedAt: new Date().toISOString(),
    results,
    summary,
  };
}
// Public surface of the install doctor/repair/uninstall helper module.
module.exports = {
  DEFAULT_REPO_ROOT,
  buildDoctorReport,
  discoverInstalledStates,
  normalizeTargets,
  repairInstalledStates,
  uninstallInstalledStates,
};

View File

@@ -0,0 +1,305 @@
const fs = require('fs');
const os = require('os');
const path = require('path');
const { planInstallTargetScaffold } = require('./install-targets/registry');
// Repo root is two levels above this file (scripts/lib -> repo root).
const DEFAULT_REPO_ROOT = path.join(__dirname, '../..');
// Every adapter target the selective installer can scaffold for.
const SUPPORTED_INSTALL_TARGETS = ['claude', 'cursor', 'antigravity', 'codex', 'opencode'];
// Component ids are namespaced by family; these are the recognized families
// and their id prefixes.
const COMPONENT_FAMILY_PREFIXES = {
  baseline: 'baseline:',
  language: 'lang:',
  framework: 'framework:',
  capability: 'capability:',
};
// Read and parse a JSON file, wrapping any failure in a labeled error.
function readJson(filePath, label) {
  let raw;
  try {
    raw = fs.readFileSync(filePath, 'utf8');
    return JSON.parse(raw);
  } catch (error) {
    throw new Error(`Failed to read ${label}: ${error.message}`);
  }
}
// Normalize a value list to unique, trimmed, non-empty strings, preserving
// first-seen order. Non-array input yields [].
function dedupeStrings(values) {
  const source = Array.isArray(values) ? values : [];
  const seen = new Set();
  for (const value of source) {
    const text = String(value).trim();
    if (text) {
      seen.add(text);
    }
  }
  return [...seen];
}
// Targets supported by EVERY given module (a subset of
// SUPPORTED_INSTALL_TARGETS). Empty or non-array input yields [].
function intersectTargets(modules) {
  const moduleList = Array.isArray(modules) ? modules : [];
  if (moduleList.length === 0) {
    return [];
  }
  const supportsTarget = (module, target) =>
    Array.isArray(module.targets) && module.targets.includes(target);
  return SUPPORTED_INSTALL_TARGETS.filter(
    target => moduleList.every(module => supportsTarget(module, target))
  );
}
// Absolute paths of the three install manifest files under a repo root.
function getManifestPaths(repoRoot = DEFAULT_REPO_ROOT) {
  const manifestsDir = path.join(repoRoot, 'manifests');
  return {
    modulesPath: path.join(manifestsDir, 'install-modules.json'),
    profilesPath: path.join(manifestsDir, 'install-profiles.json'),
    componentsPath: path.join(manifestsDir, 'install-components.json'),
  };
}
// Load the modules/profiles/components manifests from disk and index them by
// id. Modules and profiles are required; the components manifest is optional
// and defaults to an empty list.
function loadInstallManifests(options = {}) {
  const repoRoot = options.repoRoot || DEFAULT_REPO_ROOT;
  const { modulesPath, profilesPath, componentsPath } = getManifestPaths(repoRoot);
  const haveRequiredManifests = fs.existsSync(modulesPath) && fs.existsSync(profilesPath);
  if (!haveRequiredManifests) {
    throw new Error(`Install manifests not found under ${repoRoot}`);
  }
  const modulesData = readJson(modulesPath, 'install-modules.json');
  const profilesData = readJson(profilesPath, 'install-profiles.json');
  let componentsData = { version: null, components: [] };
  if (fs.existsSync(componentsPath)) {
    componentsData = readJson(componentsPath, 'install-components.json');
  }
  const modules = Array.isArray(modulesData.modules) ? modulesData.modules : [];
  const profilesAreObject = profilesData && typeof profilesData.profiles === 'object';
  const profiles = profilesAreObject ? profilesData.profiles : {};
  const components = Array.isArray(componentsData.components) ? componentsData.components : [];
  const modulesById = new Map();
  for (const module of modules) {
    modulesById.set(module.id, module);
  }
  const componentsById = new Map();
  for (const component of components) {
    componentsById.set(component.id, component);
  }
  return {
    repoRoot,
    modulesPath,
    profilesPath,
    componentsPath,
    modules,
    profiles,
    components,
    modulesById,
    componentsById,
    modulesVersion: modulesData.version,
    profilesVersion: profilesData.version,
    componentsVersion: componentsData.version,
  };
}
// Summarize each install profile as { id, description, moduleCount }.
function listInstallProfiles(options = {}) {
  const manifests = loadInstallManifests(options);
  const summaries = [];
  for (const [id, profile] of Object.entries(manifests.profiles)) {
    const moduleCount = Array.isArray(profile.modules) ? profile.modules.length : 0;
    summaries.push({ id, description: profile.description, moduleCount });
  }
  return summaries;
}
// Project each manifest module down to its user-facing summary fields.
function listInstallModules(options = {}) {
  const manifests = loadInstallManifests(options);
  return manifests.modules.map(module => {
    const dependencyCount = Array.isArray(module.dependencies) ? module.dependencies.length : 0;
    return {
      id: module.id,
      kind: module.kind,
      description: module.description,
      targets: module.targets,
      defaultInstall: module.defaultInstall,
      cost: module.cost,
      stability: module.stability,
      dependencyCount,
    };
  });
}
// List components, optionally filtered by family and/or supported target.
// A component's target set is the intersection of its modules' targets;
// unknown module ids in a component are silently dropped from that
// intersection. Throws on an unknown family or target filter.
function listInstallComponents(options = {}) {
  const manifests = loadInstallManifests(options);
  const family = options.family || null;
  const target = options.target || null;
  if (family && !Object.hasOwn(COMPONENT_FAMILY_PREFIXES, family)) {
    throw new Error(
      `Unknown component family: ${family}. Expected one of ${Object.keys(COMPONENT_FAMILY_PREFIXES).join(', ')}`
    );
  }
  if (target && !SUPPORTED_INSTALL_TARGETS.includes(target)) {
    throw new Error(
      `Unknown install target: ${target}. Expected one of ${SUPPORTED_INSTALL_TARGETS.join(', ')}`
    );
  }
  const summaries = [];
  for (const component of manifests.components) {
    if (family && component.family !== family) {
      continue;
    }
    const moduleIds = dedupeStrings(component.modules);
    const modules = moduleIds
      .map(moduleId => manifests.modulesById.get(moduleId))
      .filter(Boolean);
    const targets = intersectTargets(modules);
    if (target && !targets.includes(target)) {
      continue;
    }
    summaries.push({
      id: component.id,
      family: component.family,
      description: component.description,
      moduleIds,
      moduleCount: moduleIds.length,
      targets,
    });
  }
  return summaries;
}
// Flatten component ids into their member module ids (deduped, order
// preserving). Throws on any unknown component id.
function expandComponentIdsToModuleIds(componentIds, manifests) {
  const moduleIds = [];
  for (const componentId of dedupeStrings(componentIds)) {
    const component = manifests.componentsById.get(componentId);
    if (!component) {
      throw new Error(`Unknown install component: ${componentId}`);
    }
    for (const moduleId of component.modules) {
      moduleIds.push(moduleId);
    }
  }
  return dedupeStrings(moduleIds);
}
function resolveInstallPlan(options = {}) {
const manifests = loadInstallManifests(options);
const profileId = options.profileId || null;
const explicitModuleIds = dedupeStrings(options.moduleIds);
const includedComponentIds = dedupeStrings(options.includeComponentIds);
const excludedComponentIds = dedupeStrings(options.excludeComponentIds);
const requestedModuleIds = [];
if (profileId) {
const profile = manifests.profiles[profileId];
if (!profile) {
throw new Error(`Unknown install profile: ${profileId}`);
}
requestedModuleIds.push(...profile.modules);
}
requestedModuleIds.push(...explicitModuleIds);
requestedModuleIds.push(...expandComponentIdsToModuleIds(includedComponentIds, manifests));
const excludedModuleIds = expandComponentIdsToModuleIds(excludedComponentIds, manifests);
const excludedModuleOwners = new Map();
for (const componentId of excludedComponentIds) {
const component = manifests.componentsById.get(componentId);
if (!component) {
throw new Error(`Unknown install component: ${componentId}`);
}
for (const moduleId of component.modules) {
const owners = excludedModuleOwners.get(moduleId) || [];
owners.push(componentId);
excludedModuleOwners.set(moduleId, owners);
}
}
const target = options.target || null;
if (target && !SUPPORTED_INSTALL_TARGETS.includes(target)) {
throw new Error(
`Unknown install target: ${target}. Expected one of ${SUPPORTED_INSTALL_TARGETS.join(', ')}`
);
}
const effectiveRequestedIds = dedupeStrings(
requestedModuleIds.filter(moduleId => !excludedModuleOwners.has(moduleId))
);
if (requestedModuleIds.length === 0) {
throw new Error('No install profile, module IDs, or included component IDs were provided');
}
if (effectiveRequestedIds.length === 0) {
throw new Error('Selection excludes every requested install module');
}
const selectedIds = new Set();
const skippedTargetIds = new Set();
const excludedIds = new Set(excludedModuleIds);
const visitingIds = new Set();
const resolvedIds = new Set();
function resolveModule(moduleId, dependencyOf) {
const module = manifests.modulesById.get(moduleId);
if (!module) {
throw new Error(`Unknown install module: ${moduleId}`);
}
if (excludedModuleOwners.has(moduleId)) {
if (dependencyOf) {
const owners = excludedModuleOwners.get(moduleId) || [];
throw new Error(
`Module ${dependencyOf} depends on excluded module ${moduleId}${owners.length > 0 ? ` (excluded by ${owners.join(', ')})` : ''}`
);
}
return;
}
if (target && !module.targets.includes(target)) {
if (dependencyOf) {
throw new Error(
`Module ${dependencyOf} depends on ${moduleId}, which does not support target ${target}`
);
}
skippedTargetIds.add(moduleId);
return;
}
if (resolvedIds.has(moduleId)) {
return;
}
if (visitingIds.has(moduleId)) {
throw new Error(`Circular install dependency detected at ${moduleId}`);
}
visitingIds.add(moduleId);
for (const dependencyId of module.dependencies) {
resolveModule(dependencyId, moduleId);
}
visitingIds.delete(moduleId);
resolvedIds.add(moduleId);
selectedIds.add(moduleId);
}
for (const moduleId of effectiveRequestedIds) {
resolveModule(moduleId, null);
}
const selectedModules = manifests.modules.filter(module => selectedIds.has(module.id));
const skippedModules = manifests.modules.filter(module => skippedTargetIds.has(module.id));
const excludedModules = manifests.modules.filter(module => excludedIds.has(module.id));
const scaffoldPlan = target
? planInstallTargetScaffold({
target,
repoRoot: manifests.repoRoot,
projectRoot: options.projectRoot || manifests.repoRoot,
homeDir: options.homeDir || os.homedir(),
modules: selectedModules,
})
: null;
return {
repoRoot: manifests.repoRoot,
profileId,
target,
requestedModuleIds: effectiveRequestedIds,
explicitModuleIds,
includedComponentIds,
excludedComponentIds,
selectedModuleIds: selectedModules.map(module => module.id),
skippedModuleIds: skippedModules.map(module => module.id),
excludedModuleIds: excludedModules.map(module => module.id),
selectedModules,
skippedModules,
excludedModules,
targetAdapterId: scaffoldPlan ? scaffoldPlan.adapter.id : null,
targetRoot: scaffoldPlan ? scaffoldPlan.targetRoot : null,
installStatePath: scaffoldPlan ? scaffoldPlan.installStatePath : null,
operations: scaffoldPlan ? scaffoldPlan.operations : [],
};
}
module.exports = {
DEFAULT_REPO_ROOT,
SUPPORTED_INSTALL_TARGETS,
getManifestPaths,
loadInstallManifests,
listInstallComponents,
listInstallModules,
listInstallProfiles,
resolveInstallPlan,
};

View File

@@ -0,0 +1,120 @@
const fs = require('fs');
const path = require('path');
const Ajv = require('ajv');
// JSON Schema describing the ecc.install.v1 install-state document.
const SCHEMA_PATH = path.join(__dirname, '..', '..', 'schemas', 'install-state.schema.json');
// Compiled Ajv validator, built lazily once per process (see getValidator).
let cachedValidator = null;
// Parse the JSON document at filePath; failures are rethrown with the label.
function readJson(filePath, label) {
  try {
    const text = fs.readFileSync(filePath, 'utf8');
    return JSON.parse(text);
  } catch (error) {
    throw new Error(`Failed to read ${label}: ${error.message}`);
  }
}
// Lazily compile (and memoize) the install-state schema validator.
function getValidator() {
  if (!cachedValidator) {
    const schema = readJson(SCHEMA_PATH, 'install-state schema');
    cachedValidator = new Ajv({ allErrors: true }).compile(schema);
  }
  return cachedValidator;
}
// Render Ajv errors as "<instancePath> <message>" entries joined by "; ".
// An empty instancePath (the document root) is shown as "/".
function formatValidationErrors(errors = []) {
  const parts = [];
  for (const error of errors) {
    parts.push(`${error.instancePath || '/'} ${error.message}`);
  }
  return parts.join('; ');
}
// Validate a state object against the schema; returns { valid, errors }.
function validateInstallState(state) {
  const validator = getValidator();
  const valid = validator(state);
  const errors = validator.errors || [];
  return { valid, errors };
}
// Throw a descriptive error when the state fails schema validation.
function assertValidInstallState(state, label) {
  const { valid, errors } = validateInstallState(state);
  if (valid) {
    return;
  }
  const suffix = label ? ` (${label})` : '';
  throw new Error(`Invalid install-state${suffix}: ${formatValidationErrors(errors)}`);
}
/**
 * Build a fresh ecc.install.v1 install-state document.
 *
 * options: adapter ({ id, target?, kind? }), targetRoot, installStatePath,
 * request, resolution, source, operations, plus optional installedAt /
 * lastValidatedAt ISO-8601 timestamps.
 * All arrays are shallow-copied so later caller mutation cannot corrupt the
 * stored state. The result is schema-validated; throws when invalid.
 */
function createInstallState(options) {
  const installedAt = options.installedAt || new Date().toISOString();
  const state = {
    schemaVersion: 'ecc.install.v1',
    installedAt,
    target: {
      id: options.adapter.id,
      // `undefined` (not null) so absent fields are dropped by JSON.stringify.
      target: options.adapter.target || undefined,
      kind: options.adapter.kind || undefined,
      root: options.targetRoot,
      installStatePath: options.installStatePath,
    },
    request: {
      profile: options.request.profile || null,
      modules: Array.isArray(options.request.modules) ? [...options.request.modules] : [],
      includeComponents: Array.isArray(options.request.includeComponents)
        ? [...options.request.includeComponents]
        : [],
      excludeComponents: Array.isArray(options.request.excludeComponents)
        ? [...options.request.excludeComponents]
        : [],
      legacyLanguages: Array.isArray(options.request.legacyLanguages)
        ? [...options.request.legacyLanguages]
        : [],
      legacyMode: Boolean(options.request.legacyMode),
    },
    resolution: {
      selectedModules: Array.isArray(options.resolution.selectedModules)
        ? [...options.resolution.selectedModules]
        : [],
      skippedModules: Array.isArray(options.resolution.skippedModules)
        ? [...options.resolution.skippedModules]
        : [],
    },
    source: {
      repoVersion: options.source.repoVersion || null,
      repoCommit: options.source.repoCommit || null,
      manifestVersion: options.source.manifestVersion,
    },
    // Shallow-copy each operation record so the state owns its entries.
    operations: Array.isArray(options.operations)
      ? options.operations.map(operation => ({ ...operation }))
      : [],
  };
  // Optional field: only present when the caller has validated this state.
  if (options.lastValidatedAt) {
    state.lastValidatedAt = options.lastValidatedAt;
  }
  assertValidInstallState(state, 'create');
  return state;
}
function readInstallState(filePath) {
const state = readJson(filePath, 'install-state');
assertValidInstallState(state, filePath);
return state;
}
function writeInstallState(filePath, state) {
assertValidInstallState(state, filePath);
fs.mkdirSync(path.dirname(filePath), { recursive: true });
fs.writeFileSync(filePath, `${JSON.stringify(state, null, 2)}\n`);
return state;
}
module.exports = {
createInstallState,
readInstallState,
validateInstallState,
writeInstallState,
};

View File

@@ -0,0 +1,9 @@
const { createInstallTargetAdapter } = require('./helpers');
// Antigravity install target: project-scoped scaffold under <project>/.agent,
// with install-state at .agent/ecc-install-state.json. No native root mapping:
// every source path keeps its relative layout under the root.
module.exports = createInstallTargetAdapter({
  id: 'antigravity-project',
  target: 'antigravity',
  kind: 'project',
  rootSegments: ['.agent'],
  installStatePathSegments: ['ecc-install-state.json'],
});

View File

@@ -0,0 +1,10 @@
const { createInstallTargetAdapter } = require('./helpers');
// Claude install target: home-scoped scaffold under ~/.claude, with the ECC
// install-state at ~/.claude/ecc/install-state.json. Source paths equal to
// '.claude-plugin' map onto the target root itself (see helpers).
module.exports = createInstallTargetAdapter({
  id: 'claude-home',
  target: 'claude',
  kind: 'home',
  rootSegments: ['.claude'],
  installStatePathSegments: ['ecc', 'install-state.json'],
  nativeRootRelativePath: '.claude-plugin',
});

View File

@@ -0,0 +1,10 @@
const { createInstallTargetAdapter } = require('./helpers');
// Codex install target: home-scoped scaffold under ~/.codex. Source paths
// equal to '.codex' map onto the target root itself (see helpers).
module.exports = createInstallTargetAdapter({
  id: 'codex-home',
  target: 'codex',
  kind: 'home',
  rootSegments: ['.codex'],
  installStatePathSegments: ['ecc-install-state.json'],
  nativeRootRelativePath: '.codex',
});

View File

@@ -0,0 +1,10 @@
const { createInstallTargetAdapter } = require('./helpers');
// Cursor install target: project-scoped scaffold under <project>/.cursor.
// Source paths equal to '.cursor' map onto the target root itself.
module.exports = createInstallTargetAdapter({
  id: 'cursor-project',
  target: 'cursor',
  kind: 'project',
  rootSegments: ['.cursor'],
  installStatePathSegments: ['ecc-install-state.json'],
  nativeRootRelativePath: '.cursor',
});

View File

@@ -0,0 +1,89 @@
const os = require('os');
const path = require('path');
// Canonicalize a source-relative path: forward slashes only, no leading
// "./", no trailing slashes. Nullish input becomes ''.
function normalizeRelativePath(relativePath) {
  let normalized = String(relativePath || '');
  normalized = normalized.replace(/\\/g, '/');
  normalized = normalized.replace(/^\.\/+/, '');
  return normalized.replace(/\/+$/, '');
}
// Resolve the base directory an adapter scaffolds under: the user's home for
// 'home' scope, the project (or repo) root for 'project' scope.
function resolveBaseRoot(scope, input = {}) {
  switch (scope) {
    case 'home':
      return input.homeDir || os.homedir();
    case 'project': {
      const projectRoot = input.projectRoot || input.repoRoot;
      if (!projectRoot) {
        throw new Error('projectRoot or repoRoot is required for project install targets');
      }
      return projectRoot;
    }
    default:
      throw new Error(`Unsupported install target scope: ${scope}`);
  }
}
/**
 * Factory for install target adapters.
 *
 * config: { id, target, kind ('home'|'project'), rootSegments,
 * installStatePathSegments, nativeRootRelativePath? }.
 * The returned adapter is frozen; its methods close over `config` and over
 * the adapter itself (e.g. resolveDestinationPath reuses resolveRoot).
 */
function createInstallTargetAdapter(config) {
  const adapter = {
    id: config.id,
    target: config.target,
    kind: config.kind,
    nativeRootRelativePath: config.nativeRootRelativePath || null,
    // An adapter answers to both its generic target name and its unique id.
    supports(target) {
      return target === config.target || target === config.id;
    },
    // Absolute root directory this adapter scaffolds into.
    resolveRoot(input = {}) {
      const baseRoot = resolveBaseRoot(config.kind, input);
      return path.join(baseRoot, ...config.rootSegments);
    },
    // Absolute path of the install-state file, relative to the root.
    getInstallStatePath(input = {}) {
      const root = adapter.resolveRoot(input);
      return path.join(root, ...config.installStatePathSegments);
    },
    // A source path equal to nativeRootRelativePath maps onto the root
    // itself; every other source path is preserved relative to the root.
    resolveDestinationPath(sourceRelativePath, input = {}) {
      const normalizedSourcePath = normalizeRelativePath(sourceRelativePath);
      const targetRoot = adapter.resolveRoot(input);
      if (
        config.nativeRootRelativePath
        && normalizedSourcePath === normalizeRelativePath(config.nativeRootRelativePath)
      ) {
        return targetRoot;
      }
      return path.join(targetRoot, normalizedSourcePath);
    },
    // Copy strategy mirrors resolveDestinationPath's special case: native
    // root content syncs its children into the root rather than nesting.
    determineStrategy(sourceRelativePath) {
      const normalizedSourcePath = normalizeRelativePath(sourceRelativePath);
      if (
        config.nativeRootRelativePath
        && normalizedSourcePath === normalizeRelativePath(config.nativeRootRelativePath)
      ) {
        return 'sync-root-children';
      }
      return 'preserve-relative-path';
    },
    // Build one managed scaffold operation record for a module source path.
    createScaffoldOperation(moduleId, sourceRelativePath, input = {}) {
      const normalizedSourcePath = normalizeRelativePath(sourceRelativePath);
      return {
        kind: 'copy-path',
        moduleId,
        sourceRelativePath: normalizedSourcePath,
        destinationPath: adapter.resolveDestinationPath(normalizedSourcePath, input),
        strategy: adapter.determineStrategy(normalizedSourcePath),
        ownership: 'managed',
        scaffoldOnly: true,
      };
    },
  };
  // Frozen so registry consumers cannot mutate shared adapter instances.
  return Object.freeze(adapter);
}
module.exports = {
  createInstallTargetAdapter,
  normalizeRelativePath,
};

View File

@@ -0,0 +1,10 @@
const { createInstallTargetAdapter } = require('./helpers');
// OpenCode install target: home-scoped scaffold under ~/.opencode. Source
// paths equal to '.opencode' map onto the target root itself (see helpers).
module.exports = createInstallTargetAdapter({
  id: 'opencode-home',
  target: 'opencode',
  kind: 'home',
  rootSegments: ['.opencode'],
  installStatePathSegments: ['ecc-install-state.json'],
  nativeRootRelativePath: '.opencode',
});

View File

@@ -0,0 +1,64 @@
const antigravityProject = require('./antigravity-project');
const claudeHome = require('./claude-home');
const codexHome = require('./codex-home');
const cursorProject = require('./cursor-project');
const opencodeHome = require('./opencode-home');
// Registry of all install target adapters. Order matters only for lookup
// precedence in getInstallTargetAdapter (first match wins).
const ADAPTERS = Object.freeze([
  claudeHome,
  cursorProject,
  antigravityProject,
  codexHome,
  opencodeHome,
]);
// Shallow copy so callers cannot reorder the frozen registry.
function listInstallTargetAdapters() {
  return [...ADAPTERS];
}
// Look up an adapter by generic target name ('claude') or unique adapter id
// ('claude-home'); first registry match wins. Throws when nothing matches.
function getInstallTargetAdapter(targetOrAdapterId) {
  for (const candidate of ADAPTERS) {
    if (candidate.supports(targetOrAdapterId)) {
      return candidate;
    }
  }
  throw new Error(`Unknown install target adapter: ${targetOrAdapterId}`);
}
function planInstallTargetScaffold(options = {}) {
const adapter = getInstallTargetAdapter(options.target);
const modules = Array.isArray(options.modules) ? options.modules : [];
const planningInput = {
repoRoot: options.repoRoot,
projectRoot: options.projectRoot || options.repoRoot,
homeDir: options.homeDir,
};
const targetRoot = adapter.resolveRoot(planningInput);
const installStatePath = adapter.getInstallStatePath(planningInput);
const operations = modules.flatMap(module => {
const paths = Array.isArray(module.paths) ? module.paths : [];
return paths.map(sourceRelativePath => adapter.createScaffoldOperation(
module.id,
sourceRelativePath,
planningInput
));
});
return {
adapter: {
id: adapter.id,
target: adapter.target,
kind: adapter.kind,
},
targetRoot,
installStatePath,
operations,
};
}
module.exports = {
getInstallTargetAdapter,
listInstallTargetAdapters,
planInstallTargetScaffold,
};

View File

@@ -0,0 +1,23 @@
'use strict';
const fs = require('fs');
const { writeInstallState } = require('../install-state');
function applyInstallPlan(plan) {
for (const operation of plan.operations) {
fs.mkdirSync(require('path').dirname(operation.destinationPath), { recursive: true });
fs.copyFileSync(operation.sourcePath, operation.destinationPath);
}
writeInstallState(plan.installStatePath, plan.statePreview);
return {
...plan,
applied: true,
};
}
module.exports = {
applyInstallPlan,
};

View File

@@ -0,0 +1,82 @@
'use strict';
const fs = require('fs');
const path = require('path');
const Ajv = require('ajv');
// Conventional filename for a project-local install config.
const DEFAULT_INSTALL_CONFIG = 'ecc-install.json';
// JSON Schema that install config files must satisfy.
const CONFIG_SCHEMA_PATH = path.join(__dirname, '..', '..', '..', 'schemas', 'ecc-install-config.schema.json');
// Compiled Ajv validator, built lazily once per process (see getValidator).
let cachedValidator = null;
// Parse a JSON file; failures surface as "Invalid JSON in <label>: ...".
function readJson(filePath, label) {
  let parsed;
  try {
    parsed = JSON.parse(fs.readFileSync(filePath, 'utf8'));
  } catch (error) {
    throw new Error(`Invalid JSON in ${label}: ${error.message}`);
  }
  return parsed;
}
// Lazily compile (and memoize) the install-config schema validator.
function getValidator() {
  if (!cachedValidator) {
    const schema = readJson(CONFIG_SCHEMA_PATH, 'ecc-install-config.schema.json');
    cachedValidator = new Ajv({ allErrors: true }).compile(schema);
  }
  return cachedValidator;
}
// Unique, trimmed, non-empty strings from an arbitrary value list.
function dedupeStrings(values) {
  const list = Array.isArray(values) ? values : [];
  const cleaned = list.map(value => String(value).trim()).filter(text => text.length > 0);
  return [...new Set(cleaned)];
}
// Join Ajv errors into a single "<path> <message>; ..." string.
function formatValidationErrors(errors = []) {
  return errors
    .map(({ instancePath, message }) => `${instancePath || '/'} ${message}`)
    .join('; ');
}
// Resolve a config path against options.cwd (default process.cwd());
// absolute paths pass through untouched. Throws on a missing/empty path.
function resolveInstallConfigPath(configPath, options = {}) {
  if (!configPath) {
    throw new Error('An install config path is required');
  }
  if (path.isAbsolute(configPath)) {
    return configPath;
  }
  return path.resolve(options.cwd || process.cwd(), configPath);
}
function loadInstallConfig(configPath, options = {}) {
const resolvedPath = resolveInstallConfigPath(configPath, options);
if (!fs.existsSync(resolvedPath)) {
throw new Error(`Install config not found: ${resolvedPath}`);
}
const raw = readJson(resolvedPath, path.basename(resolvedPath));
const validator = getValidator();
if (!validator(raw)) {
throw new Error(
`Invalid install config ${resolvedPath}: ${formatValidationErrors(validator.errors)}`
);
}
return {
path: resolvedPath,
version: raw.version,
target: raw.target || null,
profileId: raw.profile || null,
moduleIds: dedupeStrings(raw.modules),
includeComponentIds: dedupeStrings(raw.include),
excludeComponentIds: dedupeStrings(raw.exclude),
options: raw.options && typeof raw.options === 'object' ? { ...raw.options } : {},
};
}
module.exports = {
DEFAULT_INSTALL_CONFIG,
loadInstallConfig,
resolveInstallConfigPath,
};

View File

@@ -0,0 +1,113 @@
'use strict';
// Targets the pre-manifest ("legacy language") install flow understands.
const LEGACY_INSTALL_TARGETS = ['claude', 'cursor', 'antigravity'];
// De-duplicate after trimming; drops empties; keeps first-seen order.
function dedupeStrings(values) {
  const result = [];
  const seen = new Set();
  for (const value of Array.isArray(values) ? values : []) {
    const text = String(value).trim();
    if (text && !seen.has(text)) {
      seen.add(text);
      result.push(text);
    }
  }
  return result;
}
// Parse CLI argv (node script [flags] [legacy languages...]) into a request
// descriptor. Value flags consume the next token (a missing value becomes
// null / empty); unknown --flags throw; bare tokens accumulate as legacy
// language names.
function parseInstallArgs(argv) {
  const args = argv.slice(2);
  const parsed = {
    target: null,
    dryRun: false,
    json: false,
    help: false,
    configPath: null,
    profileId: null,
    moduleIds: [],
    includeComponentIds: [],
    excludeComponentIds: [],
    languages: [],
  };
  let index = 0;
  // Consume and return the token after the current flag.
  const nextValue = () => {
    index += 1;
    return args[index];
  };
  while (index < args.length) {
    const arg = args[index];
    switch (arg) {
      case '--target':
        parsed.target = nextValue() || null;
        break;
      case '--config':
        parsed.configPath = nextValue() || null;
        break;
      case '--profile':
        parsed.profileId = nextValue() || null;
        break;
      case '--modules': {
        const raw = nextValue() || '';
        parsed.moduleIds = raw.split(',').map(value => value.trim()).filter(Boolean);
        break;
      }
      case '--with': {
        const componentId = (nextValue() || '').trim();
        if (componentId) {
          parsed.includeComponentIds.push(componentId);
        }
        break;
      }
      case '--without': {
        const componentId = (nextValue() || '').trim();
        if (componentId) {
          parsed.excludeComponentIds.push(componentId);
        }
        break;
      }
      case '--dry-run':
        parsed.dryRun = true;
        break;
      case '--json':
        parsed.json = true;
        break;
      case '--help':
      case '-h':
        parsed.help = true;
        break;
      default:
        if (arg.startsWith('--')) {
          throw new Error(`Unknown argument: ${arg}`);
        }
        parsed.languages.push(arg);
    }
    index += 1;
  }
  return parsed;
}
function normalizeInstallRequest(options = {}) {
const config = options.config && typeof options.config === 'object'
? options.config
: null;
const profileId = options.profileId || config?.profileId || null;
const moduleIds = dedupeStrings([...(config?.moduleIds || []), ...(options.moduleIds || [])]);
const includeComponentIds = dedupeStrings([
...(config?.includeComponentIds || []),
...(options.includeComponentIds || []),
]);
const excludeComponentIds = dedupeStrings([
...(config?.excludeComponentIds || []),
...(options.excludeComponentIds || []),
]);
const languages = Array.isArray(options.languages) ? [...options.languages] : [];
const target = options.target || config?.target || 'claude';
const hasManifestBaseSelection = Boolean(profileId) || moduleIds.length > 0 || includeComponentIds.length > 0;
const usingManifestMode = hasManifestBaseSelection || excludeComponentIds.length > 0;
if (usingManifestMode && languages.length > 0) {
throw new Error(
'Legacy language arguments cannot be combined with --profile, --modules, --with, --without, or manifest config selections'
);
}
if (!options.help && !hasManifestBaseSelection && languages.length === 0) {
throw new Error('No install profile, module IDs, included components, or legacy languages were provided');
}
return {
mode: usingManifestMode ? 'manifest' : 'legacy',
target,
profileId,
moduleIds,
includeComponentIds,
excludeComponentIds,
languages,
configPath: config?.path || options.configPath || null,
};
}
module.exports = {
LEGACY_INSTALL_TARGETS,
normalizeInstallRequest,
parseInstallArgs,
};

View File

@@ -0,0 +1,42 @@
'use strict';
const {
createLegacyInstallPlan,
createManifestInstallPlan,
} = require('../install-executor');
function createInstallPlanFromRequest(request, options = {}) {
if (!request || typeof request !== 'object') {
throw new Error('A normalized install request is required');
}
if (request.mode === 'manifest') {
return createManifestInstallPlan({
target: request.target,
profileId: request.profileId,
moduleIds: request.moduleIds,
includeComponentIds: request.includeComponentIds,
excludeComponentIds: request.excludeComponentIds,
projectRoot: options.projectRoot,
homeDir: options.homeDir,
sourceRoot: options.sourceRoot,
});
}
if (request.mode === 'legacy') {
return createLegacyInstallPlan({
target: request.target,
languages: request.languages,
projectRoot: options.projectRoot,
homeDir: options.homeDir,
claudeRulesDir: options.claudeRulesDir,
sourceRoot: options.sourceRoot,
});
}
throw new Error(`Unsupported install request mode: ${request.mode}`);
}
module.exports = {
createInstallPlanFromRequest,
};

View File

@@ -154,7 +154,8 @@ function loadWorkerSnapshots(coordinationDir) {
});
}
function listTmuxPanes(sessionName) {
function listTmuxPanes(sessionName, options = {}) {
const { spawnSyncImpl = spawnSync } = options;
const format = [
'#{pane_id}',
'#{window_index}',
@@ -167,12 +168,15 @@ function listTmuxPanes(sessionName) {
'#{pane_pid}'
].join('\t');
const result = spawnSync('tmux', ['list-panes', '-t', sessionName, '-F', format], {
const result = spawnSyncImpl('tmux', ['list-panes', '-t', sessionName, '-F', format], {
encoding: 'utf8',
stdio: ['ignore', 'pipe', 'pipe']
});
if (result.error) {
if (result.error.code === 'ENOENT') {
return [];
}
throw result.error;
}

View File

@@ -0,0 +1,138 @@
'use strict';
const path = require('path');
const SESSION_SCHEMA_VERSION = 'ecc.session.v1';
// Roll worker records up into { workerCount, states: { <state>: count } };
// workers without a state are counted under 'unknown'.
function buildAggregates(workers) {
  const states = {};
  for (const worker of workers) {
    const state = worker.state || 'unknown';
    states[state] = (states[state] || 0) + 1;
  }
  return {
    workerCount: workers.length,
    states
  };
}
// Session state: 'active' while the tmux session is live, 'idle' when only
// worker records remain, 'missing' otherwise.
function deriveDmuxSessionState(snapshot) {
  if (snapshot.sessionActive) {
    return 'active';
  }
  return snapshot.workerCount > 0 ? 'idle' : 'missing';
}
/**
 * Normalize a dmux/tmux orchestration snapshot into the canonical
 * ecc.session.v1 session shape.
 *
 * Assumes each worker carries status/task/handoff/files objects — they are
 * dereferenced unguarded here (TODO confirm the snapshot loader always
 * provides them); only `pane` is treated as optional.
 */
function normalizeDmuxSnapshot(snapshot, sourceTarget) {
  const workers = (snapshot.workers || []).map(worker => ({
    id: worker.workerSlug,
    label: worker.workerSlug,
    state: worker.status.state || 'unknown',
    branch: worker.status.branch || null,
    worktree: worker.status.worktree || null,
    // Runtime facts come from the live tmux pane when one is attached.
    runtime: {
      kind: 'tmux-pane',
      command: worker.pane ? worker.pane.currentCommand || null : null,
      pid: worker.pane ? worker.pane.pid || null : null,
      active: worker.pane ? Boolean(worker.pane.active) : false,
      dead: worker.pane ? Boolean(worker.pane.dead) : false,
    },
    intent: {
      objective: worker.task.objective || '',
      seedPaths: Array.isArray(worker.task.seedPaths) ? worker.task.seedPaths : []
    },
    outputs: {
      summary: Array.isArray(worker.handoff.summary) ? worker.handoff.summary : [],
      validation: Array.isArray(worker.handoff.validation) ? worker.handoff.validation : [],
      remainingRisks: Array.isArray(worker.handoff.remainingRisks) ? worker.handoff.remainingRisks : []
    },
    artifacts: {
      statusFile: worker.files.status,
      taskFile: worker.files.task,
      handoffFile: worker.files.handoff
    }
  }));
  return {
    schemaVersion: SESSION_SCHEMA_VERSION,
    adapterId: 'dmux-tmux',
    session: {
      id: snapshot.sessionName,
      kind: 'orchestrated',
      state: deriveDmuxSessionState(snapshot),
      repoRoot: snapshot.repoRoot || null,
      sourceTarget
    },
    workers,
    aggregates: buildAggregates(workers)
  };
}
// Prefer the session's short id; otherwise derive one from the session file
// name (without its .tmp extension), falling back to the literal 'session'.
function deriveClaudeWorkerId(session) {
  const shortId = session.shortId;
  if (shortId && shortId !== 'no-id') {
    return shortId;
  }
  const fileish = session.filename || session.sessionPath || 'session';
  return path.basename(fileish, '.tmp');
}
/**
 * Normalize one recorded Claude history session into the canonical
 * ecc.session.v1 shape: a single-worker, already-finished session.
 * `session.metadata` may be absent; every metadata field is treated as
 * optional here.
 */
function normalizeClaudeHistorySession(session, sourceTarget) {
  const metadata = session.metadata || {};
  const workerId = deriveClaudeWorkerId(session);
  const worker = {
    id: workerId,
    label: metadata.title || session.filename || workerId,
    state: 'recorded',
    branch: metadata.branch || null,
    worktree: metadata.worktree || null,
    // History sessions have no live process: always inactive/dead.
    runtime: {
      kind: 'claude-session',
      command: 'claude',
      pid: null,
      active: false,
      dead: true,
    },
    intent: {
      // Best-effort objective: first in-progress item, else the title.
      objective: metadata.inProgress && metadata.inProgress.length > 0
        ? metadata.inProgress[0]
        : (metadata.title || ''),
      seedPaths: []
    },
    outputs: {
      summary: Array.isArray(metadata.completed) ? metadata.completed : [],
      validation: [],
      remainingRisks: metadata.notes ? [metadata.notes] : []
    },
    artifacts: {
      sessionFile: session.sessionPath,
      context: metadata.context || null
    }
  };
  return {
    schemaVersion: SESSION_SCHEMA_VERSION,
    adapterId: 'claude-history',
    session: {
      id: workerId,
      kind: 'history',
      state: 'recorded',
      repoRoot: metadata.worktree || null,
      sourceTarget
    },
    workers: [worker],
    aggregates: buildAggregates([worker])
  };
}
module.exports = {
  SESSION_SCHEMA_VERSION,
  buildAggregates,
  normalizeClaudeHistorySession,
  normalizeDmuxSnapshot
};

View File

@@ -0,0 +1,147 @@
'use strict';
const fs = require('fs');
const path = require('path');
const sessionManager = require('../session-manager');
const sessionAliases = require('../session-aliases');
const { normalizeClaudeHistorySession } = require('./canonical-session');
// Strip a recognized history prefix from a target string and trim the rest;
// returns null when the target is not a string or has no known prefix.
function parseClaudeTarget(target) {
  if (typeof target !== 'string') {
    return null;
  }
  const prefixes = ['claude-history:', 'claude:', 'history:'];
  const match = prefixes.find(prefix => target.startsWith(prefix));
  return match ? target.slice(match.length).trim() : null;
}
// True when target resolves (against cwd) to an existing .tmp session file.
function isSessionFileTarget(target, cwd) {
  if (typeof target !== 'string' || !target) {
    return false;
  }
  const absoluteTarget = path.resolve(cwd, target);
  if (!absoluteTarget.endsWith('.tmp')) {
    return false;
  }
  return fs.existsSync(absoluteTarget) && fs.statSync(absoluteTarget).isFile();
}
// Rebuild a full session record from a raw session file on disk: parse
// identity fields out of the filename, load the content, then derive
// metadata/stats plus filesystem timestamps. Throws when the filename does
// not match the session naming scheme understood by sessionManager.
function hydrateSessionFromPath(sessionPath) {
  const filename = path.basename(sessionPath);
  const parsed = sessionManager.parseSessionFilename(filename);
  if (!parsed) {
    throw new Error(`Unsupported session file: ${sessionPath}`);
  }
  const content = sessionManager.getSessionContent(sessionPath);
  const stats = fs.statSync(sessionPath);
  return {
    ...parsed,
    sessionPath,
    content,
    metadata: sessionManager.parseSessionMetadata(content),
    // Guard against nullish content before computing stats.
    stats: sessionManager.getSessionStats(content || ''),
    size: stats.size,
    modifiedTime: stats.mtime,
    // birthtime is unsupported/zero on some filesystems; fall back to ctime.
    createdTime: stats.birthtime || stats.ctime
  };
}
/**
 * Resolve a CLI target string into { session, sourceTarget }.
 *
 * Resolution order for prefixed targets ('claude:…' / 'claude-history:…' /
 * 'history:…'): the special value 'latest', then alias lookup, then a direct
 * session-id lookup. A bare path to an existing .tmp file is hydrated
 * directly. Anything else throws.
 */
function resolveSessionRecord(target, cwd) {
  const explicitTarget = parseClaudeTarget(target);
  if (explicitTarget) {
    if (explicitTarget === 'latest') {
      // Most recent session, as ordered by the session manager.
      const [latest] = sessionManager.getAllSessions({ limit: 1 }).sessions;
      if (!latest) {
        throw new Error('No Claude session history found');
      }
      return {
        session: sessionManager.getSessionById(latest.filename, true),
        sourceTarget: {
          type: 'claude-history',
          value: 'latest'
        }
      };
    }
    // User-defined aliases take precedence over raw session ids.
    const alias = sessionAliases.resolveAlias(explicitTarget);
    if (alias) {
      return {
        session: hydrateSessionFromPath(alias.sessionPath),
        sourceTarget: {
          type: 'claude-alias',
          value: explicitTarget
        }
      };
    }
    const session = sessionManager.getSessionById(explicitTarget, true);
    if (!session) {
      throw new Error(`Claude session not found: ${explicitTarget}`);
    }
    return {
      session,
      sourceTarget: {
        type: 'claude-history',
        value: explicitTarget
      }
    };
  }
  if (isSessionFileTarget(target, cwd)) {
    return {
      session: hydrateSessionFromPath(path.resolve(cwd, target)),
      sourceTarget: {
        type: 'session-file',
        value: path.resolve(cwd, target)
      }
    };
  }
  throw new Error(`Unsupported Claude session target: ${target}`);
}
function createClaudeHistoryAdapter() {
  // Session adapter backed by Claude session history files and aliases.
  return {
    id: 'claude-history',
    // Accept when explicitly routed here via context.adapterId, or (with no
    // explicit routing) when the target parses as a claude:* reference or
    // points at a *.tmp session file.
    canOpen(target, context = {}) {
      const requested = context.adapterId;
      if (requested) {
        return requested === 'claude-history';
      }
      const cwd = context.cwd || process.cwd();
      return parseClaudeTarget(target) !== null || isSessionFileTarget(target, cwd);
    },
    open(target, context = {}) {
      const cwd = context.cwd || process.cwd();
      return {
        adapterId: 'claude-history',
        // Snapshot is computed lazily so open() itself stays cheap.
        getSnapshot() {
          const resolved = resolveSessionRecord(target, cwd);
          return normalizeClaudeHistorySession(resolved.session, resolved.sourceTarget);
        }
      };
    }
  };
}
module.exports = {
createClaudeHistoryAdapter,
isSessionFileTarget,
parseClaudeTarget
};

View File

@@ -0,0 +1,78 @@
'use strict';
const fs = require('fs');
const path = require('path');
const { collectSessionSnapshot } = require('../orchestration-session');
const { normalizeDmuxSnapshot } = require('./canonical-session');
function isPlanFileTarget(target, cwd) {
if (typeof target !== 'string' || target.length === 0) {
return false;
}
const absoluteTarget = path.resolve(cwd, target);
return fs.existsSync(absoluteTarget)
&& fs.statSync(absoluteTarget).isFile()
&& path.extname(absoluteTarget) === '.json';
}
function isSessionNameTarget(target, cwd) {
if (typeof target !== 'string' || target.length === 0) {
return false;
}
const coordinationDir = path.resolve(cwd, '.claude', 'orchestration', target);
return fs.existsSync(coordinationDir) && fs.statSync(coordinationDir).isDirectory();
}
function buildSourceTarget(target, cwd) {
  // Describe where a snapshot came from: an absolute plan-file path when
  // the target is a plan file, otherwise the raw session name.
  return isPlanFileTarget(target, cwd)
    ? { type: 'plan', value: path.resolve(cwd, target) }
    : { type: 'session', value: target };
}
function createDmuxTmuxAdapter(options = {}) {
  // Session adapter over dmux/tmux orchestration sessions. The snapshot
  // collector can be injected via options for testing.
  const collectSnapshot = options.collectSessionSnapshotImpl || collectSessionSnapshot;
  return {
    id: 'dmux-tmux',
    // Accept when explicitly routed here via context.adapterId, or (with no
    // explicit routing) when the target is a plan file or a session name.
    canOpen(target, context = {}) {
      const requested = context.adapterId;
      if (requested) {
        return requested === 'dmux-tmux';
      }
      const cwd = context.cwd || process.cwd();
      return isPlanFileTarget(target, cwd) || isSessionNameTarget(target, cwd);
    },
    open(target, context = {}) {
      const cwd = context.cwd || process.cwd();
      return {
        adapterId: 'dmux-tmux',
        // Snapshot is collected lazily on demand, not at open() time.
        getSnapshot() {
          const snapshot = collectSnapshot(target, cwd);
          return normalizeDmuxSnapshot(snapshot, buildSourceTarget(target, cwd));
        }
      };
    }
  };
}
module.exports = {
createDmuxTmuxAdapter,
isPlanFileTarget,
isSessionNameTarget
};

View File

@@ -0,0 +1,42 @@
'use strict';
const { createClaudeHistoryAdapter } = require('./claude-history');
const { createDmuxTmuxAdapter } = require('./dmux-tmux');
function createDefaultAdapters() {
  // Built-in adapter set. Order matters: the registry picks the first
  // adapter whose canOpen() accepts a target, so claude-history wins ties.
  const claudeHistory = createClaudeHistoryAdapter();
  const dmuxTmux = createDmuxTmuxAdapter();
  return [claudeHistory, dmuxTmux];
}
function createAdapterRegistry(options = {}) {
  // Registry over session adapters. select() returns the first adapter
  // whose canOpen() accepts the target (earlier adapters win); open()
  // delegates to that adapter.
  const adapters = options.adapters || createDefaultAdapters();
  // Closure functions instead of `this.select`: the original broke when
  // `open` was destructured off the registry (`this` became undefined).
  function select(target, context = {}) {
    const adapter = adapters.find(candidate => candidate.canOpen(target, context));
    if (!adapter) {
      throw new Error(`No session adapter matched target: ${target}`);
    }
    return adapter;
  }
  function open(target, context = {}) {
    return select(target, context).open(target, context);
  }
  return { adapters, select, open };
}
function inspectSessionTarget(target, options = {}) {
  // One-shot convenience: build a registry from options, open the target,
  // and return its snapshot.
  const registry = createAdapterRegistry(options);
  const handle = registry.open(target, options);
  return handle.getSnapshot();
}
module.exports = {
createAdapterRegistry,
createDefaultAdapters,
inspectSessionTarget
};

90
scripts/list-installed.js Normal file
View File

@@ -0,0 +1,90 @@
#!/usr/bin/env node
const { discoverInstalledStates } = require('./lib/install-lifecycle');
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
function showHelp(exitCode = 0) {
  // Print usage text, then terminate with the requested exit code.
  const targetChoices = SUPPORTED_INSTALL_TARGETS.join('|');
  const text = `
Usage: node scripts/list-installed.js [--target <${targetChoices}>] [--json]
Inspect ECC install-state files for the current home/project context.
`;
  console.log(text);
  process.exit(exitCode);
}
function parseArgs(argv) {
  // Parse CLI flags for list-installed.js. argv includes the node binary
  // and script path, hence slice(2). Throws on unknown arguments.
  const args = argv.slice(2);
  const parsed = {
    targets: [],
    json: false,
    help: false,
  };
  for (let index = 0; index < args.length; index += 1) {
    const arg = args[index];
    if (arg === '--target') {
      const value = args[index + 1];
      // A bare `--target` used to push null into targets (and could
      // swallow the next flag as its value); fail fast instead.
      if (!value || value.startsWith('--')) {
        throw new Error('--target requires a value');
      }
      parsed.targets.push(value);
      index += 1;
    } else if (arg === '--json') {
      parsed.json = true;
    } else if (arg === '--help' || arg === '-h') {
      parsed.help = true;
    } else {
      throw new Error(`Unknown argument: ${arg}`);
    }
  }
  return parsed;
}
function printHuman(records) {
  // Render install-state records for human consumption on stdout.
  if (records.length === 0) {
    console.log('No ECC install-state files found for the current home/project context.');
    return;
  }
  console.log('Installed ECC targets:\n');
  for (const record of records) {
    // Records that failed to parse carry an error instead of a state.
    if (record.error) {
      console.log(`- ${record.adapter.id}: INVALID (${record.error})`);
      continue;
    }
    const state = record.state;
    console.log(`- ${record.adapter.id}`);
    console.log(`  Root: ${state.target.root}`);
    console.log(`  Installed: ${state.installedAt}`);
    // Older install-states may predate profiles; show a placeholder.
    console.log(`  Profile: ${state.request.profile || '(legacy/custom)'}`);
    console.log(`  Modules: ${(state.resolution.selectedModules || []).join(', ') || '(none)'}`);
    console.log(`  Legacy languages: ${(state.request.legacyLanguages || []).join(', ') || '(none)'}`);
    console.log(`  Source version: ${state.source.repoVersion || '(unknown)'}`);
  }
}
// CLI entry point: discover install-state records for the current
// home/project context and print them as JSON or human-readable text.
// Exits 1 with an error message on any failure.
function main() {
  try {
    const options = parseArgs(process.argv);
    if (options.help) {
      showHelp(0);
    }
    // Only show records whose install-state file actually exists.
    const records = discoverInstalledStates({
      homeDir: process.env.HOME,
      projectRoot: process.cwd(),
      targets: options.targets,
    }).filter(record => record.exists);
    if (options.json) {
      console.log(JSON.stringify({ records }, null, 2));
      return;
    }
    printHuman(records);
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

View File

@@ -4,7 +4,7 @@
const fs = require('fs');
const path = require('path');
const { collectSessionSnapshot } = require('./lib/orchestration-session');
const { inspectSessionTarget } = require('./lib/session-adapters/registry');
function usage() {
console.log([
@@ -35,7 +35,10 @@ function main() {
process.exit(1);
}
const snapshot = collectSessionSnapshot(target, process.cwd());
const snapshot = inspectSessionTarget(target, {
cwd: process.cwd(),
adapterId: 'dmux-tmux'
});
const json = JSON.stringify(snapshot, null, 2);
if (writePath) {

97
scripts/repair.js Normal file
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env node
const { repairInstalledStates } = require('./lib/install-lifecycle');
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
function showHelp(exitCode = 0) {
  // Print usage text, then terminate with the requested exit code.
  const targetChoices = SUPPORTED_INSTALL_TARGETS.join('|');
  const text = `
Usage: node scripts/repair.js [--target <${targetChoices}>] [--dry-run] [--json]
Rebuild ECC-managed files recorded in install-state for the current context.
`;
  console.log(text);
  process.exit(exitCode);
}
function parseArgs(argv) {
  // Parse CLI flags for repair.js. argv includes the node binary and
  // script path, hence slice(2). Throws on unknown arguments.
  const args = argv.slice(2);
  const parsed = {
    targets: [],
    dryRun: false,
    json: false,
    help: false,
  };
  for (let index = 0; index < args.length; index += 1) {
    const arg = args[index];
    if (arg === '--target') {
      const value = args[index + 1];
      // A bare `--target` used to push null into targets (and could
      // swallow the next flag as its value); fail fast instead.
      if (!value || value.startsWith('--')) {
        throw new Error('--target requires a value');
      }
      parsed.targets.push(value);
      index += 1;
    } else if (arg === '--dry-run') {
      parsed.dryRun = true;
    } else if (arg === '--json') {
      parsed.json = true;
    } else if (arg === '--help' || arg === '-h') {
      parsed.help = true;
    } else {
      throw new Error(`Unknown argument: ${arg}`);
    }
  }
  return parsed;
}
function printHuman(result) {
  // Render the repair result (per-adapter entries + totals) on stdout.
  if (result.results.length === 0) {
    console.log('No ECC install-state files found for the current home/project context.');
    return;
  }
  console.log('Repair summary:\n');
  for (const entry of result.results) {
    console.log(`- ${entry.adapter.id}`);
    console.log(`  Status: ${entry.status.toUpperCase()}`);
    console.log(`  Install-state: ${entry.installStatePath}`);
    // Entries that errored have no repair path lists.
    if (entry.error) {
      console.log(`  Error: ${entry.error}`);
      continue;
    }
    // Dry runs report planned repairs; real runs report what was repaired.
    const paths = result.dryRun ? entry.plannedRepairs : entry.repairedPaths;
    console.log(`  ${result.dryRun ? 'Planned repairs' : 'Repaired paths'}: ${paths.length}`);
  }
  console.log(`\nSummary: checked=${result.summary.checkedCount}, ${result.dryRun ? 'planned' : 'repaired'}=${result.dryRun ? result.summary.plannedRepairCount : result.summary.repairedCount}, errors=${result.summary.errorCount}`);
}
// CLI entry point: repair ECC-managed files recorded in install-state.
// Prints JSON or human-readable output; exit code 1 when any adapter
// reported an error (or on a fatal failure).
function main() {
  try {
    const options = parseArgs(process.argv);
    if (options.help) {
      showHelp(0);
    }
    const result = repairInstalledStates({
      // Repo root is one level above scripts/ — source of truth for repairs.
      repoRoot: require('path').join(__dirname, '..'),
      homeDir: process.env.HOME,
      projectRoot: process.cwd(),
      targets: options.targets,
      dryRun: options.dryRun,
    });
    const hasErrors = result.summary.errorCount > 0;
    if (options.json) {
      console.log(JSON.stringify(result, null, 2));
    } else {
      printHuman(result);
    }
    // process.exitCode (not process.exit) lets pending output flush.
    process.exitCode = hasErrors ? 1 : 0;
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env node
'use strict';
const fs = require('fs');
const path = require('path');
const { inspectSessionTarget } = require('./lib/session-adapters/registry');
function usage() {
  // Print CLI usage/help text for session-inspect.js.
  console.log([
    'Usage:',
    '  node scripts/session-inspect.js <target> [--adapter <id>] [--write <output.json>]',
    '',
    'Targets:',
    '  <plan.json> Dmux/orchestration plan file',
    '  <session-name> Dmux session name when the coordination directory exists',
    '  claude:latest Most recent Claude session history entry',
    '  claude:<id|alias> Specific Claude session or alias',
    '  <session.tmp> Direct path to a Claude session file',
    '',
    'Examples:',
    '  node scripts/session-inspect.js .claude/plan/workflow.json',
    '  node scripts/session-inspect.js workflow-visual-proof',
    '  node scripts/session-inspect.js claude:latest',
    '  node scripts/session-inspect.js claude:a1b2c3d4 --write /tmp/session.json'
  ].join('\n'));
}
function parseArgs(argv) {
  // Parse CLI args for session-inspect.js: the target plus optional
  // --adapter <id> and --write <output.json> flags.
  const args = argv.slice(2);
  const adapterIndex = args.indexOf('--adapter');
  const adapterId = adapterIndex >= 0 ? args[adapterIndex + 1] : null;
  const writeIndex = args.indexOf('--write');
  const writePath = writeIndex >= 0 ? args[writeIndex + 1] : null;
  // Bug fix: the original `find(a => !a.startsWith('--'))` picked a flag's
  // VALUE as the target when the flag preceded the target (e.g.
  // `--adapter dmux-tmux plan.json` made 'dmux-tmux' the target). Skip
  // indices that are flag values.
  const valueIndices = new Set([adapterIndex + 1, writeIndex + 1].filter(i => i > 0));
  const target = args.find((argument, i) => !argument.startsWith('--') && !valueIndices.has(i));
  return { target, adapterId, writePath };
}
// Inspect a session target, print its snapshot as JSON, and optionally
// write the same JSON to --write <path> (creating parent directories).
function main() {
  const { target, adapterId, writePath } = parseArgs(process.argv);
  if (!target) {
    usage();
    process.exit(1);
  }
  const snapshot = inspectSessionTarget(target, {
    cwd: process.cwd(),
    adapterId
  });
  const payload = JSON.stringify(snapshot, null, 2);
  if (writePath) {
    const absoluteWritePath = path.resolve(writePath);
    fs.mkdirSync(path.dirname(absoluteWritePath), { recursive: true });
    // Trailing newline so the file is a well-formed text file.
    fs.writeFileSync(absoluteWritePath, payload + '\n', 'utf8');
  }
  console.log(payload);
}
// Run main() only when invoked directly (not require()d as a module),
// reporting failures with a script-tagged message and non-zero exit.
if (require.main === module) {
  try {
    main();
  } catch (error) {
    console.error(`[session-inspect] ${error.message}`);
    process.exit(1);
  }
}
module.exports = {
main,
parseArgs
};

96
scripts/uninstall.js Normal file
View File

@@ -0,0 +1,96 @@
#!/usr/bin/env node
const { uninstallInstalledStates } = require('./lib/install-lifecycle');
const { SUPPORTED_INSTALL_TARGETS } = require('./lib/install-manifests');
function showHelp(exitCode = 0) {
  // Print usage text, then terminate with the requested exit code.
  const targetChoices = SUPPORTED_INSTALL_TARGETS.join('|');
  const text = `
Usage: node scripts/uninstall.js [--target <${targetChoices}>] [--dry-run] [--json]
Remove ECC-managed files recorded in install-state for the current context.
`;
  console.log(text);
  process.exit(exitCode);
}
function parseArgs(argv) {
  // Parse CLI flags for uninstall.js. argv includes the node binary and
  // script path, hence slice(2). Throws on unknown arguments.
  const args = argv.slice(2);
  const parsed = {
    targets: [],
    dryRun: false,
    json: false,
    help: false,
  };
  for (let index = 0; index < args.length; index += 1) {
    const arg = args[index];
    if (arg === '--target') {
      const value = args[index + 1];
      // A bare `--target` used to push null into targets (and could
      // swallow the next flag as its value); fail fast instead.
      if (!value || value.startsWith('--')) {
        throw new Error('--target requires a value');
      }
      parsed.targets.push(value);
      index += 1;
    } else if (arg === '--dry-run') {
      parsed.dryRun = true;
    } else if (arg === '--json') {
      parsed.json = true;
    } else if (arg === '--help' || arg === '-h') {
      parsed.help = true;
    } else {
      throw new Error(`Unknown argument: ${arg}`);
    }
  }
  return parsed;
}
function printHuman(result) {
  // Render the uninstall result (per-adapter entries + totals) on stdout.
  if (result.results.length === 0) {
    console.log('No ECC install-state files found for the current home/project context.');
    return;
  }
  console.log('Uninstall summary:\n');
  for (const entry of result.results) {
    console.log(`- ${entry.adapter.id}`);
    console.log(`  Status: ${entry.status.toUpperCase()}`);
    console.log(`  Install-state: ${entry.installStatePath}`);
    // Entries that errored have no removal path lists.
    if (entry.error) {
      console.log(`  Error: ${entry.error}`);
      continue;
    }
    // Dry runs report planned removals; real runs report what was removed.
    const paths = result.dryRun ? entry.plannedRemovals : entry.removedPaths;
    console.log(`  ${result.dryRun ? 'Planned removals' : 'Removed paths'}: ${paths.length}`);
  }
  console.log(`\nSummary: checked=${result.summary.checkedCount}, ${result.dryRun ? 'planned' : 'uninstalled'}=${result.dryRun ? result.summary.plannedRemovalCount : result.summary.uninstalledCount}, errors=${result.summary.errorCount}`);
}
// CLI entry point: remove ECC-managed files recorded in install-state.
// Prints JSON or human-readable output; exit code 1 when any adapter
// reported an error (or on a fatal failure).
function main() {
  try {
    const options = parseArgs(process.argv);
    if (options.help) {
      showHelp(0);
    }
    const result = uninstallInstalledStates({
      homeDir: process.env.HOME,
      projectRoot: process.cwd(),
      targets: options.targets,
      dryRun: options.dryRun,
    });
    const hasErrors = result.summary.errorCount > 0;
    if (options.json) {
      console.log(JSON.stringify(result, null, 2));
    } else {
      printHuman(result);
    }
    // process.exitCode (not process.exit) lets pending output flush.
    process.exitCode = hasErrors ? 1 : 0;
  } catch (error) {
    console.error(`Error: ${error.message}`);
    process.exit(1);
  }
}
main();

View File

@@ -82,7 +82,7 @@ If the user chooses niche or core + niche, continue to category selection below
### 2b: Choose Skill Categories
There are 35 skills organized into 7 categories. Use `AskUserQuestion` with `multiSelect: true`:
There are 7 selectable category groups below; the detailed confirmation lists that follow expand them into 41 skills across 8 categories, plus 1 standalone template. Use `AskUserQuestion` with `multiSelect: true`:
```
Question: "Which skill categories do you want to install?"

View File

@@ -91,7 +91,8 @@ PROMPT
max_turns=10
fi
claude --model haiku --max-turns "$max_turns" --print < "$prompt_file" >> "$LOG_FILE" 2>&1 &
# Prevent observe.sh from recording this automated Haiku session in its observation logs
ECC_SKIP_OBSERVE=1 ECC_HOOK_PROFILE=minimal claude --model haiku --max-turns "$max_turns" --print < "$prompt_file" >> "$LOG_FILE" 2>&1 &
claude_pid=$!
(

View File

@@ -73,6 +73,50 @@ if [ -n "$STDIN_CWD" ] && [ -d "$STDIN_CWD" ]; then
export CLAUDE_PROJECT_DIR="$STDIN_CWD"
fi
# ─────────────────────────────────────────────
# Lightweight config and automated session guards
# ─────────────────────────────────────────────
CONFIG_DIR="${HOME}/.claude/homunculus"
# Skip if disabled
if [ -f "$CONFIG_DIR/disabled" ]; then
exit 0
fi
# Prevent observe.sh from firing on non-human sessions to avoid:
# - ECC observing its own Haiku observer sessions (self-loop)
# - ECC observing other tools' automated sessions
# - automated sessions creating project-scoped homunculus metadata
# Layer 1: entrypoint. Only interactive terminal sessions should continue.
case "${CLAUDE_CODE_ENTRYPOINT:-cli}" in
cli) ;;
*) exit 0 ;;
esac
# Layer 2: minimal hook profile suppresses non-essential hooks.
[ "${ECC_HOOK_PROFILE:-standard}" = "minimal" ] && exit 0
# Layer 3: cooperative skip env var for automated sessions.
[ "${ECC_SKIP_OBSERVE:-0}" = "1" ] && exit 0
# Layer 4: subagent sessions are automated by definition.
_ECC_AGENT_ID=$(echo "$INPUT_JSON" | "$PYTHON_CMD" -c "import json,sys; print(json.load(sys.stdin).get('agent_id',''))" 2>/dev/null || true)
[ -n "$_ECC_AGENT_ID" ] && exit 0
# Layer 5: known observer-session path exclusions.
_ECC_SKIP_PATHS="${ECC_OBSERVE_SKIP_PATHS:-observer-sessions,.claude-mem}"
if [ -n "$STDIN_CWD" ]; then
IFS=',' read -ra _ECC_SKIP_ARRAY <<< "$_ECC_SKIP_PATHS"
for _pattern in "${_ECC_SKIP_ARRAY[@]}"; do
_pattern="${_pattern#"${_pattern%%[![:space:]]*}"}"
_pattern="${_pattern%"${_pattern##*[![:space:]]}"}"
[ -z "$_pattern" ] && continue
case "$STDIN_CWD" in *"$_pattern"*) exit 0 ;; esac
done
fi
# ─────────────────────────────────────────────
# Project detection
# ─────────────────────────────────────────────
@@ -89,15 +133,9 @@ PYTHON_CMD="${CLV2_PYTHON_CMD:-$PYTHON_CMD}"
# Configuration
# ─────────────────────────────────────────────
CONFIG_DIR="${HOME}/.claude/homunculus"
OBSERVATIONS_FILE="${PROJECT_DIR}/observations.jsonl"
MAX_FILE_SIZE_MB=10
# Skip if disabled
if [ -f "$CONFIG_DIR/disabled" ]; then
exit 0
fi
# Auto-purge observation files older than 30 days (runs once per session)
PURGE_MARKER="${PROJECT_DIR}/.last-purge"
if [ ! -f "$PURGE_MARKER" ] || [ "$(find "$PURGE_MARKER" -mtime +1 2>/dev/null)" ]; then

View File

@@ -161,8 +161,10 @@ resp = requests.post(
"linkedin": {"text": linkedin_version},
"threads": {"text": threads_version}
}
}
},
timeout=30,
)
resp.raise_for_status()
```
### Manual Posting

View File

@@ -24,17 +24,14 @@ Exa MCP server must be configured. Add to `~/.claude.json`:
```json
"exa-web-search": {
"command": "npx",
"args": [
"-y",
"exa-mcp-server",
"tools=web_search_exa,get_code_context_exa,crawling_exa,company_research_exa,linkedin_search_exa,deep_researcher_start,deep_researcher_check"
],
"args": ["-y", "exa-mcp-server"],
"env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" }
}
```
Get an API key at [exa.ai](https://exa.ai).
If you omit the `tools=...` argument, only a smaller default tool set may be enabled.
This repo's current Exa setup documents the tool surface exposed here: `web_search_exa` and `get_code_context_exa`.
If your Exa server exposes additional tools, verify their exact names before depending on them in docs or prompts.
## Core Tools
@@ -51,29 +48,9 @@ web_search_exa(query: "latest AI developments 2026", numResults: 5)
|-------|------|---------|-------|
| `query` | string | required | Search query |
| `numResults` | number | 8 | Number of results |
### web_search_advanced_exa
Filtered search with domain and date constraints.
```
web_search_advanced_exa(
query: "React Server Components best practices",
numResults: 5,
includeDomains: ["github.com", "react.dev"],
startPublishedDate: "2025-01-01"
)
```
**Parameters:**
| Param | Type | Default | Notes |
|-------|------|---------|-------|
| `query` | string | required | Search query |
| `numResults` | number | 8 | Number of results |
| `includeDomains` | string[] | none | Limit to specific domains |
| `excludeDomains` | string[] | none | Exclude specific domains |
| `startPublishedDate` | string | none | ISO date filter (start) |
| `endPublishedDate` | string | none | ISO date filter (end) |
| `type` | string | `auto` | Search mode |
| `livecrawl` | string | `fallback` | Prefer live crawling when needed |
| `category` | string | none | Optional focus such as `company` or `research paper` |
### get_code_context_exa
Find code examples and documentation from GitHub, Stack Overflow, and docs sites.
@@ -89,52 +66,6 @@ get_code_context_exa(query: "Python asyncio patterns", tokensNum: 3000)
| `query` | string | required | Code or API search query |
| `tokensNum` | number | 5000 | Content tokens (1000-50000) |
### company_research_exa
Research companies for business intelligence and news.
```
company_research_exa(companyName: "Anthropic", numResults: 5)
```
**Parameters:**
| Param | Type | Default | Notes |
|-------|------|---------|-------|
| `companyName` | string | required | Company name |
| `numResults` | number | 5 | Number of results |
### linkedin_search_exa
Find professional profiles and company-adjacent people research.
```
linkedin_search_exa(query: "AI safety researchers at Anthropic", numResults: 5)
```
### crawling_exa
Extract full page content from a URL.
```
crawling_exa(url: "https://example.com/article", tokensNum: 5000)
```
**Parameters:**
| Param | Type | Default | Notes |
|-------|------|---------|-------|
| `url` | string | required | URL to extract |
| `tokensNum` | number | 5000 | Content tokens |
### deep_researcher_start / deep_researcher_check
Start an AI research agent that runs asynchronously.
```
# Start research
deep_researcher_start(query: "comprehensive analysis of AI code editors in 2026")
# Check status (returns results when complete)
deep_researcher_check(researchId: "<id from start>")
```
## Usage Patterns
### Quick Lookup
@@ -147,27 +78,24 @@ web_search_exa(query: "Node.js 22 new features", numResults: 3)
get_code_context_exa(query: "Rust error handling patterns Result type", tokensNum: 3000)
```
### Company Due Diligence
### Company or People Research
```
company_research_exa(companyName: "Vercel", numResults: 5)
web_search_advanced_exa(query: "Vercel funding valuation 2026", numResults: 3)
web_search_exa(query: "Vercel funding valuation 2026", numResults: 3, category: "company")
web_search_exa(query: "site:linkedin.com/in AI safety researchers Anthropic", numResults: 5)
```
### Technical Deep Dive
```
# Start async research
deep_researcher_start(query: "WebAssembly component model status and adoption")
# ... do other work ...
deep_researcher_check(researchId: "<id>")
web_search_exa(query: "WebAssembly component model status and adoption", numResults: 5)
get_code_context_exa(query: "WebAssembly component model examples", tokensNum: 4000)
```
## Tips
- Use `web_search_exa` for broad queries, `web_search_advanced_exa` for filtered results
- Use `web_search_exa` for current information, company lookups, and broad discovery
- Use search operators like `site:`, quoted phrases, and `intitle:` to narrow results
- Lower `tokensNum` (1000-2000) for focused code snippets, higher (5000+) for comprehensive context
- Combine `company_research_exa` with `web_search_advanced_exa` for thorough company analysis
- Use `crawling_exa` to get full content from specific URLs found in search results
- `deep_researcher_start` is best for comprehensive topics that benefit from AI synthesis
- Use `get_code_context_exa` when you need API usage or code examples rather than general web pages
## Related Skills

View File

@@ -253,7 +253,7 @@ estimate_cost(
estimate_type: "unit_price",
endpoints: {
"fal-ai/nano-banana-pro": {
"num_images": 1
"unit_quantity": 1
}
}
)

View File

@@ -14,6 +14,10 @@ const os = require('os');
const { execFileSync } = require('child_process');
const validatorsDir = path.join(__dirname, '..', '..', 'scripts', 'ci');
const repoRoot = path.join(__dirname, '..', '..');
const modulesSchemaPath = path.join(repoRoot, 'schemas', 'install-modules.schema.json');
const profilesSchemaPath = path.join(repoRoot, 'schemas', 'install-profiles.schema.json');
const componentsSchemaPath = path.join(repoRoot, 'schemas', 'install-components.schema.json');
// Test helpers
function test(name, fn) {
@@ -36,6 +40,18 @@ function cleanupTestDir(testDir) {
fs.rmSync(testDir, { recursive: true, force: true });
}
function writeJson(filePath, value) {
fs.mkdirSync(path.dirname(filePath), { recursive: true });
fs.writeFileSync(filePath, JSON.stringify(value, null, 2));
}
function writeInstallComponentsManifest(testDir, components) {
  // Write a minimal manifests/install-components.json fixture under testDir.
  const manifestPath = path.join(testDir, 'manifests', 'install-components.json');
  writeJson(manifestPath, { version: 1, components });
}
/**
* Run a validator script via a wrapper that overrides its directory constant.
* This allows testing error cases without modifying real project files.
@@ -2164,6 +2180,369 @@ function runTests() {
cleanupTestDir(testDir);
})) passed++; else failed++;
// ==========================================
// validate-install-manifests.js
// ==========================================
console.log('\nvalidate-install-manifests.js:');
if (test('passes on real project install manifests', () => {
const result = runValidator('validate-install-manifests');
assert.strictEqual(result.code, 0, `Should pass, got stderr: ${result.stderr}`);
assert.ok(result.stdout.includes('Validated'), 'Should output validation count');
})) passed++; else failed++;
if (test('exits 0 when install manifests do not exist', () => {
const testDir = createTestDir();
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json')
});
assert.strictEqual(result.code, 0, 'Should skip when manifests are missing');
assert.ok(result.stdout.includes('skipping'), 'Should say skipping');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('fails on invalid install manifest JSON', () => {
const testDir = createTestDir();
const manifestsDir = path.join(testDir, 'manifests');
fs.mkdirSync(manifestsDir, { recursive: true });
fs.writeFileSync(path.join(manifestsDir, 'install-modules.json'), '{ invalid json');
writeJson(path.join(manifestsDir, 'install-profiles.json'), {
version: 1,
profiles: {}
});
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(manifestsDir, 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(manifestsDir, 'install-profiles.json'),
COMPONENTS_MANIFEST_PATH: path.join(manifestsDir, 'install-components.json'),
MODULES_SCHEMA_PATH: modulesSchemaPath,
PROFILES_SCHEMA_PATH: profilesSchemaPath,
COMPONENTS_SCHEMA_PATH: componentsSchemaPath
});
assert.strictEqual(result.code, 1, 'Should fail on invalid JSON');
assert.ok(result.stderr.includes('Invalid JSON'), 'Should report invalid JSON');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('fails when install module references a missing path', () => {
const testDir = createTestDir();
writeJson(path.join(testDir, 'manifests', 'install-modules.json'), {
version: 1,
modules: [
{
id: 'rules-core',
kind: 'rules',
description: 'Rules',
paths: ['rules'],
targets: ['claude'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
},
{
id: 'security',
kind: 'skills',
description: 'Security',
paths: ['skills/security-review'],
targets: ['codex'],
dependencies: [],
defaultInstall: false,
cost: 'medium',
stability: 'stable'
}
]
});
writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), {
version: 1,
profiles: {
core: { description: 'Core', modules: ['rules-core'] },
developer: { description: 'Developer', modules: ['rules-core'] },
security: { description: 'Security', modules: ['rules-core', 'security'] },
research: { description: 'Research', modules: ['rules-core'] },
full: { description: 'Full', modules: ['rules-core', 'security'] }
}
});
writeInstallComponentsManifest(testDir, [
{
id: 'baseline:rules',
family: 'baseline',
description: 'Rules',
modules: ['rules-core']
},
{
id: 'capability:security',
family: 'capability',
description: 'Security',
modules: ['security']
}
]);
fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true });
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'),
COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'),
MODULES_SCHEMA_PATH: modulesSchemaPath,
PROFILES_SCHEMA_PATH: profilesSchemaPath,
COMPONENTS_SCHEMA_PATH: componentsSchemaPath
});
assert.strictEqual(result.code, 1, 'Should fail when a referenced path is missing');
assert.ok(result.stderr.includes('references missing path'), 'Should report missing path');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('fails when two install modules claim the same path', () => {
const testDir = createTestDir();
writeJson(path.join(testDir, 'manifests', 'install-modules.json'), {
version: 1,
modules: [
{
id: 'agents-core',
kind: 'agents',
description: 'Agents',
paths: ['agents'],
targets: ['codex'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
},
{
id: 'commands-core',
kind: 'commands',
description: 'Commands',
paths: ['agents'],
targets: ['codex'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
}
]
});
writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), {
version: 1,
profiles: {
core: { description: 'Core', modules: ['agents-core', 'commands-core'] },
developer: { description: 'Developer', modules: ['agents-core', 'commands-core'] },
security: { description: 'Security', modules: ['agents-core', 'commands-core'] },
research: { description: 'Research', modules: ['agents-core', 'commands-core'] },
full: { description: 'Full', modules: ['agents-core', 'commands-core'] }
}
});
writeInstallComponentsManifest(testDir, [
{
id: 'baseline:agents',
family: 'baseline',
description: 'Agents',
modules: ['agents-core']
},
{
id: 'baseline:commands',
family: 'baseline',
description: 'Commands',
modules: ['commands-core']
}
]);
fs.mkdirSync(path.join(testDir, 'agents'), { recursive: true });
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'),
COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'),
MODULES_SCHEMA_PATH: modulesSchemaPath,
PROFILES_SCHEMA_PATH: profilesSchemaPath,
COMPONENTS_SCHEMA_PATH: componentsSchemaPath
});
assert.strictEqual(result.code, 1, 'Should fail on duplicate claimed paths');
assert.ok(result.stderr.includes('claimed by both'), 'Should report duplicate path claims');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('fails when an install profile references an unknown module', () => {
const testDir = createTestDir();
writeJson(path.join(testDir, 'manifests', 'install-modules.json'), {
version: 1,
modules: [
{
id: 'rules-core',
kind: 'rules',
description: 'Rules',
paths: ['rules'],
targets: ['claude'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
}
]
});
writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), {
version: 1,
profiles: {
core: { description: 'Core', modules: ['rules-core'] },
developer: { description: 'Developer', modules: ['rules-core'] },
security: { description: 'Security', modules: ['rules-core'] },
research: { description: 'Research', modules: ['rules-core'] },
full: { description: 'Full', modules: ['rules-core', 'ghost-module'] }
}
});
writeInstallComponentsManifest(testDir, [
{
id: 'baseline:rules',
family: 'baseline',
description: 'Rules',
modules: ['rules-core']
}
]);
fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true });
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'),
COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'),
MODULES_SCHEMA_PATH: modulesSchemaPath,
PROFILES_SCHEMA_PATH: profilesSchemaPath,
COMPONENTS_SCHEMA_PATH: componentsSchemaPath
});
assert.strictEqual(result.code, 1, 'Should fail on unknown profile module');
assert.ok(result.stderr.includes('references unknown module ghost-module'),
'Should report unknown module reference');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('passes on a valid standalone install manifest fixture', () => {
const testDir = createTestDir();
writeJson(path.join(testDir, 'manifests', 'install-modules.json'), {
version: 1,
modules: [
{
id: 'rules-core',
kind: 'rules',
description: 'Rules',
paths: ['rules'],
targets: ['claude'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
},
{
id: 'orchestration',
kind: 'orchestration',
description: 'Orchestration',
paths: ['scripts/orchestrate-worktrees.js'],
targets: ['codex'],
dependencies: ['rules-core'],
defaultInstall: false,
cost: 'medium',
stability: 'beta'
}
]
});
writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), {
version: 1,
profiles: {
core: { description: 'Core', modules: ['rules-core'] },
developer: { description: 'Developer', modules: ['rules-core', 'orchestration'] },
security: { description: 'Security', modules: ['rules-core'] },
research: { description: 'Research', modules: ['rules-core'] },
full: { description: 'Full', modules: ['rules-core', 'orchestration'] }
}
});
writeInstallComponentsManifest(testDir, [
{
id: 'baseline:rules',
family: 'baseline',
description: 'Rules',
modules: ['rules-core']
},
{
id: 'capability:orchestration',
family: 'capability',
description: 'Orchestration',
modules: ['orchestration']
}
]);
fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true });
fs.mkdirSync(path.join(testDir, 'scripts'), { recursive: true });
fs.writeFileSync(path.join(testDir, 'scripts', 'orchestrate-worktrees.js'), '#!/usr/bin/env node\n');
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'),
COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'),
MODULES_SCHEMA_PATH: modulesSchemaPath,
PROFILES_SCHEMA_PATH: profilesSchemaPath,
COMPONENTS_SCHEMA_PATH: componentsSchemaPath
});
assert.strictEqual(result.code, 0, `Should pass valid fixture, got stderr: ${result.stderr}`);
assert.ok(result.stdout.includes('Validated 2 install modules, 2 install components, and 5 profiles'),
'Should report validated install manifest counts');
cleanupTestDir(testDir);
})) passed++; else failed++;
if (test('fails when an install component references an unknown module', () => {
const testDir = createTestDir();
writeJson(path.join(testDir, 'manifests', 'install-modules.json'), {
version: 1,
modules: [
{
id: 'rules-core',
kind: 'rules',
description: 'Rules',
paths: ['rules'],
targets: ['claude'],
dependencies: [],
defaultInstall: true,
cost: 'light',
stability: 'stable'
}
]
});
writeJson(path.join(testDir, 'manifests', 'install-profiles.json'), {
version: 1,
profiles: {
core: { description: 'Core', modules: ['rules-core'] },
developer: { description: 'Developer', modules: ['rules-core'] },
security: { description: 'Security', modules: ['rules-core'] },
research: { description: 'Research', modules: ['rules-core'] },
full: { description: 'Full', modules: ['rules-core'] }
}
});
writeInstallComponentsManifest(testDir, [
{
id: 'capability:security',
family: 'capability',
description: 'Security',
modules: ['ghost-module']
}
]);
fs.mkdirSync(path.join(testDir, 'rules'), { recursive: true });
const result = runValidatorWithDirs('validate-install-manifests', {
REPO_ROOT: testDir,
MODULES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-modules.json'),
PROFILES_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-profiles.json'),
COMPONENTS_MANIFEST_PATH: path.join(testDir, 'manifests', 'install-components.json'),
MODULES_SCHEMA_PATH: modulesSchemaPath,
PROFILES_SCHEMA_PATH: profilesSchemaPath,
COMPONENTS_SCHEMA_PATH: componentsSchemaPath
});
assert.strictEqual(result.code, 1, 'Should fail on unknown component module');
assert.ok(result.stderr.includes('references unknown module ghost-module'),
'Should report unknown component module');
cleanupTestDir(testDir);
})) passed++; else failed++;
// Summary
console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
process.exit(failed > 0 ? 1 : 0);

View File

@@ -88,9 +88,7 @@ function runShellScript(scriptPath, args = [], input = '', env = {}, cwd = proce
// Create a temporary test directory
function createTestDir() {
const testDir = path.join(os.tmpdir(), `hooks-test-${Date.now()}`);
fs.mkdirSync(testDir, { recursive: true });
return testDir;
return fs.mkdtempSync(path.join(os.tmpdir(), 'hooks-test-'));
}
// Clean up test directory
@@ -2159,6 +2157,8 @@ async function runTests() {
assert.ok(observerLoopSource.includes('ECC_OBSERVER_MAX_TURNS'), 'observer-loop should allow max-turn overrides');
assert.ok(observerLoopSource.includes('max_turns="${ECC_OBSERVER_MAX_TURNS:-10}"'), 'observer-loop should default to 10 turns');
assert.ok(!observerLoopSource.includes('--max-turns 3'), 'observer-loop should not hardcode a 3-turn limit');
assert.ok(observerLoopSource.includes('ECC_SKIP_OBSERVE=1'), 'observer-loop should suppress observe.sh for automated sessions');
assert.ok(observerLoopSource.includes('ECC_HOOK_PROFILE=minimal'), 'observer-loop should run automated analysis with the minimal hook profile');
})
)
passed++;
@@ -2292,6 +2292,77 @@ async function runTests() {
}
})) passed++; else failed++;
if (await asyncTest('observe.sh skips automated sessions before project detection side effects', async () => {
const observePath = path.join(__dirname, '..', '..', 'skills', 'continuous-learning-v2', 'hooks', 'observe.sh');
const cases = [
{
name: 'non-cli entrypoint',
env: { CLAUDE_CODE_ENTRYPOINT: 'mcp' }
},
{
name: 'minimal hook profile',
env: { CLAUDE_CODE_ENTRYPOINT: 'cli', ECC_HOOK_PROFILE: 'minimal' }
},
{
name: 'cooperative skip env',
env: { CLAUDE_CODE_ENTRYPOINT: 'cli', ECC_SKIP_OBSERVE: '1' }
},
{
name: 'subagent payload',
env: { CLAUDE_CODE_ENTRYPOINT: 'cli' },
payload: { agent_id: 'agent-123' }
},
{
name: 'cwd skip path',
env: {
CLAUDE_CODE_ENTRYPOINT: 'cli',
ECC_OBSERVE_SKIP_PATHS: ' observer-sessions , .claude-mem '
},
cwdSuffix: path.join('observer-sessions', 'worker')
}
];
for (const testCase of cases) {
const homeDir = createTestDir();
const projectDir = createTestDir();
try {
const cwd = testCase.cwdSuffix ? path.join(projectDir, testCase.cwdSuffix) : projectDir;
fs.mkdirSync(cwd, { recursive: true });
const payload = JSON.stringify({
tool_name: 'Bash',
tool_input: { command: 'echo hello' },
tool_response: 'ok',
session_id: `session-${testCase.name.replace(/[^a-z0-9]+/gi, '-')}`,
cwd,
...(testCase.payload || {})
});
const result = await runShellScript(observePath, ['post'], payload, {
HOME: homeDir,
...testCase.env
}, projectDir);
assert.strictEqual(result.code, 0, `${testCase.name} should exit successfully, stderr: ${result.stderr}`);
const homunculusDir = path.join(homeDir, '.claude', 'homunculus');
const registryPath = path.join(homunculusDir, 'projects.json');
const projectsDir = path.join(homunculusDir, 'projects');
assert.ok(!fs.existsSync(registryPath), `${testCase.name} should not create projects.json`);
const projectEntries = fs.existsSync(projectsDir)
? fs.readdirSync(projectsDir).filter(entry => fs.statSync(path.join(projectsDir, entry)).isDirectory())
: [];
assert.strictEqual(projectEntries.length, 0, `${testCase.name} should not create project directories`);
} finally {
cleanupTestDir(homeDir);
cleanupTestDir(projectDir);
}
}
})) passed++; else failed++;
if (await asyncTest('matches .tsx extension for type checking', async () => {
const testDir = createTestDir();
const testFile = path.join(testDir, 'component.tsx');
@@ -4528,8 +4599,11 @@ async function runTests() {
const content = fs.readFileSync(path.join(sessionsDir, files[0]), 'utf8');
// The real string message should appear
assert.ok(content.includes('Real user message'), 'Should include the string content user message');
// Numeric/boolean/object content should NOT appear as text
assert.ok(!content.includes('42'), 'Numeric content should be skipped (else branch → empty string → filtered)');
// Numeric/boolean/object content should NOT appear as task bullets.
// The full file may legitimately contain "42" in timestamps like 03:42.
assert.ok(!content.includes('\n- 42\n'), 'Numeric content should not be rendered as a task bullet');
assert.ok(!content.includes('\n- true\n'), 'Boolean content should not be rendered as a task bullet');
assert.ok(!content.includes('\n- [object Object]\n'), 'Object content should not be stringified into a task bullet');
} finally {
fs.rmSync(isoHome, { recursive: true, force: true });
}

View File

@@ -0,0 +1,117 @@
/**
* Tests for scripts/lib/install/config.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const {
loadInstallConfig,
resolveInstallConfigPath,
} = require('../../scripts/lib/install/config');
// Run one synchronous test case, printing a pass/fail marker to stdout.
// Returns true when `fn` completes without throwing, false otherwise.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    // Guard against non-Error throws (strings, plain objects), which would
    // otherwise print "Error: undefined".
    const message = error instanceof Error ? error.message : String(error);
    console.log(`    Error: ${message}`);
    return false;
  }
}
// Allocate a unique scratch directory under the OS temp root; the random
// suffix is appended to `prefix` by mkdtempSync.
function createTempDir(prefix) {
  const template = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(template);
}
// Recursively delete a scratch directory; `force` makes a missing path a no-op.
function cleanup(dirPath) {
  const removal = { recursive: true, force: true };
  fs.rmSync(dirPath, removal);
}
// Serialize `value` as 2-space-indented JSON at `filePath`, creating any
// missing parent directories first.
function writeJson(filePath, value) {
  const parentDir = path.dirname(filePath);
  fs.mkdirSync(parentDir, { recursive: true });
  const serialized = JSON.stringify(value, null, 2);
  fs.writeFileSync(filePath, serialized);
}
/**
 * Entry point: exercises resolveInstallConfigPath() and loadInstallConfig()
 * from scripts/lib/install/config.js, then exits the process non-zero when
 * any test fails.
 */
function runTests() {
  console.log('\n=== Testing install/config.js ===\n');
  let passed = 0;
  let failed = 0;
  // Relative config paths must resolve against the caller-supplied cwd,
  // not process.cwd().
  if (test('resolves relative config paths from the provided cwd', () => {
    const cwd = '/workspace/app';
    const resolved = resolveInstallConfigPath('configs/ecc-install.json', { cwd });
    assert.strictEqual(resolved, path.join(cwd, 'configs', 'ecc-install.json'));
  })) passed++; else failed++;
  // Loading should normalize field names and dedupe repeated modules /
  // include entries while preserving first-seen order.
  if (test('loads and normalizes a valid install config', () => {
    const cwd = createTempDir('install-config-');
    try {
      const configPath = path.join(cwd, 'ecc-install.json');
      writeJson(configPath, {
        version: 1,
        target: 'cursor',
        profile: 'developer',
        modules: ['platform-configs', 'platform-configs'],
        include: ['lang:typescript', 'framework:nextjs', 'lang:typescript'],
        exclude: ['capability:media'],
        options: {
          includeExamples: false,
        },
      });
      const config = loadInstallConfig('ecc-install.json', { cwd });
      assert.strictEqual(config.path, configPath);
      assert.strictEqual(config.target, 'cursor');
      assert.strictEqual(config.profileId, 'developer');
      assert.deepStrictEqual(config.moduleIds, ['platform-configs']);
      assert.deepStrictEqual(config.includeComponentIds, ['lang:typescript', 'framework:nextjs']);
      assert.deepStrictEqual(config.excludeComponentIds, ['capability:media']);
      assert.deepStrictEqual(config.options, { includeExamples: false });
    } finally {
      cleanup(cwd);
    }
  })) passed++; else failed++;
  // Schema violations (unsupported version, unknown target) should be
  // rejected with an "Invalid install config" error.
  if (test('rejects invalid config schema values', () => {
    const cwd = createTempDir('install-config-');
    try {
      writeJson(path.join(cwd, 'ecc-install.json'), {
        version: 2,
        target: 'ghost-target',
      });
      assert.throws(
        () => loadInstallConfig('ecc-install.json', { cwd }),
        /Invalid install config/
      );
    } finally {
      cleanup(cwd);
    }
  })) passed++; else failed++;
  // A missing file must surface a distinct, user-actionable error message.
  if (test('fails when the install config does not exist', () => {
    const cwd = createTempDir('install-config-');
    try {
      assert.throws(
        () => loadInstallConfig('ecc-install.json', { cwd }),
        /Install config not found/
      );
    } finally {
      cleanup(cwd);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,357 @@
/**
* Tests for scripts/lib/install-lifecycle.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const {
buildDoctorReport,
discoverInstalledStates,
} = require('../../scripts/lib/install-lifecycle');
const {
createInstallState,
writeInstallState,
} = require('../../scripts/lib/install-state');
// Repo root, assuming this file lives two levels below it (tests/<dir>/).
const REPO_ROOT = path.join(__dirname, '..', '..');
// Live versions read from the repo at module load, so doctor tests can
// fabricate install states that look "current" for version comparisons.
const CURRENT_PACKAGE_VERSION = JSON.parse(
  fs.readFileSync(path.join(REPO_ROOT, 'package.json'), 'utf8')
).version;
const CURRENT_MANIFEST_VERSION = JSON.parse(
  fs.readFileSync(path.join(REPO_ROOT, 'manifests', 'install-modules.json'), 'utf8')
).version;
// Run one synchronous test case, printing a pass/fail marker to stdout.
// Returns true when `fn` completes without throwing, false otherwise.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    // Guard against non-Error throws (strings, plain objects), which would
    // otherwise print "Error: undefined".
    const message = error instanceof Error ? error.message : String(error);
    console.log(`    Error: ${message}`);
    return false;
  }
}
// Make a fresh scratch directory whose name begins with `prefix`.
function createTempDir(prefix) {
  const base = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(base);
}
// Remove a scratch directory tree; absence is tolerated via `force`.
function cleanup(dirPath) {
  fs.rmSync(dirPath, { force: true, recursive: true });
}
// Build an install-state payload from `options` and persist it to disk,
// returning the in-memory state so callers can make further assertions.
function writeState(filePath, options) {
  const payload = createInstallState(options);
  writeInstallState(filePath, payload);
  return payload;
}
/**
 * Entry point: exercises discoverInstalledStates() and buildDoctorReport()
 * from scripts/lib/install-lifecycle.js against fabricated home/project
 * install states, exiting the process non-zero when any test fails.
 */
function runTests() {
  console.log('\n=== Testing install-lifecycle.js ===\n');
  let passed = 0;
  let failed = 0;
  // Discovery should return one record per requested target, in request order.
  if (test('discovers installed states for multiple targets in the current context', () => {
    const homeDir = createTempDir('install-lifecycle-home-');
    const projectRoot = createTempDir('install-lifecycle-project-');
    try {
      const claudeStatePath = path.join(homeDir, '.claude', 'ecc', 'install-state.json');
      const cursorStatePath = path.join(projectRoot, '.cursor', 'ecc-install-state.json');
      // Legacy-mode claude install in the fake home directory.
      writeState(claudeStatePath, {
        adapter: { id: 'claude-home', target: 'claude', kind: 'home' },
        targetRoot: path.join(homeDir, '.claude'),
        installStatePath: claudeStatePath,
        request: {
          profile: null,
          modules: [],
          legacyLanguages: ['typescript'],
          legacyMode: true,
        },
        resolution: {
          selectedModules: ['legacy-claude-rules'],
          skippedModules: [],
        },
        operations: [],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      // Manifest-mode cursor install in the fake project root.
      writeState(cursorStatePath, {
        adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' },
        targetRoot: path.join(projectRoot, '.cursor'),
        installStatePath: cursorStatePath,
        request: {
          profile: 'core',
          modules: [],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['rules-core', 'platform-configs'],
          skippedModules: [],
        },
        operations: [],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'def456',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const records = discoverInstalledStates({
        homeDir,
        projectRoot,
        targets: ['claude', 'cursor'],
      });
      assert.strictEqual(records.length, 2);
      assert.strictEqual(records[0].exists, true);
      assert.strictEqual(records[1].exists, true);
      assert.strictEqual(records[0].state.target.id, 'claude-home');
      assert.strictEqual(records[1].state.target.id, 'cursor-project');
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  // A file recorded as managed in operations but absent on disk → error status.
  if (test('doctor reports missing managed files as an error', () => {
    const homeDir = createTempDir('install-lifecycle-home-');
    const projectRoot = createTempDir('install-lifecycle-project-');
    try {
      const targetRoot = path.join(projectRoot, '.cursor');
      const statePath = path.join(targetRoot, 'ecc-install-state.json');
      fs.mkdirSync(targetRoot, { recursive: true });
      // State references hooks.json, which we deliberately never create.
      writeState(statePath, {
        adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' },
        targetRoot,
        installStatePath: statePath,
        request: {
          profile: null,
          modules: ['platform-configs'],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['platform-configs'],
          skippedModules: [],
        },
        operations: [
          {
            kind: 'copy-file',
            moduleId: 'platform-configs',
            sourceRelativePath: '.cursor/hooks.json',
            destinationPath: path.join(targetRoot, 'hooks.json'),
            strategy: 'sync-root-children',
            ownership: 'managed',
            scaffoldOnly: false,
          },
        ],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const report = buildDoctorReport({
        repoRoot: REPO_ROOT,
        homeDir,
        projectRoot,
        targets: ['cursor'],
      });
      assert.strictEqual(report.results.length, 1);
      assert.strictEqual(report.results[0].status, 'error');
      assert.ok(report.results[0].issues.some(issue => issue.code === 'missing-managed-files'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  // Managed file present and byte-identical to the repo source → healthy (ok).
  if (test('doctor reports a healthy legacy install when managed files are present', () => {
    const homeDir = createTempDir('install-lifecycle-home-');
    const projectRoot = createTempDir('install-lifecycle-project-');
    try {
      const targetRoot = path.join(homeDir, '.claude');
      const statePath = path.join(targetRoot, 'ecc', 'install-state.json');
      const managedFile = path.join(targetRoot, 'rules', 'common', 'coding-style.md');
      // Copy the real repo file so the drift comparison sees identical content.
      const sourceContent = fs.readFileSync(path.join(REPO_ROOT, 'rules', 'common', 'coding-style.md'), 'utf8');
      fs.mkdirSync(path.dirname(managedFile), { recursive: true });
      fs.writeFileSync(managedFile, sourceContent);
      writeState(statePath, {
        adapter: { id: 'claude-home', target: 'claude', kind: 'home' },
        targetRoot,
        installStatePath: statePath,
        request: {
          profile: null,
          modules: [],
          legacyLanguages: ['typescript'],
          legacyMode: true,
        },
        resolution: {
          selectedModules: ['legacy-claude-rules'],
          skippedModules: [],
        },
        operations: [
          {
            kind: 'copy-file',
            moduleId: 'legacy-claude-rules',
            sourceRelativePath: 'rules/common/coding-style.md',
            destinationPath: managedFile,
            strategy: 'preserve-relative-path',
            ownership: 'managed',
            scaffoldOnly: false,
          },
        ],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const report = buildDoctorReport({
        repoRoot: REPO_ROOT,
        homeDir,
        projectRoot,
        targets: ['claude'],
      });
      assert.strictEqual(report.results.length, 1);
      assert.strictEqual(report.results[0].status, 'ok');
      assert.strictEqual(report.results[0].issues.length, 0);
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  // Managed file whose content differs from the repo source → warning status.
  if (test('doctor reports drifted managed files as a warning', () => {
    const homeDir = createTempDir('install-lifecycle-home-');
    const projectRoot = createTempDir('install-lifecycle-project-');
    try {
      const targetRoot = path.join(projectRoot, '.cursor');
      const statePath = path.join(targetRoot, 'ecc-install-state.json');
      const sourcePath = path.join(REPO_ROOT, '.cursor', 'hooks.json');
      const destinationPath = path.join(targetRoot, 'hooks.json');
      fs.mkdirSync(path.dirname(destinationPath), { recursive: true });
      // Deliberately different content from the repo's hooks.json.
      fs.writeFileSync(destinationPath, '{"drifted":true}\n');
      writeState(statePath, {
        adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' },
        targetRoot,
        installStatePath: statePath,
        request: {
          profile: null,
          modules: ['platform-configs'],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['platform-configs'],
          skippedModules: [],
        },
        operations: [
          {
            kind: 'copy-file',
            moduleId: 'platform-configs',
            sourcePath,
            sourceRelativePath: '.cursor/hooks.json',
            destinationPath,
            strategy: 'sync-root-children',
            ownership: 'managed',
            scaffoldOnly: false,
          },
        ],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const report = buildDoctorReport({
        repoRoot: REPO_ROOT,
        homeDir,
        projectRoot,
        targets: ['cursor'],
      });
      assert.strictEqual(report.results.length, 1);
      assert.strictEqual(report.results[0].status, 'warning');
      assert.ok(report.results[0].issues.some(issue => issue.code === 'drifted-managed-files'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  // Recorded selection ('rules-core' only) no longer matches what the current
  // manifests would resolve for the 'core' profile → resolution-drift warning.
  if (test('doctor reports manifest resolution drift for non-legacy installs', () => {
    const homeDir = createTempDir('install-lifecycle-home-');
    const projectRoot = createTempDir('install-lifecycle-project-');
    try {
      const targetRoot = path.join(projectRoot, '.cursor');
      const statePath = path.join(targetRoot, 'ecc-install-state.json');
      fs.mkdirSync(targetRoot, { recursive: true });
      writeState(statePath, {
        adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' },
        targetRoot,
        installStatePath: statePath,
        request: {
          profile: 'core',
          modules: [],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['rules-core'],
          skippedModules: [],
        },
        operations: [],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const report = buildDoctorReport({
        repoRoot: REPO_ROOT,
        homeDir,
        projectRoot,
        targets: ['cursor'],
      });
      assert.strictEqual(report.results.length, 1);
      assert.strictEqual(report.results[0].status, 'warning');
      assert.ok(report.results[0].issues.some(issue => issue.code === 'resolution-drift'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,196 @@
/**
* Tests for scripts/lib/install-manifests.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const {
loadInstallManifests,
listInstallComponents,
listInstallModules,
listInstallProfiles,
resolveInstallPlan,
} = require('../../scripts/lib/install-manifests');
// Run one synchronous test case, printing a pass/fail marker to stdout.
// Returns true when `fn` completes without throwing, false otherwise.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    // Guard against non-Error throws (strings, plain objects), which would
    // otherwise print "Error: undefined".
    const message = error instanceof Error ? error.message : String(error);
    console.log(`    Error: ${message}`);
    return false;
  }
}
// Create a throwaway repo root pre-populated with an empty manifests/ dir,
// ready for tests to drop fixture manifest files into.
function createTestRepo() {
  const repoRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'install-manifests-'));
  const manifestsDir = path.join(repoRoot, 'manifests');
  fs.mkdirSync(manifestsDir, { recursive: true });
  return repoRoot;
}
// Delete a fixture repo tree; `force` tolerates an already-removed root.
function cleanupTestRepo(root) {
  fs.rmSync(root, { force: true, recursive: true });
}
// Persist `value` as pretty-printed JSON at `filePath`, creating parent dirs.
function writeJson(filePath, value) {
  const text = JSON.stringify(value, null, 2);
  fs.mkdirSync(path.dirname(filePath), { recursive: true });
  fs.writeFileSync(filePath, text);
}
/**
 * Entry point: exercises manifest loading and install-plan resolution from
 * scripts/lib/install-manifests.js, against both the real repo manifests and
 * a synthetic fixture repo. Exits the process non-zero when any test fails.
 */
function runTests() {
  console.log('\n=== Testing install-manifests.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('loads real project install manifests', () => {
    const manifests = loadInstallManifests();
    assert.ok(manifests.modules.length >= 1, 'Should load modules');
    assert.ok(Object.keys(manifests.profiles).length >= 1, 'Should load profiles');
    assert.ok(manifests.components.length >= 1, 'Should load components');
  })) passed++; else failed++;
  if (test('lists install profiles from the real project', () => {
    const profiles = listInstallProfiles();
    assert.ok(profiles.some(profile => profile.id === 'core'), 'Should include core profile');
    assert.ok(profiles.some(profile => profile.id === 'full'), 'Should include full profile');
  })) passed++; else failed++;
  if (test('lists install modules from the real project', () => {
    const modules = listInstallModules();
    // Arrow params renamed from `module` to avoid shadowing the CommonJS
    // `module` object.
    assert.ok(modules.some(entry => entry.id === 'rules-core'), 'Should include rules-core');
    assert.ok(modules.some(entry => entry.id === 'orchestration'), 'Should include orchestration');
  })) passed++; else failed++;
  if (test('lists install components from the real project', () => {
    const components = listInstallComponents();
    assert.ok(components.some(component => component.id === 'lang:typescript'),
      'Should include lang:typescript');
    assert.ok(components.some(component => component.id === 'capability:security'),
      'Should include capability:security');
  })) passed++; else failed++;
  // Modules whose targets do not include 'cursor' must be skipped, not fail.
  if (test('resolves a real project profile with target-specific skips', () => {
    const projectRoot = '/workspace/app';
    const plan = resolveInstallPlan({ profileId: 'developer', target: 'cursor', projectRoot });
    assert.ok(plan.selectedModuleIds.includes('rules-core'), 'Should keep rules-core');
    assert.ok(plan.selectedModuleIds.includes('commands-core'), 'Should keep commands-core');
    assert.ok(!plan.selectedModuleIds.includes('orchestration'),
      'Should not select unsupported orchestration module for cursor');
    assert.ok(plan.skippedModuleIds.includes('orchestration'),
      'Should report unsupported orchestration module as skipped');
    assert.strictEqual(plan.targetAdapterId, 'cursor-project');
    assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.cursor'));
    assert.strictEqual(plan.installStatePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json'));
    assert.ok(plan.operations.length > 0, 'Should include scaffold operations');
    assert.ok(
      plan.operations.some(operation => (
        operation.sourceRelativePath === '.cursor'
        && operation.strategy === 'sync-root-children'
      )),
      'Should flatten the native cursor root'
    );
  })) passed++; else failed++;
  // Requesting 'security' should pull in its full transitive dependency chain.
  if (test('resolves explicit modules with dependency expansion', () => {
    const plan = resolveInstallPlan({ moduleIds: ['security'] });
    assert.ok(plan.selectedModuleIds.includes('security'), 'Should include requested module');
    assert.ok(plan.selectedModuleIds.includes('workflow-quality'),
      'Should include transitive dependency');
    assert.ok(plan.selectedModuleIds.includes('platform-configs'),
      'Should include nested dependency');
  })) passed++; else failed++;
  if (test('resolves included and excluded user-facing components', () => {
    const plan = resolveInstallPlan({
      profileId: 'core',
      includeComponentIds: ['capability:security'],
      excludeComponentIds: ['capability:orchestration'],
      target: 'claude',
    });
    assert.deepStrictEqual(plan.includedComponentIds, ['capability:security']);
    assert.deepStrictEqual(plan.excludedComponentIds, ['capability:orchestration']);
    assert.ok(plan.selectedModuleIds.includes('security'), 'Should include modules from selected components');
    assert.ok(!plan.selectedModuleIds.includes('orchestration'), 'Should exclude modules from excluded components');
    assert.ok(plan.excludedModuleIds.includes('orchestration'),
      'Should report modules removed by excluded components');
  })) passed++; else failed++;
  if (test('fails when a selected component depends on an excluded component module', () => {
    assert.throws(
      () => resolveInstallPlan({
        includeComponentIds: ['capability:social'],
        excludeComponentIds: ['capability:content'],
      }),
      /depends on excluded module business-content/
    );
  })) passed++; else failed++;
  if (test('throws on unknown install profile', () => {
    assert.throws(
      () => resolveInstallPlan({ profileId: 'ghost-profile' }),
      /Unknown install profile/
    );
  })) passed++; else failed++;
  if (test('throws on unknown install target', () => {
    assert.throws(
      () => resolveInstallPlan({ profileId: 'core', target: 'not-a-target' }),
      /Unknown install target/
    );
  })) passed++; else failed++;
  if (test('throws when a dependency does not support the requested target', () => {
    const repoRoot = createTestRepo();
    try {
      writeJson(path.join(repoRoot, 'manifests', 'install-modules.json'), {
        version: 1,
        modules: [
          {
            id: 'parent',
            kind: 'skills',
            description: 'Parent',
            paths: ['parent'],
            targets: ['claude'],
            dependencies: ['child'],
            defaultInstall: false,
            cost: 'light',
            stability: 'stable'
          },
          {
            id: 'child',
            kind: 'skills',
            description: 'Child',
            paths: ['child'],
            targets: ['cursor'],
            dependencies: [],
            defaultInstall: false,
            cost: 'light',
            stability: 'stable'
          }
        ]
      });
      writeJson(path.join(repoRoot, 'manifests', 'install-profiles.json'), {
        version: 1,
        profiles: {
          core: { description: 'Core', modules: ['parent'] }
        }
      });
      assert.throws(
        () => resolveInstallPlan({ repoRoot, profileId: 'core', target: 'claude' }),
        /does not support target claude/
      );
    } finally {
      // Bug fix: cleanup previously ran after assert.throws outside any
      // finally block, so a failed assertion leaked the temp fixture repo.
      cleanupTestRepo(repoRoot);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,147 @@
/**
* Tests for scripts/lib/install/request.js
*/
const assert = require('assert');
const {
normalizeInstallRequest,
parseInstallArgs,
} = require('../../scripts/lib/install/request');
// Run one synchronous test case, printing a pass/fail marker to stdout.
// Returns true when `fn` completes without throwing, false otherwise.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    // Guard against non-Error throws (strings, plain objects), which would
    // otherwise print "Error: undefined".
    const message = error instanceof Error ? error.message : String(error);
    console.log(`    Error: ${message}`);
    return false;
  }
}
/**
 * Entry point: exercises parseInstallArgs() and normalizeInstallRequest()
 * from scripts/lib/install/request.js, exiting the process non-zero when
 * any test fails.
 */
function runTests() {
  console.log('\n=== Testing install/request.js ===\n');
  let passed = 0;
  let failed = 0;
  // argv-style parsing: first two entries are the node binary and script path.
  if (test('parses manifest-mode CLI arguments', () => {
    const parsed = parseInstallArgs([
      'node',
      'scripts/install-apply.js',
      '--target', 'cursor',
      '--profile', 'developer',
      '--with', 'lang:typescript',
      '--without', 'capability:media',
      '--config', 'ecc-install.json',
      '--dry-run',
      '--json'
    ]);
    assert.strictEqual(parsed.target, 'cursor');
    assert.strictEqual(parsed.profileId, 'developer');
    assert.strictEqual(parsed.configPath, 'ecc-install.json');
    assert.deepStrictEqual(parsed.includeComponentIds, ['lang:typescript']);
    assert.deepStrictEqual(parsed.excludeComponentIds, ['capability:media']);
    assert.strictEqual(parsed.dryRun, true);
    assert.strictEqual(parsed.json, true);
    assert.deepStrictEqual(parsed.languages, []);
  })) passed++; else failed++;
  // A languages-only request should be classified as mode 'legacy'.
  if (test('normalizes legacy language installs into a canonical request', () => {
    const request = normalizeInstallRequest({
      target: 'claude',
      profileId: null,
      moduleIds: [],
      languages: ['typescript', 'python']
    });
    assert.strictEqual(request.mode, 'legacy');
    assert.strictEqual(request.target, 'claude');
    assert.deepStrictEqual(request.languages, ['typescript', 'python']);
    assert.deepStrictEqual(request.moduleIds, []);
    assert.strictEqual(request.profileId, null);
  })) passed++; else failed++;
  // Profile/component-driven requests should be classified as mode 'manifest'.
  if (test('normalizes manifest installs into a canonical request', () => {
    const request = normalizeInstallRequest({
      target: 'cursor',
      profileId: 'developer',
      moduleIds: [],
      includeComponentIds: ['lang:typescript'],
      excludeComponentIds: ['capability:media'],
      languages: []
    });
    assert.strictEqual(request.mode, 'manifest');
    assert.strictEqual(request.target, 'cursor');
    assert.strictEqual(request.profileId, 'developer');
    assert.deepStrictEqual(request.includeComponentIds, ['lang:typescript']);
    assert.deepStrictEqual(request.excludeComponentIds, ['capability:media']);
    assert.deepStrictEqual(request.languages, []);
  })) passed++; else failed++;
  // Merge semantics asserted below: CLI target wins over config target, the
  // config profile fills a missing CLI profile, and list fields concatenate
  // config values before CLI values.
  if (test('merges config-backed component selections with CLI overrides', () => {
    const request = normalizeInstallRequest({
      target: 'cursor',
      profileId: null,
      moduleIds: ['platform-configs'],
      includeComponentIds: ['framework:nextjs'],
      excludeComponentIds: ['capability:media'],
      languages: [],
      configPath: '/workspace/app/ecc-install.json',
      config: {
        path: '/workspace/app/ecc-install.json',
        target: 'claude',
        profileId: 'developer',
        moduleIds: ['workflow-quality'],
        includeComponentIds: ['lang:typescript'],
        excludeComponentIds: ['capability:orchestration'],
      },
    });
    assert.strictEqual(request.mode, 'manifest');
    assert.strictEqual(request.target, 'cursor');
    assert.strictEqual(request.profileId, 'developer');
    assert.deepStrictEqual(request.moduleIds, ['workflow-quality', 'platform-configs']);
    assert.deepStrictEqual(request.includeComponentIds, ['lang:typescript', 'framework:nextjs']);
    assert.deepStrictEqual(request.excludeComponentIds, ['capability:orchestration', 'capability:media']);
    assert.strictEqual(request.configPath, '/workspace/app/ecc-install.json');
  })) passed++; else failed++;
  // Legacy languages and manifest-mode flags are mutually exclusive.
  if (test('rejects mixing legacy languages with manifest flags', () => {
    assert.throws(
      () => normalizeInstallRequest({
        target: 'claude',
        profileId: 'core',
        moduleIds: [],
        includeComponentIds: [],
        excludeComponentIds: [],
        languages: ['typescript']
      }),
      /cannot be combined/
    );
  })) passed++; else failed++;
  // A request that selects nothing (and is not --help) is an error.
  if (test('rejects empty install requests when not asking for help', () => {
    assert.throws(
      () => normalizeInstallRequest({
        target: 'claude',
        profileId: null,
        moduleIds: [],
        includeComponentIds: [],
        excludeComponentIds: [],
        languages: [],
        help: false
      }),
      /No install profile, module IDs, included components, or legacy languages/
    );
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,139 @@
/**
* Tests for scripts/lib/install-state.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const {
createInstallState,
readInstallState,
writeInstallState,
} = require('../../scripts/lib/install-state');
// Run one synchronous test case, printing a pass/fail marker to stdout.
// Returns true when `fn` completes without throwing, false otherwise.
function test(name, fn) {
  try {
    fn();
    console.log(`  \u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    // Guard against non-Error throws (strings, plain objects), which would
    // otherwise print "Error: undefined".
    const message = error instanceof Error ? error.message : String(error);
    console.log(`    Error: ${message}`);
    return false;
  }
}
// Allocate a fresh scratch directory for a single test case.
function createTestDir() {
  const template = path.join(os.tmpdir(), 'install-state-');
  return fs.mkdtempSync(template);
}
// Delete a scratch directory tree; `force` avoids throwing if already gone.
function cleanupTestDir(dirPath) {
  fs.rmSync(dirPath, { force: true, recursive: true });
}
/**
 * Entry point: exercises createInstallState(), writeInstallState() and
 * readInstallState() from scripts/lib/install-state.js, exiting the process
 * non-zero when any test fails.
 */
function runTests() {
  console.log('\n=== Testing install-state.js ===\n');
  let passed = 0;
  let failed = 0;
  // createInstallState should stamp the schema version and carry through the
  // adapter id, request, and operations.
  if (test('creates a valid install-state payload', () => {
    const state = createInstallState({
      adapter: { id: 'cursor-project' },
      targetRoot: '/repo/.cursor',
      installStatePath: '/repo/.cursor/ecc-install-state.json',
      request: {
        profile: 'developer',
        modules: ['orchestration'],
        legacyLanguages: ['typescript'],
        legacyMode: true,
      },
      resolution: {
        selectedModules: ['rules-core', 'orchestration'],
        skippedModules: [],
      },
      operations: [
        {
          kind: 'copy-path',
          moduleId: 'rules-core',
          sourceRelativePath: 'rules',
          destinationPath: '/repo/.cursor/rules',
          strategy: 'preserve-relative-path',
          ownership: 'managed',
          scaffoldOnly: true,
        },
      ],
      source: {
        repoVersion: '1.9.0',
        repoCommit: 'abc123',
        manifestVersion: 1,
      },
      installedAt: '2026-03-13T00:00:00Z',
    });
    assert.strictEqual(state.schemaVersion, 'ecc.install.v1');
    assert.strictEqual(state.target.id, 'cursor-project');
    assert.strictEqual(state.request.profile, 'developer');
    assert.strictEqual(state.operations.length, 1);
  })) passed++; else failed++;
  // Round-trip: write to disk, read back, and verify key fields survive.
  if (test('writes and reads install-state from disk', () => {
    const testDir = createTestDir();
    const statePath = path.join(testDir, 'ecc-install-state.json');
    try {
      const state = createInstallState({
        adapter: { id: 'claude-home' },
        targetRoot: path.join(testDir, '.claude'),
        installStatePath: statePath,
        request: {
          profile: 'core',
          modules: [],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['rules-core'],
          skippedModules: [],
        },
        operations: [],
        source: {
          repoVersion: '1.9.0',
          repoCommit: 'abc123',
          manifestVersion: 1,
        },
      });
      writeInstallState(statePath, state);
      const loaded = readInstallState(statePath);
      assert.strictEqual(loaded.target.id, 'claude-home');
      assert.strictEqual(loaded.request.profile, 'core');
      assert.deepStrictEqual(loaded.resolution.selectedModules, ['rules-core']);
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;
  // A payload with only the schemaVersion field must fail read-side validation.
  if (test('rejects invalid install-state payloads on read', () => {
    const testDir = createTestDir();
    const statePath = path.join(testDir, 'ecc-install-state.json');
    try {
      fs.writeFileSync(statePath, JSON.stringify({ schemaVersion: 'ecc.install.v1' }, null, 2));
      assert.throws(
        () => readInstallState(statePath),
        /Invalid install-state/
      );
    } finally {
      cleanupTestDir(testDir);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,110 @@
/**
* Tests for scripts/lib/install-targets/registry.js
*/
const assert = require('assert');
const path = require('path');
const {
getInstallTargetAdapter,
listInstallTargetAdapters,
planInstallTargetScaffold,
} = require('../../scripts/lib/install-targets/registry');
/**
 * Executes one named test case, printing a pass/fail marker.
 *
 * @param {string} name - Label shown in the suite output.
 * @param {Function} fn - Test body that throws to signal failure.
 * @returns {boolean} Whether the body ran without throwing.
 */
function test(name, fn) {
  try {
    fn();
  } catch (error) {
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
    return false;
  }
  console.log(`  \u2713 ${name}`);
  return true;
}
/**
 * Verifies the install-target adapter registry: the supported target list,
 * per-adapter root/install-state path resolution, scaffold planning
 * (including native-root flattening), and unknown-target rejection.
 * Exits 0 on success, 1 on any failure.
 */
function runTests() {
  console.log('\n=== Testing install-target adapters ===\n');
  let passed = 0;
  let failed = 0;
  // Every first-class target must be registered.
  if (test('lists supported target adapters', () => {
    const adapters = listInstallTargetAdapters();
    const targets = adapters.map(adapter => adapter.target);
    assert.ok(targets.includes('claude'), 'Should include claude target');
    assert.ok(targets.includes('cursor'), 'Should include cursor target');
    assert.ok(targets.includes('antigravity'), 'Should include antigravity target');
    assert.ok(targets.includes('codex'), 'Should include codex target');
    assert.ok(targets.includes('opencode'), 'Should include opencode target');
  })) passed++; else failed++;
  // Cursor is a project-scoped adapter: everything lives under <project>/.cursor.
  if (test('resolves cursor adapter root and install-state path from project root', () => {
    const adapter = getInstallTargetAdapter('cursor');
    const projectRoot = '/workspace/app';
    const root = adapter.resolveRoot({ projectRoot });
    const statePath = adapter.getInstallStatePath({ projectRoot });
    assert.strictEqual(root, path.join(projectRoot, '.cursor'));
    assert.strictEqual(statePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json'));
  })) passed++; else failed++;
  // Claude is home-scoped: root is ~/.claude, state under ~/.claude/ecc/.
  if (test('resolves claude adapter root and install-state path from home dir', () => {
    const adapter = getInstallTargetAdapter('claude');
    const homeDir = '/Users/example';
    const root = adapter.resolveRoot({ homeDir, repoRoot: '/repo/ecc' });
    const statePath = adapter.getInstallStatePath({ homeDir, repoRoot: '/repo/ecc' });
    assert.strictEqual(root, path.join(homeDir, '.claude'));
    assert.strictEqual(statePath, path.join(homeDir, '.claude', 'ecc', 'install-state.json'));
  })) passed++; else failed++;
  // Native roots (.cursor) are flattened into the target root, while plain
  // module paths (rules) keep their relative layout.
  if (test('plans scaffold operations and flattens native target roots', () => {
    const repoRoot = '/repo/ecc';
    const projectRoot = '/workspace/app';
    const modules = [
      {
        id: 'platform-configs',
        paths: ['.cursor', 'mcp-configs'],
      },
      {
        id: 'rules-core',
        paths: ['rules'],
      },
    ];
    const plan = planInstallTargetScaffold({
      target: 'cursor',
      repoRoot,
      projectRoot,
      modules,
    });
    assert.strictEqual(plan.adapter.id, 'cursor-project');
    assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.cursor'));
    assert.strictEqual(plan.installStatePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json'));
    const flattened = plan.operations.find(operation => operation.sourceRelativePath === '.cursor');
    const preserved = plan.operations.find(operation => operation.sourceRelativePath === 'rules');
    assert.ok(flattened, 'Should include .cursor scaffold operation');
    assert.strictEqual(flattened.strategy, 'sync-root-children');
    assert.strictEqual(flattened.destinationPath, path.join(projectRoot, '.cursor'));
    assert.ok(preserved, 'Should include rules scaffold operation');
    assert.strictEqual(preserved.strategy, 'preserve-relative-path');
    assert.strictEqual(preserved.destinationPath, path.join(projectRoot, '.cursor', 'rules'));
  })) passed++; else failed++;
  // Lookups of unregistered targets must fail loudly, not fall through.
  if (test('throws on unknown target adapter', () => {
    assert.throws(
      () => getInstallTargetAdapter('ghost-target'),
      /Unknown install target adapter/
    );
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -7,6 +7,7 @@ const path = require('path');
const {
buildSessionSnapshot,
listTmuxPanes,
loadWorkerSnapshots,
parseWorkerHandoff,
parseWorkerStatus,
@@ -186,6 +187,16 @@ test('buildSessionSnapshot merges tmux panes with worker metadata', () => {
}
});
// Stubs the tmux spawn with an ENOENT error to simulate a machine without
// tmux installed; listTmuxPanes should degrade to an empty pane list
// rather than throwing.
test('listTmuxPanes returns an empty array when tmux is unavailable', () => {
  const panes = listTmuxPanes('workflow-visual-proof', {
    spawnSyncImpl: () => ({
      error: Object.assign(new Error('tmux not found'), { code: 'ENOENT' })
    })
  });
  assert.deepStrictEqual(panes, []);
});
test('resolveSnapshotTarget handles plan files and direct session names', () => {
const tempRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-target-'));
const repoRoot = path.join(tempRoot, 'repo');

View File

@@ -0,0 +1,214 @@
'use strict';
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { createClaudeHistoryAdapter } = require('../../scripts/lib/session-adapters/claude-history');
const { createDmuxTmuxAdapter } = require('../../scripts/lib/session-adapters/dmux-tmux');
const { createAdapterRegistry } = require('../../scripts/lib/session-adapters/registry');
// Suite banner plus module-level pass/fail counters mutated by test() below.
console.log('=== Testing session-adapters ===\n');
let passed = 0;
let failed = 0;
/**
 * Runs one named test case and tallies the result in the module-level
 * passed/failed counters.
 *
 * Fix: the original printed the bare test name on success and
 * "name: message" on failure, so passing lines carried no visible marker.
 * Use the \u2713 / \u2717 prefixes the other ECC test harnesses emit so
 * suite output reads consistently.
 *
 * @param {string} name - Human-readable test label.
 * @param {Function} fn - Zero-argument test body; throws to signal failure.
 */
function test(name, fn) {
  try {
    fn();
    console.log(`\u2713 ${name}`);
    passed += 1;
  } catch (error) {
    console.log(`\u2717 ${name}: ${error.message}`);
    failed += 1;
  }
}
/**
 * Temporarily points process.env.HOME at homeDir while fn runs, then
 * restores the previous value — or removes HOME entirely if it was
 * originally unset — even when fn throws.
 *
 * @param {string} homeDir - Directory to expose as HOME during fn.
 * @param {Function} fn - Callback executed with the overridden HOME.
 */
function withHome(homeDir, fn) {
  const hadHome = typeof process.env.HOME === 'string';
  const savedHome = process.env.HOME;
  process.env.HOME = homeDir;
  try {
    fn();
  } finally {
    if (hadHome) {
      process.env.HOME = savedHome;
    } else {
      delete process.env.HOME;
    }
  }
}
// Feeds a fully-populated orchestration snapshot through the dmux-tmux
// adapter via an injected collector stub (no real tmux needed) and checks
// the canonical ecc.session.v1 shape: session identity, aggregates, and
// per-worker runtime/outputs mapping.
test('dmux adapter normalizes orchestration snapshots into canonical form', () => {
  const adapter = createDmuxTmuxAdapter({
    collectSessionSnapshotImpl: () => ({
      sessionName: 'workflow-visual-proof',
      coordinationDir: '/tmp/.claude/orchestration/workflow-visual-proof',
      repoRoot: '/tmp/repo',
      targetType: 'plan',
      sessionActive: true,
      paneCount: 1,
      workerCount: 1,
      workerStates: { running: 1 },
      panes: [{
        paneId: '%95',
        windowIndex: 1,
        paneIndex: 0,
        title: 'seed-check',
        currentCommand: 'codex',
        currentPath: '/tmp/worktree',
        active: false,
        dead: false,
        pid: 1234
      }],
      workers: [{
        workerSlug: 'seed-check',
        workerDir: '/tmp/.claude/orchestration/workflow-visual-proof/seed-check',
        status: {
          state: 'running',
          updated: '2026-03-13T00:00:00Z',
          branch: 'feature/seed-check',
          worktree: '/tmp/worktree',
          taskFile: '/tmp/task.md',
          handoffFile: '/tmp/handoff.md'
        },
        task: {
          objective: 'Inspect seeded files.',
          seedPaths: ['scripts/orchestrate-worktrees.js']
        },
        handoff: {
          summary: ['Pending'],
          validation: [],
          remainingRisks: ['No screenshot yet']
        },
        files: {
          status: '/tmp/status.md',
          task: '/tmp/task.md',
          handoff: '/tmp/handoff.md'
        },
        pane: {
          paneId: '%95',
          title: 'seed-check'
        }
      }]
    })
  });
  const snapshot = adapter.open('workflow-visual-proof').getSnapshot();
  assert.strictEqual(snapshot.schemaVersion, 'ecc.session.v1');
  assert.strictEqual(snapshot.adapterId, 'dmux-tmux');
  assert.strictEqual(snapshot.session.id, 'workflow-visual-proof');
  assert.strictEqual(snapshot.session.kind, 'orchestrated');
  assert.strictEqual(snapshot.session.sourceTarget.type, 'session');
  assert.strictEqual(snapshot.aggregates.workerCount, 1);
  assert.strictEqual(snapshot.workers[0].runtime.kind, 'tmux-pane');
  assert.strictEqual(snapshot.workers[0].outputs.remainingRisks[0], 'No screenshot yet');
});
// Writes a single markdown session file under a fake HOME and verifies the
// claude-history adapter resolves "claude:latest" to it, parsing the
// metadata headers and Completed section into a one-worker snapshot.
test('claude-history adapter loads the latest recorded session', () => {
  const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-adapter-home-'));
  const sessionsDir = path.join(homeDir, '.claude', 'sessions');
  fs.mkdirSync(sessionsDir, { recursive: true });
  const sessionPath = path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp');
  fs.writeFileSync(sessionPath, [
    '# Session Review',
    '',
    '**Date:** 2026-03-13',
    '**Started:** 09:00',
    '**Last Updated:** 11:30',
    '**Project:** everything-claude-code',
    '**Branch:** feat/session-adapter',
    '**Worktree:** /tmp/ecc-worktree',
    '',
    '### Completed',
    '- [x] Build snapshot prototype',
    '',
    '### In Progress',
    '- [ ] Add CLI wrapper',
    '',
    '### Notes for Next Session',
    'Need a second adapter.',
    '',
    '### Context to Load',
    '```',
    'scripts/lib/orchestration-session.js',
    '```'
  ].join('\n'));
  try {
    // HOME must point at the fixture while the adapter scans for sessions.
    withHome(homeDir, () => {
      const adapter = createClaudeHistoryAdapter();
      const snapshot = adapter.open('claude:latest').getSnapshot();
      assert.strictEqual(snapshot.schemaVersion, 'ecc.session.v1');
      assert.strictEqual(snapshot.adapterId, 'claude-history');
      assert.strictEqual(snapshot.session.kind, 'history');
      assert.strictEqual(snapshot.session.state, 'recorded');
      assert.strictEqual(snapshot.workers.length, 1);
      assert.strictEqual(snapshot.workers[0].branch, 'feat/session-adapter');
      assert.strictEqual(snapshot.workers[0].worktree, '/tmp/ecc-worktree');
      assert.strictEqual(snapshot.workers[0].runtime.kind, 'claude-session');
      assert.strictEqual(snapshot.workers[0].artifacts.sessionFile, sessionPath);
      assert.ok(snapshot.workers[0].outputs.summary.includes('Build snapshot prototype'));
    });
  } finally {
    fs.rmSync(homeDir, { recursive: true, force: true });
  }
});
// Registers both adapters and checks routing: an orchestration plan file
// should resolve to the dmux adapter while an explicit "claude:" target
// should resolve to the history adapter, each producing its own snapshot.
test('adapter registry routes plan files to dmux and explicit claude targets to history', () => {
  const repoRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-registry-repo-'));
  const planPath = path.join(repoRoot, 'workflow.json');
  fs.writeFileSync(planPath, JSON.stringify({
    sessionName: 'workflow-visual-proof',
    repoRoot,
    coordinationRoot: path.join(repoRoot, '.claude', 'orchestration')
  }));
  const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-registry-home-'));
  const sessionsDir = path.join(homeDir, '.claude', 'sessions');
  fs.mkdirSync(sessionsDir, { recursive: true });
  fs.writeFileSync(
    path.join(sessionsDir, '2026-03-13-z9y8x7w6-session.tmp'),
    '# History Session\n\n**Branch:** feat/history\n'
  );
  try {
    withHome(homeDir, () => {
      const registry = createAdapterRegistry({
        adapters: [
          // dmux adapter's collector is stubbed so no tmux is required.
          createDmuxTmuxAdapter({
            collectSessionSnapshotImpl: () => ({
              sessionName: 'workflow-visual-proof',
              coordinationDir: path.join(repoRoot, '.claude', 'orchestration', 'workflow-visual-proof'),
              repoRoot,
              targetType: 'plan',
              sessionActive: false,
              paneCount: 0,
              workerCount: 0,
              workerStates: {},
              panes: [],
              workers: []
            })
          }),
          createClaudeHistoryAdapter()
        ]
      });
      const dmuxSnapshot = registry.open(planPath, { cwd: repoRoot }).getSnapshot();
      const claudeSnapshot = registry.open('claude:latest', { cwd: repoRoot }).getSnapshot();
      assert.strictEqual(dmuxSnapshot.adapterId, 'dmux-tmux');
      assert.strictEqual(claudeSnapshot.adapterId, 'claude-history');
    });
  } finally {
    fs.rmSync(repoRoot, { recursive: true, force: true });
    fs.rmSync(homeDir, { recursive: true, force: true });
  }
});
console.log(`\n=== Results: ${passed} passed, ${failed} failed ===`);
if (failed > 0) process.exit(1);

View File

@@ -0,0 +1,190 @@
/**
* Tests for scripts/doctor.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Path to the CLI under test and the repo root it lives in.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'doctor.js');
const REPO_ROOT = path.join(__dirname, '..', '..');
// Versions the doctor compares install-state records against; read from the
// live repo so the "healthy" fixtures below always match the current tree.
const CURRENT_PACKAGE_VERSION = JSON.parse(
  fs.readFileSync(path.join(REPO_ROOT, 'package.json'), 'utf8')
).version;
const CURRENT_MANIFEST_VERSION = JSON.parse(
  fs.readFileSync(path.join(REPO_ROOT, 'manifests', 'install-modules.json'), 'utf8')
).version;
const {
  createInstallState,
  writeInstallState,
} = require('../../scripts/lib/install-state');
/**
 * Creates a unique temporary directory whose name starts with prefix.
 * @param {string} prefix - Leading segment of the directory name.
 * @returns {string} Absolute path to the new directory.
 */
function createTempDir(prefix) {
  const template = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(template);
}
/**
 * Removes a temp directory tree; tolerates already-deleted paths.
 * @param {string} dirPath - Directory created by createTempDir.
 */
function cleanup(dirPath) {
  const removal = { recursive: true, force: true };
  fs.rmSync(dirPath, removal);
}
/**
 * Builds an install-state payload from options and persists it at filePath.
 * Thin convenience wrapper over the install-state library helpers.
 *
 * @param {string} filePath - Destination for the serialized state.
 * @param {object} options - Arguments forwarded to createInstallState.
 */
function writeState(filePath, options) {
  writeInstallState(filePath, createInstallState(options));
}
/**
 * Invokes scripts/doctor.js as a child process and captures the outcome.
 * options.homeDir overrides HOME for the child (so the doctor inspects a
 * fixture home), and options.cwd sets its working directory.
 *
 * @param {string[]} [args] - CLI arguments for doctor.js.
 * @param {{homeDir?: string, cwd?: string}} [options]
 * @returns {{code: number, stdout: string, stderr: string}}
 */
function run(args = [], options = {}) {
  const childEnv = { ...process.env };
  childEnv.HOME = options.homeDir || process.env.HOME;
  const spawnOptions = {
    cwd: options.cwd,
    env: childEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
/**
 * Runs one named test case and prints a \u2713/\u2717 result line.
 *
 * @param {string} name - Test label.
 * @param {Function} fn - Body that throws on failure.
 * @returns {boolean} true when the body completed without throwing.
 */
function test(name, fn) {
  let succeeded;
  let caught;
  try {
    fn();
    succeeded = true;
  } catch (error) {
    succeeded = false;
    caught = error;
  }
  if (succeeded) {
    console.log(`  \u2713 ${name}`);
    return true;
  }
  console.log(`  \u2717 ${name}`);
  console.log(`    Error: ${caught.message}`);
  return false;
}
/**
 * Integration tests for scripts/doctor.js: a healthy install must report
 * OK with exit code 0, and an install with missing managed files must
 * surface the issue and exit 1. Exits 0/1 based on overall results.
 */
function runTests() {
  console.log('\n=== Testing doctor.js ===\n');
  let passed = 0;
  let failed = 0;
  // Healthy case: managed file exists and matches the repo source, and the
  // recorded versions match the live repo, so the doctor reports OK.
  if (test('reports a healthy install with exit code 0', () => {
    const homeDir = createTempDir('doctor-home-');
    const projectRoot = createTempDir('doctor-project-');
    try {
      const targetRoot = path.join(homeDir, '.claude');
      const statePath = path.join(targetRoot, 'ecc', 'install-state.json');
      const managedFile = path.join(targetRoot, 'rules', 'common', 'coding-style.md');
      const sourceContent = fs.readFileSync(path.join(REPO_ROOT, 'rules', 'common', 'coding-style.md'), 'utf8');
      fs.mkdirSync(path.dirname(managedFile), { recursive: true });
      fs.writeFileSync(managedFile, sourceContent);
      writeState(statePath, {
        adapter: { id: 'claude-home', target: 'claude', kind: 'home' },
        targetRoot,
        installStatePath: statePath,
        request: {
          profile: null,
          modules: [],
          legacyLanguages: ['typescript'],
          legacyMode: true,
        },
        resolution: {
          selectedModules: ['legacy-claude-rules'],
          skippedModules: [],
        },
        operations: [
          {
            kind: 'copy-file',
            moduleId: 'legacy-claude-rules',
            sourceRelativePath: 'rules/common/coding-style.md',
            destinationPath: managedFile,
            strategy: 'preserve-relative-path',
            ownership: 'managed',
            scaffoldOnly: false,
          },
        ],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const result = run(['--target', 'claude'], { cwd: projectRoot, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(result.stdout.includes('Doctor report'));
      assert.ok(result.stdout.includes('Status: OK'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  // Unhealthy case: the install-state records a managed file that was never
  // written to disk, so the doctor must flag missing-managed-files and exit 1.
  if (test('reports issues and exits 1 for unhealthy installs', () => {
    const homeDir = createTempDir('doctor-home-');
    const projectRoot = createTempDir('doctor-project-');
    try {
      const targetRoot = path.join(projectRoot, '.cursor');
      const statePath = path.join(targetRoot, 'ecc-install-state.json');
      fs.mkdirSync(targetRoot, { recursive: true });
      writeState(statePath, {
        adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' },
        targetRoot,
        installStatePath: statePath,
        request: {
          profile: null,
          modules: ['platform-configs'],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['platform-configs'],
          skippedModules: [],
        },
        operations: [
          {
            kind: 'copy-file',
            moduleId: 'platform-configs',
            sourceRelativePath: '.cursor/hooks.json',
            destinationPath: path.join(targetRoot, 'hooks.json'),
            strategy: 'sync-root-children',
            ownership: 'managed',
            scaffoldOnly: false,
          },
        ],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const result = run(['--target', 'cursor', '--json'], { cwd: projectRoot, homeDir });
      assert.strictEqual(result.code, 1);
      const parsed = JSON.parse(result.stdout);
      assert.strictEqual(parsed.summary.errorCount, 1);
      assert.ok(parsed.results[0].issues.some(issue => issue.code === 'missing-managed-files'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

139
tests/scripts/ecc.test.js Normal file
View File

@@ -0,0 +1,139 @@
/**
* Tests for scripts/ecc.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { spawnSync } = require('child_process');
// Absolute path to the CLI entry point under test.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'ecc.js');
/**
 * Spawns the ecc CLI with the given arguments and returns the raw
 * spawnSync result (status, stdout, stderr). options.cwd sets the child's
 * working directory; options.env is overlaid on the current environment.
 *
 * @param {string[]} args - CLI arguments.
 * @param {{cwd?: string, env?: object}} [options]
 */
function runCli(args, options = {}) {
  return spawnSync('node', [SCRIPT, ...args], {
    encoding: 'utf8',
    cwd: options.cwd || process.cwd(),
    env: {
      ...process.env,
      ...(options.env || {}),
    },
  });
}
/**
 * Creates a unique temp directory whose name begins with prefix.
 * @param {string} prefix - Leading segment of the directory name.
 * @returns {string} Absolute path of the created directory.
 */
function createTempDir(prefix) {
  const base = os.tmpdir();
  return fs.mkdtempSync(path.join(base, prefix));
}
/**
 * Parses a CLI's JSON stdout, tolerating leading/trailing whitespace and
 * the trailing newline most commands emit.
 *
 * @param {string} stdout - Raw captured standard output.
 * @returns {*} Parsed JSON value.
 */
function parseJson(stdout) {
  const trimmed = stdout.trim();
  return JSON.parse(trimmed);
}
/**
 * Runs one named test case.
 *
 * Fix: the original logged the bare test name for both outcomes, so passes
 * and failures were indistinguishable on stdout (only a stderr line told
 * them apart). Prefix the name with the \u2713/\u2717 markers used by the
 * other ECC test harnesses so suite output is consistent and scannable.
 *
 * @param {string} name - Test label.
 * @param {Function} fn - Zero-argument body; throws to signal failure.
 * @returns {boolean} true when the body did not throw.
 */
function runTest(name, fn) {
  try {
    fn();
    console.log(`\u2713 ${name}`);
    return true;
  } catch (error) {
    console.log(`\u2717 ${name}`);
    console.error(`  ${error.message}`);
    return false;
  }
}
/**
 * Runs the ecc.js CLI test suite: help output, command delegation
 * (install/plan/lifecycle/session-inspect), implicit-install routing, and
 * unknown-command rejection. Exits 0/1 based on overall results.
 */
function main() {
  console.log('\n=== Testing ecc.js ===\n');
  let passed = 0;
  let failed = 0;
  // Each entry is [label, body]; bodies spawn the real CLI via runCli.
  const tests = [
    ['shows top-level help', () => {
      const result = runCli(['--help']);
      assert.strictEqual(result.status, 0);
      assert.match(result.stdout, /ECC selective-install CLI/);
      assert.match(result.stdout, /list-installed/);
      assert.match(result.stdout, /doctor/);
    }],
    ['delegates explicit install command', () => {
      const result = runCli(['install', '--dry-run', '--json', 'typescript']);
      assert.strictEqual(result.status, 0, result.stderr);
      const payload = parseJson(result.stdout);
      assert.strictEqual(payload.dryRun, true);
      assert.strictEqual(payload.plan.mode, 'legacy');
      assert.deepStrictEqual(payload.plan.languages, ['typescript']);
    }],
    // Bare language args with no subcommand should behave like `install`.
    ['routes implicit top-level args to install', () => {
      const result = runCli(['--dry-run', '--json', 'typescript']);
      assert.strictEqual(result.status, 0, result.stderr);
      const payload = parseJson(result.stdout);
      assert.strictEqual(payload.dryRun, true);
      assert.strictEqual(payload.plan.mode, 'legacy');
      assert.deepStrictEqual(payload.plan.languages, ['typescript']);
    }],
    ['delegates plan command', () => {
      const result = runCli(['plan', '--list-profiles', '--json']);
      assert.strictEqual(result.status, 0, result.stderr);
      const payload = parseJson(result.stdout);
      assert.ok(Array.isArray(payload.profiles));
      assert.ok(payload.profiles.length > 0);
    }],
    // Fresh HOME/project means list-installed should find no records.
    ['delegates lifecycle commands', () => {
      const homeDir = createTempDir('ecc-cli-home-');
      const projectRoot = createTempDir('ecc-cli-project-');
      const result = runCli(['list-installed', '--json'], {
        cwd: projectRoot,
        env: { HOME: homeDir },
      });
      assert.strictEqual(result.status, 0, result.stderr);
      const payload = parseJson(result.stdout);
      assert.deepStrictEqual(payload.records, []);
    }],
    // session-inspect should resolve claude:latest against the fixture HOME.
    ['delegates session-inspect command', () => {
      const homeDir = createTempDir('ecc-cli-home-');
      const sessionsDir = path.join(homeDir, '.claude', 'sessions');
      fs.mkdirSync(sessionsDir, { recursive: true });
      fs.writeFileSync(
        path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp'),
        '# ECC Session\n\n**Branch:** feat/ecc-cli\n'
      );
      const result = runCli(['session-inspect', 'claude:latest'], {
        env: { HOME: homeDir },
      });
      assert.strictEqual(result.status, 0, result.stderr);
      const payload = parseJson(result.stdout);
      assert.strictEqual(payload.adapterId, 'claude-history');
      assert.strictEqual(payload.workers[0].branch, 'feat/ecc-cli');
    }],
    ['supports help for a subcommand', () => {
      const result = runCli(['help', 'repair']);
      assert.strictEqual(result.status, 0, result.stderr);
      assert.match(result.stdout, /Usage: node scripts\/repair\.js/);
    }],
    // Unknown tokens must error rather than fall through to install.
    ['fails on unknown commands instead of treating them as installs', () => {
      const result = runCli(['bogus']);
      assert.strictEqual(result.status, 1);
      assert.match(result.stderr, /Unknown command: bogus/);
    }],
    ['fails on unknown help subcommands', () => {
      const result = runCli(['help', 'bogus']);
      assert.strictEqual(result.status, 1);
      assert.match(result.stderr, /Unknown command: bogus/);
    }],
  ];
  for (const [name, fn] of tests) {
    if (runTest(name, fn)) {
      passed += 1;
    } else {
      failed += 1;
    }
  }
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
main();

View File

@@ -0,0 +1,309 @@
/**
* Tests for scripts/install-apply.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute path to the installer entry point under test.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-apply.js');
/**
 * Creates a unique temporary directory named after prefix.
 * @param {string} prefix - Leading segment of the directory name.
 * @returns {string} Absolute path to the new directory.
 */
function createTempDir(prefix) {
  const parent = os.tmpdir();
  const template = path.join(parent, prefix);
  return fs.mkdtempSync(template);
}
/**
 * Deletes a temp directory tree; safe to call on already-removed paths.
 * @param {string} dirPath - Directory created by createTempDir.
 */
function cleanup(dirPath) {
  fs.rmSync(dirPath, { force: true, recursive: true });
}
/**
 * Reads and parses a UTF-8 JSON document from disk.
 * @param {string} filePath - Path to the JSON file.
 * @returns {*} Parsed JSON value.
 */
function readJson(filePath) {
  const raw = fs.readFileSync(filePath, 'utf8');
  return JSON.parse(raw);
}
/**
 * Runs scripts/install-apply.js in a child process and captures the result.
 * options.homeDir overrides HOME, options.env overlays further variables on
 * top of that (so explicit env entries win), and options.cwd sets the
 * child's working directory.
 *
 * @param {string[]} [args] - CLI arguments for install-apply.js.
 * @param {{homeDir?: string, cwd?: string, env?: object}} [options]
 * @returns {{code: number, stdout: string, stderr: string}}
 */
function run(args = [], options = {}) {
  const childEnv = { ...process.env };
  childEnv.HOME = options.homeDir || process.env.HOME;
  Object.assign(childEnv, options.env || {});
  const spawnOptions = {
    cwd: options.cwd,
    env: childEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
/**
 * Runs one named test case, printing an indented \u2713/\u2717 marker line.
 *
 * @param {string} name - Test label.
 * @param {Function} fn - Body that throws to signal failure.
 * @returns {boolean} true when the body completed without throwing.
 */
function test(name, fn) {
  const report = (marker, label) => console.log(`  ${marker} ${label}`);
  try {
    fn();
    report('\u2713', name);
    return true;
  } catch (error) {
    report('\u2717', name);
    console.log(`    Error: ${error.message}`);
    return false;
  }
}
/**
 * End-to-end tests for scripts/install-apply.js across targets (claude,
 * cursor, antigravity), modes (legacy languages, manifest profiles,
 * explicit modules, config file), and dry-run behavior. Each case runs the
 * real installer against throwaway HOME/project directories and inspects
 * both the filesystem and the recorded install-state. Exits 0/1 overall.
 */
function runTests() {
  console.log('\n=== Testing install-apply.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('shows help with --help', () => {
    const result = run(['--help']);
    assert.strictEqual(result.code, 0);
    assert.ok(result.stdout.includes('Usage:'));
    assert.ok(result.stdout.includes('--dry-run'));
    assert.ok(result.stdout.includes('--profile <name>'));
    assert.ok(result.stdout.includes('--modules <id,id,...>'));
  })) passed++; else failed++;
  // Legacy language args and manifest flags are mutually exclusive.
  if (test('rejects mixing legacy languages with manifest profile flags', () => {
    const result = run(['--profile', 'core', 'typescript']);
    assert.strictEqual(result.code, 1);
    assert.ok(result.stderr.includes('cannot be combined'));
  })) passed++; else failed++;
  // Legacy mode, default target: rules land in HOME/.claude with state.
  if (test('installs Claude rules and writes install-state', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['typescript'], { cwd: projectDir, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      const rulesDir = path.join(homeDir, '.claude', 'rules');
      assert.ok(fs.existsSync(path.join(rulesDir, 'common', 'coding-style.md')));
      assert.ok(fs.existsSync(path.join(rulesDir, 'typescript', 'testing.md')));
      const statePath = path.join(homeDir, '.claude', 'ecc', 'install-state.json');
      const state = readJson(statePath);
      assert.strictEqual(state.target.id, 'claude-home');
      assert.deepStrictEqual(state.request.legacyLanguages, ['typescript']);
      assert.strictEqual(state.request.legacyMode, true);
      assert.ok(
        state.operations.some(operation => (
          operation.destinationPath === path.join(rulesDir, 'common', 'coding-style.md')
        )),
        'Should record common rule file operation'
      );
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Cursor is project-scoped; note realpathSync to normalize tmpdir symlinks
  // before comparing recorded paths.
  if (test('installs Cursor configs and writes install-state', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['--target', 'cursor', 'typescript'], { cwd: projectDir, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-coding-style.md')));
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'typescript-testing.md')));
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks', 'session-start.js')));
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'skills', 'article-writing', 'SKILL.md')));
      const statePath = path.join(projectDir, '.cursor', 'ecc-install-state.json');
      const state = readJson(statePath);
      const normalizedProjectDir = fs.realpathSync(projectDir);
      assert.strictEqual(state.target.id, 'cursor-project');
      assert.strictEqual(state.target.root, path.join(normalizedProjectDir, '.cursor'));
      assert.ok(
        state.operations.some(operation => (
          operation.destinationPath === path.join(normalizedProjectDir, '.cursor', 'hooks', 'session-start.js')
        )),
        'Should record hook file copy operation'
      );
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Antigravity target installs under <project>/.agent.
  if (test('installs Antigravity configs and writes install-state', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['--target', 'antigravity', 'typescript'], { cwd: projectDir, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'rules', 'common-coding-style.md')));
      assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'rules', 'typescript-testing.md')));
      assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'workflows', 'code-review.md')));
      assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'skills', 'architect.md')));
      assert.ok(fs.existsSync(path.join(projectDir, '.agent', 'skills', 'article-writing', 'SKILL.md')));
      const statePath = path.join(projectDir, '.agent', 'ecc-install-state.json');
      const state = readJson(statePath);
      assert.strictEqual(state.target.id, 'antigravity-project');
      assert.ok(
        state.operations.some(operation => (
          operation.destinationPath.endsWith(path.join('.agent', 'workflows', 'code-review.md'))
        )),
        'Should record workflow file copy operation'
      );
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Dry-run must report the plan but leave the project untouched.
  if (test('supports dry-run without mutating the target project', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['--target', 'cursor', '--dry-run', 'typescript'], {
        cwd: projectDir,
        homeDir,
      });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(result.stdout.includes('Dry-run install plan'));
      assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
      assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'ecc-install-state.json')));
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Manifest-profile dry-run prints the resolved module set, writes nothing.
  if (test('supports manifest profile dry-runs through the installer', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['--profile', 'core', '--dry-run'], { cwd: projectDir, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(result.stdout.includes('Mode: manifest'));
      assert.ok(result.stdout.includes('Profile: core'));
      assert.ok(result.stdout.includes('Included components: (none)'));
      assert.ok(result.stdout.includes('Selected modules: rules-core, agents-core, commands-core, hooks-runtime, platform-configs, workflow-quality'));
      assert.ok(!fs.existsSync(path.join(homeDir, '.claude', 'ecc', 'install-state.json')));
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Real manifest-profile install: files land in HOME/.claude and the
  // recorded state reflects a non-legacy request.
  if (test('installs manifest profiles and writes non-legacy install-state', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['--profile', 'core'], { cwd: projectDir, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      const claudeRoot = path.join(homeDir, '.claude');
      assert.ok(fs.existsSync(path.join(claudeRoot, 'rules', 'common', 'coding-style.md')));
      assert.ok(fs.existsSync(path.join(claudeRoot, 'agents', 'architect.md')));
      assert.ok(fs.existsSync(path.join(claudeRoot, 'commands', 'plan.md')));
      assert.ok(fs.existsSync(path.join(claudeRoot, 'hooks', 'hooks.json')));
      assert.ok(fs.existsSync(path.join(claudeRoot, 'scripts', 'hooks', 'session-end.js')));
      assert.ok(fs.existsSync(path.join(claudeRoot, 'plugin.json')));
      const state = readJson(path.join(claudeRoot, 'ecc', 'install-state.json'));
      assert.strictEqual(state.request.profile, 'core');
      assert.strictEqual(state.request.legacyMode, false);
      assert.deepStrictEqual(state.request.legacyLanguages, []);
      assert.ok(state.resolution.selectedModules.includes('platform-configs'));
      assert.ok(
        state.operations.some(operation => (
          operation.destinationPath === path.join(claudeRoot, 'commands', 'plan.md')
        )),
        'Should record manifest-driven command file copy'
      );
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Explicit --modules selection; the generated state file itself must not
  // appear among the recorded copy operations.
  if (test('installs explicit modules for cursor using manifest operations', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    try {
      const result = run(['--target', 'cursor', '--modules', 'platform-configs'], {
        cwd: projectDir,
        homeDir,
      });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
      assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.md')));
      const state = readJson(path.join(projectDir, '.cursor', 'ecc-install-state.json'));
      assert.strictEqual(state.request.profile, null);
      assert.deepStrictEqual(state.request.modules, ['platform-configs']);
      assert.deepStrictEqual(state.request.includeComponents, []);
      assert.deepStrictEqual(state.request.excludeComponents, []);
      assert.strictEqual(state.request.legacyMode, false);
      assert.ok(state.resolution.selectedModules.includes('platform-configs'));
      assert.ok(
        !state.operations.some(operation => operation.destinationPath.endsWith('ecc-install-state.json')),
        'Manifest copy operations should not include generated install-state files'
      );
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  // Config-file driven install with include/exclude component filters.
  if (test('installs from ecc-install.json and persists component selections', () => {
    const homeDir = createTempDir('install-apply-home-');
    const projectDir = createTempDir('install-apply-project-');
    const configPath = path.join(projectDir, 'ecc-install.json');
    try {
      fs.writeFileSync(configPath, JSON.stringify({
        version: 1,
        target: 'claude',
        profile: 'developer',
        include: ['capability:security'],
        exclude: ['capability:orchestration'],
      }, null, 2));
      const result = run(['--config', configPath], { cwd: projectDir, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(fs.existsSync(path.join(homeDir, '.claude', 'skills', 'security-review', 'SKILL.md')));
      assert.ok(!fs.existsSync(path.join(homeDir, '.claude', 'skills', 'dmux-workflows', 'SKILL.md')));
      const state = readJson(path.join(homeDir, '.claude', 'ecc', 'install-state.json'));
      assert.strictEqual(state.request.profile, 'developer');
      assert.deepStrictEqual(state.request.includeComponents, ['capability:security']);
      assert.deepStrictEqual(state.request.excludeComponents, ['capability:orchestration']);
      assert.ok(state.resolution.selectedModules.includes('security'));
      assert.ok(!state.resolution.selectedModules.includes('orchestration'));
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,154 @@
/**
* Tests for scripts/install-plan.js
*/
const assert = require('assert');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute path to the manifest-inspection CLI under test.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-plan.js');
// Execute install-plan.js with `args`, normalizing both success and failure
// into a { code, stdout, stderr } record so callers never need try/catch.
function run(args = []) {
  const spawnOptions = {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // execFileSync throws on non-zero exit; surface the captured streams.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: runs every install-plan.js CLI scenario in order, tallies
// pass/fail counts, and exits with code 1 when any case failed so CI
// surfaces the failure.
function runTests() {
  console.log('\n=== Testing install-plan.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('shows help with no arguments', () => {
    const result = run();
    assert.strictEqual(result.code, 0);
    assert.ok(result.stdout.includes('Inspect ECC selective-install manifests'));
  })) passed++; else failed++;
  if (test('lists install profiles', () => {
    const result = run(['--list-profiles']);
    assert.strictEqual(result.code, 0);
    assert.ok(result.stdout.includes('Install profiles'));
    assert.ok(result.stdout.includes('core'));
  })) passed++; else failed++;
  if (test('lists install modules', () => {
    const result = run(['--list-modules']);
    assert.strictEqual(result.code, 0);
    assert.ok(result.stdout.includes('Install modules'));
    assert.ok(result.stdout.includes('rules-core'));
  })) passed++; else failed++;
  if (test('lists install components', () => {
    const result = run(['--list-components', '--family', 'language']);
    assert.strictEqual(result.code, 0);
    assert.ok(result.stdout.includes('Install components'));
    assert.ok(result.stdout.includes('lang:typescript'));
    // --family must filter out components from other families.
    assert.ok(!result.stdout.includes('capability:security'));
  })) passed++; else failed++;
  if (test('prints a filtered install plan for a profile and target', () => {
    const result = run([
      '--profile', 'developer',
      '--with', 'capability:security',
      '--without', 'capability:orchestration',
      '--target', 'cursor'
    ]);
    assert.strictEqual(result.code, 0);
    assert.ok(result.stdout.includes('Install plan'));
    assert.ok(result.stdout.includes('Included components: capability:security'));
    assert.ok(result.stdout.includes('Excluded components: capability:orchestration'));
    assert.ok(result.stdout.includes('Adapter: cursor-project'));
    assert.ok(result.stdout.includes('Target root:'));
    assert.ok(result.stdout.includes('Install-state:'));
    assert.ok(result.stdout.includes('Operation plan'));
    assert.ok(result.stdout.includes('Excluded by selection'));
    assert.ok(result.stdout.includes('security'));
  })) passed++; else failed++;
  if (test('emits JSON for explicit module resolution', () => {
    const result = run([
      '--modules', 'security',
      '--with', 'capability:research',
      '--target', 'cursor',
      '--json'
    ]);
    assert.strictEqual(result.code, 0);
    const parsed = JSON.parse(result.stdout);
    assert.ok(parsed.selectedModuleIds.includes('security'));
    assert.ok(parsed.selectedModuleIds.includes('research-apis'));
    assert.ok(parsed.selectedModuleIds.includes('workflow-quality'));
    assert.deepStrictEqual(parsed.includedComponentIds, ['capability:research']);
    assert.strictEqual(parsed.targetAdapterId, 'cursor-project');
    assert.ok(Array.isArray(parsed.operations));
    assert.ok(parsed.operations.length > 0);
  })) passed++; else failed++;
  if (test('loads planning intent from ecc-install.json', () => {
    // Hoist the fs require once instead of re-requiring it on every use.
    const fs = require('fs');
    const configDir = path.join(__dirname, '..', 'fixtures', 'tmp-install-plan-config');
    const configPath = path.join(configDir, 'ecc-install.json');
    try {
      fs.mkdirSync(configDir, { recursive: true });
      fs.writeFileSync(configPath, JSON.stringify({
        version: 1,
        target: 'cursor',
        profile: 'core',
        include: ['capability:security'],
        exclude: ['capability:orchestration'],
      }, null, 2));
      const result = run(['--config', configPath, '--json']);
      assert.strictEqual(result.code, 0);
      const parsed = JSON.parse(result.stdout);
      assert.strictEqual(parsed.target, 'cursor');
      assert.deepStrictEqual(parsed.includedComponentIds, ['capability:security']);
      assert.deepStrictEqual(parsed.excludedComponentIds, ['capability:orchestration']);
      assert.ok(parsed.selectedModuleIds.includes('security'));
      assert.ok(!parsed.selectedModuleIds.includes('orchestration'));
    } finally {
      // Always remove the temporary fixture dir, even on assertion failure.
      fs.rmSync(configDir, { recursive: true, force: true });
    }
  })) passed++; else failed++;
  if (test('fails on unknown arguments', () => {
    const result = run(['--unknown-flag']);
    assert.strictEqual(result.code, 1);
    assert.ok(result.stderr.includes('Unknown argument'));
  })) passed++; else failed++;
  if (test('fails on invalid install target', () => {
    const result = run(['--profile', 'core', '--target', 'not-a-target']);
    assert.strictEqual(result.code, 1);
    assert.ok(result.stderr.includes('Unknown install target'));
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,87 @@
/**
* Tests for install.sh wrapper delegation
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute path to the install.sh wrapper under test.
const SCRIPT = path.join(__dirname, '..', '..', 'install.sh');
// Create a unique scratch directory under the OS temp root, named with `prefix`.
function createTempDir(prefix) {
  const base = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(base);
}
// Remove a scratch directory tree; force:true makes missing paths a no-op.
function cleanup(dirPath) {
  const removalOptions = { recursive: true, force: true };
  fs.rmSync(dirPath, removalOptions);
}
// Invoke the install.sh wrapper through bash with an optionally overridden
// HOME and cwd, returning a { code, stdout, stderr } record for assertions.
function run(args = [], options = {}) {
  const childEnv = {
    ...process.env,
    HOME: options.homeDir || process.env.HOME,
  };
  const spawnOptions = {
    cwd: options.cwd,
    env: childEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('bash', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // Non-zero exits throw; normalize them into the same record shape.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: runs the install.sh wrapper scenario, then exits non-zero
// if it failed so CI marks the run red.
function runTests() {
  console.log('\n=== Testing install.sh ===\n');
  let passed = 0;
  let failed = 0;
  if (test('delegates to the Node installer and preserves dry-run output', () => {
    // Isolated HOME and project dirs keep the wrapper from touching real state.
    const homeDir = createTempDir('install-sh-home-');
    const projectDir = createTempDir('install-sh-project-');
    try {
      const result = run(['--target', 'cursor', '--dry-run', 'typescript'], {
        cwd: projectDir,
        homeDir,
      });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(result.stdout.includes('Dry-run install plan'));
      // A dry run must not materialize any files in the project.
      assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
    } finally {
      cleanup(homeDir);
      cleanup(projectDir);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,139 @@
/**
* Tests for scripts/list-installed.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Path to the CLI under test and the repository root it lives in.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'list-installed.js');
const REPO_ROOT = path.join(__dirname, '..', '..');
// Versions read from the current checkout so fixture install-state records
// look like they were written by this repo version.
// NOTE(review): assumes list-installed.js checks these against the checkout —
// confirm in the script.
const CURRENT_PACKAGE_VERSION = JSON.parse(
  fs.readFileSync(path.join(REPO_ROOT, 'package.json'), 'utf8')
).version;
const CURRENT_MANIFEST_VERSION = JSON.parse(
  fs.readFileSync(path.join(REPO_ROOT, 'manifests', 'install-modules.json'), 'utf8')
).version;
// Real installer-library helpers used to author valid fixture state files.
const {
  createInstallState,
  writeInstallState,
} = require('../../scripts/lib/install-state');
// Create a unique scratch directory under the OS temp root, named with `prefix`.
function createTempDir(prefix) {
  const base = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(base);
}
// Remove a scratch directory tree; force:true makes missing paths a no-op.
function cleanup(dirPath) {
  const removalOptions = { recursive: true, force: true };
  fs.rmSync(dirPath, removalOptions);
}
// Build an install-state record from `options` and persist it at `filePath`.
function writeState(filePath, options) {
  writeInstallState(filePath, createInstallState(options));
}
// Run list-installed.js with an optionally overridden HOME and cwd, returning
// { code, stdout, stderr } so assertions never need try/catch.
function run(args = [], options = {}) {
  const childEnv = {
    ...process.env,
    HOME: options.homeDir || process.env.HOME,
  };
  const spawnOptions = {
    cwd: options.cwd,
    env: childEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // Non-zero exits throw; normalize them into the same record shape.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: runs each list-installed.js scenario in order, tallies
// pass/fail counts, and exits with code 1 when any test failed.
function runTests() {
  console.log('\n=== Testing list-installed.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('reports when no install-state files are present', () => {
    const homeDir = createTempDir('list-installed-home-');
    const projectRoot = createTempDir('list-installed-project-');
    try {
      const result = run([], { cwd: projectRoot, homeDir });
      assert.strictEqual(result.code, 0);
      assert.ok(result.stdout.includes('No ECC install-state files found'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  if (test('emits JSON for discovered install-state records', () => {
    const homeDir = createTempDir('list-installed-home-');
    const projectRoot = createTempDir('list-installed-project-');
    try {
      // Author a valid fixture state file via the real installer library.
      const statePath = path.join(projectRoot, '.cursor', 'ecc-install-state.json');
      writeState(statePath, {
        adapter: { id: 'cursor-project', target: 'cursor', kind: 'project' },
        targetRoot: path.join(projectRoot, '.cursor'),
        installStatePath: statePath,
        request: {
          profile: 'core',
          modules: [],
          legacyLanguages: [],
          legacyMode: false,
        },
        resolution: {
          selectedModules: ['rules-core', 'platform-configs'],
          skippedModules: [],
        },
        operations: [],
        source: {
          repoVersion: CURRENT_PACKAGE_VERSION,
          repoCommit: 'abc123',
          manifestVersion: CURRENT_MANIFEST_VERSION,
        },
      });
      const result = run(['--json'], { cwd: projectRoot, homeDir });
      assert.strictEqual(result.code, 0, result.stderr);
      const parsed = JSON.parse(result.stdout);
      assert.strictEqual(parsed.records.length, 1);
      assert.strictEqual(parsed.records[0].state.target.id, 'cursor-project');
      assert.strictEqual(parsed.records[0].state.request.profile, 'core');
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,76 @@
/**
* Tests for scripts/orchestration-status.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute path to the orchestration-status CLI under test.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'orchestration-status.js');
// Run orchestration-status.js with the given args; never throws — failures
// come back as { code, stdout, stderr } so tests can assert on them.
function run(args = [], options = {}) {
  const spawnOptions = {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
    cwd: options.cwd || process.cwd(),
  };
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // execFileSync throws on non-zero exit; surface the captured streams.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: runs the orchestration-status snapshot scenario and exits
// non-zero if it failed so CI marks the run red.
function runTests() {
  console.log('\n=== Testing orchestration-status.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('emits canonical dmux snapshots for plan files', () => {
    // Fake repo root with a minimal plan file pointing at it.
    const repoRoot = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-orch-status-repo-'));
    try {
      const planPath = path.join(repoRoot, 'workflow.json');
      fs.writeFileSync(planPath, JSON.stringify({
        sessionName: 'workflow-visual-proof',
        repoRoot,
        coordinationRoot: path.join(repoRoot, '.claude', 'orchestration')
      }));
      const result = run([planPath], { cwd: repoRoot });
      assert.strictEqual(result.code, 0, result.stderr);
      const payload = JSON.parse(result.stdout);
      assert.strictEqual(payload.adapterId, 'dmux-tmux');
      assert.strictEqual(payload.session.id, 'workflow-visual-proof');
      assert.strictEqual(payload.session.sourceTarget.type, 'plan');
    } finally {
      fs.rmSync(repoRoot, { recursive: true, force: true });
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,159 @@
/**
* Tests for scripts/repair.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute paths to the CLIs exercised by these tests, plus the repo root
// used to read the pristine copies of managed files.
const INSTALL_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-apply.js');
const DOCTOR_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'doctor.js');
const REPAIR_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'repair.js');
const REPO_ROOT = path.join(__dirname, '..', '..');
// Create a unique scratch directory under the OS temp root, named with `prefix`.
function createTempDir(prefix) {
  const base = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(base);
}
// Remove a scratch directory tree; force:true makes missing paths a no-op.
function cleanup(dirPath) {
  const removalOptions = { recursive: true, force: true };
  fs.rmSync(dirPath, removalOptions);
}
// Run a Node script with an optionally overridden HOME and cwd, returning
// a { code, stdout, stderr } record whether the child succeeds or fails.
function runNode(scriptPath, args = [], options = {}) {
  const childEnv = {
    ...process.env,
    HOME: options.homeDir || process.env.HOME,
  };
  const spawnOptions = {
    cwd: options.cwd,
    env: childEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('node', [scriptPath, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // Non-zero exits throw; normalize them into the same record shape.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: installs into a scratch project, drifts a managed file, and
// verifies doctor/repair round-trips. Exits non-zero when any case failed.
function runTests() {
  console.log('\n=== Testing repair.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('repairs drifted managed files and refreshes install-state', () => {
    const homeDir = createTempDir('repair-home-');
    const projectRoot = createTempDir('repair-project-');
    try {
      const installResult = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(installResult.code, 0, installResult.stderr);
      const cursorRoot = path.join(projectRoot, '.cursor');
      const managedPath = path.join(cursorRoot, 'hooks.json');
      const statePath = path.join(cursorRoot, 'ecc-install-state.json');
      // Resolve symlinks (e.g. /tmp on macOS) so path comparisons match the
      // canonical paths repair.js reports.
      const managedRealPath = fs.realpathSync(cursorRoot);
      const expectedManagedPath = path.join(managedRealPath, 'hooks.json');
      const expectedContent = fs.readFileSync(path.join(REPO_ROOT, '.cursor', 'hooks.json'), 'utf8');
      const installedAtBefore = JSON.parse(fs.readFileSync(statePath, 'utf8')).installedAt;
      // Simulate drift by overwriting the managed file.
      fs.writeFileSync(managedPath, '{"drifted":true}\n');
      const doctorBefore = runNode(DOCTOR_SCRIPT, ['--target', 'cursor', '--json'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(doctorBefore.code, 1);
      assert.ok(JSON.parse(doctorBefore.stdout).results[0].issues.some(issue => issue.code === 'drifted-managed-files'));
      const repairResult = runNode(REPAIR_SCRIPT, ['--target', 'cursor', '--json'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(repairResult.code, 0, repairResult.stderr);
      const parsed = JSON.parse(repairResult.stdout);
      assert.strictEqual(parsed.results[0].status, 'repaired');
      assert.ok(parsed.results[0].repairedPaths.includes(expectedManagedPath));
      assert.strictEqual(fs.readFileSync(managedPath, 'utf8'), expectedContent);
      // Repair must preserve installedAt while stamping lastValidatedAt.
      const repairedState = JSON.parse(fs.readFileSync(statePath, 'utf8'));
      assert.strictEqual(repairedState.installedAt, installedAtBefore);
      assert.ok(repairedState.lastValidatedAt);
      const doctorAfter = runNode(DOCTOR_SCRIPT, ['--target', 'cursor'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(doctorAfter.code, 0, doctorAfter.stderr);
      assert.ok(doctorAfter.stdout.includes('Status: OK'));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  if (test('supports dry-run without mutating drifted files', () => {
    const homeDir = createTempDir('repair-home-');
    const projectRoot = createTempDir('repair-project-');
    try {
      const installResult = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(installResult.code, 0, installResult.stderr);
      const cursorRoot = path.join(projectRoot, '.cursor');
      const managedPath = path.join(cursorRoot, 'hooks.json');
      const managedRealPath = fs.realpathSync(cursorRoot);
      const expectedManagedPath = path.join(managedRealPath, 'hooks.json');
      const driftedContent = '{"drifted":true}\n';
      fs.writeFileSync(managedPath, driftedContent);
      const repairResult = runNode(REPAIR_SCRIPT, ['--target', 'cursor', '--dry-run', '--json'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(repairResult.code, 0, repairResult.stderr);
      const parsed = JSON.parse(repairResult.stdout);
      assert.strictEqual(parsed.dryRun, true);
      assert.ok(parsed.results[0].plannedRepairs.includes(expectedManagedPath));
      // Dry-run must leave the drifted content untouched.
      assert.strictEqual(fs.readFileSync(managedPath, 'utf8'), driftedContent);
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,116 @@
/**
* Tests for scripts/session-inspect.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute path to the session-inspect CLI under test.
const SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'session-inspect.js');
// Run session-inspect.js with optional cwd and env overlays (merged over the
// parent env), returning { code, stdout, stderr } for easy assertions.
function run(args = [], options = {}) {
  const spawnOptions = {
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
    cwd: options.cwd || process.cwd(),
    env: {
      ...process.env,
      ...(options.env || {})
    }
  };
  try {
    const stdout = execFileSync('node', [SCRIPT, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // Non-zero exits throw; normalize them into the same record shape.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: runs each session-inspect.js scenario against a fixture HOME
// containing one fake Claude session, then exits non-zero on any failure.
function runTests() {
  console.log('\n=== Testing session-inspect.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('shows usage when no target is provided', () => {
    const result = run();
    assert.strictEqual(result.code, 1);
    assert.ok(result.stdout.includes('Usage:'));
  })) passed++; else failed++;
  if (test('prints canonical JSON for claude history targets', () => {
    // Fixture HOME with a single session file for claude:latest to resolve.
    const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-inspect-home-'));
    const sessionsDir = path.join(homeDir, '.claude', 'sessions');
    fs.mkdirSync(sessionsDir, { recursive: true });
    try {
      fs.writeFileSync(
        path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp'),
        '# Inspect Session\n\n**Branch:** feat/session-inspect\n'
      );
      const result = run(['claude:latest'], {
        env: { HOME: homeDir }
      });
      assert.strictEqual(result.code, 0, result.stderr);
      const payload = JSON.parse(result.stdout);
      assert.strictEqual(payload.adapterId, 'claude-history');
      assert.strictEqual(payload.session.kind, 'history');
      assert.strictEqual(payload.workers[0].branch, 'feat/session-inspect');
    } finally {
      fs.rmSync(homeDir, { recursive: true, force: true });
    }
  })) passed++; else failed++;
  if (test('writes snapshot JSON to disk when --write is provided', () => {
    const homeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-inspect-home-'));
    const outputDir = fs.mkdtempSync(path.join(os.tmpdir(), 'ecc-session-inspect-out-'));
    const sessionsDir = path.join(homeDir, '.claude', 'sessions');
    fs.mkdirSync(sessionsDir, { recursive: true });
    const outputPath = path.join(outputDir, 'snapshot.json');
    try {
      fs.writeFileSync(
        path.join(sessionsDir, '2026-03-13-a1b2c3d4-session.tmp'),
        '# Inspect Session\n\n**Branch:** feat/session-inspect\n'
      );
      const result = run(['claude:latest', '--write', outputPath], {
        env: { HOME: homeDir }
      });
      assert.strictEqual(result.code, 0, result.stderr);
      assert.ok(fs.existsSync(outputPath));
      const written = JSON.parse(fs.readFileSync(outputPath, 'utf8'));
      assert.strictEqual(written.adapterId, 'claude-history');
    } finally {
      fs.rmSync(homeDir, { recursive: true, force: true });
      fs.rmSync(outputDir, { recursive: true, force: true });
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();

View File

@@ -0,0 +1,133 @@
/**
* Tests for scripts/uninstall.js
*/
const assert = require('assert');
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execFileSync } = require('child_process');
// Absolute paths to the installer and uninstaller CLIs under test.
const INSTALL_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'install-apply.js');
const UNINSTALL_SCRIPT = path.join(__dirname, '..', '..', 'scripts', 'uninstall.js');
// Create a unique scratch directory under the OS temp root, named with `prefix`.
function createTempDir(prefix) {
  const base = path.join(os.tmpdir(), prefix);
  return fs.mkdtempSync(base);
}
// Remove a scratch directory tree; force:true makes missing paths a no-op.
function cleanup(dirPath) {
  const removalOptions = { recursive: true, force: true };
  fs.rmSync(dirPath, removalOptions);
}
// Run a Node script with an optionally overridden HOME and cwd, returning
// a { code, stdout, stderr } record whether the child succeeds or fails.
function runNode(scriptPath, args = [], options = {}) {
  const childEnv = {
    ...process.env,
    HOME: options.homeDir || process.env.HOME,
  };
  const spawnOptions = {
    cwd: options.cwd,
    env: childEnv,
    encoding: 'utf8',
    stdio: ['pipe', 'pipe', 'pipe'],
    timeout: 10000,
  };
  try {
    const stdout = execFileSync('node', [scriptPath, ...args], spawnOptions);
    return { code: 0, stdout, stderr: '' };
  } catch (error) {
    // Non-zero exits throw; normalize them into the same record shape.
    const code = error.status || 1;
    return { code, stdout: error.stdout || '', stderr: error.stderr || '' };
  }
}
// Run one named test case; print a check/cross line and return pass/fail.
function test(name, fn) {
  let ok = true;
  try {
    fn();
  } catch (error) {
    ok = false;
    console.log(`  \u2717 ${name}`);
    console.log(`    Error: ${error.message}`);
  }
  if (ok) {
    console.log(`  \u2713 ${name}`);
  }
  return ok;
}
// Entry point: installs into a scratch project, then verifies uninstall
// removes only managed files. Exits non-zero when any case failed.
function runTests() {
  console.log('\n=== Testing uninstall.js ===\n');
  let passed = 0;
  let failed = 0;
  if (test('removes managed files and keeps unrelated files', () => {
    const homeDir = createTempDir('uninstall-home-');
    const projectRoot = createTempDir('uninstall-project-');
    try {
      const installResult = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(installResult.code, 0, installResult.stderr);
      const cursorRoot = path.join(projectRoot, '.cursor');
      const managedPath = path.join(cursorRoot, 'hooks.json');
      const statePath = path.join(cursorRoot, 'ecc-install-state.json');
      // A user-authored file in the same dir must survive uninstall.
      const unrelatedPath = path.join(cursorRoot, 'custom-user-note.txt');
      fs.writeFileSync(unrelatedPath, 'leave me alone');
      const uninstallResult = runNode(UNINSTALL_SCRIPT, ['--target', 'cursor'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(uninstallResult.code, 0, uninstallResult.stderr);
      assert.ok(uninstallResult.stdout.includes('Uninstall summary'));
      assert.ok(!fs.existsSync(managedPath));
      assert.ok(!fs.existsSync(statePath));
      assert.ok(fs.existsSync(unrelatedPath));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  if (test('supports dry-run without removing files', () => {
    const homeDir = createTempDir('uninstall-home-');
    const projectRoot = createTempDir('uninstall-project-');
    try {
      const installResult = runNode(INSTALL_SCRIPT, ['--target', 'cursor', '--modules', 'platform-configs'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(installResult.code, 0, installResult.stderr);
      const cursorRoot = path.join(projectRoot, '.cursor');
      const managedPath = path.join(cursorRoot, 'hooks.json');
      const statePath = path.join(cursorRoot, 'ecc-install-state.json');
      const uninstallResult = runNode(UNINSTALL_SCRIPT, ['--target', 'cursor', '--dry-run', '--json'], {
        cwd: projectRoot,
        homeDir,
      });
      assert.strictEqual(uninstallResult.code, 0, uninstallResult.stderr);
      const parsed = JSON.parse(uninstallResult.stdout);
      assert.strictEqual(parsed.dryRun, true);
      assert.ok(parsed.results[0].plannedRemovals.length > 0);
      // Dry-run must plan removals without touching the filesystem.
      assert.ok(fs.existsSync(managedPath));
      assert.ok(fs.existsSync(statePath));
    } finally {
      cleanup(homeDir);
      cleanup(projectRoot);
    }
  })) passed++; else failed++;
  console.log(`\nResults: Passed: ${passed}, Failed: ${failed}`);
  process.exit(failed > 0 ? 1 : 0);
}
runTests();