1 Commits

Author SHA1 Message Date
Affaan Mustafa
68ee51f1e3 docs: add community skill ecosystem notes 2026-04-13 00:45:51 -07:00
52 changed files with 124 additions and 4307 deletions

View File

@@ -43,18 +43,10 @@ jobs:
# Package manager setup
- name: Setup pnpm
if: matrix.pm == 'pnpm' && matrix.node != '18.x'
uses: pnpm/action-setup@08c4be7e2e672a47d11bd04269e27e5f3e8529cb # v6.0.0
if: matrix.pm == 'pnpm'
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
with:
# Keep an explicit pnpm major because this repo's packageManager is Yarn.
version: 10
- name: Setup pnpm (via Corepack)
if: matrix.pm == 'pnpm' && matrix.node == '18.x'
shell: bash
run: |
corepack enable
corepack prepare pnpm@9 --activate
version: latest
- name: Setup Yarn (via Corepack)
if: matrix.pm == 'yarn'
@@ -87,8 +79,6 @@ jobs:
if: matrix.pm == 'pnpm'
id: pnpm-cache-dir
shell: bash
env:
COREPACK_ENABLE_STRICT: '0'
run: echo "dir=$(pnpm store path)" >> $GITHUB_OUTPUT
- name: Cache pnpm

View File

@@ -35,18 +35,10 @@ jobs:
node-version: ${{ inputs.node-version }}
- name: Setup pnpm
if: inputs.package-manager == 'pnpm' && inputs.node-version != '18.x'
uses: pnpm/action-setup@08c4be7e2e672a47d11bd04269e27e5f3e8529cb # v6.0.0
if: inputs.package-manager == 'pnpm'
uses: pnpm/action-setup@fc06bc1257f339d1d5d8b3a19a8cae5388b55320 # v4
with:
# Keep an explicit pnpm major because this repo's packageManager is Yarn.
version: 10
- name: Setup pnpm (via Corepack)
if: inputs.package-manager == 'pnpm' && inputs.node-version == '18.x'
shell: bash
run: |
corepack enable
corepack prepare pnpm@9 --activate
version: latest
- name: Setup Yarn (via Corepack)
if: inputs.package-manager == 'yarn'
@@ -78,8 +70,6 @@ jobs:
if: inputs.package-manager == 'pnpm'
id: pnpm-cache-dir
shell: bash
env:
COREPACK_ENABLE_STRICT: '0'
run: echo "dir=$(pnpm store path)" >> $GITHUB_OUTPUT
- name: Cache pnpm

View File

@@ -9,7 +9,7 @@
"version": "1.10.0",
"license": "MIT",
"devDependencies": {
"@opencode-ai/plugin": "^1.4.3",
"@opencode-ai/plugin": "^1.0.0",
"@types/node": "^20.0.0",
"typescript": "^5.3.0"
},
@@ -21,37 +21,22 @@
}
},
"node_modules/@opencode-ai/plugin": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/@opencode-ai/plugin/-/plugin-1.4.3.tgz",
"integrity": "sha512-Ob/3tVSIeuMRJBr2O23RtrnC5djRe01Lglx+TwGEmjrH9yDBJ2tftegYLnNEjRoMuzITgq9LD8168p4pzv+U/A==",
"version": "1.1.53",
"resolved": "https://registry.npmjs.org/@opencode-ai/plugin/-/plugin-1.1.53.tgz",
"integrity": "sha512-9ye7Wz2kESgt02AUDaMea4hXxj6XhWwKAG8NwFhrw09Ux54bGaMJFt1eIS8QQGIMaD+Lp11X4QdyEg96etEBJw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@opencode-ai/sdk": "1.4.3",
"@opencode-ai/sdk": "1.1.53",
"zod": "4.1.8"
},
"peerDependencies": {
"@opentui/core": ">=0.1.97",
"@opentui/solid": ">=0.1.97"
},
"peerDependenciesMeta": {
"@opentui/core": {
"optional": true
},
"@opentui/solid": {
"optional": true
}
}
},
"node_modules/@opencode-ai/sdk": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.4.3.tgz",
"integrity": "sha512-X0CAVbwoGAjTY2iecpWkx2B+GAa2jSaQKYpJ+xILopeF/OGKZUN15mjqci+L7cEuwLHV5wk3x2TStUOVCa5p0A==",
"version": "1.1.53",
"resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.1.53.tgz",
"integrity": "sha512-RUIVnPOP1CyyU32FrOOYuE7Ge51lOBuhaFp2NSX98ncApT7ffoNetmwzqrhOiJQgZB1KrbCHLYOCK6AZfacxag==",
"dev": true,
"license": "MIT",
"dependencies": {
"cross-spawn": "7.0.6"
}
"license": "MIT"
},
"node_modules/@types/node": {
"version": "20.19.33",
@@ -63,61 +48,6 @@
"undici-types": "~6.21.0"
}
},
"node_modules/cross-spawn": {
"version": "7.0.6",
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
"dev": true,
"license": "MIT",
"dependencies": {
"path-key": "^3.1.0",
"shebang-command": "^2.0.0",
"which": "^2.0.1"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/isexe": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
"dev": true,
"license": "ISC"
},
"node_modules/path-key": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-command": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
"dev": true,
"license": "MIT",
"dependencies": {
"shebang-regex": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/shebang-regex": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/typescript": {
"version": "5.9.3",
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
@@ -139,22 +69,6 @@
"dev": true,
"license": "MIT"
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"dev": true,
"license": "ISC",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
"node-which": "bin/node-which"
},
"engines": {
"node": ">= 8"
}
},
"node_modules/zod": {
"version": "4.1.8",
"resolved": "https://registry.npmjs.org/zod/-/zod-4.1.8.tgz",

View File

@@ -60,7 +60,7 @@
"@opencode-ai/plugin": ">=1.0.0"
},
"devDependencies": {
"@opencode-ai/plugin": "^1.4.3",
"@opencode-ai/plugin": "^1.0.0",
"@types/node": "^20.0.0",
"typescript": "^5.3.0"
},

View File

@@ -456,7 +456,7 @@ export const ECCHooksPlugin: ECCHooksPluginFn = async ({
const contextBlock = [
"# ECC Context (preserve across compaction)",
"",
"## Active Plugin: Everything Claude Code v1.10.0",
"## Active Plugin: Everything Claude Code v1.8.0",
"- Hooks: file.edited, tool.execute.before/after, session.created/idle/deleted, shell.env, compacting, permission.ask",
"- Tools: run-tests, check-coverage, security-audit, format-code, lint-check, git-summary, changed-files",
"- Agents: 13 specialized (planner, architect, tdd-guide, code-reviewer, security-reviewer, build-error-resolver, e2e-runner, refactor-cleaner, doc-updater, go-reviewer, go-build-resolver, database-reviewer, python-reviewer)",

View File

@@ -84,7 +84,6 @@ This repo is the raw code only. The guides explain everything.
### v1.10.0 — Surface Refresh, Operator Workflows, and ECC 2.0 Alpha (Apr 2026)
- **Dashboard GUI** — New Tkinter-based desktop application (`ecc_dashboard.py` or `npm run dashboard`) with dark/light theme toggle, font customization, and project logo in header and taskbar.
- **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 38 agents, 156 skills, and 72 legacy command shims.
- **Operator and outbound workflow expansion** — `brand-voice`, `social-graph-ranker`, `connections-optimizer`, `customer-billing-ops`, `ecc-tools-cost-audit`, `google-workspace-ops`, `project-flow-ops`, and `workspace-surface-audit` round out the operator lane.
- **Media and launch tooling** — `manim-video`, `remotion-video-creation`, and upgraded social publishing surfaces make technical explainers and launch content part of the same system.
@@ -241,23 +240,6 @@ For manual install instructions see the README in the `rules/` folder. When copy
**That's it!** You now have access to 47 agents, 181 skills, and 79 legacy command shims.
### Dashboard GUI
Launch the desktop dashboard to visually explore ECC components:
```bash
npm run dashboard
# or
python3 ./ecc_dashboard.py
```
**Features:**
- Tabbed interface: Agents, Skills, Commands, Rules, Settings
- Dark/Light theme toggle
- Font customization (family & size)
- Project logo in header and taskbar
- Search and filter across all components
### Multi-model commands require additional setup
> WARNING: `multi-*` commands are **not** covered by the base plugin/rules install above.
@@ -518,12 +500,6 @@ everything-claude-code/
|-- mcp-configs/ # MCP server configurations
| |-- mcp-servers.json # GitHub, Supabase, Vercel, Railway, etc.
|
|-- ecc_dashboard.py # Desktop GUI dashboard (Tkinter)
|
|-- assets/ # Assets for dashboard
| |-- images/
| |-- ecc-logo.png
|
|-- marketplace.json # Self-hosted marketplace config (for /plugin marketplace add)
```
@@ -1013,6 +989,14 @@ Please contribute! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
- Testing strategies (different frameworks, visual regression)
- Domain-specific knowledge (ML, data engineering, mobile)
### Community Ecosystem Notes
These are not bundled with ECC and are not audited by this repo, but they are worth knowing about if you are exploring the broader Claude Code skills ecosystem:
- [claude-seo](https://github.com/AgriciDaniel/claude-seo) — SEO-focused skill and agent collection
- [claude-ads](https://github.com/AgriciDaniel/claude-ads) — Ad-audit and paid-growth workflow collection
- [claude-cybersecurity](https://github.com/AgriciDaniel/claude-cybersecurity) — Security-oriented skill and agent collection
---
## Cursor IDE Support

View File

@@ -1,139 +0,0 @@
---
name: a11y-architect
description: Accessibility Architect specializing in WCAG 2.2 compliance for Web and Native platforms. Use PROACTIVELY when designing UI components, establishing design systems, or auditing code for inclusive user experiences.
tools: ["Read", "Write", "Edit", "Bash", "Grep", "Glob"]
---
You are a Senior Accessibility Architect. Your goal is to ensure that every digital product is Perceivable, Operable, Understandable, and Robust (POUR) for all users, including those with visual, auditory, motor, or cognitive disabilities.
## Your Role
- **Architecting Inclusivity**: Design UI systems that natively support assistive technologies (Screen Readers, Voice Control, Switch Access).
- **WCAG 2.2 Enforcement**: Apply the latest success criteria, focusing on new standards like Focus Appearance, Target Size, and Redundant Entry.
- **Platform Strategy**: Bridge the gap between Web standards (WAI-ARIA) and Native frameworks (SwiftUI/Jetpack Compose).
- **Technical Specifications**: Provide developers with precise attributes (roles, labels, hints, and traits) required for compliance.
## Workflow
### Step 1: Contextual Discovery
- Determine if the target is **Web**, **iOS**, or **Android**.
- Analyze the user interaction (e.g., Is this a simple button or a complex data grid?).
- Identify potential accessibility "blockers" (e.g., color-only indicators, missing focus containment in modals).
### Step 2: Strategic Implementation
- **Apply the Accessibility Skill**: Invoke specific logic to generate semantic code.
- **Define Focus Flow**: Map out how a keyboard or screen reader user will move through the interface.
- **Optimize Touch/Pointer**: Ensure all interactive elements meet the minimum **24x24 pixel** spacing or **44x44 pixel** target size requirements.
### Step 3: Validation & Documentation
- Review the output against the WCAG 2.2 Level AA checklist.
- Provide a brief "Implementation Note" explaining _why_ certain attributes (like `aria-live` or `accessibilityHint`) were used.
## Output Format
For every component or page request, provide:
1. **The Code**: Semantic HTML/ARIA or Native code.
2. **The Accessibility Tree**: A description of what a screen reader will announce.
3. **Compliance Mapping**: A list of specific WCAG 2.2 criteria addressed.
## Examples
### Example: Accessible Search Component
**Input**: "Create a search bar with a submit icon."
**Action**: Ensuring the icon-only button has a visible label and the input is correctly labeled.
**Output**:
```html
<form role="search">
<label for="site-search" class="sr-only">Search the site</label>
<input type="search" id="site-search" name="q" />
<button type="submit" aria-label="Search">
<svg aria-hidden="true">...</svg>
</button>
</form>
```
## WCAG 2.2 Core Compliance Checklist
### 1. Perceivable (Information must be presentable)
- [ ] **Text Alternatives**: All non-text content has a text alternative (Alt text or labels).
- [ ] **Contrast**: Text meets 4.5:1; UI components/graphics meet 3:1 contrast ratios.
- [ ] **Adaptable**: Content reflows and remains functional when resized up to 400%.
### 2. Operable (Interface components must be usable)
- [ ] **Keyboard Accessible**: Every interactive element is reachable via keyboard/switch control.
- [ ] **Navigable**: Focus order is logical, and focus indicators are high-contrast (SC 2.4.11).
- [ ] **Pointer Gestures**: Single-pointer alternatives exist for all dragging or multipoint gestures.
- [ ] **Target Size**: Interactive elements are at least 24x24 CSS pixels (SC 2.5.8).
### 3. Understandable (Information must be clear)
- [ ] **Predictable**: Navigation and identification of elements are consistent across the app.
- [ ] **Input Assistance**: Forms provide clear error identification and suggestions for fix.
- [ ] **Redundant Entry**: Avoid asking for the same info twice in a single process (SC 3.3.7).
### 4. Robust (Content must be compatible)
- [ ] **Compatibility**: Maximize compatibility with assistive tech using valid Name, Role, and Value.
- [ ] **Status Messages**: Screen readers are notified of dynamic changes via ARIA live regions.
---
## Anti-Patterns
| Issue | Why it fails |
| :------------------------- | :------------------------------------------------------------------------------------------------- |
| **"Click Here" Links** | Non-descriptive; screen reader users navigating by links won't know the destination. |
| **Fixed-Sized Containers** | Prevents content reflow and breaks the layout at higher zoom levels. |
| **Keyboard Traps** | Prevents users from navigating the rest of the page once they enter a component. |
| **Auto-Playing Media** | Distracting for users with cognitive disabilities; interferes with screen reader audio. |
| **Empty Buttons** | Icon-only buttons without an `aria-label` or `accessibilityLabel` are invisible to screen readers. |
## Accessibility Decision Record Template
For major UI decisions, use this format:
````markdown
# ADR-ACC-[000]: [Title of the Accessibility Decision]
## Status
Proposed | **Accepted** | Deprecated | Superseded by [ADR-XXX]
## Context
_Describe the UI component or workflow being addressed._
- **Platform**: [Web | iOS | Android | Cross-platform]
- **WCAG 2.2 Success Criterion**: [e.g., 2.5.8 Target Size (Minimum)]
- **Problem**: What is the current accessibility barrier? (e.g., "The 'Close' button in the modal is too small for users with motor impairments.")
## Decision
_Detail the specific implementation choice._
"We will implement a touch target of at least 44x44 points for all mobile navigation elements and 24x24 CSS pixels for web, ensuring a minimum 4px spacing between adjacent targets."
## Implementation Details
### Code/Spec
```[language]
// Example: SwiftUI
Button(action: close) {
Image(systemName: "xmark")
.frame(width: 44, height: 44) // Standardizing hit area
}
.accessibilityLabel("Close modal")
```
````
## Reference
- See skill `accessibility` to transform raw UI requirements into platform-specific accessible code (WAI-ARIA, SwiftUI, or Jetpack Compose) based on WCAG 2.2 criteria.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 98 KiB

10
ecc2/Cargo.lock generated
View File

@@ -1286,15 +1286,6 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
[[package]]
name = "openssl-src"
version = "300.6.0+3.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8e8cbfd3a4a8c8f089147fd7aaa33cf8c7450c4d09f8f80698a0cf093abeff4"
dependencies = [
"cc",
]
[[package]]
name = "openssl-sys"
version = "0.9.112"
@@ -1303,7 +1294,6 @@ checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb"
dependencies = [
"cc",
"libc",
"openssl-src",
"pkg-config",
"vcpkg",
]

View File

@@ -7,10 +7,6 @@ license = "MIT"
authors = ["Affaan Mustafa <me@affaanmustafa.com>"]
repository = "https://github.com/affaan-m/everything-claude-code"
[features]
default = ["vendored-openssl"]
vendored-openssl = ["git2/vendored-openssl"]
[dependencies]
# TUI
ratatui = { version = "0.30", features = ["crossterm_0_28"] }
@@ -23,7 +19,7 @@ tokio = { version = "1", features = ["full"] }
rusqlite = { version = "0.32", features = ["bundled"] }
# Git integration
git2 = { version = "0.20", features = ["ssh"] }
git2 = "0.20"
# Serialization
serde = { version = "1", features = ["derive"] }

View File

@@ -1,914 +0,0 @@
#!/usr/bin/env python3
"""
ECC Dashboard - Everything Claude Code GUI
Cross-platform TkInter application for managing ECC components
"""
import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox
import os
import json
from typing import Dict, List, Optional
# ============================================================================
# DATA LOADERS - Load ECC data from the project
# ============================================================================
def get_project_path() -> str:
"""Get the ECC project path - assumes this script is run from the project dir"""
return os.path.dirname(os.path.abspath(__file__))
def load_agents(project_path: str) -> List[Dict]:
"""Load agents from AGENTS.md"""
agents_file = os.path.join(project_path, "AGENTS.md")
agents = []
if os.path.exists(agents_file):
with open(agents_file, 'r', encoding='utf-8') as f:
content = f.read()
# Parse agent table from AGENTS.md
lines = content.split('\n')
in_table = False
for line in lines:
if '| Agent | Purpose | When to Use |' in line:
in_table = True
continue
if in_table and line.startswith('|'):
parts = [p.strip() for p in line.split('|')]
if len(parts) >= 4 and parts[1] and parts[1] != 'Agent':
agents.append({
'name': parts[1],
'purpose': parts[2],
'when_to_use': parts[3]
})
# Fallback default agents if file not found
if not agents:
agents = [
{'name': 'planner', 'purpose': 'Implementation planning', 'when_to_use': 'Complex features, refactoring'},
{'name': 'architect', 'purpose': 'System design and scalability', 'when_to_use': 'Architectural decisions'},
{'name': 'tdd-guide', 'purpose': 'Test-driven development', 'when_to_use': 'New features, bug fixes'},
{'name': 'code-reviewer', 'purpose': 'Code quality and maintainability', 'when_to_use': 'After writing/modifying code'},
{'name': 'security-reviewer', 'purpose': 'Vulnerability detection', 'when_to_use': 'Before commits, sensitive code'},
{'name': 'build-error-resolver', 'purpose': 'Fix build/type errors', 'when_to_use': 'When build fails'},
{'name': 'e2e-runner', 'purpose': 'End-to-end Playwright testing', 'when_to_use': 'Critical user flows'},
{'name': 'refactor-cleaner', 'purpose': 'Dead code cleanup', 'when_to_use': 'Code maintenance'},
{'name': 'doc-updater', 'purpose': 'Documentation and codemaps', 'when_to_use': 'Updating docs'},
{'name': 'go-reviewer', 'purpose': 'Go code review', 'when_to_use': 'Go projects'},
{'name': 'python-reviewer', 'purpose': 'Python code review', 'when_to_use': 'Python projects'},
{'name': 'typescript-reviewer', 'purpose': 'TypeScript/JavaScript code review', 'when_to_use': 'TypeScript projects'},
{'name': 'rust-reviewer', 'purpose': 'Rust code review', 'when_to_use': 'Rust projects'},
{'name': 'java-reviewer', 'purpose': 'Java and Spring Boot code review', 'when_to_use': 'Java projects'},
{'name': 'kotlin-reviewer', 'purpose': 'Kotlin code review', 'when_to_use': 'Kotlin projects'},
{'name': 'cpp-reviewer', 'purpose': 'C/C++ code review', 'when_to_use': 'C/C++ projects'},
{'name': 'database-reviewer', 'purpose': 'PostgreSQL/Supabase specialist', 'when_to_use': 'Database work'},
{'name': 'loop-operator', 'purpose': 'Autonomous loop execution', 'when_to_use': 'Run loops safely'},
{'name': 'harness-optimizer', 'purpose': 'Harness config tuning', 'when_to_use': 'Reliability, cost, throughput'},
]
return agents
def load_skills(project_path: str) -> List[Dict]:
"""Load skills from skills directory"""
skills_dir = os.path.join(project_path, "skills")
skills = []
if os.path.exists(skills_dir):
for item in os.listdir(skills_dir):
skill_path = os.path.join(skills_dir, item)
if os.path.isdir(skill_path):
skill_file = os.path.join(skill_path, "SKILL.md")
description = item.replace('-', ' ').title()
if os.path.exists(skill_file):
try:
with open(skill_file, 'r', encoding='utf-8') as f:
content = f.read()
# Extract description from first lines
lines = content.split('\n')
for line in lines:
if line.strip() and not line.startswith('#'):
description = line.strip()[:100]
break
if line.startswith('# '):
description = line[2:].strip()[:100]
break
except:
pass
# Determine category
category = "General"
item_lower = item.lower()
if 'python' in item_lower or 'django' in item_lower:
category = "Python"
elif 'golang' in item_lower or 'go-' in item_lower:
category = "Go"
elif 'frontend' in item_lower or 'react' in item_lower:
category = "Frontend"
elif 'backend' in item_lower or 'api' in item_lower:
category = "Backend"
elif 'security' in item_lower:
category = "Security"
elif 'testing' in item_lower or 'tdd' in item_lower:
category = "Testing"
elif 'docker' in item_lower or 'deployment' in item_lower:
category = "DevOps"
elif 'swift' in item_lower or 'ios' in item_lower:
category = "iOS"
elif 'java' in item_lower or 'spring' in item_lower:
category = "Java"
elif 'rust' in item_lower:
category = "Rust"
skills.append({
'name': item,
'description': description,
'category': category,
'path': skill_path
})
# Fallback if directory doesn't exist
if not skills:
skills = [
{'name': 'tdd-workflow', 'description': 'Test-driven development workflow', 'category': 'Testing'},
{'name': 'coding-standards', 'description': 'Baseline coding conventions', 'category': 'General'},
{'name': 'security-review', 'description': 'Security checklist and patterns', 'category': 'Security'},
{'name': 'frontend-patterns', 'description': 'React and Next.js patterns', 'category': 'Frontend'},
{'name': 'backend-patterns', 'description': 'API and database patterns', 'category': 'Backend'},
{'name': 'api-design', 'description': 'REST API design patterns', 'category': 'Backend'},
{'name': 'docker-patterns', 'description': 'Docker and container patterns', 'category': 'DevOps'},
{'name': 'e2e-testing', 'description': 'Playwright E2E testing patterns', 'category': 'Testing'},
{'name': 'verification-loop', 'description': 'Build, test, lint verification', 'category': 'General'},
{'name': 'python-patterns', 'description': 'Python idioms and best practices', 'category': 'Python'},
{'name': 'golang-patterns', 'description': 'Go idioms and best practices', 'category': 'Go'},
{'name': 'django-patterns', 'description': 'Django patterns and best practices', 'category': 'Python'},
{'name': 'springboot-patterns', 'description': 'Java Spring Boot patterns', 'category': 'Java'},
{'name': 'laravel-patterns', 'description': 'Laravel architecture patterns', 'category': 'PHP'},
]
return skills
def load_commands(project_path: str) -> List[Dict]:
"""Load commands from commands directory"""
commands_dir = os.path.join(project_path, "commands")
commands = []
if os.path.exists(commands_dir):
for item in os.listdir(commands_dir):
if item.endswith('.md'):
cmd_name = item[:-3]
description = ""
try:
with open(os.path.join(commands_dir, item), 'r', encoding='utf-8') as f:
content = f.read()
lines = content.split('\n')
for line in lines:
if line.startswith('# '):
description = line[2:].strip()
break
except:
pass
commands.append({
'name': cmd_name,
'description': description or cmd_name.replace('-', ' ').title()
})
# Fallback commands
if not commands:
commands = [
{'name': 'plan', 'description': 'Create implementation plan'},
{'name': 'tdd', 'description': 'Test-driven development workflow'},
{'name': 'code-review', 'description': 'Review code for quality and security'},
{'name': 'build-fix', 'description': 'Fix build and TypeScript errors'},
{'name': 'e2e', 'description': 'Generate and run E2E tests'},
{'name': 'refactor-clean', 'description': 'Remove dead code'},
{'name': 'verify', 'description': 'Run verification loop'},
{'name': 'eval', 'description': 'Run evaluation against criteria'},
{'name': 'security', 'description': 'Run comprehensive security review'},
{'name': 'test-coverage', 'description': 'Analyze test coverage'},
{'name': 'update-docs', 'description': 'Update documentation'},
{'name': 'setup-pm', 'description': 'Configure package manager'},
{'name': 'go-review', 'description': 'Go code review'},
{'name': 'go-test', 'description': 'Go TDD workflow'},
{'name': 'python-review', 'description': 'Python code review'},
]
return commands
def load_rules(project_path: str) -> List[Dict]:
"""Load rules from rules directory"""
rules_dir = os.path.join(project_path, "rules")
rules = []
if os.path.exists(rules_dir):
for item in os.listdir(rules_dir):
item_path = os.path.join(rules_dir, item)
if os.path.isdir(item_path):
# Common rules
if item == "common":
for file in os.listdir(item_path):
if file.endswith('.md'):
rules.append({
'name': file[:-3],
'language': 'Common',
'path': os.path.join(item_path, file)
})
else:
# Language-specific rules
for file in os.listdir(item_path):
if file.endswith('.md'):
rules.append({
'name': file[:-3],
'language': item.title(),
'path': os.path.join(item_path, file)
})
# Fallback rules
if not rules:
rules = [
{'name': 'coding-style', 'language': 'Common', 'path': ''},
{'name': 'git-workflow', 'language': 'Common', 'path': ''},
{'name': 'testing', 'language': 'Common', 'path': ''},
{'name': 'performance', 'language': 'Common', 'path': ''},
{'name': 'patterns', 'language': 'Common', 'path': ''},
{'name': 'security', 'language': 'Common', 'path': ''},
{'name': 'typescript', 'language': 'TypeScript', 'path': ''},
{'name': 'python', 'language': 'Python', 'path': ''},
{'name': 'golang', 'language': 'Go', 'path': ''},
{'name': 'swift', 'language': 'Swift', 'path': ''},
{'name': 'php', 'language': 'PHP', 'path': ''},
]
return rules
# ============================================================================
# MAIN APPLICATION
# ============================================================================
class ECCDashboard(tk.Tk):
"""Main ECC Dashboard Application"""
def __init__(self):
super().__init__()
self.project_path = get_project_path()
self.title("ECC Dashboard - Everything Claude Code")
self.state('zoomed')
try:
self.icon_image = tk.PhotoImage(file='assets/images/ecc-logo.png')
self.iconphoto(True, self.icon_image)
except:
pass
self.minsize(800, 600)
# Load data
self.agents = load_agents(self.project_path)
self.skills = load_skills(self.project_path)
self.commands = load_commands(self.project_path)
self.rules = load_rules(self.project_path)
# Settings
self.settings = {
'project_path': self.project_path,
'theme': 'light'
}
# Setup UI
self.setup_styles()
self.create_widgets()
# Center window
self.center_window()
def setup_styles(self):
"""Setup ttk styles for modern look"""
style = ttk.Style()
style.theme_use('clam')
# Configure tab style
style.configure('TNotebook', background='#f0f0f0')
style.configure('TNotebook.Tab', padding=[10, 5], font=('Arial', 10))
style.map('TNotebook.Tab', background=[('selected', '#ffffff')])
# Configure Treeview
style.configure('Treeview', font=('Arial', 10), rowheight=25)
style.configure('Treeview.Heading', font=('Arial', 10, 'bold'))
# Configure buttons
style.configure('TButton', font=('Arial', 10), padding=5)
def center_window(self):
"""Center the window on screen"""
self.update_idletasks()
width = self.winfo_width()
height = self.winfo_height()
x = (self.winfo_screenwidth() // 2) - (width // 2)
y = (self.winfo_screenheight() // 2) - (height // 2)
self.geometry(f'{width}x{height}+{x}+{y}')
def create_widgets(self):
"""Create all UI widgets"""
# Main container
main_frame = ttk.Frame(self)
main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)
# Header
header_frame = ttk.Frame(main_frame)
header_frame.pack(fill=tk.X, pady=(0, 10))
try:
self.logo_image = tk.PhotoImage(file='assets/images/ecc-logo.png')
self.logo_image = self.logo_image.subsample(2, 2)
ttk.Label(header_frame, image=self.logo_image).pack(side=tk.LEFT, padx=(0, 10))
except:
pass
self.title_label = ttk.Label(header_frame, text="ECC Dashboard", font=('Open Sans', 18, 'bold'))
self.title_label.pack(side=tk.LEFT)
self.version_label = ttk.Label(header_frame, text="v1.10.0", font=('Open Sans', 10), foreground='gray')
self.version_label.pack(side=tk.LEFT, padx=(10, 0))
# Notebook (tabs)
self.notebook = ttk.Notebook(main_frame)
self.notebook.pack(fill=tk.BOTH, expand=True)
# Create tabs
self.create_agents_tab()
self.create_skills_tab()
self.create_commands_tab()
self.create_rules_tab()
self.create_settings_tab()
# Status bar
status_frame = ttk.Frame(main_frame)
status_frame.pack(fill=tk.X, pady=(10, 0))
self.status_label = ttk.Label(status_frame,
text=f"Ready | Agents: {len(self.agents)} | Skills: {len(self.skills)} | Commands: {len(self.commands)}",
font=('Arial', 9), foreground='gray')
self.status_label.pack(side=tk.LEFT)
# =========================================================================
# AGENTS TAB
# =========================================================================
def create_agents_tab(self):
"""Create Agents tab"""
frame = ttk.Frame(self.notebook)
self.notebook.add(frame, text=f"Agents ({len(self.agents)})")
# Search bar
search_frame = ttk.Frame(frame)
search_frame.pack(fill=tk.X, padx=10, pady=10)
ttk.Label(search_frame, text="Search:").pack(side=tk.LEFT)
self.agent_search = ttk.Entry(search_frame, width=30)
self.agent_search.pack(side=tk.LEFT, padx=5)
self.agent_search.bind('<KeyRelease>', self.filter_agents)
ttk.Label(search_frame, text="Count:").pack(side=tk.LEFT, padx=(20, 0))
self.agent_count_label = ttk.Label(search_frame, text=str(len(self.agents)))
self.agent_count_label.pack(side=tk.LEFT)
# Split pane: list + details
paned = ttk.PanedWindow(frame, orient=tk.HORIZONTAL)
paned.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))
# Agent list
list_frame = ttk.Frame(paned)
paned.add(list_frame, weight=2)
columns = ('name', 'purpose')
self.agent_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
self.agent_tree.heading('#0', text='#')
self.agent_tree.heading('name', text='Agent Name')
self.agent_tree.heading('purpose', text='Purpose')
self.agent_tree.column('#0', width=40)
self.agent_tree.column('name', width=180)
self.agent_tree.column('purpose', width=250)
self.agent_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
# Scrollbar
scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.agent_tree.yview)
self.agent_tree.configure(yscrollcommand=scrollbar.set)
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
# Details panel
details_frame = ttk.Frame(paned)
paned.add(details_frame, weight=1)
ttk.Label(details_frame, text="Details", font=('Arial', 11, 'bold')).pack(anchor=tk.W, pady=5)
self.agent_details = scrolledtext.ScrolledText(details_frame, wrap=tk.WORD, height=15)
self.agent_details.pack(fill=tk.BOTH, expand=True)
# Bind selection
self.agent_tree.bind('<<TreeviewSelect>>', self.on_agent_select)
# Populate list
self.populate_agents(self.agents)
def populate_agents(self, agents: List[Dict]):
"""Populate agents list"""
for item in self.agent_tree.get_children():
self.agent_tree.delete(item)
for i, agent in enumerate(agents, 1):
self.agent_tree.insert('', tk.END, text=str(i), values=(agent['name'], agent['purpose']))
def filter_agents(self, event=None):
    """Narrow the agent list to rows matching the search box text."""
    term = self.agent_search.get().lower()
    # An empty search shows the full, unfiltered list.
    matches = self.agents if not term else [
        agent for agent in self.agents
        if term in agent['name'].lower() or term in agent['purpose'].lower()
    ]
    self.populate_agents(matches)
    # Keep the visible count in sync with the filtered rows.
    self.agent_count_label.config(text=str(len(matches)))
def on_agent_select(self, event):
    """Render details for the selected agent into the details pane."""
    selection = self.agent_tree.selection()
    # Selection events can fire with nothing selected (e.g. after a refresh).
    if not selection:
        return
    item = self.agent_tree.item(selection[0])
    # Column 0 of the row holds the agent name; use it as the lookup key.
    agent_name = item['values'][0]
    agent = next((a for a in self.agents if a['name'] == agent_name), None)
    if agent:
        # f-string body is flush-left so the rendered text has no indentation.
        details = f"""Agent: {agent['name']}
Purpose: {agent['purpose']}
When to Use: {agent['when_to_use']}
---
Usage in Claude Code:
Use the /{agent['name']} command or invoke via agent delegation."""
        # Replace the whole contents of the details pane.
        self.agent_details.delete('1.0', tk.END)
        self.agent_details.insert('1.0', details)
# =========================================================================
# SKILLS TAB
# =========================================================================
def create_skills_tab(self):
    """Build the Skills tab: search box + category filter over a Treeview,
    with a description pane on the right."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Skills ({len(self.skills)})")
    # Search and filter row
    filter_frame = ttk.Frame(frame)
    filter_frame.pack(fill=tk.X, padx=10, pady=10)
    ttk.Label(filter_frame, text="Search:").pack(side=tk.LEFT)
    self.skill_search = ttk.Entry(filter_frame, width=25)
    self.skill_search.pack(side=tk.LEFT, padx=5)
    # Filter live as the user types.
    self.skill_search.bind('<KeyRelease>', self.filter_skills)
    ttk.Label(filter_frame, text="Category:").pack(side=tk.LEFT, padx=(20, 0))
    self.skill_category = ttk.Combobox(filter_frame, values=['All'] + self.get_categories(), width=15)
    self.skill_category.set('All')
    self.skill_category.pack(side=tk.LEFT, padx=5)
    self.skill_category.bind('<<ComboboxSelected>>', self.filter_skills)
    ttk.Label(filter_frame, text="Count:").pack(side=tk.LEFT, padx=(20, 0))
    # Live count of skills currently matching the filters.
    self.skill_count_label = ttk.Label(filter_frame, text=str(len(self.skills)))
    self.skill_count_label.pack(side=tk.LEFT)
    # Split pane: list on the left, description on the right
    paned = ttk.PanedWindow(frame, orient=tk.HORIZONTAL)
    paned.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))
    # Skill list
    list_frame = ttk.Frame(paned)
    paned.add(list_frame, weight=1)
    columns = ('name', 'category', 'description')
    self.skill_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.skill_tree.heading('#0', text='#')
    self.skill_tree.heading('name', text='Skill Name')
    self.skill_tree.heading('category', text='Category')
    self.skill_tree.heading('description', text='Description')
    self.skill_tree.column('#0', width=40)
    self.skill_tree.column('name', width=180)
    self.skill_tree.column('category', width=100)
    self.skill_tree.column('description', width=300)
    self.skill_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.skill_tree.yview)
    self.skill_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
    # Details pane
    details_frame = ttk.Frame(paned)
    paned.add(details_frame, weight=1)
    ttk.Label(details_frame, text="Description", font=('Arial', 11, 'bold')).pack(anchor=tk.W, pady=5)
    self.skill_details = scrolledtext.ScrolledText(details_frame, wrap=tk.WORD, height=15)
    self.skill_details.pack(fill=tk.BOTH, expand=True)
    self.skill_tree.bind('<<TreeviewSelect>>', self.on_skill_select)
    # Initial population shows every skill.
    self.populate_skills(self.skills)
def get_categories(self) -> List[str]:
    """Return the distinct skill categories, sorted alphabetically."""
    return sorted({skill['category'] for skill in self.skills})
def populate_skills(self, skills: List[Dict]):
    """Replace the skill list contents with the given skills."""
    # Drop every row currently shown.
    self.skill_tree.delete(*self.skill_tree.get_children())
    # Re-insert, numbering rows from 1 in the tree column.
    for row_number, skill in enumerate(skills, start=1):
        self.skill_tree.insert(
            '', tk.END,
            text=str(row_number),
            values=(skill['name'], skill['category'], skill['description']),
        )
def filter_skills(self, event=None):
    """Narrow the skill list by search text and selected category."""
    term = self.skill_search.get().lower()
    chosen_category = self.skill_category.get()
    # Category filter first ('All' keeps everything).
    matches = [
        skill for skill in self.skills
        if chosen_category in ('All', skill['category'])
    ]
    # Then the free-text filter over name and description.
    if term:
        matches = [
            skill for skill in matches
            if term in skill['name'].lower() or term in skill['description'].lower()
        ]
    self.populate_skills(matches)
    self.skill_count_label.config(text=str(len(matches)))
def on_skill_select(self, event):
    """Render details for the selected skill into the description pane."""
    selection = self.skill_tree.selection()
    # Selection events can fire with nothing selected.
    if not selection:
        return
    item = self.skill_tree.item(selection[0])
    # Column 0 of the row holds the skill name; use it as the lookup key.
    skill_name = item['values'][0]
    skill = next((s for s in self.skills if s['name'] == skill_name), None)
    if skill:
        # f-string body is flush-left so the rendered text has no indentation.
        details = f"""Skill: {skill['name']}
Category: {skill['category']}
Description: {skill['description']}
Path: {skill['path']}
---
Usage: This skill is automatically activated when working with related technologies."""
        # Replace the whole contents of the description pane.
        self.skill_details.delete('1.0', tk.END)
        self.skill_details.insert('1.0', details)
# =========================================================================
# COMMANDS TAB
# =========================================================================
def create_commands_tab(self):
    """Build the Commands tab: a read-only table of slash commands."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Commands ({len(self.commands)})")
    # Info header
    info_frame = ttk.Frame(frame)
    info_frame.pack(fill=tk.X, padx=10, pady=10)
    ttk.Label(info_frame, text="Slash Commands for Claude Code:",
              font=('Arial', 10, 'bold')).pack(anchor=tk.W)
    ttk.Label(info_frame, text="Use these commands in Claude Code by typing /command_name",
              foreground='gray').pack(anchor=tk.W)
    # Commands list
    list_frame = ttk.Frame(frame)
    list_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))
    columns = ('name', 'description')
    self.command_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.command_tree.heading('#0', text='#')
    self.command_tree.heading('name', text='Command')
    self.command_tree.heading('description', text='Description')
    self.command_tree.column('#0', width=40)
    self.command_tree.column('name', width=150)
    self.command_tree.column('description', width=400)
    self.command_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.command_tree.yview)
    self.command_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
    # Populate once; this tab has no search/filter controls.
    for i, cmd in enumerate(self.commands, 1):
        self.command_tree.insert('', tk.END, text=str(i),
                                 values=('/' + cmd['name'], cmd['description']))
# =========================================================================
# RULES TAB
# =========================================================================
def create_rules_tab(self):
    """Build the Rules tab: a rules table filterable by language."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Rules ({len(self.rules)})")
    # Info header
    info_frame = ttk.Frame(frame)
    info_frame.pack(fill=tk.X, padx=10, pady=10)
    ttk.Label(info_frame, text="Coding Rules by Language:",
              font=('Arial', 10, 'bold')).pack(anchor=tk.W)
    ttk.Label(info_frame, text="These rules are automatically applied in Claude Code",
              foreground='gray').pack(anchor=tk.W)
    # Language filter
    filter_frame = ttk.Frame(frame)
    filter_frame.pack(fill=tk.X, padx=10, pady=5)
    ttk.Label(filter_frame, text="Language:").pack(side=tk.LEFT)
    self.rules_language = ttk.Combobox(filter_frame,
                                       values=['All'] + self.get_rule_languages(),
                                       width=15)
    self.rules_language.set('All')
    self.rules_language.pack(side=tk.LEFT, padx=5)
    self.rules_language.bind('<<ComboboxSelected>>', self.filter_rules)
    # Rules list
    list_frame = ttk.Frame(frame)
    list_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))
    columns = ('name', 'language')
    self.rules_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.rules_tree.heading('#0', text='#')
    self.rules_tree.heading('name', text='Rule Name')
    self.rules_tree.heading('language', text='Language')
    self.rules_tree.column('#0', width=40)
    self.rules_tree.column('name', width=250)
    self.rules_tree.column('language', width=100)
    self.rules_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.rules_tree.yview)
    self.rules_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
    # Initial population shows every rule ('All').
    self.populate_rules(self.rules)
def get_rule_languages(self) -> List[str]:
    """Return the distinct rule languages, sorted alphabetically."""
    return sorted({rule['language'] for rule in self.rules})
def populate_rules(self, rules: List[Dict]):
    """Replace the rules list contents with the given rules."""
    # Drop every row currently shown.
    self.rules_tree.delete(*self.rules_tree.get_children())
    # Re-insert, numbering rows from 1 in the tree column.
    for row_number, rule in enumerate(rules, start=1):
        self.rules_tree.insert(
            '', tk.END,
            text=str(row_number),
            values=(rule['name'], rule['language']),
        )
def filter_rules(self, event=None):
    """Narrow the rules list to the selected language ('All' keeps every rule)."""
    chosen = self.rules_language.get()
    matching = (
        self.rules
        if chosen == 'All'
        else [rule for rule in self.rules if rule['language'] == chosen]
    )
    self.populate_rules(matching)
# =========================================================================
# SETTINGS TAB
# =========================================================================
def create_settings_tab(self):
    """Build the Settings tab: project path, theme/font pickers,
    quick actions, and an about box."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text="Settings")
    # Project path picker
    path_frame = ttk.LabelFrame(frame, text="Project Path", padding=10)
    path_frame.pack(fill=tk.X, padx=10, pady=10)
    self.path_entry = ttk.Entry(path_frame, width=60)
    self.path_entry.insert(0, self.project_path)
    self.path_entry.pack(side=tk.LEFT, fill=tk.X, expand=True)
    ttk.Button(path_frame, text="Browse...", command=self.browse_path).pack(side=tk.LEFT, padx=5)
    # Theme radio buttons (re-theme immediately on change)
    theme_frame = ttk.LabelFrame(frame, text="Appearance", padding=10)
    theme_frame.pack(fill=tk.X, padx=10, pady=10)
    ttk.Label(theme_frame, text="Theme:").pack(anchor=tk.W)
    self.theme_var = tk.StringVar(value='light')
    light_rb = ttk.Radiobutton(theme_frame, text="Light", variable=self.theme_var,
                               value='light', command=self.apply_theme)
    light_rb.pack(anchor=tk.W)
    dark_rb = ttk.Radiobutton(theme_frame, text="Dark", variable=self.theme_var,
                              value='dark', command=self.apply_theme)
    dark_rb.pack(anchor=tk.W)
    # Font family / size pickers (applied immediately via apply_theme)
    font_frame = ttk.LabelFrame(frame, text="Font", padding=10)
    font_frame.pack(fill=tk.X, padx=10, pady=10)
    ttk.Label(font_frame, text="Font Family:").pack(anchor=tk.W)
    self.font_var = tk.StringVar(value='Open Sans')
    fonts = ['Open Sans', 'Arial', 'Helvetica', 'Times New Roman', 'Courier New', 'Verdana', 'Georgia', 'Tahoma', 'Trebuchet MS']
    self.font_combo = ttk.Combobox(font_frame, textvariable=self.font_var, values=fonts, state='readonly')
    self.font_combo.pack(anchor=tk.W, fill=tk.X, pady=(5, 0))
    self.font_combo.bind('<<ComboboxSelected>>', lambda e: self.apply_theme())
    ttk.Label(font_frame, text="Font Size:").pack(anchor=tk.W, pady=(10, 0))
    self.size_var = tk.StringVar(value='10')
    sizes = ['8', '9', '10', '11', '12', '14', '16', '18', '20']
    self.size_combo = ttk.Combobox(font_frame, textvariable=self.size_var, values=sizes, state='readonly', width=10)
    self.size_combo.pack(anchor=tk.W, pady=(5, 0))
    self.size_combo.bind('<<ComboboxSelected>>', lambda e: self.apply_theme())
    # Quick Actions
    actions_frame = ttk.LabelFrame(frame, text="Quick Actions", padding=10)
    actions_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)
    ttk.Button(actions_frame, text="Open Project in Terminal",
               command=self.open_terminal).pack(fill=tk.X, pady=2)
    ttk.Button(actions_frame, text="Open README",
               command=self.open_readme).pack(fill=tk.X, pady=2)
    ttk.Button(actions_frame, text="Open AGENTS.md",
               command=self.open_agents).pack(fill=tk.X, pady=2)
    ttk.Button(actions_frame, text="Refresh Data",
               command=self.refresh_data).pack(fill=tk.X, pady=2)
    # About box
    about_frame = ttk.LabelFrame(frame, text="About", padding=10)
    about_frame.pack(fill=tk.X, padx=10, pady=10)
    # NOTE(review): the header says v1.0.0 while the Version line says 1.10.0 —
    # confirm which is current. String content is flush-left on purpose.
    about_text = """ECC Dashboard v1.0.0
Everything Claude Code GUI
A cross-platform desktop application for
managing and exploring ECC components.
Version: 1.10.0
Project: github.com/affaan-m/everything-claude-code"""
    ttk.Label(about_frame, text=about_text, justify=tk.LEFT).pack(anchor=tk.W)
def browse_path(self):
    """Ask the user for a directory and put it in the path entry."""
    from tkinter import filedialog
    chosen = filedialog.askdirectory(initialdir=self.project_path)
    # Dialog returns '' when cancelled; keep the current entry then.
    if not chosen:
        return
    self.path_entry.delete(0, tk.END)
    self.path_entry.insert(0, chosen)
def open_terminal(self):
    """Open a system terminal window at the configured project path.

    Platform handling:
      - Windows: spawn a new cmd window already cd'ed into the path.
      - macOS:   ask Terminal.app to open at the path.
      - Linux:   launch the default terminal emulator with its working
                 directory set to the path.
    """
    import subprocess
    path = self.path_entry.get()
    if os.name == 'nt':  # Windows
        # 'start' is a cmd built-in, so it must be run through cmd itself.
        subprocess.Popen(['cmd', '/c', 'start', 'cmd', '/k', f'cd /d "{path}"'])
    elif os.uname().sysname == 'Darwin':  # macOS
        subprocess.Popen(['open', '-a', 'Terminal', path])
    else:  # Linux / other POSIX
        # Fix: the previous `-e 'cd <path>'` ran `cd` as the terminal's
        # command, which exits immediately instead of leaving a shell open.
        # Setting cwd starts the emulator's shell in the project directory.
        subprocess.Popen(['x-terminal-emulator'], cwd=path)
def open_readme(self):
    """Open the project's README.md with the platform's default handler."""
    import subprocess
    path = os.path.join(self.path_entry.get(), 'README.md')
    if not os.path.exists(path):
        messagebox.showerror("Error", "README.md not found")
        return
    if os.name == 'nt':
        # Fix: 'start' is a cmd built-in, not an executable, so
        # Popen(['start', path]) raises FileNotFoundError on Windows.
        os.startfile(path)
    elif os.uname().sysname == 'Darwin':
        # Fix: macOS has no xdg-open by default; 'open' is the native tool.
        subprocess.Popen(['open', path])
    else:
        subprocess.Popen(['xdg-open', path])
def open_agents(self):
    """Open the project's AGENTS.md with the platform's default handler."""
    import subprocess
    path = os.path.join(self.path_entry.get(), 'AGENTS.md')
    if not os.path.exists(path):
        messagebox.showerror("Error", "AGENTS.md not found")
        return
    if os.name == 'nt':
        # Fix: 'start' is a cmd built-in, not an executable, so
        # Popen(['start', path]) raises FileNotFoundError on Windows.
        os.startfile(path)
    elif os.uname().sysname == 'Darwin':
        # Fix: macOS has no xdg-open by default; 'open' is the native tool.
        subprocess.Popen(['open', path])
    else:
        subprocess.Popen(['xdg-open', path])
def refresh_data(self):
    """Reload all component data from disk and update every tab."""
    self.project_path = self.path_entry.get()
    # Re-scan the project directory for each component type.
    self.agents = load_agents(self.project_path)
    self.skills = load_skills(self.project_path)
    self.commands = load_commands(self.project_path)
    self.rules = load_rules(self.project_path)
    # Refresh the tab captions so the counts stay accurate.
    tab_data = (
        ('Agents', self.agents),
        ('Skills', self.skills),
        ('Commands', self.commands),
        ('Rules', self.rules),
    )
    for tab_index, (label, items) in enumerate(tab_data):
        self.notebook.tab(tab_index, text=f"{label} ({len(items)})")
    # Re-fill the list views that cache their rows.
    self.populate_agents(self.agents)
    self.populate_skills(self.skills)
    # Status bar summary.
    self.status_label.config(
        text=f"Ready | Agents: {len(self.agents)} | Skills: {len(self.skills)} | Commands: {len(self.commands)}"
    )
    messagebox.showinfo("Success", "Data refreshed successfully!")
def apply_theme(self):
    """Apply the chosen theme (light/dark) and font to the whole UI.

    Restyles every ttk widget class via ttk.Style, then walks the widget
    tree to recolor plain tk widgets best-effort.
    """
    theme = self.theme_var.get()
    font_family = self.font_var.get()
    font_size = int(self.size_var.get())
    font_tuple = (font_family, font_size)
    # Palette for each theme.
    if theme == 'dark':
        bg_color = '#2b2b2b'
        fg_color = '#ffffff'
        entry_bg = '#3c3c3c'
        frame_bg = '#2b2b2b'
        select_bg = '#0f5a9e'
    else:
        bg_color = '#f0f0f0'
        fg_color = '#000000'
        entry_bg = '#ffffff'
        frame_bg = '#f0f0f0'
        select_bg = '#e0e0e0'
    self.configure(background=bg_color)
    style = ttk.Style()
    # '.' is the root ttk style; every widget class inherits from it.
    style.configure('.', background=bg_color, foreground=fg_color, font=font_tuple)
    style.configure('TFrame', background=bg_color, font=font_tuple)
    style.configure('TLabel', background=bg_color, foreground=fg_color, font=font_tuple)
    style.configure('TNotebook', background=bg_color, font=font_tuple)
    style.configure('TNotebook.Tab', background=frame_bg, foreground=fg_color, font=font_tuple)
    style.map('TNotebook.Tab', background=[('selected', select_bg)])
    style.configure('Treeview', background=entry_bg, foreground=fg_color, fieldbackground=entry_bg, font=font_tuple)
    style.configure('Treeview.Heading', background=frame_bg, foreground=fg_color, font=font_tuple)
    style.configure('TEntry', fieldbackground=entry_bg, foreground=fg_color, font=font_tuple)
    style.configure('TButton', background=frame_bg, foreground=fg_color, font=font_tuple)
    # Header labels keep their fixed sizes; only the family follows the picker.
    self.title_label.configure(font=(font_family, 18, 'bold'))
    self.version_label.configure(font=(font_family, 10))
    # Plain tk widgets ignore ttk styles; recolor them recursively.
    # The bare excepts are deliberate: many ttk widgets reject the
    # 'background' option, and this sweep is best-effort, not critical.
    def update_widget_colors(widget):
        try:
            widget.configure(background=bg_color)
        except:
            pass
        for child in widget.winfo_children():
            try:
                child.configure(background=bg_color)
            except:
                pass
            try:
                update_widget_colors(child)
            except:
                pass
    try:
        update_widget_colors(self)
    except:
        pass
    # Force a redraw so the new colors take effect immediately.
    self.update()
# ============================================================================
# MAIN
# ============================================================================
def main():
    """Build the dashboard window and enter the Tk main loop."""
    dashboard = ECCDashboard()
    dashboard.mainloop()


if __name__ == "__main__":
    main()

View File

@@ -126,30 +126,6 @@
],
"description": "Check MCP server health before MCP tool execution and block unhealthy MCP calls",
"id": "pre:mcp-health-check"
},
{
"matcher": "Edit|Write|MultiEdit",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:edit-write:gateguard-fact-force\" \"scripts/hooks/gateguard-fact-force.js\" \"standard,strict\"",
"timeout": 5
}
],
"description": "Fact-forcing gate: block first Edit/Write/MultiEdit per file and demand investigation (importers, data schemas, user instruction) before allowing",
"id": "pre:edit-write:gateguard-fact-force"
},
{
"matcher": "Bash",
"hooks": [
{
"type": "command",
"command": "node \"${CLAUDE_PLUGIN_ROOT}/scripts/hooks/run-with-flags.js\" \"pre:bash:gateguard-fact-force\" \"scripts/hooks/gateguard-fact-force.js\" \"standard,strict\"",
"timeout": 5
}
],
"description": "Fact-forcing gate: block destructive Bash commands and demand rollback plan; quote user instruction on first Bash per session",
"id": "pre:bash:gateguard-fact-force"
}
],
"PreCompact": [

View File

@@ -39,198 +39,69 @@
},
"files": [
".agents/",
".claude-plugin/",
".codex/",
".codex-plugin/",
".cursor/",
".gemini/",
".opencode/",
".mcp.json",
"AGENTS.md",
"VERSION",
"agent.yaml",
".opencode/commands/",
".opencode/dist/",
".opencode/instructions/",
".opencode/plugins/",
".opencode/prompts/",
".opencode/tools/",
".opencode/index.ts",
".opencode/opencode.json",
".opencode/package.json",
".opencode/package-lock.json",
".opencode/tsconfig.json",
".opencode/MIGRATION.md",
".opencode/README.md",
"agents/",
"commands/",
"contexts/",
"examples/CLAUDE.md",
"examples/user-CLAUDE.md",
"examples/statusline.json",
"hooks/",
"install.ps1",
"install.sh",
"manifests/",
"mcp-configs/",
"plugins/",
"rules/",
"schemas/",
"scripts/catalog.js",
"scripts/claw.js",
"scripts/codex/merge-codex-config.js",
"scripts/codex/merge-mcp-config.js",
"scripts/doctor.js",
"scripts/ci/",
"scripts/ecc.js",
"scripts/gemini-adapt-agents.js",
"scripts/harness-audit.js",
"scripts/hooks/",
"scripts/lib/",
"scripts/claw.js",
"scripts/doctor.js",
"scripts/status.js",
"scripts/sessions-cli.js",
"scripts/install-apply.js",
"scripts/install-plan.js",
"scripts/lib/",
"scripts/list-installed.js",
"scripts/orchestration-status.js",
"scripts/orchestrate-codex-worker.sh",
"scripts/orchestrate-worktrees.js",
"scripts/repair.js",
"scripts/session-inspect.js",
"scripts/sessions-cli.js",
"scripts/setup-package-manager.js",
"scripts/skill-create-output.js",
"scripts/status.js",
"scripts/codex/merge-codex-config.js",
"scripts/codex/merge-mcp-config.js",
"scripts/repair.js",
"scripts/harness-audit.js",
"scripts/session-inspect.js",
"scripts/uninstall.js",
"skills/agent-harness-construction/",
"skills/agent-introspection-debugging/",
"skills/agent-sort/",
"skills/agentic-engineering/",
"skills/ai-first-engineering/",
"skills/ai-regression-testing/",
"skills/android-clean-architecture/",
"skills/api-connector-builder/",
"skills/api-design/",
"skills/article-writing/",
"skills/automation-audit-ops/",
"skills/autonomous-loops/",
"skills/backend-patterns/",
"skills/blueprint/",
"skills/brand-voice/",
"skills/carrier-relationship-management/",
"skills/claude-api/",
"skills/claude-devfleet/",
"skills/clickhouse-io/",
"skills/code-tour/",
"skills/coding-standards/",
"skills/compose-multiplatform-patterns/",
"skills/configure-ecc/",
"skills/connections-optimizer/",
"skills/content-engine/",
"skills/content-hash-cache-pattern/",
"skills/continuous-agent-loop/",
"skills/continuous-learning/",
"skills/continuous-learning-v2/",
"skills/cost-aware-llm-pipeline/",
"skills/council/",
"skills/cpp-coding-standards/",
"skills/cpp-testing/",
"skills/crosspost/",
"skills/csharp-testing/",
"skills/customer-billing-ops/",
"skills/customs-trade-compliance/",
"skills/dart-flutter-patterns/",
"skills/dashboard-builder/",
"skills/data-scraper-agent/",
"skills/database-migrations/",
"skills/deep-research/",
"skills/defi-amm-security/",
"skills/deployment-patterns/",
"skills/django-patterns/",
"skills/django-security/",
"skills/django-tdd/",
"skills/django-verification/",
"skills/dmux-workflows/",
"skills/docker-patterns/",
"skills/dotnet-patterns/",
"skills/e2e-testing/",
"skills/ecc-tools-cost-audit/",
"skills/email-ops/",
"skills/energy-procurement/",
"skills/enterprise-agent-ops/",
"skills/eval-harness/",
"skills/evm-token-decimals/",
"skills/exa-search/",
"skills/fal-ai-media/",
"skills/finance-billing-ops/",
"skills/foundation-models-on-device/",
"skills/frontend-design/",
"skills/frontend-patterns/",
"skills/frontend-slides/",
"skills/github-ops/",
"skills/golang-patterns/",
"skills/golang-testing/",
"skills/google-workspace-ops/",
"skills/healthcare-phi-compliance/",
"skills/hipaa-compliance/",
"skills/hookify-rules/",
"skills/inventory-demand-planning/",
"skills/investor-materials/",
"skills/investor-outreach/",
"skills/iterative-retrieval/",
"skills/java-coding-standards/",
"skills/jira-integration/",
"skills/jpa-patterns/",
"skills/knowledge-ops/",
"skills/kotlin-coroutines-flows/",
"skills/kotlin-exposed-patterns/",
"skills/kotlin-ktor-patterns/",
"skills/kotlin-patterns/",
"skills/kotlin-testing/",
"skills/laravel-patterns/",
"skills/laravel-plugin-discovery/",
"skills/laravel-security/",
"skills/laravel-tdd/",
"skills/laravel-verification/",
"skills/lead-intelligence/",
"skills/liquid-glass-design/",
"skills/llm-trading-agent-security/",
"skills/logistics-exception-management/",
"skills/manim-video/",
"skills/market-research/",
"skills/mcp-server-patterns/",
"skills/messages-ops/",
"skills/nanoclaw-repl/",
"skills/nestjs-patterns/",
"skills/nodejs-keccak256/",
"skills/nutrient-document-processing/",
"skills/perl-patterns/",
"skills/perl-security/",
"skills/perl-testing/",
"skills/plankton-code-quality/",
"skills/postgres-patterns/",
"skills/product-capability/",
"skills/production-scheduling/",
"skills/project-flow-ops/",
"skills/prompt-optimizer/",
"skills/python-patterns/",
"skills/python-testing/",
"skills/quality-nonconformance/",
"skills/ralphinho-rfc-pipeline/",
"skills/regex-vs-llm-structured-text/",
"skills/remotion-video-creation/",
"skills/research-ops/",
"skills/returns-reverse-logistics/",
"skills/rust-patterns/",
"skills/rust-testing/",
"skills/search-first/",
"skills/security-bounty-hunter/",
"skills/security-review/",
"skills/security-scan/",
"skills/seo/",
"skills/skill-stocktake/",
"skills/social-graph-ranker/",
"skills/springboot-patterns/",
"skills/springboot-security/",
"skills/springboot-tdd/",
"skills/springboot-verification/",
"skills/strategic-compact/",
"skills/swift-actor-persistence/",
"skills/swift-concurrency-6-2/",
"skills/swift-protocol-di-testing/",
"skills/swiftui-patterns/",
"skills/tdd-workflow/",
"skills/team-builder/",
"skills/terminal-ops/",
"skills/token-budget-advisor/",
"skills/ui-demo/",
"skills/unified-notifications-ops/",
"skills/verification-loop/",
"skills/video-editing/",
"skills/videodb/",
"skills/visa-doc-translate/",
"skills/workspace-surface-audit/",
"skills/x-api/",
"the-security-guide.md"
"skills/",
"AGENTS.md",
"agent.yaml",
".claude-plugin/plugin.json",
".claude-plugin/marketplace.json",
".claude-plugin/README.md",
".codex-plugin/plugin.json",
".codex-plugin/README.md",
".mcp.json",
"install.sh",
"install.ps1",
"llms.txt",
"VERSION"
],
"bin": {
"ecc": "scripts/ecc.js",
@@ -249,8 +120,7 @@
"test": "node scripts/ci/check-unicode-safety.js && node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-install-manifests.js && node scripts/ci/validate-no-personal-paths.js && npm run catalog:check && node tests/run-all.js",
"coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js",
"build:opencode": "node scripts/build-opencode.js",
"prepack": "npm run build:opencode",
"dashboard": "python3 ./ecc_dashboard.py"
"prepack": "npm run build:opencode"
},
"dependencies": {
"@iarna/toml": "^2.2.5",
@@ -259,11 +129,11 @@
},
"devDependencies": {
"@eslint/js": "^9.39.2",
"@opencode-ai/plugin": "^1.4.3",
"@opencode-ai/plugin": "^1.0.0",
"@types/node": "^20.19.24",
"c8": "^11.0.0",
"eslint": "^9.39.2",
"globals": "^17.5.0",
"globals": "^17.4.0",
"markdownlint-cli": "^0.48.0",
"typescript": "^5.9.3"
},
@@ -271,4 +141,4 @@
"node": ">=18"
},
"packageManager": "yarn@4.9.2+sha512.1fc009bc09d13cfd0e19efa44cbfc2b9cf6ca61482725eb35bbc5e257e093ebf4130db6dfe15d604ff4b79efd8e1e8e99b25fa7d0a6197c9f9826358d4d65c3c"
}
}

View File

@@ -1,78 +0,0 @@
[project]
name = "llm-abstraction"
version = "0.1.0"
description = "Provider-agnostic LLM abstraction layer"
readme = "README.md"
requires-python = ">=3.11"
license = {text = "MIT"}
authors = [
{name = "Affaan Mustafa", email = "affaan@example.com"}
]
keywords = ["llm", "openai", "anthropic", "ollama", "ai"]
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"anthropic>=0.25.0",
"openai>=1.30.0",
]
[project.optional-dependencies]
dev = [
"pytest>=8.0",
"pytest-asyncio>=0.23",
"pytest-cov>=4.1",
"pytest-mock>=3.12",
"ruff>=0.4",
"mypy>=1.10",
]
[project.urls]
Homepage = "https://github.com/affaan-m/everything-claude-code"
Repository = "https://github.com/affaan-m/everything-claude-code"
[project.scripts]
llm-select = "llm.cli.selector:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["src/llm"]
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
filterwarnings = ["ignore::DeprecationWarning"]
[tool.coverage.run]
source = ["src/llm"]
branch = true
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"if TYPE_CHECKING:",
"raise NotImplementedError",
]
[tool.ruff]
src = ["src"]
target-version = "py311"
[tool.ruff.lint]
select = ["E", "F", "I", "N", "W", "UP"]
ignore = ["E501"]
[tool.mypy]
python_version = "3.11"
mypy_path = "src"
warn_return_any = true
warn_unused_ignores = true

View File

@@ -1,265 +0,0 @@
#!/usr/bin/env node
/**
* PreToolUse Hook: GateGuard Fact-Forcing Gate
*
* Forces Claude to investigate before editing files or running commands.
* Instead of asking "are you sure?" (which LLMs always answer "yes"),
* this hook demands concrete facts: importers, public API, data schemas.
*
* The act of investigation creates awareness that self-evaluation never did.
*
* Gates:
* - Edit/Write: list importers, affected API, verify data schemas, quote instruction
* - Bash (destructive): list targets, rollback plan, quote instruction
* - Bash (routine): quote current instruction (once per session)
*
* Compatible with run-with-flags.js via module.exports.run().
* Cross-platform (Windows, macOS, Linux).
*
* Full package with config support: pip install gateguard-ai
* Repo: https://github.com/zunoworks/gateguard
*/
'use strict';
const crypto = require('crypto');
const fs = require('fs');
const path = require('path');
// Session state — scoped per session to avoid cross-session races.
// Uses CLAUDE_SESSION_ID (set by Claude Code) or falls back to PID-based isolation.
const STATE_DIR = process.env.GATEGUARD_STATE_DIR || path.join(process.env.HOME || process.env.USERPROFILE || '/tmp', '.gateguard');
const SESSION_ID = process.env.CLAUDE_SESSION_ID || process.env.ECC_SESSION_ID || `pid-${process.ppid || process.pid}`;
const STATE_FILE = path.join(STATE_DIR, `state-${SESSION_ID.replace(/[^a-zA-Z0-9_-]/g, '_')}.json`);
// State expires after 30 minutes of inactivity
const SESSION_TIMEOUT_MS = 30 * 60 * 1000;
// Maximum checked entries to prevent unbounded growth
const MAX_CHECKED_ENTRIES = 500;
const MAX_SESSION_KEYS = 50;
const ROUTINE_BASH_SESSION_KEY = '__bash_session__';
const DESTRUCTIVE_BASH = /\b(rm\s+-rf|git\s+reset\s+--hard|git\s+checkout\s+--|git\s+clean\s+-f|drop\s+table|delete\s+from|truncate|git\s+push\s+--force|dd\s+if=)\b/i;
// --- State management (per-session, atomic writes, bounded) ---
// Load this session's persisted gate state from disk.
// Returns a fresh empty state when the file is missing, unparseable,
// or older than SESSION_TIMEOUT_MS (stale files are deleted eagerly).
function loadState() {
  try {
    if (fs.existsSync(STATE_FILE)) {
      const state = JSON.parse(fs.readFileSync(STATE_FILE, 'utf8'));
      const lastActive = state.last_active || 0;
      // Expired session: discard the file and start over.
      if (Date.now() - lastActive > SESSION_TIMEOUT_MS) {
        try { fs.unlinkSync(STATE_FILE); } catch (_) { /* ignore */ }
        return { checked: [], last_active: Date.now() };
      }
      return state;
    }
  } catch (_) { /* ignore */ }
  // Missing or corrupt file: behave as a brand-new session.
  return { checked: [], last_active: Date.now() };
}
function pruneCheckedEntries(checked) {
  // Cap the checked list so state never grows without bound.
  if (checked.length <= MAX_CHECKED_ENTRIES) {
    return checked;
  }
  // The routine-bash marker must survive pruning so the once-per-session
  // quote gate is not re-triggered.
  const preserved = checked.includes(ROUTINE_BASH_SESSION_KEY) ? [ROUTINE_BASH_SESSION_KEY] : [];
  // Partition the rest into session-scoped markers ("__...") and file paths.
  const sessionKeys = [];
  const fileKeys = [];
  for (const key of checked) {
    if (key === ROUTINE_BASH_SESSION_KEY) continue;
    (key.startsWith('__') ? sessionKeys : fileKeys).push(key);
  }
  // Keep the most recent entries of each kind, session keys budgeted first.
  const sessionBudget = Math.max(MAX_SESSION_KEYS - preserved.length, 0);
  const keptSession = sessionKeys.slice(-sessionBudget);
  const fileBudget = Math.max(MAX_CHECKED_ENTRIES - preserved.length - keptSession.length, 0);
  return preserved.concat(keptSession, fileKeys.slice(-fileBudget));
}
// Persist gate state atomically (temp file + rename) so a concurrent
// reader never observes a partially written JSON document.
function saveState(state) {
  try {
    state.last_active = Date.now();
    // Cap growth before writing; see pruneCheckedEntries.
    state.checked = pruneCheckedEntries(state.checked);
    fs.mkdirSync(STATE_DIR, { recursive: true });
    // Atomic write: temp file + rename prevents partial reads
    const tmpFile = STATE_FILE + '.tmp.' + process.pid;
    fs.writeFileSync(tmpFile, JSON.stringify(state, null, 2), 'utf8');
    fs.renameSync(tmpFile, STATE_FILE);
  } catch (_) { /* ignore */ }
}
function markChecked(key) {
  // Record `key` as investigated; persist only when it is new.
  const state = loadState();
  if (state.checked.includes(key)) return;
  state.checked.push(key);
  saveState(state);
}
function isChecked(key) {
  // Report whether `key` has already passed the gate this session.
  const state = loadState();
  const alreadySeen = state.checked.includes(key);
  // Write-back even on a pure read: saveState refreshes last_active,
  // which keeps the session from expiring mid-conversation.
  saveState(state);
  return alreadySeen;
}
// Prune stale session files older than 1 hour
// Runs once at module load; deletes abandoned per-session state files.
// Any fs error (e.g. missing state dir) is deliberately swallowed so the
// cleanup can never break the hook itself.
(function pruneStaleFiles() {
  try {
    const files = fs.readdirSync(STATE_DIR);
    const now = Date.now();
    for (const f of files) {
      // Only touch files this module created (state-<session>.json).
      if (!f.startsWith('state-') || !f.endsWith('.json')) continue;
      const fp = path.join(STATE_DIR, f);
      const stat = fs.statSync(fp);
      // 2x the session timeout (= 1 hour) marks a file as abandoned.
      if (now - stat.mtimeMs > SESSION_TIMEOUT_MS * 2) {
        fs.unlinkSync(fp);
      }
    }
  } catch (_) { /* ignore */ }
})();
// --- Sanitize file path against injection ---
function sanitizePath(filePath) {
  // Neutralize control characters (incl. NUL), bidi overrides, and newlines
  // so a hostile path cannot inject text into the gate message, then trim
  // and cap the length at 500 characters.
  const CONTROL_OR_BIDI = /[\x00-\x1f\x7f\u200e\u200f\u202a-\u202e\u2066-\u2069]/g;
  return filePath.replace(CONTROL_OR_BIDI, ' ').trim().slice(0, 500);
}
// --- Gate messages ---
function editGateMsg(filePath) {
  // Deny message for the first Edit of a file: demands investigation facts.
  const safe = sanitizePath(filePath);
  return `[Fact-Forcing Gate]

Before editing ${safe}, present these facts:

1. List ALL files that import/require this file (use Grep)
2. List the public functions/classes affected by this change
3. If this file reads/writes data files, show field names, structure, and date format (use redacted or synthetic values, not raw production data)
4. Quote the user's current instruction verbatim

Present the facts, then retry the same operation.`;
}
function writeGateMsg(filePath) {
  // Deny message for the first Write of a file: demands investigation facts.
  const safe = sanitizePath(filePath);
  return `[Fact-Forcing Gate]

Before creating ${safe}, present these facts:

1. Name the file(s) and line(s) that will call this new file
2. Confirm no existing file serves the same purpose (use Glob)
3. If this file reads/writes data files, show field names, structure, and date format (use redacted or synthetic values, not raw production data)
4. Quote the user's current instruction verbatim

Present the facts, then retry the same operation.`;
}
// Deny message for destructive Bash commands (matched by DESTRUCTIVE_BASH
// elsewhere in this file): demands an impact list, a rollback line, and
// the verbatim user instruction.
function destructiveBashMsg() {
  const lines = [
    '[Fact-Forcing Gate]',
    '',
    'Destructive command detected. Before running, present:',
    '',
    '1. List all files/data this command will modify or delete',
    '2. Write a one-line rollback procedure',
    "3. Quote the user's current instruction verbatim",
    '',
    'Present the facts, then retry the same operation.'
  ];
  return lines.join('\n');
}
// Lightweight deny message for the first routine (non-destructive) Bash
// command of a session: only requires quoting the user's instruction.
function routineBashMsg() {
  return '[Fact-Forcing Gate]\n\n' +
    "Quote the user's current instruction verbatim.\n" +
    'Then retry the same operation.';
}
// --- Deny helper ---
// Builds a PreToolUse "deny" hook result: the decision payload is
// JSON-serialized on stdout, and exitCode 0 signals the hook itself
// ran successfully (the denial is carried in the payload, not the code).
function denyResult(reason) {
  const payload = {
    hookSpecificOutput: {
      hookEventName: 'PreToolUse',
      permissionDecision: 'deny',
      permissionDecisionReason: reason
    }
  };
  return { stdout: JSON.stringify(payload), exitCode: 0 };
}
// --- Core logic (exported for run-with-flags.js) ---
// Core gate dispatcher. `rawInput` is the PreToolUse hook payload — a
// JSON string (CLI invocation) or an already-parsed object. Returns
// rawInput unchanged to ALLOW the tool call, or a denyResult(...) to
// BLOCK it with a fact-forcing message.
function run(rawInput) {
  let data;
  try {
    data = typeof rawInput === 'string' ? JSON.parse(rawInput) : rawInput;
  } catch (_) {
    return rawInput; // allow on parse error
  }
  const rawToolName = data.tool_name || '';
  const toolInput = data.tool_input || {};
  // Normalize: case-insensitive matching via lookup map
  const TOOL_MAP = { 'edit': 'Edit', 'write': 'Write', 'multiedit': 'MultiEdit', 'bash': 'Bash' };
  const toolName = TOOL_MAP[rawToolName.toLowerCase()] || rawToolName;
  if (toolName === 'Edit' || toolName === 'Write') {
    const filePath = toolInput.file_path || '';
    if (!filePath) {
      return rawInput; // allow
    }
    // First touch of this file in the session: record it and deny once;
    // the retry (after facts are presented) passes through.
    if (!isChecked(filePath)) {
      markChecked(filePath);
      return denyResult(toolName === 'Edit' ? editGateMsg(filePath) : writeGateMsg(filePath));
    }
    return rawInput; // allow
  }
  if (toolName === 'MultiEdit') {
    // Each file in the batch is gated individually; deny on the first
    // unchecked file found (later files gate on subsequent attempts).
    const edits = toolInput.edits || [];
    for (const edit of edits) {
      const filePath = edit.file_path || '';
      if (filePath && !isChecked(filePath)) {
        markChecked(filePath);
        return denyResult(editGateMsg(filePath));
      }
    }
    return rawInput; // allow
  }
  if (toolName === 'Bash') {
    const command = toolInput.command || '';
    if (DESTRUCTIVE_BASH.test(command)) {
      // Gate destructive commands on first attempt; allow retry after facts presented
      // The key hashes the exact command text, so each distinct
      // destructive command is gated once per session.
      const key = '__destructive__' + crypto.createHash('sha256').update(command).digest('hex').slice(0, 16);
      if (!isChecked(key)) {
        markChecked(key);
        return denyResult(destructiveBashMsg());
      }
      return rawInput; // allow retry after facts presented
    }
    // Routine (non-destructive) bash is gated only once per session.
    if (!isChecked(ROUTINE_BASH_SESSION_KEY)) {
      markChecked(ROUTINE_BASH_SESSION_KEY);
      return denyResult(routineBashMsg());
    }
    return rawInput; // allow
  }
  return rawInput; // allow
}
module.exports = { run };

View File

@@ -1,23 +1,11 @@
const fs = require('fs');
const path = require('path');
const {
createFlatRuleOperations,
createInstallTargetAdapter,
createManagedOperation,
isForeignPlatformPath,
} = require('./helpers');
function toCursorRuleFileName(fileName, sourceRelativeFile) {
if (path.basename(sourceRelativeFile).toLowerCase() === 'readme.md') {
return null;
}
return fileName.endsWith('.md')
? `${fileName.slice(0, -3)}.mdc`
: fileName;
}
module.exports = createInstallTargetAdapter({
id: 'cursor-project',
target: 'cursor',
@@ -29,7 +17,6 @@ module.exports = createInstallTargetAdapter({
const modules = Array.isArray(input.modules)
? input.modules
: (input.module ? [input.module] : []);
const seenDestinationPaths = new Set();
const {
repoRoot,
projectRoot,
@@ -41,98 +28,23 @@ module.exports = createInstallTargetAdapter({
homeDir,
};
const targetRoot = adapter.resolveRoot(planningInput);
const entries = modules.flatMap((module, moduleIndex) => {
return modules.flatMap(module => {
const paths = Array.isArray(module.paths) ? module.paths : [];
return paths
.filter(p => !isForeignPlatformPath(p, adapter.target))
.map((sourceRelativePath, pathIndex) => ({
module,
sourceRelativePath,
moduleIndex,
pathIndex,
}));
}).sort((left, right) => {
const getPriority = value => {
if (value === 'rules') {
return 0;
}
.flatMap(sourceRelativePath => {
if (sourceRelativePath === 'rules') {
return createFlatRuleOperations({
moduleId: module.id,
repoRoot,
sourceRelativePath,
destinationDir: path.join(targetRoot, 'rules'),
});
}
if (value === '.cursor') {
return 1;
}
return 2;
};
const leftPriority = getPriority(left.sourceRelativePath);
const rightPriority = getPriority(right.sourceRelativePath);
if (leftPriority !== rightPriority) {
return leftPriority - rightPriority;
}
if (left.moduleIndex !== right.moduleIndex) {
return left.moduleIndex - right.moduleIndex;
}
return left.pathIndex - right.pathIndex;
});
function takeUniqueOperations(operations) {
return operations.filter(operation => {
if (!operation || !operation.destinationPath) {
return false;
}
if (seenDestinationPaths.has(operation.destinationPath)) {
return false;
}
seenDestinationPaths.add(operation.destinationPath);
return true;
});
}
return entries.flatMap(({ module, sourceRelativePath }) => {
if (sourceRelativePath === 'rules') {
return takeUniqueOperations(createFlatRuleOperations({
moduleId: module.id,
repoRoot,
sourceRelativePath,
destinationDir: path.join(targetRoot, 'rules'),
destinationNameTransform: toCursorRuleFileName,
}));
}
if (sourceRelativePath === '.cursor') {
const cursorRoot = path.join(repoRoot, '.cursor');
if (!fs.existsSync(cursorRoot) || !fs.statSync(cursorRoot).isDirectory()) {
return [];
}
const childOperations = fs.readdirSync(cursorRoot, { withFileTypes: true })
.sort((left, right) => left.name.localeCompare(right.name))
.filter(entry => entry.name !== 'rules')
.map(entry => createManagedOperation({
moduleId: module.id,
sourceRelativePath: path.join('.cursor', entry.name),
destinationPath: path.join(targetRoot, entry.name),
strategy: 'preserve-relative-path',
}));
const ruleOperations = createFlatRuleOperations({
moduleId: module.id,
repoRoot,
sourceRelativePath: '.cursor/rules',
destinationDir: path.join(targetRoot, 'rules'),
destinationNameTransform: toCursorRuleFileName,
return [adapter.createScaffoldOperation(module.id, sourceRelativePath, planningInput)];
});
return takeUniqueOperations([...childOperations, ...ruleOperations]);
}
return takeUniqueOperations([
adapter.createScaffoldOperation(module.id, sourceRelativePath, planningInput),
]);
});
},
});

View File

@@ -181,13 +181,7 @@ function createNamespacedFlatRuleOperations(adapter, moduleId, sourceRelativePat
return operations;
}
function createFlatRuleOperations({
moduleId,
repoRoot,
sourceRelativePath,
destinationDir,
destinationNameTransform,
}) {
function createFlatRuleOperations({ moduleId, repoRoot, sourceRelativePath, destinationDir }) {
const normalizedSourcePath = normalizeRelativePath(sourceRelativePath);
const sourceRoot = path.join(repoRoot || '', normalizedSourcePath);
@@ -207,33 +201,19 @@ function createFlatRuleOperations({
if (entry.isDirectory()) {
const relativeFiles = listRelativeFiles(entryPath);
for (const relativeFile of relativeFiles) {
const defaultFileName = `${namespace}-${normalizeRelativePath(relativeFile).replace(/\//g, '-')}`;
const sourceRelativeFile = path.join(normalizedSourcePath, namespace, relativeFile);
const flattenedFileName = typeof destinationNameTransform === 'function'
? destinationNameTransform(defaultFileName, sourceRelativeFile)
: defaultFileName;
if (!flattenedFileName) {
continue;
}
const flattenedFileName = `${namespace}-${normalizeRelativePath(relativeFile).replace(/\//g, '-')}`;
operations.push(createManagedOperation({
moduleId,
sourceRelativePath: sourceRelativeFile,
sourceRelativePath: path.join(normalizedSourcePath, namespace, relativeFile),
destinationPath: path.join(destinationDir, flattenedFileName),
strategy: 'flatten-copy',
}));
}
} else if (entry.isFile()) {
const sourceRelativeFile = path.join(normalizedSourcePath, entry.name);
const destinationFileName = typeof destinationNameTransform === 'function'
? destinationNameTransform(entry.name, sourceRelativeFile)
: entry.name;
if (!destinationFileName) {
continue;
}
operations.push(createManagedOperation({
moduleId,
sourceRelativePath: sourceRelativeFile,
destinationPath: path.join(destinationDir, destinationFileName),
sourceRelativePath: path.join(normalizedSourcePath, entry.name),
destinationPath: path.join(destinationDir, entry.name),
strategy: 'flatten-copy',
}));
}

View File

@@ -18,7 +18,6 @@ CODEX_MARKETPLACE_JSON=".agents/plugins/marketplace.json"
CODEX_PLUGIN_JSON=".codex-plugin/plugin.json"
OPENCODE_PACKAGE_JSON=".opencode/package.json"
OPENCODE_PACKAGE_LOCK_JSON=".opencode/package-lock.json"
OPENCODE_ECC_HOOKS_PLUGIN=".opencode/plugins/ecc-hooks.ts"
README_FILE="README.md"
ZH_CN_README_FILE="docs/zh-CN/README.md"
SELECTIVE_INSTALL_ARCHITECTURE_DOC="docs/SELECTIVE-INSTALL-ARCHITECTURE.md"
@@ -56,7 +55,7 @@ if [[ -n "$(git status --porcelain --untracked-files=all)" ]]; then
fi
# Verify versioned manifests exist
for FILE in "$ROOT_PACKAGE_JSON" "$PACKAGE_LOCK_JSON" "$ROOT_AGENTS_MD" "$TR_AGENTS_MD" "$ZH_CN_AGENTS_MD" "$AGENT_YAML" "$VERSION_FILE" "$PLUGIN_JSON" "$MARKETPLACE_JSON" "$CODEX_MARKETPLACE_JSON" "$CODEX_PLUGIN_JSON" "$OPENCODE_PACKAGE_JSON" "$OPENCODE_PACKAGE_LOCK_JSON" "$OPENCODE_ECC_HOOKS_PLUGIN" "$README_FILE" "$ZH_CN_README_FILE" "$SELECTIVE_INSTALL_ARCHITECTURE_DOC"; do
for FILE in "$ROOT_PACKAGE_JSON" "$PACKAGE_LOCK_JSON" "$ROOT_AGENTS_MD" "$TR_AGENTS_MD" "$ZH_CN_AGENTS_MD" "$AGENT_YAML" "$VERSION_FILE" "$PLUGIN_JSON" "$MARKETPLACE_JSON" "$CODEX_MARKETPLACE_JSON" "$CODEX_PLUGIN_JSON" "$OPENCODE_PACKAGE_JSON" "$OPENCODE_PACKAGE_LOCK_JSON" "$README_FILE" "$ZH_CN_README_FILE" "$SELECTIVE_INSTALL_ARCHITECTURE_DOC"; do
if [[ ! -f "$FILE" ]]; then
echo "Error: $FILE not found"
exit 1
@@ -218,24 +217,6 @@ update_codex_marketplace_version() {
' "$CODEX_MARKETPLACE_JSON" "$VERSION"
}
# Rewrites the "## Active Plugin: Everything Claude Code vX.Y.Z" banner
# inside the OpenCode hooks plugin file to the new $VERSION. Exits
# non-zero when the banner pattern is absent, so the release aborts
# instead of silently shipping a stale version string.
update_opencode_hook_banner_version() {
node -e '
const fs = require("fs");
const file = process.argv[1];
const version = process.argv[2];
const current = fs.readFileSync(file, "utf8");
const updated = current.replace(
/(## Active Plugin: Everything Claude Code v)[0-9]+\.[0-9]+\.[0-9]+/,
`$1${version}`
);
if (updated === current) {
console.error(`Error: could not update OpenCode hook banner version in ${file}`);
process.exit(1);
}
fs.writeFileSync(file, updated);
' "$OPENCODE_ECC_HOOKS_PLUGIN" "$VERSION"
}
# Update all shipped package/plugin manifests
update_version "$ROOT_PACKAGE_JSON" "s|\"version\": *\"[^\"]*\"|\"version\": \"$VERSION\"|"
update_package_lock_version "$PACKAGE_LOCK_JSON"
@@ -250,7 +231,6 @@ update_codex_marketplace_version
update_version "$CODEX_PLUGIN_JSON" "s|\"version\": *\"[^\"]*\"|\"version\": \"$VERSION\"|"
update_version "$OPENCODE_PACKAGE_JSON" "s|\"version\": *\"[^\"]*\"|\"version\": \"$VERSION\"|"
update_package_lock_version "$OPENCODE_PACKAGE_LOCK_JSON"
update_opencode_hook_banner_version
update_readme_version_row "$README_FILE" "Version" "Plugin" "Plugin" "Reference config"
update_readme_version_row "$ZH_CN_README_FILE" "版本" "插件" "插件" "参考配置"
update_selective_install_repo_version "$SELECTIVE_INSTALL_ARCHITECTURE_DOC"
@@ -263,7 +243,7 @@ node tests/scripts/build-opencode.test.js
node tests/plugin-manifest.test.js
# Stage, commit, tag, and push
git add "$ROOT_PACKAGE_JSON" "$PACKAGE_LOCK_JSON" "$ROOT_AGENTS_MD" "$TR_AGENTS_MD" "$ZH_CN_AGENTS_MD" "$AGENT_YAML" "$VERSION_FILE" "$PLUGIN_JSON" "$MARKETPLACE_JSON" "$CODEX_MARKETPLACE_JSON" "$CODEX_PLUGIN_JSON" "$OPENCODE_PACKAGE_JSON" "$OPENCODE_PACKAGE_LOCK_JSON" "$OPENCODE_ECC_HOOKS_PLUGIN" "$README_FILE" "$ZH_CN_README_FILE" "$SELECTIVE_INSTALL_ARCHITECTURE_DOC"
git add "$ROOT_PACKAGE_JSON" "$PACKAGE_LOCK_JSON" "$ROOT_AGENTS_MD" "$TR_AGENTS_MD" "$ZH_CN_AGENTS_MD" "$AGENT_YAML" "$VERSION_FILE" "$PLUGIN_JSON" "$MARKETPLACE_JSON" "$CODEX_MARKETPLACE_JSON" "$CODEX_PLUGIN_JSON" "$OPENCODE_PACKAGE_JSON" "$OPENCODE_PACKAGE_LOCK_JSON" "$README_FILE" "$ZH_CN_README_FILE" "$SELECTIVE_INSTALL_ARCHITECTURE_DOC"
git commit -m "chore: bump plugin version to $VERSION"
git tag "v$VERSION"
git push origin main "v$VERSION"

View File

@@ -1,146 +0,0 @@
---
name: accessibility
description: Design, implement, and audit inclusive digital products using WCAG 2.2 Level AA
standards. Use this skill to generate semantic ARIA for the Web and accessibility traits for native platforms (iOS/Android).
origin: ECC
---
# Accessibility (WCAG 2.2)
This skill ensures that digital interfaces are Perceivable, Operable, Understandable, and Robust (POUR) for all users, including those using screen readers, switch controls, or keyboard navigation. It focuses on the technical implementation of WCAG 2.2 success criteria.
## When to Use
- Defining UI component specifications for Web, iOS, or Android.
- Auditing existing code for accessibility barriers or compliance gaps.
- Implementing new WCAG 2.2 standards like Target Size (Minimum) and Focus Appearance.
- Mapping high-level design requirements to technical attributes (ARIA roles, traits, hints).
## Core Concepts
- **POUR Principles**: The foundation of WCAG (Perceivable, Operable, Understandable, Robust).
- **Semantic Mapping**: Using native elements over generic containers to provide built-in accessibility.
- **Accessibility Tree**: The representation of the UI that assistive technologies actually "read."
- **Focus Management**: Controlling the order and visibility of the keyboard/screen reader cursor.
- **Labeling & Hints**: Providing context through `aria-label`, `accessibilityLabel`, and `contentDescription`.
## How It Works
### Step 1: Identify the Component Role
Determine the functional purpose (e.g., Is this a button, a link, or a tab?). Use the most semantic native element available before resorting to custom roles.
### Step 2: Define Perceivable Attributes
- Ensure text contrast meets **4.5:1** (normal) or **3:1** (large/UI).
- Add text alternatives for non-text content (images, icons).
- Implement responsive reflow (up to 400% zoom without loss of function).
### Step 3: Implement Operable Controls
- Ensure a minimum **24x24 CSS pixel** target size (WCAG 2.2 SC 2.5.8).
- Verify all interactive elements are reachable via keyboard and have a visible focus indicator (SC 2.4.11).
- Provide single-pointer alternatives for dragging movements.
### Step 4: Ensure Understandable Logic
- Use consistent navigation patterns.
- Provide descriptive error messages and suggestions for correction (SC 3.3.3).
- Implement "Redundant Entry" (SC 3.3.7) to prevent asking for the same data twice.
### Step 5: Verify Robust Compatibility
- Use correct `Name, Role, Value` patterns.
- Implement `aria-live` or live regions for dynamic status updates.
## Accessibility Architecture Diagram
```mermaid
flowchart TD
UI["UI Component"] --> Platform{Platform?}
Platform -->|Web| ARIA["WAI-ARIA + HTML5"]
Platform -->|iOS| SwiftUI["Accessibility Traits + Labels"]
Platform -->|Android| Compose["Semantics + ContentDesc"]
ARIA --> AT["Assistive Technology (Screen Readers, Switches)"]
SwiftUI --> AT
Compose --> AT
```
## Cross-Platform Mapping
| Feature | Web (HTML/ARIA) | iOS (SwiftUI) | Android (Compose) |
| :----------------- | :----------------------- | :----------------------------------- | :---------------------------------------------------------- |
| **Primary Label** | `aria-label` / `<label>` | `.accessibilityLabel()` | `contentDescription` |
| **Secondary Hint** | `aria-describedby` | `.accessibilityHint()` | `Modifier.semantics { stateDescription = ... }` |
| **Action Role** | `role="button"` | `.accessibilityAddTraits(.isButton)` | `Modifier.semantics { role = Role.Button }` |
| **Live Updates** | `aria-live="polite"` | `.accessibilityLiveRegion(.polite)` | `Modifier.semantics { liveRegion = LiveRegionMode.Polite }` |
## Examples
### Web: Accessible Search
```html
<form role="search">
<label for="search-input" class="sr-only">Search products</label>
<input type="search" id="search-input" placeholder="Search..." />
<button type="submit" aria-label="Submit Search">
<svg aria-hidden="true">...</svg>
</button>
</form>
```
### iOS: Accessible Action Button
```swift
Button(action: deleteItem) {
Image(systemName: "trash")
}
.accessibilityLabel("Delete item")
.accessibilityHint("Permanently removes this item from your list")
.accessibilityAddTraits(.isButton)
```
### Android: Accessible Toggle
```kotlin
Switch(
checked = isEnabled,
onCheckedChange = { onToggle() },
modifier = Modifier.semantics {
contentDescription = "Enable notifications"
}
)
```
## Anti-Patterns to Avoid
- **Div-Buttons**: Using a `<div>` or `<span>` for a click event without adding a role and keyboard support.
- **Color-Only Meaning**: Indicating an error or status _only_ with a color change (e.g., turning a border red).
- **Uncontained Modal Focus**: Modals that don't trap focus, allowing keyboard users to navigate background content while the modal is open. Focus must be contained _and_ escapable via the `Escape` key or an explicit close button (WCAG SC 2.1.2).
- **Redundant Alt Text**: Using "Image of..." or "Picture of..." in alt text (screen readers already announce the role "Image").
## Best Practices Checklist
- [ ] Interactive elements meet the **24x24px** (Web) or **44x44pt** (Native) target size.
- [ ] Focus indicators are clearly visible and high-contrast.
- [ ] Modals **contain focus** while open, and release it cleanly on close (`Escape` key or close button).
- [ ] Dropdowns and menus restore focus to the trigger element on close.
- [ ] Forms provide text-based error suggestions.
- [ ] All icon-only buttons have a descriptive text label.
- [ ] Content reflows properly when text is scaled.
## References
- [WCAG 2.2 Guidelines](https://www.w3.org/TR/WCAG22/)
- [WAI-ARIA Authoring Practices](https://www.w3.org/TR/wai-aria-practices/)
- [iOS Accessibility Programming Guide](https://developer.apple.com/documentation/accessibility)
- [iOS Human Interface Guidelines - Accessibility](https://developer.apple.com/design/human-interface-guidelines/accessibility)
- [Android Accessibility Developer Guide](https://developer.android.com/guide/topics/ui/accessibility)
## Related Skills
- `frontend-patterns`
- `frontend-design`
- `liquid-glass-design`
- `swiftui-patterns`

View File

@@ -1,120 +0,0 @@
---
name: gateguard
description: Fact-forcing gate that blocks Edit/Write/Bash (including MultiEdit) and demands concrete investigation (importers, data schemas, user instruction) before allowing the action. Measurably improves output quality by +2.25 points vs ungated agents.
origin: community
---
# GateGuard — Fact-Forcing Pre-Action Gate
A PreToolUse hook that forces Claude to investigate before editing. Instead of self-evaluation ("are you sure?"), it demands concrete facts. The act of investigation creates awareness that self-evaluation never did.
## When to Activate
- Working on any codebase where file edits affect multiple modules
- Projects with data files that have specific schemas or date formats
- Teams where AI-generated code must match existing patterns
- Any workflow where Claude tends to guess instead of investigating
## Core Concept
LLM self-evaluation doesn't work. Ask "did you violate any policies?" and the answer is always "no." This is verified experimentally.
But asking "list every file that imports this module" forces the LLM to run Grep and Read. The investigation itself creates context that changes the output.
**Three-stage gate:**
```
1. DENY — block the first Edit/Write/Bash attempt
2. FORCE — tell the model exactly which facts to gather
3. ALLOW — permit retry after facts are presented
```
No competitor does all three. Most stop at deny.
## Evidence
Two independent A/B tests, identical agents, same task:
| Task | Gated | Ungated | Gap |
| --- | --- | --- | --- |
| Analytics module | 8.0/10 | 6.5/10 | +1.5 |
| Webhook validator | 10.0/10 | 7.0/10 | +3.0 |
| **Average** | **9.0** | **6.75** | **+2.25** |
Both agents produce code that runs and passes tests. The difference is design depth.
## Gate Types
### Edit / MultiEdit Gate (first edit per file)
MultiEdit is handled identically — each file in the batch is gated individually.
```
Before editing {file_path}, present these facts:
1. List ALL files that import/require this file (use Grep)
2. List the public functions/classes affected by this change
3. If this file reads/writes data files, show field names, structure,
and date format (use redacted or synthetic values, not raw production data)
4. Quote the user's current instruction verbatim
```
### Write Gate (first new file creation)
```
Before creating {file_path}, present these facts:
1. Name the file(s) and line(s) that will call this new file
2. Confirm no existing file serves the same purpose (use Glob)
3. If this file reads/writes data files, show field names, structure,
and date format (use redacted or synthetic values, not raw production data)
4. Quote the user's current instruction verbatim
```
### Destructive Bash Gate (every destructive command)
Triggers on: `rm -rf`, `git reset --hard`, `git push --force`, `drop table`, etc.
```
1. List all files/data this command will modify or delete
2. Write a one-line rollback procedure
3. Quote the user's current instruction verbatim
```
### Routine Bash Gate (once per session)
```
Quote the user's current instruction verbatim.
```
## Quick Start
### Option A: Use the ECC hook (zero install)
The hook at `scripts/hooks/gateguard-fact-force.js` is included in this plugin. Enable it via hooks.json.
### Option B: Full package with config
```bash
pip install gateguard-ai
gateguard init
```
This adds `.gateguard.yml` for per-project configuration (custom messages, ignore paths, gate toggles).
## Anti-Patterns
- **Don't use self-evaluation instead.** "Are you sure?" always gets "yes." This is experimentally verified.
- **Don't skip the data schema check.** Both A/B test agents assumed ISO-8601 dates when real data used `%Y/%m/%d %H:%M`. Checking data structure (with redacted values) prevents this entire class of bugs.
- **Don't gate every single Bash command.** Routine bash gates once per session. Destructive bash gates every time. This balance avoids slowdown while catching real risks.
## Best Practices
- Let the gate fire naturally. Don't try to pre-answer the gate questions — the investigation itself is what improves quality.
- Customize gate messages for your domain. If your project has specific conventions, add them to the gate prompts.
- Use `.gateguard.yml` to ignore paths like `.venv/`, `node_modules/`, `.git/`.
## Related Skills
- `safety-guard` — Runtime safety checks (complementary, not overlapping)
- `code-reviewer` — Post-edit review (GateGuard is pre-edit investigation)

View File

@@ -1,33 +0,0 @@
"""
LLM Abstraction Layer
Provider-agnostic interface for multiple LLM backends.
"""
from llm.core.interface import LLMProvider
from llm.core.types import LLMInput, LLMOutput, Message, ToolCall, ToolDefinition, ToolResult
from llm.providers import get_provider
from llm.tools import ToolExecutor, ToolRegistry
from llm.cli.selector import interactive_select
__version__ = "0.1.0"
__all__ = (
"LLMInput",
"LLMOutput",
"LLMProvider",
"Message",
"ToolCall",
"ToolDefinition",
"ToolResult",
"ToolExecutor",
"ToolRegistry",
"get_provider",
"interactive_select",
)
def gui() -> None:
    """Launch the interactive provider/model selector."""
    # Lazy import: keeps top-level `import llm` lightweight.
    from llm.cli.selector import main
    main()

View File

@@ -1,7 +0,0 @@
#!/usr/bin/env python3
"""Entry point for llm CLI (enables `python -m llm`)."""
from llm.cli.selector import main
# Delegate straight to the interactive selector when run as a module.
if __name__ == "__main__":
    main()

View File

@@ -1,154 +0,0 @@
from __future__ import annotations
import os
import sys
from enum import Enum
class Color(str, Enum):
    """ANSI escape codes for terminal styling.

    Subclassing ``str`` lets members be embedded directly in f-strings.
    """
    RESET = "\033[0m"   # restore default style
    BOLD = "\033[1m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    BLUE = "\033[94m"
    CYAN = "\033[96m"
def print_banner() -> None:
    """Print the boxed CLI banner in cyan."""
    banner = f"""{Color.CYAN}
╔═══════════════════════════════════════════╗
║ LLM Provider Selector ║
║ Provider-agnostic AI interactions ║
╚═══════════════════════════════════════════╝{Color.RESET}"""
    print(banner)
def print_providers(providers: list[tuple[str, str]]) -> None:
    """Render the numbered (1-based) provider menu."""
    print(f"\n{Color.BOLD}Available Providers:{Color.RESET}\n")
    for index, (provider_name, description) in enumerate(providers, 1):
        print(f" {Color.GREEN}{index}{Color.RESET}. {Color.BOLD}{provider_name}{Color.RESET} - {description}")
def select_provider(providers: list[tuple[str, str]]) -> str | None:
    """Prompt until a valid provider is chosen.

    Returns the provider name, or None when the list is empty or the
    user submits a blank line.
    """
    if not providers:
        print("No providers available.")
        return None
    print_providers(providers)
    while True:
        raw = input(f"\n{Color.YELLOW}Select provider (1-{len(providers)}): {Color.RESET}").strip()
        if not raw:
            return None
        try:
            index = int(raw) - 1
        except ValueError:
            print(f"{Color.YELLOW}Please enter a number.{Color.RESET}")
            continue
        if 0 <= index < len(providers):
            return providers[index][0]
        print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}")
def select_model(models: list[tuple[str, str]]) -> str | None:
    """Prompt until a valid model is chosen.

    Returns the model name, or None when the list is empty or the user
    submits a blank line.
    """
    if not models:
        print("No models available.")
        return None
    print(f"\n{Color.BOLD}Available Models:{Color.RESET}\n")
    for i, (name, desc) in enumerate(models, 1):
        print(f" {Color.GREEN}{i}{Color.RESET}. {Color.BOLD}{name}{Color.RESET} - {desc}")
    while True:
        try:
            choice = input(f"\n{Color.YELLOW}Select model (1-{len(models)}): {Color.RESET}").strip()
            if not choice:
                return None
            # Menu is 1-based for the user; convert to a 0-based index.
            idx = int(choice) - 1
            if 0 <= idx < len(models):
                return models[idx][0]
            print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}")
        except ValueError:
            print(f"{Color.YELLOW}Please enter a number.{Color.RESET}")
def save_config(provider: str, model: str, persist: bool = False) -> None:
    """Write the chosen provider/model to ``.llm.env`` in the CWD.

    When ``persist`` is true, also export both values into the current
    process environment (affects this process and its children only).
    """
    env_file = ".llm.env"
    with open(env_file, "w") as handle:
        handle.write(f"LLM_PROVIDER={provider}\nLLM_MODEL={model}\n")
    print(f"\n{Color.GREEN}{Color.RESET} Config saved to {Color.CYAN}{env_file}{Color.RESET}")
    if persist:
        os.environ["LLM_PROVIDER"] = provider
        os.environ["LLM_MODEL"] = model
        print(f"{Color.GREEN}{Color.RESET} Config loaded to current session")
def interactive_select(
    providers: list[tuple[str, str]] | None = None,
    models_per_provider: dict[str, list[tuple[str, str]]] | None = None,
    persist: bool = False,
) -> tuple[str, str] | None:
    """Run the full banner -> provider -> model -> save selection flow.

    Args:
        providers: optional (name, description) pairs; defaults to the
            built-in claude/openai/ollama set below.
        models_per_provider: optional mapping of provider name to its
            (model, description) menu entries.
        persist: forwarded to save_config; when true the selection is
            also exported into the current process environment.

    Returns:
        A (provider, model) tuple on success, or None when the user
        cancels at either selection step.
    """
    print_banner()
    if providers is None:
        providers = [
            # Fixed typo: was "( Sonnet" with a stray leading space.
            ("claude", "Anthropic Claude (Sonnet, Opus, Haiku)"),
            ("openai", "OpenAI GPT (4o, 4o-mini, 3.5-turbo)"),
            ("ollama", "Local Ollama models"),
        ]
    if models_per_provider is None:
        models_per_provider = {
            "claude": [
                ("claude-opus-4-5", "Claude Opus 4.5 - Most capable"),
                ("claude-sonnet-4-7", "Claude Sonnet 4.7 - Balanced"),
                ("claude-haiku-4-7", "Claude Haiku 4.7 - Fast"),
            ],
            "openai": [
                ("gpt-4o", "GPT-4o - Most capable"),
                ("gpt-4o-mini", "GPT-4o-mini - Fast & affordable"),
                ("gpt-4-turbo", "GPT-4 Turbo - Legacy powerful"),
                ("gpt-3.5-turbo", "GPT-3.5 - Legacy fast"),
            ],
            "ollama": [
                ("llama3.2", "Llama 3.2 - General purpose"),
                ("mistral", "Mistral - Fast & efficient"),
                ("codellama", "CodeLlama - Code specialized"),
            ],
        }
    provider = select_provider(providers)
    if not provider:
        return None
    models = models_per_provider.get(provider, [])
    model = select_model(models)
    if not model:
        return None
    print(f"\n{Color.GREEN}Selected: {Color.BOLD}{provider}{Color.RESET} / {Color.BOLD}{model}{Color.RESET}")
    save_config(provider, model, persist)
    return (provider, model)
def main() -> None:
    """CLI entry point: run the selector and echo export commands."""
    result = interactive_select(persist=True)
    if result:
        print(f"\n{Color.GREEN}Ready to use!{Color.RESET}")
        print(f" export LLM_PROVIDER={result[0]}")
        print(f" export LLM_MODEL={result[1]}")
    else:
        # Cancellation is not an error: report it and exit 0.
        print("\nSelection cancelled.")
        sys.exit(0)
if __name__ == "__main__":
    main()

View File

@@ -1 +0,0 @@
"""Core module for LLM abstraction layer."""

View File

@@ -1,60 +0,0 @@
"""LLM Provider interface definition."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType
class LLMProvider(ABC):
    """Abstract interface implemented by each LLM backend."""
    # Identifies which backend a concrete subclass talks to.
    provider_type: ProviderType
    # Run one completion for the given input; returns the normalized output.
    @abstractmethod
    def generate(self, input: LLMInput) -> LLMOutput: ...
    # Return metadata for every model this provider can serve.
    @abstractmethod
    def list_models(self) -> list[ModelInfo]: ...
    # True when the provider's required credentials/config are usable.
    @abstractmethod
    def validate_config(self) -> bool: ...
    # Capability flags — defaults assume tool use but no vision support.
    def supports_tools(self) -> bool:
        return True
    def supports_vision(self) -> bool:
        return False
    def get_default_model(self) -> str:
        # Not abstract, but subclasses must override or calls will raise.
        raise NotImplementedError(f"{self.__class__.__name__} must implement get_default_model")
class LLMError(Exception):
    """Base error for provider failures, carrying structured context.

    Attributes:
        message: human-readable description (also the Exception text).
        provider: which backend raised the error, when known.
        code: provider-specific error code, when available.
        details: free-form extra context; never None.
    """

    def __init__(
        self,
        message: str,
        provider: ProviderType | None = None,
        code: str | None = None,
        details: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(message)
        self.message = message
        self.provider = provider
        self.code = code
        self.details = details or {}
# Failure taxonomy — all share LLMError's constructor and fields.
class AuthenticationError(LLMError): ...  # bad or missing credentials
class RateLimitError(LLMError): ...       # provider throttled the request
class ContextLengthError(LLMError): ...   # input exceeded the context window
class ModelNotFoundError(LLMError): ...   # requested model is unknown
class ToolExecutionError(LLMError): ...   # a tool invocation failed

View File

@@ -1,146 +0,0 @@
"""Core type definitions for LLM abstraction layer."""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
class Role(str, Enum):
    """Chat message author roles; str-valued for direct serialization."""
    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"
class ProviderType(str, Enum):
    """Supported LLM backends; str-valued for direct serialization."""
    CLAUDE = "claude"
    OPENAI = "openai"
    OLLAMA = "ollama"
@dataclass(frozen=True)
class Message:
    """A single chat turn (system/user/assistant/tool)."""
    role: Role
    content: str
    name: str | None = None           # optional author/tool name
    tool_call_id: str | None = None   # links a TOOL message to its originating call
    tool_calls: list[ToolCall] | None = None  # calls attached to this message
    def to_dict(self) -> dict[str, Any]:
        """Serialize to a provider-style dict, omitting unset optionals."""
        result: dict[str, Any] = {"role": self.role.value, "content": self.content}
        if self.name:
            result["name"] = self.name
        if self.tool_call_id:
            result["tool_call_id"] = self.tool_call_id
        if self.tool_calls:
            # Nested function-call payload, one entry per tool call.
            result["tool_calls"] = [
                {"id": tc.id, "function": {"name": tc.name, "arguments": tc.arguments}}
                for tc in self.tool_calls
            ]
        return result
@dataclass(frozen=True)
class ToolDefinition:
    """Schema describing a callable tool exposed to the model."""

    name: str
    description: str
    parameters: dict[str, Any]  # parameter spec (JSON-Schema-shaped — TODO confirm)
    strict: bool = True

    def to_dict(self) -> dict[str, Any]:
        """Serialize to the wire shape providers expect."""
        serialized: dict[str, Any] = {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
        }
        serialized["strict"] = self.strict
        return serialized
@dataclass(frozen=True)
class ToolCall:
    """A model-issued request to invoke a named tool."""
    id: str                    # call identifier (echoed back in ToolResult)
    name: str                  # tool to invoke
    arguments: dict[str, Any]  # parsed argument payload
@dataclass(frozen=True)
class ToolResult:
    """The outcome of executing one ToolCall."""
    tool_call_id: str      # matches ToolCall.id
    content: str           # textual result passed back to the model
    is_error: bool = False # True when execution failed
@dataclass(frozen=True)
class LLMInput:
    """A normalized, provider-agnostic completion request."""

    messages: list[Message]
    model: str | None = None
    temperature: float = 1.0
    max_tokens: int | None = None
    tools: list[ToolDefinition] | None = None
    stream: bool = False
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize for a provider call.

        Metadata keys are merged last and therefore override the core
        keys on collision.
        """
        payload: dict[str, Any] = {
            "messages": [message.to_dict() for message in self.messages],
            "temperature": self.temperature,
            "stream": self.stream,
        }
        if self.model:
            payload["model"] = self.model
        if self.max_tokens is not None:
            payload["max_tokens"] = self.max_tokens
        if self.tools:
            payload["tools"] = [tool.to_dict() for tool in self.tools]
        return payload | self.metadata
@dataclass(frozen=True)
class LLMOutput:
    """A normalized completion response returned by a provider."""

    content: str
    tool_calls: list[ToolCall] | None = None
    model: str | None = None
    usage: dict[str, int] | None = None
    stop_reason: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        """True when the model requested at least one tool invocation."""
        return bool(self.tool_calls)

    def to_dict(self) -> dict[str, Any]:
        """Serialize; metadata keys are merged last and win on collision."""
        payload: dict[str, Any] = {"content": self.content}
        if self.tool_calls:
            payload["tool_calls"] = [
                {"id": call.id, "name": call.name, "arguments": call.arguments}
                for call in self.tool_calls
            ]
        # Truthiness checks match the original: empty/None values are omitted.
        for attribute in ("model", "usage", "stop_reason"):
            value = getattr(self, attribute)
            if value:
                payload[attribute] = value
        return payload | self.metadata
@dataclass(frozen=True)
class ModelInfo:
    """Static capability and limit metadata for a single model."""

    name: str
    provider: ProviderType
    supports_tools: bool = True
    supports_vision: bool = False
    max_tokens: int | None = None
    context_window: int | None = None

    def to_dict(self) -> dict[str, Any]:
        """Serialize; None limits are kept explicit (not omitted)."""
        serialized: dict[str, Any] = {
            "name": self.name,
            "provider": self.provider.value,
        }
        serialized["supports_tools"] = self.supports_tools
        serialized["supports_vision"] = self.supports_vision
        serialized["max_tokens"] = self.max_tokens
        serialized["context_window"] = self.context_window
        return serialized

View File

@@ -1,13 +0,0 @@
"""Prompt module for prompt building and normalization."""
from llm.prompt.builder import PromptBuilder, adapt_messages_for_provider, get_provider_builder
from llm.prompt.templates import TEMPLATES, get_template, get_template_or_default
__all__ = (
"PromptBuilder",
"TEMPLATES",
"adapt_messages_for_provider",
"get_provider_builder",
"get_template",
"get_template_or_default",
)

View File

@@ -1,102 +0,0 @@
"""Prompt builder for normalizing prompts across providers."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from llm.core.types import LLMInput, Message, Role, ToolDefinition
from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider
@dataclass
class PromptConfig:
    """Knobs controlling how prompts are assembled for a provider.

    system_template: text prepended to the system message (None = none).
    user_template: template for user messages; not consumed by PromptBuilder
        in this module — confirm intended use before removing.
    include_tools_in_system: when True, tool docs are appended to the system
        message rather than relying on native tool-calling.
    tool_format: wire-format hint ("native", "anthropic", "openai", "text").
    """

    system_template: str | None = None
    user_template: str | None = None
    include_tools_in_system: bool = True
    tool_format: str = "native"
class PromptBuilder:
    """Assembles a provider-ready message list from messages plus tool docs."""

    def __init__(self, config: PromptConfig | None = None) -> None:
        self.config = config or PromptConfig()

    def build(self, messages: list[Message], tools: list[ToolDefinition] | None = None) -> list[Message]:
        """Return the messages with a (possibly augmented) leading system message.

        Gathers the configured system template and, when enabled, a textual
        description of the tools, then either merges them into an existing
        leading system message or prepends a brand-new one.
        """
        if not messages:
            return []
        preamble: list[str] = []
        if self.config.system_template:
            preamble.append(self.config.system_template)
        if tools and self.config.include_tools_in_system:
            preamble.append(f"\n\n## Available Tools\n{self._format_tools(tools)}")
        has_leading_system = messages[0].role == Role.SYSTEM
        if has_leading_system:
            # Existing system content comes first, then template, then tools.
            preamble.insert(0, messages[0].content)
        remainder = messages[1:] if has_leading_system else list(messages)
        if preamble:
            merged = Message(role=Role.SYSTEM, content="\n\n".join(preamble))
            return [merged, *remainder]
        return remainder

    def _format_tools(self, tools: list[ToolDefinition]) -> str:
        """Render tool definitions as markdown sections for the system prompt."""
        sections: list[str] = []
        for tool in tools:
            sections.append(f"### {tool.name}")
            sections.append(tool.description)
            if tool.parameters:
                sections.append("Parameters:")
                sections.append(self._format_parameters(tool.parameters))
        return "\n".join(sections)

    def _format_parameters(self, params: dict[str, Any]) -> str:
        """Render a JSON-schema-style parameter dict as an indented bullet list."""
        if "properties" not in params:
            return str(params)
        required = params.get("required", [])
        bullets = [
            f"  - {name}: {spec.get('type', 'any')} "
            f"{'(required)' if name in required else '(optional)'} - {spec.get('description', '')}"
            for name, spec in params["properties"].items()
        ]
        return "\n".join(bullets) if bullets else str(params)
# Per-provider prompt-building overrides, keyed by lowercase provider name.
# Values are keyword arguments for PromptConfig: providers with native
# tool-calling (claude/openai) skip embedding tool docs in the system prompt,
# while ollama falls back to a plain-text tool description.
_PROVIDER_TEMPLATE_MAP: dict[str, dict[str, Any]] = {
    "claude": {
        "include_tools_in_system": False,
        "tool_format": "anthropic",
    },
    "openai": {
        "include_tools_in_system": False,
        "tool_format": "openai",
    },
    "ollama": {
        "include_tools_in_system": True,
        "tool_format": "text",
    },
}
def get_provider_builder(provider_name: str) -> PromptBuilder:
    """Build a PromptBuilder configured for the named provider.

    The lookup is case-insensitive; unknown names fall back to the default
    PromptConfig.
    """
    overrides = _PROVIDER_TEMPLATE_MAP.get(provider_name.lower(), {})
    return PromptBuilder(PromptConfig(**overrides))
def adapt_messages_for_provider(
    messages: list[Message],
    provider: str,
    tools: list[ToolDefinition] | None = None,
) -> list[Message]:
    """Normalize messages for a provider by delegating to its PromptBuilder."""
    return get_provider_builder(provider).build(messages, tools)

View File

@@ -1 +0,0 @@
# Templates module for provider-specific prompt templates

View File

@@ -1,14 +0,0 @@
"""Provider adapters for multiple LLM backends."""
from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider
from llm.providers.resolver import get_provider, register_provider
__all__ = (
"ClaudeProvider",
"OpenAIProvider",
"OllamaProvider",
"get_provider",
"register_provider",
)

View File

@@ -1,105 +0,0 @@
"""Claude provider adapter."""
from __future__ import annotations
import os
from typing import Any
from anthropic import Anthropic
from llm.core.interface import (
AuthenticationError,
ContextLengthError,
LLMProvider,
RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall
class ClaudeProvider(LLMProvider):
    """Anthropic Claude adapter implementing the LLMProvider interface."""
    provider_type = ProviderType.CLAUDE
    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        """Create the SDK client; falls back to ANTHROPIC_API_KEY from the environment."""
        self.client = Anthropic(api_key=api_key or os.environ.get("ANTHROPIC_API_KEY"), base_url=base_url)
        # Static capability catalogue advertised by list_models().
        self._models = [
            ModelInfo(
                name="claude-opus-4-5",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-sonnet-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-haiku-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=200000,
            ),
        ]
    def generate(self, input: LLMInput) -> LLMOutput:
        """Call the Anthropic Messages API and normalize the reply to LLMOutput.

        Fixes over the previous version: all content blocks are scanned —
        a response can interleave text and tool_use blocks, so inspecting only
        content[0] both missed later tool calls and crashed on ``.text`` when
        the first block was a tool_use — and tool_use arguments are taken
        directly when the SDK hands back a plain dict (dicts have no
        ``__dict__``, so the old getattr silently returned ``{}``).

        Raises AuthenticationError / RateLimitError / ContextLengthError when
        the failure message matches known patterns; re-raises anything else.
        """
        try:
            params: dict[str, Any] = {
                "model": input.model or "claude-sonnet-4-7",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
                # max_tokens is mandatory for the Anthropic Messages API.
                "max_tokens": input.max_tokens if input.max_tokens else 8192,
            }
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]
            response = self.client.messages.create(**params)
            text_parts: list[str] = []
            tool_calls: list[ToolCall] = []
            for block in response.content or []:
                block_type = getattr(block, "type", None)
                if block_type == "text":
                    text_parts.append(block.text)
                elif block_type == "tool_use":
                    raw_args = getattr(block, "input", None)
                    if isinstance(raw_args, dict):
                        arguments = dict(raw_args)
                    else:
                        arguments = getattr(raw_args, "__dict__", {}) or {}
                    tool_calls.append(
                        ToolCall(
                            id=getattr(block, "id", ""),
                            name=getattr(block, "name", ""),
                            arguments=arguments,
                        )
                    )
            return LLMOutput(
                content="".join(text_parts),
                tool_calls=tool_calls or None,
                model=response.model,
                usage={
                    "input_tokens": response.usage.input_tokens,
                    "output_tokens": response.usage.output_tokens,
                },
                stop_reason=response.stop_reason,
            )
        except Exception as e:
            msg = str(e)
            # Map common HTTP failures onto the shared provider error hierarchy.
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.CLAUDE) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.CLAUDE) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.CLAUDE) from e
            raise
    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the static model catalogue."""
        return self._models.copy()
    def validate_config(self) -> bool:
        """Configured when the underlying client has an API key."""
        return bool(self.client.api_key)
    def get_default_model(self) -> str:
        """Model used when a request does not specify one."""
        return "claude-sonnet-4-7"

View File

@@ -1,112 +0,0 @@
"""Ollama provider adapter for local models."""
from __future__ import annotations
import os
from typing import Any
from llm.core.interface import (
AuthenticationError,
ContextLengthError,
LLMProvider,
RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall
class OllamaProvider(LLMProvider):
    """Adapter for a local Ollama server, using its /api/chat HTTP endpoint."""
    provider_type = ProviderType.OLLAMA
    def __init__(
        self,
        base_url: str | None = None,
        default_model: str | None = None,
    ) -> None:
        """Configure the server URL and default model.

        OLLAMA_BASE_URL / OLLAMA_MODEL environment variables supply defaults
        when the corresponding argument is None.
        """
        self.base_url = base_url or os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
        self.default_model = default_model or os.environ.get("OLLAMA_MODEL", "llama3.2")
        # Static catalogue, not queried from the running server —
        # NOTE(review): may drift from what the server actually hosts; confirm.
        self._models = [
            ModelInfo(
                name="llama3.2",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="mistral",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=8192,
            ),
            ModelInfo(
                name="codellama",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=16384,
            ),
        ]
    def generate(self, input: LLMInput) -> LLMOutput:
        """POST a non-streaming chat request to Ollama and map it to LLMOutput.

        Raises AuthenticationError / RateLimitError / ContextLengthError when
        the failure message matches known patterns; re-raises anything else.
        """
        # Local imports keep module import cheap when generate() is never called.
        import urllib.request
        import json
        try:
            url = f"{self.base_url}/api/chat"
            model = input.model or self.default_model
            payload: dict[str, Any] = {
                "model": model,
                "messages": [msg.to_dict() for msg in input.messages],
                "stream": False,
            }
            # Only forward a non-default temperature; otherwise the server's
            # own default applies.
            if input.temperature != 1.0:
                payload["options"] = {"temperature": input.temperature}
            data = json.dumps(payload).encode("utf-8")
            req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"})
            with urllib.request.urlopen(req, timeout=60) as response:
                result = json.loads(response.read().decode("utf-8"))
            content = result.get("message", {}).get("content", "")
            tool_calls = None
            if result.get("message", {}).get("tool_calls"):
                tool_calls = [
                    ToolCall(
                        id=tc.get("id", ""),
                        name=tc.get("function", {}).get("name", ""),
                        arguments=tc.get("function", {}).get("arguments", {}),
                    )
                    for tc in result["message"]["tool_calls"]
                ]
            return LLMOutput(
                content=content,
                tool_calls=tool_calls,
                model=model,
                stop_reason=result.get("done_reason"),
            )
        except Exception as e:
            msg = str(e)
            # NOTE(review): connection failures are mapped to AuthenticationError;
            # looks intentional (local servers have no auth) — confirm.
            if "401" in msg or "connection" in msg.lower():
                raise AuthenticationError(f"Ollama connection failed: {msg}", provider=ProviderType.OLLAMA) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OLLAMA) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OLLAMA) from e
            raise
    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the static model catalogue."""
        return self._models.copy()
    def validate_config(self) -> bool:
        """Configured whenever a base URL is set (no API key required)."""
        return bool(self.base_url)
    def get_default_model(self) -> str:
        """Return the model used when a request does not specify one."""
        return self.default_model

View File

@@ -1,114 +0,0 @@
"""OpenAI provider adapter."""
from __future__ import annotations
import json
import os
from typing import Any
from openai import OpenAI
from llm.core.interface import (
AuthenticationError,
ContextLengthError,
LLMProvider,
RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall
class OpenAIProvider(LLMProvider):
    """OpenAI Chat Completions adapter implementing the LLMProvider interface."""
    provider_type = ProviderType.OPENAI
    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        """Create the SDK client; falls back to OPENAI_API_KEY from the environment."""
        self.client = OpenAI(api_key=api_key or os.environ.get("OPENAI_API_KEY"), base_url=base_url)
        # Static capability catalogue advertised by list_models().
        self._models = [
            ModelInfo(
                name="gpt-4o",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-4o-mini",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-4-turbo",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-3.5-turbo",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=16385,
            ),
        ]
    def generate(self, input: LLMInput) -> LLMOutput:
        """Run a chat completion and normalize the first choice to LLMOutput.

        Raises AuthenticationError / RateLimitError / ContextLengthError when
        the failure message matches known patterns; re-raises anything else.
        """
        try:
            params: dict[str, Any] = {
                "model": input.model or "gpt-4o-mini",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
            }
            if input.max_tokens:
                params["max_tokens"] = input.max_tokens
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]
            response = self.client.chat.completions.create(**params)
            # Only the first choice is surfaced; n>1 is never requested here.
            choice = response.choices[0]
            tool_calls = None
            if choice.message.tool_calls:
                tool_calls = [
                    ToolCall(
                        id=tc.id or "",
                        name=tc.function.name,
                        # OpenAI sends arguments as a JSON string; "" maps to {}.
                        arguments={} if not tc.function.arguments else json.loads(tc.function.arguments),
                    )
                    for tc in choice.message.tool_calls
                ]
            return LLMOutput(
                content=choice.message.content or "",
                tool_calls=tool_calls,
                model=response.model,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                },
                stop_reason=choice.finish_reason,
            )
        except Exception as e:
            msg = str(e)
            # Map common HTTP failures onto the shared provider error hierarchy.
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.OPENAI) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OPENAI) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OPENAI) from e
            raise
    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the static model catalogue."""
        return self._models.copy()
    def validate_config(self) -> bool:
        """Configured when the underlying client has an API key."""
        return bool(self.client.api_key)
    def get_default_model(self) -> str:
        """Model used when a request does not specify one."""
        return "gpt-4o-mini"

View File

@@ -1,39 +0,0 @@
"""Provider factory and resolver."""
from __future__ import annotations
import os
from llm.core.interface import LLMProvider
from llm.core.types import ProviderType
from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider
# Registry of concrete provider classes keyed by ProviderType.
# Mutated at runtime via register_provider().
_PROVIDER_MAP: dict[ProviderType, type[LLMProvider]] = {
    ProviderType.CLAUDE: ClaudeProvider,
    ProviderType.OPENAI: OpenAIProvider,
    ProviderType.OLLAMA: OllamaProvider,
}
def get_provider(provider_type: ProviderType | str | None = None, **kwargs: str) -> LLMProvider:
    """Instantiate an LLM provider.

    Args:
        provider_type: Enum member, provider name string, or None to read the
            LLM_PROVIDER environment variable (defaulting to "claude").
        **kwargs: Forwarded verbatim to the provider class constructor.

    Returns:
        A freshly constructed provider instance.

    Raises:
        ValueError: If the name does not match a ProviderType, or no provider
            class is registered for the resolved type.
    """
    if provider_type is None:
        provider_type = os.environ.get("LLM_PROVIDER", "claude").lower()
    if isinstance(provider_type, str):
        try:
            provider_type = ProviderType(provider_type)
        except ValueError as exc:
            # Chain explicitly so the original enum error is preserved as the cause.
            raise ValueError(
                f"Unknown provider type: {provider_type}. Valid types: {[p.value for p in ProviderType]}"
            ) from exc
    provider_cls = _PROVIDER_MAP.get(provider_type)
    if provider_cls is None:
        raise ValueError(f"No provider registered for type: {provider_type}")
    return provider_cls(**kwargs)
def register_provider(provider_type: ProviderType, provider_cls: type[LLMProvider]) -> None:
    """Register (or replace) the provider class used for provider_type."""
    _PROVIDER_MAP[provider_type] = provider_cls

View File

@@ -1,9 +0,0 @@
"""Tools module for tool/function calling abstraction."""
from llm.tools.executor import ReActAgent, ToolExecutor, ToolRegistry
__all__ = (
"ReActAgent",
"ToolExecutor",
"ToolRegistry",
)

View File

@@ -1,116 +0,0 @@
"""Tool executor for handling tool calls from LLM responses."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Callable
from llm.core.interface import ToolExecutionError
from llm.core.types import LLMInput, LLMOutput, Message, Role, ToolCall, ToolDefinition, ToolResult
ToolFunc = Callable[..., Any]
class ToolRegistry:
    """Maps tool names to their callables and ToolDefinition metadata."""

    def __init__(self) -> None:
        self._tools: dict[str, ToolFunc] = {}
        self._definitions: dict[str, ToolDefinition] = {}

    def register(self, definition: ToolDefinition, func: ToolFunc) -> None:
        """Register func under definition.name, replacing any previous entry."""
        name = definition.name
        self._tools[name] = func
        self._definitions[name] = definition

    def get(self, name: str) -> ToolFunc | None:
        """Return the callable for name, or None when unregistered."""
        return self._tools.get(name)

    def get_definition(self, name: str) -> ToolDefinition | None:
        """Return the ToolDefinition for name, or None when unregistered."""
        return self._definitions.get(name)

    def list_tools(self) -> list[ToolDefinition]:
        """Return all registered definitions, in registration order."""
        return list(self._definitions.values())

    def has(self, name: str) -> bool:
        """True when a tool named name is registered."""
        return name in self._tools
class ToolExecutor:
    """Executes ToolCall requests against a ToolRegistry without raising."""

    def __init__(self, registry: ToolRegistry | None = None) -> None:
        self.registry = registry or ToolRegistry()

    def execute(self, tool_call: ToolCall) -> ToolResult:
        """Run a single tool call; failures come back as error-flagged results."""
        func = self.registry.get(tool_call.name)
        if func is None:
            return ToolResult(
                tool_call_id=tool_call.id,
                content=f"Error: Tool '{tool_call.name}' not found",
                is_error=True,
            )
        try:
            raw = func(**tool_call.arguments)
            # Non-string results are stringified for transport back to the model.
            content = raw if isinstance(raw, str) else str(raw)
        except Exception as e:
            return ToolResult(
                tool_call_id=tool_call.id,
                content=f"Error executing {tool_call.name}: {e}",
                is_error=True,
            )
        return ToolResult(tool_call_id=tool_call.id, content=content)

    def execute_all(self, tool_calls: list[ToolCall]) -> list[ToolResult]:
        """Run each call in order and collect the results."""
        results: list[ToolResult] = []
        for call in tool_calls:
            results.append(self.execute(call))
        return results
class ReActAgent:
    """ReAct-style loop: generate, execute requested tools, feed results back."""
    def __init__(
        self,
        provider: Any,
        executor: ToolExecutor,
        max_iterations: int = 10,
    ) -> None:
        """Store the LLM provider, tool executor and iteration budget."""
        self.provider = provider
        self.executor = executor
        self.max_iterations = max_iterations
    async def run(self, input: LLMInput) -> LLMOutput:
        """Drive the tool-use loop until the model stops requesting tools.

        Returns the provider's final output, or a sentinel LLMOutput with
        stop_reason="max_iterations" once the budget is exhausted.
        NOTE(review): declared async but never awaits — provider.generate is
        called synchronously here; confirm whether an async provider API was
        intended.
        """
        messages = list(input.messages)
        tools = input.tools or []
        for _ in range(self.max_iterations):
            # Rebuild the input each round so the growing transcript is included.
            input_copy = LLMInput(
                messages=messages,
                model=input.model,
                temperature=input.temperature,
                max_tokens=input.max_tokens,
                tools=tools,
            )
            output = self.provider.generate(input_copy)
            if not output.has_tool_calls:
                return output
            # Record the assistant turn (with its tool calls) before executing them.
            messages.append(
                Message(
                    role=Role.ASSISTANT,
                    content=output.content or "",
                    tool_calls=output.tool_calls,
                )
            )
            results = self.executor.execute_all(output.tool_calls)
            # Each tool result becomes its own TOOL-role message, keyed by call id.
            for result in results:
                messages.append(
                    Message(
                        role=Role.TOOL,
                        content=result.content,
                        tool_call_id=result.tool_call_id,
                    )
                )
        return LLMOutput(
            content="Max iterations reached",
            stop_reason="max_iterations",
        )

View File

View File

@@ -1,4 +0,0 @@
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

View File

@@ -1,437 +0,0 @@
/**
* Tests for scripts/hooks/gateguard-fact-force.js via run-with-flags.js
*/
const assert = require('assert');
const fs = require('fs');
const path = require('path');
const { spawnSync } = require('child_process');
const runner = path.join(__dirname, '..', '..', 'scripts', 'hooks', 'run-with-flags.js');
const externalStateDir = process.env.GATEGUARD_STATE_DIR;
const tmpRoot = process.env.TMPDIR || process.env.TEMP || process.env.TMP || '/tmp';
const stateDir = externalStateDir || fs.mkdtempSync(path.join(tmpRoot, 'gateguard-test-'));
// Use a fixed session ID so test process and spawned hook process share the same state file
const TEST_SESSION_ID = 'gateguard-test-session';
const stateFile = path.join(stateDir, `state-${TEST_SESSION_ID}.json`);
/**
 * Run a single test case, logging its name and returning pass/fail.
 * Both branches log the same bare name line; failures additionally log the
 * error message, indented.
 * @param {string} name - Human-readable test label.
 * @param {Function} fn - Test body; a throw marks the case as failed.
 * @returns {boolean} true when fn ran without throwing.
 */
function test(name, fn) {
  let ok = true;
  let failure = null;
  try {
    fn();
  } catch (error) {
    ok = false;
    failure = error;
  }
  console.log(`${name}`);
  if (!ok) {
    console.log(`  Error: ${failure.message}`);
  }
  return ok;
}
/**
 * Remove the shared gateguard state file so the next hook run starts fresh.
 * Failures are logged rather than thrown so cleanup problems cannot abort
 * the suite.
 */
function clearState() {
  try {
    if (fs.existsSync(stateFile)) {
      fs.unlinkSync(stateFile);
    }
  } catch (err) {
    console.error(`  [clearState] failed to remove ${stateFile}: ${err.message}`);
  }
}
/**
 * Seed a state file whose last_active is past the 30-minute session timeout,
 * so the hook under test should treat the session as expired and reset gates.
 * Write failures are deliberately swallowed (best-effort fixture setup).
 */
function writeExpiredState() {
  try {
    fs.mkdirSync(stateDir, { recursive: true });
    const expired = {
      checked: ['some_file.js', '__bash_session__'],
      last_active: Date.now() - (31 * 60 * 1000) // 31 minutes ago
    };
    fs.writeFileSync(stateFile, JSON.stringify(expired), 'utf8');
  } catch (_) { /* ignore */ }
}
/**
 * Write an arbitrary state object to the shared state file.
 * Unlike writeExpiredState, errors propagate — fixture bugs should be loud.
 * @param {object} state - State payload to persist as JSON.
 */
function writeState(state) {
  fs.mkdirSync(stateDir, { recursive: true });
  fs.writeFileSync(stateFile, JSON.stringify(state), 'utf8');
}
/**
 * Invoke the gateguard hook via run-with-flags.js as an Edit/Write event.
 * @param {object|string} input - Hook payload; objects are JSON-stringified.
 * @param {object} [env] - Extra environment variables merged over the defaults.
 * @returns {{code: number, stdout: string, stderr: string}} Process outcome;
 *   a missing exit status (e.g. spawn failure) is normalized to 1.
 */
function runHook(input, env = {}) {
  const rawInput = typeof input === 'string' ? input : JSON.stringify(input);
  const result = spawnSync('node', [
    runner,
    'pre:edit-write:gateguard-fact-force',
    'scripts/hooks/gateguard-fact-force.js',
    'standard,strict'
  ], {
    input: rawInput,
    encoding: 'utf8',
    env: {
      ...process.env,
      ECC_HOOK_PROFILE: 'standard',
      // Shared state dir + fixed session id so this process and the spawned
      // hook read/write the same state file.
      GATEGUARD_STATE_DIR: stateDir,
      CLAUDE_SESSION_ID: TEST_SESSION_ID,
      ...env
    },
    timeout: 15000,
    stdio: ['pipe', 'pipe', 'pipe']
  });
  return {
    code: Number.isInteger(result.status) ? result.status : 1,
    stdout: result.stdout || '',
    stderr: result.stderr || ''
  };
}
/**
 * Invoke the gateguard hook via run-with-flags.js as a Bash (pre:bash) event.
 * NOTE(review): intentionally mirrors runHook except for the event name —
 * consider extracting a shared helper if a third variant appears.
 * @param {object|string} input - Hook payload; objects are JSON-stringified.
 * @param {object} [env] - Extra environment variables merged over the defaults.
 * @returns {{code: number, stdout: string, stderr: string}} Process outcome.
 */
function runBashHook(input, env = {}) {
  const rawInput = typeof input === 'string' ? input : JSON.stringify(input);
  const result = spawnSync('node', [
    runner,
    'pre:bash:gateguard-fact-force',
    'scripts/hooks/gateguard-fact-force.js',
    'standard,strict'
  ], {
    input: rawInput,
    encoding: 'utf8',
    env: {
      ...process.env,
      ECC_HOOK_PROFILE: 'standard',
      GATEGUARD_STATE_DIR: stateDir,
      CLAUDE_SESSION_ID: TEST_SESSION_ID,
      ...env
    },
    timeout: 15000,
    stdio: ['pipe', 'pipe', 'pipe']
  });
  return {
    code: Number.isInteger(result.status) ? result.status : 1,
    stdout: result.stdout || '',
    stderr: result.stderr || ''
  };
}
/**
 * Parse captured hook stdout as JSON, returning null on any parse failure.
 * @param {string} stdout - Raw stdout from the hook process.
 * @returns {*} The parsed value, or null when stdout is not valid JSON.
 */
function parseOutput(stdout) {
  let parsed = null;
  try {
    parsed = JSON.parse(stdout);
  } catch (_) {
    // Malformed or empty output is treated as "no decision payload".
  }
  return parsed;
}
/**
 * Sequential test suite for the gateguard-fact-force hook.
 * Cases share one on-disk state file (seeded/cleared between cases), invoke
 * the hook through runHook/runBashHook, and assert on the permission
 * decision JSON. Order matters: several cases rely on state left by the
 * previous one. Exits the process non-zero when any case fails.
 */
function runTests() {
  console.log('\n=== Testing gateguard-fact-force ===\n');
  let passed = 0;
  let failed = 0;
  // --- Test 1: denies first Edit per file ---
  clearState();
  if (test('denies first Edit per file with fact-forcing message', () => {
    const input = {
      tool_name: 'Edit',
      tool_input: { file_path: '/src/app.js', old_string: 'foo', new_string: 'bar' }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce JSON output');
    assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('Fact-Forcing Gate'));
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('import/require'));
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('/src/app.js'));
  })) passed++; else failed++;
  // --- Test 2: allows second Edit on same file ---
  if (test('allows second Edit on same file (gate already passed)', () => {
    const input = {
      tool_name: 'Edit',
      tool_input: { file_path: '/src/app.js', old_string: 'foo', new_string: 'bar' }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce valid JSON output');
    // When allowed, the hook passes through the raw input (no hookSpecificOutput)
    // OR if hookSpecificOutput exists, it must not be deny
    if (output.hookSpecificOutput) {
      assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
        'should not deny second edit on same file');
    } else {
      // Pass-through: output matches original input (allow)
      assert.strictEqual(output.tool_name, 'Edit', 'pass-through should preserve input');
    }
  })) passed++; else failed++;
  // --- Test 3: denies first Write per file ---
  clearState();
  if (test('denies first Write per file with fact-forcing message', () => {
    const input = {
      tool_name: 'Write',
      tool_input: { file_path: '/src/new-file.js', content: 'console.log("hello")' }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce JSON output');
    assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('creating'));
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('call this new file'));
  })) passed++; else failed++;
  // --- Test 4: denies destructive Bash, allows retry ---
  clearState();
  if (test('denies destructive Bash commands, allows retry after facts presented', () => {
    const input = {
      tool_name: 'Bash',
      tool_input: { command: 'rm -rf /important/data' }
    };
    // First call: should deny
    const result1 = runBashHook(input);
    assert.strictEqual(result1.code, 0, 'first call exit code should be 0');
    const output1 = parseOutput(result1.stdout);
    assert.ok(output1, 'first call should produce JSON output');
    assert.strictEqual(output1.hookSpecificOutput.permissionDecision, 'deny');
    assert.ok(output1.hookSpecificOutput.permissionDecisionReason.includes('Destructive'));
    assert.ok(output1.hookSpecificOutput.permissionDecisionReason.includes('rollback'));
    // Second call (retry after facts presented): should allow
    const result2 = runBashHook(input);
    assert.strictEqual(result2.code, 0, 'second call exit code should be 0');
    const output2 = parseOutput(result2.stdout);
    assert.ok(output2, 'second call should produce valid JSON output');
    if (output2.hookSpecificOutput) {
      assert.notStrictEqual(output2.hookSpecificOutput.permissionDecision, 'deny',
        'should not deny destructive bash retry after facts presented');
    } else {
      assert.strictEqual(output2.tool_name, 'Bash', 'pass-through should preserve input');
    }
  })) passed++; else failed++;
  // --- Test 5: denies first routine Bash, allows second ---
  clearState();
  if (test('denies first routine Bash, allows second', () => {
    const input = {
      tool_name: 'Bash',
      tool_input: { command: 'ls -la' }
    };
    // First call: should deny
    const result1 = runBashHook(input);
    assert.strictEqual(result1.code, 0, 'first call exit code should be 0');
    const output1 = parseOutput(result1.stdout);
    assert.ok(output1, 'first call should produce JSON output');
    assert.strictEqual(output1.hookSpecificOutput.permissionDecision, 'deny');
    // Second call: should allow
    const result2 = runBashHook(input);
    assert.strictEqual(result2.code, 0, 'second call exit code should be 0');
    const output2 = parseOutput(result2.stdout);
    assert.ok(output2, 'second call should produce valid JSON output');
    if (output2.hookSpecificOutput) {
      assert.notStrictEqual(output2.hookSpecificOutput.permissionDecision, 'deny',
        'should not deny second routine bash');
    } else {
      assert.strictEqual(output2.tool_name, 'Bash', 'pass-through should preserve input');
    }
  })) passed++; else failed++;
  // --- Test 6: session state resets after timeout ---
  if (test('session state resets after 30-minute timeout', () => {
    writeExpiredState();
    const input = {
      tool_name: 'Edit',
      tool_input: { file_path: 'some_file.js', old_string: 'a', new_string: 'b' }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce JSON output after expired state');
    assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
      'should deny again after session timeout (state was reset)');
  })) passed++; else failed++;
  // --- Test 7: allows unknown tool names ---
  clearState();
  if (test('allows unknown tool names through', () => {
    const input = {
      tool_name: 'Read',
      tool_input: { file_path: '/src/app.js' }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce valid JSON output');
    if (output.hookSpecificOutput) {
      assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
        'should not deny unknown tool');
    } else {
      assert.strictEqual(output.tool_name, 'Read', 'pass-through should preserve input');
    }
  })) passed++; else failed++;
  // --- Test 8: sanitizes file paths with newlines ---
  clearState();
  if (test('sanitizes file paths containing newlines', () => {
    const input = {
      tool_name: 'Edit',
      tool_input: { file_path: '/src/app.js\ninjected content', old_string: 'a', new_string: 'b' }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce JSON output');
    assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
    const reason = output.hookSpecificOutput.permissionDecisionReason;
    // The file path portion of the reason must not contain any raw newlines
    // (sanitizePath replaces \n and \r with spaces)
    const pathLine = reason.split('\n').find(l => l.includes('/src/app.js'));
    assert.ok(pathLine, 'reason should mention the file path');
    assert.ok(!pathLine.includes('\n'), 'file path line must not contain raw newlines');
    assert.ok(!reason.includes('/src/app.js\n'), 'newline after file path should be sanitized');
    assert.ok(!reason.includes('\ninjected'), 'injected content must not appear on its own line');
  })) passed++; else failed++;
  // --- Test 9: respects ECC_DISABLED_HOOKS ---
  clearState();
  if (test('respects ECC_DISABLED_HOOKS (skips when disabled)', () => {
    const input = {
      tool_name: 'Edit',
      tool_input: { file_path: '/src/disabled.js', old_string: 'a', new_string: 'b' }
    };
    const result = runHook(input, {
      ECC_DISABLED_HOOKS: 'pre:edit-write:gateguard-fact-force'
    });
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce valid JSON output');
    if (output.hookSpecificOutput) {
      assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
        'should not deny when hook is disabled');
    } else {
      // When disabled, hook passes through raw input
      assert.strictEqual(output.tool_name, 'Edit', 'pass-through should preserve input');
    }
  })) passed++; else failed++;
  // --- Test 10: MultiEdit gates first unchecked file ---
  clearState();
  if (test('denies first MultiEdit with unchecked file', () => {
    const input = {
      tool_name: 'MultiEdit',
      tool_input: {
        edits: [
          { file_path: '/src/multi-a.js', old_string: 'a', new_string: 'b' },
          { file_path: '/src/multi-b.js', old_string: 'c', new_string: 'd' }
        ]
      }
    };
    const result = runHook(input);
    assert.strictEqual(result.code, 0, 'exit code should be 0');
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce JSON output');
    assert.strictEqual(output.hookSpecificOutput.permissionDecision, 'deny');
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('Fact-Forcing Gate'));
    assert.ok(output.hookSpecificOutput.permissionDecisionReason.includes('/src/multi-a.js'));
  })) passed++; else failed++;
  // --- Test 11: MultiEdit allows after all files gated ---
  if (test('allows MultiEdit after all files gated', () => {
    // multi-a.js was gated in test 10; gate multi-b.js
    const input2 = {
      tool_name: 'MultiEdit',
      tool_input: { edits: [{ file_path: '/src/multi-b.js', old_string: 'c', new_string: 'd' }] }
    };
    runHook(input2); // gates multi-b.js
    // Now both files are gated — retry should allow
    const input3 = {
      tool_name: 'MultiEdit',
      tool_input: {
        edits: [
          { file_path: '/src/multi-a.js', old_string: 'a', new_string: 'b' },
          { file_path: '/src/multi-b.js', old_string: 'c', new_string: 'd' }
        ]
      }
    };
    const result3 = runHook(input3);
    const output3 = parseOutput(result3.stdout);
    assert.ok(output3, 'should produce valid JSON');
    if (output3.hookSpecificOutput) {
      assert.notStrictEqual(output3.hookSpecificOutput.permissionDecision, 'deny',
        'should allow MultiEdit after all files gated');
    }
  })) passed++; else failed++;
  // --- Test 12: reads refresh active session state ---
  clearState();
  if (test('touches last_active on read so active sessions do not age out', () => {
    const staleButActive = Date.now() - (29 * 60 * 1000);
    writeState({
      checked: ['/src/keep-alive.js'],
      last_active: staleButActive
    });
    const before = JSON.parse(fs.readFileSync(stateFile, 'utf8'));
    assert.strictEqual(before.last_active, staleButActive, 'seed state should use the expected timestamp');
    const result = runHook({
      tool_name: 'Edit',
      tool_input: { file_path: '/src/keep-alive.js', old_string: 'a', new_string: 'b' }
    });
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce valid JSON output');
    if (output.hookSpecificOutput) {
      assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
        'already-checked file should still be allowed');
    }
    const after = JSON.parse(fs.readFileSync(stateFile, 'utf8'));
    assert.ok(after.last_active > staleButActive, 'successful reads should refresh last_active');
  })) passed++; else failed++;
  // --- Test 13: pruning preserves routine bash gate marker ---
  clearState();
  if (test('preserves __bash_session__ when pruning oversized state', () => {
    const checked = ['__bash_session__'];
    for (let i = 0; i < 80; i++) checked.push(`__destructive__${i}`);
    for (let i = 0; i < 700; i++) checked.push(`/src/file-${i}.js`);
    writeState({ checked, last_active: Date.now() });
    runHook({
      tool_name: 'Edit',
      tool_input: { file_path: '/src/newly-gated.js', old_string: 'a', new_string: 'b' }
    });
    const result = runBashHook({
      tool_name: 'Bash',
      tool_input: { command: 'pwd' }
    });
    const output = parseOutput(result.stdout);
    assert.ok(output, 'should produce valid JSON output');
    if (output.hookSpecificOutput) {
      assert.notStrictEqual(output.hookSpecificOutput.permissionDecision, 'deny',
        'routine bash marker should survive pruning');
    }
    const persisted = JSON.parse(fs.readFileSync(stateFile, 'utf8'));
    assert.ok(persisted.checked.includes('__bash_session__'), 'pruned state should retain __bash_session__');
    assert.ok(persisted.checked.length <= 500, 'pruned state should still honor the checked-entry cap');
  })) passed++; else failed++;
  // Cleanup only the temp directory created by this test file.
  if (!externalStateDir) {
    try {
      if (fs.existsSync(stateDir)) {
        fs.rmSync(stateDir, { recursive: true, force: true });
      }
    } catch (err) {
      console.error(`  [cleanup] failed to remove ${stateDir}: ${err.message}`);
    }
  }
  console.log(`\n  ${passed} passed, ${failed} failed\n`);
  process.exit(failed > 0 ? 1 : 0);
}
// Kick off the suite when this file is executed directly.
runTests();

View File

@@ -6,8 +6,6 @@
const assert = require('assert');
const fs = require('fs');
const http = require('http');
const https = require('https');
const os = require('os');
const path = require('path');
const { spawn, spawnSync } = require('child_process');
@@ -111,39 +109,6 @@ function waitForFile(filePath, timeoutMs = 5000) {
}
throw new Error(`Timed out waiting for ${filePath}`);
}
function waitForHttpReady(urlString, timeoutMs = 5000) {
const deadline = Date.now() + timeoutMs;
const { protocol } = new URL(urlString);
const client = protocol === 'https:' ? https : http;
return new Promise((resolve, reject) => {
const attempt = () => {
const req = client.request(urlString, { method: 'GET' }, res => {
res.resume();
resolve();
});
req.setTimeout(250, () => {
req.destroy(new Error('timeout'));
});
req.on('error', error => {
if (Date.now() >= deadline) {
reject(new Error(`Timed out waiting for ${urlString}: ${error.message}`));
return;
}
setTimeout(attempt, 25);
});
req.end();
};
attempt();
});
}
async function runTests() {
console.log('\n=== Testing mcp-health-check.js ===\n');
@@ -364,7 +329,6 @@ async function runTests() {
try {
const port = waitForFile(portFile).trim();
await waitForHttpReady(`http://127.0.0.1:${port}/mcp`);
writeConfig(configPath, {
mcpServers: {
@@ -427,7 +391,6 @@ async function runTests() {
try {
const port = waitForFile(portFile).trim();
await waitForHttpReady(`http://127.0.0.1:${port}/mcp`);
writeConfig(configPath, {
mcpServers: {

View File

@@ -116,19 +116,10 @@ function runTests() {
assert.ok(plan.operations.length > 0, 'Should include scaffold operations');
assert.ok(
plan.operations.some(operation => (
operation.sourceRelativePath === '.cursor/hooks.json'
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'hooks.json')
&& operation.strategy === 'preserve-relative-path'
operation.sourceRelativePath === '.cursor'
&& operation.strategy === 'sync-root-children'
)),
'Should preserve non-rule Cursor platform files'
);
assert.ok(
plan.operations.some(operation => (
operation.sourceRelativePath === 'rules/common/agents.md'
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-agents.mdc')
&& operation.strategy === 'flatten-copy'
)),
'Should produce Cursor .mdc rules while preferring rules-core over duplicate platform copies'
'Should flatten the native cursor root'
);
})) passed++; else failed++;

View File

@@ -90,22 +90,20 @@ function runTests() {
assert.strictEqual(plan.targetRoot, path.join(projectRoot, '.cursor'));
assert.strictEqual(plan.installStatePath, path.join(projectRoot, '.cursor', 'ecc-install-state.json'));
const hooksJson = plan.operations.find(operation => (
normalizedRelativePath(operation.sourceRelativePath) === '.cursor/hooks.json'
));
const flattened = plan.operations.find(operation => operation.sourceRelativePath === '.cursor');
const preserved = plan.operations.find(operation => (
normalizedRelativePath(operation.sourceRelativePath) === 'rules/common/coding-style.md'
));
assert.ok(hooksJson, 'Should preserve non-rule Cursor platform config files');
assert.strictEqual(hooksJson.strategy, 'preserve-relative-path');
assert.strictEqual(hooksJson.destinationPath, path.join(projectRoot, '.cursor', 'hooks.json'));
assert.ok(flattened, 'Should include .cursor scaffold operation');
assert.strictEqual(flattened.strategy, 'sync-root-children');
assert.strictEqual(flattened.destinationPath, path.join(projectRoot, '.cursor'));
assert.ok(preserved, 'Should include flattened rules scaffold operations');
assert.strictEqual(preserved.strategy, 'flatten-copy');
assert.strictEqual(
preserved.destinationPath,
path.join(projectRoot, '.cursor', 'rules', 'common-coding-style.mdc')
path.join(projectRoot, '.cursor', 'rules', 'common-coding-style.md')
);
})) passed++; else failed++;
@@ -128,16 +126,16 @@ function runTests() {
assert.ok(
plan.operations.some(operation => (
normalizedRelativePath(operation.sourceRelativePath) === 'rules/common/coding-style.md'
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-coding-style.mdc')
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-coding-style.md')
)),
'Should flatten common rules into namespaced .mdc files'
'Should flatten common rules into namespaced files'
);
assert.ok(
plan.operations.some(operation => (
normalizedRelativePath(operation.sourceRelativePath) === 'rules/typescript/testing.md'
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'typescript-testing.mdc')
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'typescript-testing.md')
)),
'Should flatten language rules into namespaced .mdc files'
'Should flatten language rules into namespaced files'
);
assert.ok(
!plan.operations.some(operation => (
@@ -145,132 +143,6 @@ function runTests() {
)),
'Should not preserve nested rule directories for cursor installs'
);
assert.ok(
!plan.operations.some(operation => (
operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-coding-style.md')
)),
'Should not emit .md Cursor rule files'
);
assert.ok(
!plan.operations.some(operation => (
normalizedRelativePath(operation.sourceRelativePath) === 'rules/README.md'
)),
'Should not install Cursor README docs as runtime rule files'
);
assert.ok(
!plan.operations.some(operation => (
normalizedRelativePath(operation.sourceRelativePath) === 'rules/zh/README.md'
)),
'Should not flatten localized README docs into Cursor rule files'
);
})) passed++; else failed++;
if (test('plans cursor platform rule files as .mdc and excludes rule README docs', () => {
const repoRoot = path.join(__dirname, '..', '..');
const projectRoot = '/workspace/app';
const plan = planInstallTargetScaffold({
target: 'cursor',
repoRoot,
projectRoot,
modules: [
{
id: 'platform-configs',
paths: ['.cursor'],
},
],
});
assert.ok(
plan.operations.some(operation => (
normalizedRelativePath(operation.sourceRelativePath) === '.cursor/rules/common-agents.md'
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-agents.mdc')
)),
'Should rename Cursor platform rule files to .mdc'
);
assert.ok(
!plan.operations.some(operation => (
operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-agents.md')
)),
'Should not preserve .md Cursor platform rule files'
);
assert.ok(
plan.operations.some(operation => (
normalizedRelativePath(operation.sourceRelativePath) === '.cursor/hooks.json'
&& operation.destinationPath === path.join(projectRoot, '.cursor', 'hooks.json')
)),
'Should preserve non-rule Cursor platform config files'
);
assert.ok(
!plan.operations.some(operation => (
operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'README.mdc')
)),
'Should not emit Cursor rule README docs as .mdc files'
);
})) passed++; else failed++;
if (test('deduplicates cursor rule destinations when rules-core and platform-configs overlap', () => {
const repoRoot = path.join(__dirname, '..', '..');
const projectRoot = '/workspace/app';
const plan = planInstallTargetScaffold({
target: 'cursor',
repoRoot,
projectRoot,
modules: [
{
id: 'rules-core',
paths: ['rules'],
},
{
id: 'platform-configs',
paths: ['.cursor'],
},
],
});
const commonAgentsDestinations = plan.operations.filter(operation => (
operation.destinationPath === path.join(projectRoot, '.cursor', 'rules', 'common-agents.mdc')
));
assert.strictEqual(commonAgentsDestinations.length, 1, 'Should keep only one common-agents.mdc operation');
assert.strictEqual(
normalizedRelativePath(commonAgentsDestinations[0].sourceRelativePath),
'rules/common/agents.md',
'Should prefer rules-core when cursor platform rules would collide'
);
})) passed++; else failed++;
if (test('prefers native cursor hooks when hooks-runtime and platform-configs overlap', () => {
const repoRoot = path.join(__dirname, '..', '..');
const projectRoot = '/workspace/app';
const plan = planInstallTargetScaffold({
target: 'cursor',
repoRoot,
projectRoot,
modules: [
{
id: 'hooks-runtime',
paths: ['hooks', 'scripts/hooks', 'scripts/lib'],
},
{
id: 'platform-configs',
paths: ['.cursor'],
},
],
});
const hooksDestinations = plan.operations.filter(operation => (
operation.destinationPath === path.join(projectRoot, '.cursor', 'hooks')
));
assert.strictEqual(hooksDestinations.length, 1, 'Should keep only one .cursor/hooks scaffold operation');
assert.strictEqual(
normalizedRelativePath(hooksDestinations[0].sourceRelativePath),
'.cursor/hooks',
'Should prefer native Cursor hooks over generic hooks-runtime hooks'
);
})) passed++; else failed++;
if (test('plans antigravity remaps for workflows, skills, and flat rules', () => {

View File

@@ -34,7 +34,6 @@ const zhCnReadmePath = path.join(repoRoot, 'docs', 'zh-CN', 'README.md');
const selectiveInstallArchitecturePath = path.join(repoRoot, 'docs', 'SELECTIVE-INSTALL-ARCHITECTURE.md');
const opencodePackageJsonPath = path.join(repoRoot, '.opencode', 'package.json');
const opencodePackageLockPath = path.join(repoRoot, '.opencode', 'package-lock.json');
const opencodeHooksPluginPath = path.join(repoRoot, '.opencode', 'plugins', 'ecc-hooks.ts');
let passed = 0;
let failed = 0;
@@ -135,13 +134,6 @@ test('docs/SELECTIVE-INSTALL-ARCHITECTURE.md repoVersion example matches package
assert.strictEqual(match[1], expectedVersion);
});
test('.opencode/plugins/ecc-hooks.ts active plugin banner matches package.json', () => {
const source = fs.readFileSync(opencodeHooksPluginPath, 'utf8');
const match = source.match(/## Active Plugin: Everything Claude Code v([0-9]+\.[0-9]+\.[0-9]+)/);
assert.ok(match, 'Expected .opencode/plugins/ecc-hooks.ts to declare an active plugin banner');
assert.strictEqual(match[1], expectedVersion);
});
test('docs/pt-BR/README.md latest release heading matches package.json', () => {
const source = fs.readFileSync(ptBrReadmePath, 'utf8');
assert.ok(

View File

@@ -35,7 +35,7 @@ function main() {
["package.json exposes the OpenCode build and prepack hooks", () => {
assert.strictEqual(packageJson.scripts["build:opencode"], "node scripts/build-opencode.js")
assert.strictEqual(packageJson.scripts.prepack, "npm run build:opencode")
assert.ok(packageJson.files.includes(".opencode/"))
assert.ok(packageJson.files.includes(".opencode/dist/"))
}],
["build script generates .opencode/dist", () => {
const result = spawnSync("node", [buildScript], {

View File

@@ -130,11 +130,8 @@ function runTests() {
const result = run(['--target', 'cursor', 'typescript'], { cwd: projectDir, homeDir });
assert.strictEqual(result.code, 0, result.stderr);
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-coding-style.mdc')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'typescript-testing.mdc')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.mdc')));
assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.md')));
assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'README.mdc')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-coding-style.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'typescript-testing.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'agents', 'architect.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'commands', 'plan.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
@@ -307,8 +304,7 @@ function runTests() {
});
assert.strictEqual(result.code, 0, result.stderr);
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'hooks.json')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.mdc')));
assert.ok(!fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.md')));
assert.ok(fs.existsSync(path.join(projectDir, '.cursor', 'rules', 'common-agents.md')));
const state = readJson(path.join(projectDir, '.cursor', 'ecc-install-state.json'));
assert.strictEqual(state.request.profile, null);

View File

@@ -1,150 +0,0 @@
/**
* Tests for the npm publish surface contract.
*/
const assert = require("assert")
const fs = require("fs")
const path = require("path")
const { spawnSync } = require("child_process")
/**
 * Run one named test case and report its outcome on the console.
 * @param {string} name - Human-readable test label.
 * @param {Function} fn - Zero-argument test body; signals failure by throwing.
 * @returns {boolean} true when fn completed without throwing, false otherwise.
 */
function runTest(name, fn) {
  let failure = null
  try {
    fn()
  } catch (error) {
    failure = error
  }
  console.log(`${name}`)
  if (failure) {
    console.error(`  ${failure.message}`)
    return false
  }
  return true
}
/**
 * Canonicalize a publish path: coerce to string, use forward slashes only,
 * and drop a single trailing slash so equivalent entries compare equal.
 * @param {*} value - Raw path entry (string or string-like).
 * @returns {string} Normalized slash-separated path.
 */
function normalizePublishPath(value) {
  const forwardSlashed = String(value).split("\\").join("/")
  return forwardSlashed.endsWith("/") ? forwardSlashed.slice(0, -1) : forwardSlashed
}
/**
 * Whether any proper path prefix of target is itself listed in roots,
 * meaning the target is already published via an ancestor directory.
 * @param {string} target - Slash-separated, normalized publish path.
 * @param {Set<string>} roots - Set of normalized publish roots.
 * @returns {boolean} true when an ancestor of target is in roots.
 */
function isCoveredByAncestor(target, roots) {
  const segments = target.split("/")
  // Proper prefixes only: 1 .. segments.length - 1 leading segments.
  return segments
    .slice(0, -1)
    .some((_, depth) => roots.has(segments.slice(0, depth + 1).join("/")))
}
/**
 * Derive the expected package.json "files" entries from the install-module
 * graph plus an explicit runtime allowlist, dropping any entry already
 * covered by an ancestor directory in the same set.
 * @param {string} repoRoot - Absolute path of the repository root.
 * @returns {string[]} Sorted, deduplicated publish paths.
 */
function buildExpectedPublishPaths(repoRoot) {
  const manifestFile = path.join(repoRoot, "manifests", "install-modules.json")
  const { modules } = JSON.parse(fs.readFileSync(manifestFile, "utf8"))
  // Runtime files published regardless of module membership.
  const extraPaths = [
    "manifests",
    "scripts/ecc.js",
    "scripts/catalog.js",
    "scripts/claw.js",
    "scripts/doctor.js",
    "scripts/status.js",
    "scripts/sessions-cli.js",
    "scripts/install-apply.js",
    "scripts/install-plan.js",
    "scripts/list-installed.js",
    "scripts/skill-create-output.js",
    "scripts/repair.js",
    "scripts/harness-audit.js",
    "scripts/session-inspect.js",
    "scripts/uninstall.js",
    "scripts/gemini-adapt-agents.js",
    "scripts/codex/merge-codex-config.js",
    "scripts/codex/merge-mcp-config.js",
    ".codex-plugin",
    ".mcp.json",
    "install.sh",
    "install.ps1",
    "schemas",
    "agent.yaml",
    "VERSION",
  ]
  const modulePaths = []
  for (const module of modules) {
    modulePaths.push(...(module.paths || []))
  }
  // Normalize and dedupe via a Set, then drop ancestor-covered entries.
  const combined = new Set()
  for (const rawPath of [...modulePaths, ...extraPaths]) {
    combined.add(normalizePublishPath(rawPath))
  }
  const survivors = []
  for (const publishPath of combined) {
    if (!isCoveredByAncestor(publishPath, combined)) {
      survivors.push(publishPath)
    }
  }
  return survivors.sort()
}
/**
 * Entry point: verify package.json's publish surface matches the module
 * graph, then dry-run `npm pack` to confirm the shipped file set.
 * Exits with status 1 when any check fails.
 */
function main() {
  console.log("\n=== Testing npm publish surface ===\n")
  const repoRoot = path.join(__dirname, "..", "..")
  const packageJson = JSON.parse(
    fs.readFileSync(path.join(repoRoot, "package.json"), "utf8")
  )
  const expectedPublishPaths = buildExpectedPublishPaths(repoRoot)
  const actualPublishPaths = packageJson.files.map(normalizePublishPath).sort()
  const checks = [
    ["package.json files align to the module graph and explicit runtime allowlist", () => {
      assert.deepStrictEqual(actualPublishPaths, expectedPublishPaths)
    }],
    ["npm pack publishes the reduced runtime surface", () => {
      // shell: true is required on Windows so npm.cmd resolves.
      const result = spawnSync("npm", ["pack", "--dry-run", "--json"], {
        cwd: repoRoot,
        encoding: "utf8",
        shell: process.platform === "win32",
      })
      assert.strictEqual(result.status, 0, result.error?.message || result.stderr)
      const packOutput = JSON.parse(result.stdout)
      const packagedPaths = new Set(packOutput[0]?.files?.map((file) => file.path) ?? [])
      const mustInclude = [
        "scripts/catalog.js",
        ".gemini/GEMINI.md",
        ".claude-plugin/plugin.json",
        ".codex-plugin/plugin.json",
        "schemas/install-state.schema.json",
        "skills/backend-patterns/SKILL.md",
      ]
      for (const requiredPath of mustInclude) {
        assert.ok(
          packagedPaths.has(requiredPath),
          `npm pack should include ${requiredPath}`
        )
      }
      const mustExclude = [
        "contexts/dev.md",
        "examples/CLAUDE.md",
        "plugins/README.md",
        "scripts/ci/catalog.js",
        "skills/skill-comply/SKILL.md",
      ]
      for (const excludedPath of mustExclude) {
        assert.ok(
          !packagedPaths.has(excludedPath),
          `npm pack should not include ${excludedPath}`
        )
      }
    }],
  ]
  let passed = 0
  let failed = 0
  for (const [label, body] of checks) {
    if (runTest(label, body)) {
      passed += 1
    } else {
      failed += 1
    }
  }
  console.log(`\nPassed: ${passed}`)
  console.log(`\nFailed: ${failed}`)
  process.exit(failed > 0 ? 1 : 0)
}
main()

View File

@@ -1,69 +0,0 @@
import pytest
from llm.core.types import LLMInput, Message, Role, ToolDefinition
from llm.prompt import PromptBuilder, adapt_messages_for_provider
from llm.prompt.builder import PromptConfig
class TestPromptBuilder:
    """Unit tests for PromptBuilder message assembly."""

    def test_build_without_system(self):
        # No system template configured: the user message passes through alone.
        messages = [Message(role=Role.USER, content="Hello")]
        builder = PromptBuilder()
        result = builder.build(messages)
        assert len(result) == 1
        assert result[0].role == Role.USER

    def test_build_with_system(self):
        # An explicit system message in the input is preserved in position 0.
        messages = [
            Message(role=Role.SYSTEM, content="You are helpful."),
            Message(role=Role.USER, content="Hello"),
        ]
        builder = PromptBuilder()
        result = builder.build(messages)
        assert len(result) == 2
        assert result[0].role == Role.SYSTEM

    def test_build_adds_system_from_template_kwarg(self):
        # BUG FIX: this test previously shared its name with
        # test_build_adds_system_from_config below, so Python silently
        # discarded it and the system_template kwarg path was never tested.
        messages = [Message(role=Role.USER, content="Hello")]
        builder = PromptBuilder(system_template="You are a pirate.")
        result = builder.build(messages)
        assert len(result) == 2
        assert "pirate" in result[0].content

    def test_build_adds_system_from_config(self):
        # Same behavior when the template arrives via a PromptConfig object.
        messages = [Message(role=Role.USER, content="Hello")]
        builder = PromptBuilder(config=PromptConfig(system_template="You are a pirate."))
        result = builder.build(messages)
        assert len(result) == 2
        assert "pirate" in result[0].content

    def test_build_with_tools(self):
        # Tool definitions are injected into the generated system message.
        messages = [Message(role=Role.USER, content="Search for something")]
        tools = [
            ToolDefinition(name="search", description="Search the web", parameters={}),
        ]
        builder = PromptBuilder(include_tools_in_system=True)
        result = builder.build(messages, tools)
        assert len(result) == 2
        assert "search" in result[0].content
        assert "Available Tools" in result[0].content
class TestAdaptMessagesForProvider:
    """Adapting a single user message keeps the message count for every provider."""

    def _adapt_single(self, provider_name):
        # Shared driver: adapt one plain user message for the given provider.
        return adapt_messages_for_provider(
            [Message(role=Role.USER, content="Hello")], provider_name
        )

    def test_adapt_for_claude(self):
        assert len(self._adapt_single("claude")) == 1

    def test_adapt_for_openai(self):
        assert len(self._adapt_single("openai")) == 1

    def test_adapt_for_ollama(self):
        assert len(self._adapt_single("ollama")) == 1

View File

@@ -1,86 +0,0 @@
import pytest
from llm.core.types import ToolCall, ToolDefinition, ToolResult
from llm.tools import ToolExecutor, ToolRegistry
class TestToolRegistry:
    """Registration and lookup behaviour of ToolRegistry."""

    def test_register_and_get(self):
        reg = ToolRegistry()

        def dummy_func() -> str:
            return "result"

        definition = ToolDefinition(
            name="dummy",
            description="A dummy tool",
            parameters={"type": "object"},
        )
        reg.register(definition, dummy_func)
        # Lookup returns the exact callable and definition that were registered.
        assert reg.has("dummy") is True
        assert reg.get("dummy") is dummy_func
        assert reg.get_definition("dummy") == definition

    def test_list_tools(self):
        reg = ToolRegistry()
        reg.register(ToolDefinition(name="test", description="Test", parameters={}), lambda: None)
        listed = reg.list_tools()
        assert len(listed) == 1
        assert listed[0].name == "test"
class TestToolExecutor:
    """Execution semantics of ToolExecutor over a ToolRegistry."""

    def test_execute_success(self):
        reg = ToolRegistry()

        def search(query: str) -> str:
            return f"Results for: {query}"

        reg.register(
            ToolDefinition(
                name="search",
                description="Search",
                parameters={"type": "object", "properties": {"query": {"type": "string"}}},
            ),
            search,
        )
        outcome = ToolExecutor(reg).execute(
            ToolCall(id="1", name="search", arguments={"query": "test"})
        )
        assert outcome.tool_call_id == "1"
        assert outcome.content == "Results for: test"
        assert outcome.is_error is False

    def test_execute_unknown_tool(self):
        # Unregistered tool names produce an error result, not an exception.
        outcome = ToolExecutor(ToolRegistry()).execute(
            ToolCall(id="1", name="unknown", arguments={})
        )
        assert outcome.is_error is True
        assert "not found" in outcome.content

    def test_execute_all(self):
        reg = ToolRegistry()

        def tool_one() -> str:
            return "result1"

        def tool_two() -> str:
            return "result2"

        reg.register(ToolDefinition(name="t1", description="", parameters=""" """.join([]) or {}), tool_one)
        reg.register(ToolDefinition(name="t2", description="", parameters={}), tool_two)
        outcomes = ToolExecutor(reg).execute_all([
            ToolCall(id="1", name="t1", arguments={}),
            ToolCall(id="2", name="t2", arguments={}),
        ])
        # Results come back in call order.
        assert len(outcomes) == 2
        assert outcomes[0].content == "result1"
        assert outcomes[1].content == "result2"

View File

@@ -1,28 +0,0 @@
import pytest
from llm.core.types import ProviderType
from llm.providers import ClaudeProvider, OpenAIProvider, OllamaProvider, get_provider
class TestGetProvider:
    """get_provider resolves both string names and ProviderType enum values."""

    def test_get_claude_provider(self):
        resolved = get_provider("claude")
        assert isinstance(resolved, ClaudeProvider)
        assert resolved.provider_type == ProviderType.CLAUDE

    def test_get_openai_provider(self):
        resolved = get_provider("openai")
        assert isinstance(resolved, OpenAIProvider)
        assert resolved.provider_type == ProviderType.OPENAI

    def test_get_ollama_provider(self):
        resolved = get_provider("ollama")
        assert isinstance(resolved, OllamaProvider)
        assert resolved.provider_type == ProviderType.OLLAMA

    def test_get_provider_by_enum(self):
        # Enum members are accepted interchangeably with their string names.
        assert isinstance(get_provider(ProviderType.CLAUDE), ClaudeProvider)

    def test_invalid_provider_raises(self):
        with pytest.raises(ValueError, match="Unknown provider type"):
            get_provider("invalid")

View File

@@ -1,117 +0,0 @@
import pytest
from llm.core.types import (
LLMInput,
LLMOutput,
Message,
ModelInfo,
ProviderType,
Role,
ToolCall,
ToolDefinition,
ToolResult,
)
class TestRole:
    """Role enum members serialize to their wire-format strings."""

    def test_role_values(self):
        expected = {
            Role.SYSTEM: "system",
            Role.USER: "user",
            Role.ASSISTANT: "assistant",
            Role.TOOL: "tool",
        }
        for member, wire_name in expected.items():
            assert member.value == wire_name
class TestProviderType:
    """ProviderType enum members serialize to their wire-format strings."""

    def test_provider_values(self):
        expected = {
            ProviderType.CLAUDE: "claude",
            ProviderType.OPENAI: "openai",
            ProviderType.OLLAMA: "ollama",
        }
        for member, wire_name in expected.items():
            assert member.value == wire_name
class TestMessage:
    """Construction defaults and dict serialization of Message."""

    def test_create_message(self):
        message = Message(role=Role.USER, content="Hello")
        assert message.role == Role.USER
        assert message.content == "Hello"
        # Optional fields default to None when not supplied.
        assert message.name is None
        assert message.tool_call_id is None

    def test_message_to_dict(self):
        serialized = Message(role=Role.USER, content="Hello", name="test").to_dict()
        assert serialized["role"] == "user"
        assert serialized["content"] == "Hello"
        assert serialized["name"] == "test"
class TestToolDefinition:
    """Defaults and dict serialization of ToolDefinition."""

    def test_create_tool(self):
        definition = ToolDefinition(
            name="search",
            description="Search the web",
            parameters={"type": "object", "properties": {}},
        )
        assert definition.name == "search"
        # strict defaults to True when not supplied.
        assert definition.strict is True

    def test_tool_to_dict(self):
        serialized = ToolDefinition(
            name="search",
            description="Search",
            parameters={"type": "object"},
        ).to_dict()
        assert serialized["name"] == "search"
        assert serialized["strict"] is True
class TestToolCall:
    """ToolCall stores its id, target tool name, and argument mapping."""

    def test_create_tool_call(self):
        call = ToolCall(id="1", name="search", arguments={"query": "test"})
        assert call.id == "1"
        assert call.name == "search"
        assert call.arguments == {"query": "test"}
class TestToolResult:
    """ToolResult defaults to a non-error outcome."""

    def test_create_tool_result(self):
        outcome = ToolResult(tool_call_id="1", content="result")
        assert outcome.tool_call_id == "1"
        # is_error defaults to False when not supplied.
        assert outcome.is_error is False
class TestLLMInput:
    """Construction and dict serialization of LLMInput."""

    def test_create_input(self):
        request = LLMInput(
            messages=[Message(role=Role.USER, content="Hello")], temperature=0.7
        )
        assert len(request.messages) == 1
        assert request.temperature == 0.7

    def test_input_to_dict(self):
        serialized = LLMInput(messages=[Message(role=Role.USER, content="Hello")]).to_dict()
        assert "messages" in serialized
        # Unspecified temperature serializes as the default 1.0.
        assert serialized["temperature"] == 1.0
class TestLLMOutput:
    """has_tool_calls reflects whether tool calls are attached to the output."""

    def test_create_output(self):
        reply = LLMOutput(content="Hello!")
        assert reply.content == "Hello!"
        assert reply.has_tool_calls is False

    def test_output_with_tool_calls(self):
        pending_call = ToolCall(id="1", name="search", arguments={})
        reply = LLMOutput(content="", tool_calls=[pending_call])
        assert reply.has_tool_calls is True
class TestModelInfo:
    """ModelInfo capability flags carry their documented defaults."""

    def test_create_model_info(self):
        info = ModelInfo(name="gpt-4", provider=ProviderType.OPENAI)
        assert info.name == "gpt-4"
        # Defaults: tool support on, vision support off.
        assert info.supports_tools is True
        assert info.supports_vision is False

View File

@@ -169,30 +169,30 @@ __metadata:
languageName: node
linkType: hard
"@opencode-ai/plugin@npm:^1.4.3":
version: 1.4.3
resolution: "@opencode-ai/plugin@npm:1.4.3"
"@opencode-ai/plugin@npm:^1.0.0":
version: 1.3.15
resolution: "@opencode-ai/plugin@npm:1.3.15"
dependencies:
"@opencode-ai/sdk": "npm:1.4.3"
"@opencode-ai/sdk": "npm:1.3.15"
zod: "npm:4.1.8"
peerDependencies:
"@opentui/core": ">=0.1.97"
"@opentui/solid": ">=0.1.97"
"@opentui/core": ">=0.1.96"
"@opentui/solid": ">=0.1.96"
peerDependenciesMeta:
"@opentui/core":
optional: true
"@opentui/solid":
optional: true
checksum: 10c0/a20328a691a674638e4718c1fb911ea68b60fc7560f1bf314324114ccdcabbddc12e98c4fc9f3aad69e92aaaac7edbd44216bf036955ec5d1f50282430ab06ae
checksum: 10c0/1a662ff700812223310612f3c8c7fd4465eda5763d726ec4d29d0eae26babf344ef176c9b987d79fe1e29c8a498178881a47d7080bb9f4db3e70dad59eb8cd9e
languageName: node
linkType: hard
"@opencode-ai/sdk@npm:1.4.3":
version: 1.4.3
resolution: "@opencode-ai/sdk@npm:1.4.3"
"@opencode-ai/sdk@npm:1.3.15":
version: 1.3.15
resolution: "@opencode-ai/sdk@npm:1.3.15"
dependencies:
cross-spawn: "npm:7.0.6"
checksum: 10c0/edba27ef01ecfb6fde7df2348f953aab64f2e7b99e9cd5b155474e7e02cc0db62da242d9edcd5b704110b9ef82bc16633d99d25eaa812d4279badede71ae419f
checksum: 10c0/3957ae62e0ec1e339d9493e03a2440c95afdd64a608a2dc9db8383338650318a294280b2142305db5b0147badacbefa0d07e949d31167e5a4a49c9d057d016fa
languageName: node
linkType: hard
@@ -548,12 +548,12 @@ __metadata:
dependencies:
"@eslint/js": "npm:^9.39.2"
"@iarna/toml": "npm:^2.2.5"
"@opencode-ai/plugin": "npm:^1.4.3"
"@opencode-ai/plugin": "npm:^1.0.0"
"@types/node": "npm:^20.19.24"
ajv: "npm:^8.18.0"
c8: "npm:^11.0.0"
eslint: "npm:^9.39.2"
globals: "npm:^17.5.0"
globals: "npm:^17.4.0"
markdownlint-cli: "npm:^0.48.0"
sql.js: "npm:^1.14.1"
typescript: "npm:^5.9.3"
@@ -834,10 +834,10 @@ __metadata:
languageName: node
linkType: hard
"globals@npm:^17.5.0":
version: 17.5.0
resolution: "globals@npm:17.5.0"
checksum: 10c0/92828102ed2f5637907725f0478038bed02fc83e9fc89300bb753639ba7c022b6c02576fc772117302b431b204591db1f2fa909d26f3f0a9852cc856a941df3f
"globals@npm:^17.4.0":
version: 17.4.0
resolution: "globals@npm:17.4.0"
checksum: 10c0/2be9e8c2b9035836f13d420b22f0247a328db82967d3bebfc01126d888ed609305f06c05895914e969653af5c6ba35fd7a0920f3e6c869afa60666c810630feb
languageName: node
linkType: hard