mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-04-13 21:33:32 +08:00
Merge pull request #1377 from Anish29801/feat/dashboard-gui
Feat/dashboard gui
This commit is contained in:
104
.opencode/package-lock.json
generated
104
.opencode/package-lock.json
generated
@@ -9,7 +9,7 @@
|
||||
"version": "1.10.0",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"@opencode-ai/plugin": "^1.0.0",
|
||||
"@opencode-ai/plugin": "^1.4.3",
|
||||
"@types/node": "^20.0.0",
|
||||
"typescript": "^5.3.0"
|
||||
},
|
||||
@@ -21,22 +21,37 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@opencode-ai/plugin": {
|
||||
"version": "1.1.53",
|
||||
"resolved": "https://registry.npmjs.org/@opencode-ai/plugin/-/plugin-1.1.53.tgz",
|
||||
"integrity": "sha512-9ye7Wz2kESgt02AUDaMea4hXxj6XhWwKAG8NwFhrw09Ux54bGaMJFt1eIS8QQGIMaD+Lp11X4QdyEg96etEBJw==",
|
||||
"version": "1.4.3",
|
||||
"resolved": "https://registry.npmjs.org/@opencode-ai/plugin/-/plugin-1.4.3.tgz",
|
||||
"integrity": "sha512-Ob/3tVSIeuMRJBr2O23RtrnC5djRe01Lglx+TwGEmjrH9yDBJ2tftegYLnNEjRoMuzITgq9LD8168p4pzv+U/A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@opencode-ai/sdk": "1.1.53",
|
||||
"@opencode-ai/sdk": "1.4.3",
|
||||
"zod": "4.1.8"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@opentui/core": ">=0.1.97",
|
||||
"@opentui/solid": ">=0.1.97"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"@opentui/core": {
|
||||
"optional": true
|
||||
},
|
||||
"@opentui/solid": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@opencode-ai/sdk": {
|
||||
"version": "1.1.53",
|
||||
"resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.1.53.tgz",
|
||||
"integrity": "sha512-RUIVnPOP1CyyU32FrOOYuE7Ge51lOBuhaFp2NSX98ncApT7ffoNetmwzqrhOiJQgZB1KrbCHLYOCK6AZfacxag==",
|
||||
"version": "1.4.3",
|
||||
"resolved": "https://registry.npmjs.org/@opencode-ai/sdk/-/sdk-1.4.3.tgz",
|
||||
"integrity": "sha512-X0CAVbwoGAjTY2iecpWkx2B+GAa2jSaQKYpJ+xILopeF/OGKZUN15mjqci+L7cEuwLHV5wk3x2TStUOVCa5p0A==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"cross-spawn": "7.0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/node": {
|
||||
"version": "20.19.33",
|
||||
@@ -48,6 +63,61 @@
|
||||
"undici-types": "~6.21.0"
|
||||
}
|
||||
},
|
||||
"node_modules/cross-spawn": {
|
||||
"version": "7.0.6",
|
||||
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
|
||||
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"path-key": "^3.1.0",
|
||||
"shebang-command": "^2.0.0",
|
||||
"which": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 8"
|
||||
}
|
||||
},
|
||||
"node_modules/isexe": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
|
||||
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
|
||||
"dev": true,
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/path-key": {
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
|
||||
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/shebang-command": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
|
||||
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"shebang-regex": "^3.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/shebang-regex": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
|
||||
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/typescript": {
|
||||
"version": "5.9.3",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
|
||||
@@ -69,6 +139,22 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/which": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
|
||||
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"isexe": "^2.0.0"
|
||||
},
|
||||
"bin": {
|
||||
"node-which": "bin/node-which"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 8"
|
||||
}
|
||||
},
|
||||
"node_modules/zod": {
|
||||
"version": "4.1.8",
|
||||
"resolved": "https://registry.npmjs.org/zod/-/zod-4.1.8.tgz",
|
||||
|
||||
@@ -60,7 +60,7 @@
|
||||
"@opencode-ai/plugin": ">=1.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@opencode-ai/plugin": "^1.0.0",
|
||||
"@opencode-ai/plugin": "^1.4.3",
|
||||
"@types/node": "^20.0.0",
|
||||
"typescript": "^5.3.0"
|
||||
},
|
||||
|
||||
24
README.md
24
README.md
@@ -84,6 +84,7 @@ This repo is the raw code only. The guides explain everything.
|
||||
|
||||
### v1.10.0 — Surface Refresh, Operator Workflows, and ECC 2.0 Alpha (Apr 2026)
|
||||
|
||||
- **Dashboard GUI** — New Tkinter-based desktop application (`ecc_dashboard.py` or `npm run dashboard`) with dark/light theme toggle, font customization, and project logo in header and taskbar.
|
||||
- **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 38 agents, 156 skills, and 72 legacy command shims.
|
||||
- **Operator and outbound workflow expansion** — `brand-voice`, `social-graph-ranker`, `connections-optimizer`, `customer-billing-ops`, `ecc-tools-cost-audit`, `google-workspace-ops`, `project-flow-ops`, and `workspace-surface-audit` round out the operator lane.
|
||||
- **Media and launch tooling** — `manim-video`, `remotion-video-creation`, and upgraded social publishing surfaces make technical explainers and launch content part of the same system.
|
||||
@@ -240,6 +241,23 @@ For manual install instructions see the README in the `rules/` folder. When copy
|
||||
|
||||
**That's it!** You now have access to 47 agents, 181 skills, and 79 legacy command shims.
|
||||
|
||||
### Dashboard GUI
|
||||
|
||||
Launch the desktop dashboard to visually explore ECC components:
|
||||
|
||||
```bash
|
||||
npm run dashboard
|
||||
# or
|
||||
python3 ./ecc_dashboard.py
|
||||
```
|
||||
|
||||
**Features:**
|
||||
- Tabbed interface: Agents, Skills, Commands, Rules, Settings
|
||||
- Dark/Light theme toggle
|
||||
- Font customization (family & size)
|
||||
- Project logo in header and taskbar
|
||||
- Search and filter across all components
|
||||
|
||||
### Multi-model commands require additional setup
|
||||
|
||||
> WARNING: `multi-*` commands are **not** covered by the base plugin/rules install above.
|
||||
@@ -500,6 +518,12 @@ everything-claude-code/
|
||||
|-- mcp-configs/ # MCP server configurations
|
||||
| |-- mcp-servers.json # GitHub, Supabase, Vercel, Railway, etc.
|
||||
|
|
||||
|-- ecc_dashboard.py # Desktop GUI dashboard (Tkinter)
|
||||
|
|
||||
|-- assets/ # Assets for dashboard
|
||||
| |-- images/
|
||||
| |-- ecc-logo.png
|
||||
|
|
||||
|-- marketplace.json # Self-hosted marketplace config (for /plugin marketplace add)
|
||||
```
|
||||
|
||||
|
||||
BIN
assets/images/ecc-logo.png
Normal file
BIN
assets/images/ecc-logo.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 98 KiB |
10
ecc2/Cargo.lock
generated
10
ecc2/Cargo.lock
generated
@@ -1286,6 +1286,15 @@ version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
|
||||
|
||||
[[package]]
|
||||
name = "openssl-src"
|
||||
version = "300.6.0+3.6.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a8e8cbfd3a4a8c8f089147fd7aaa33cf8c7450c4d09f8f80698a0cf093abeff4"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "openssl-sys"
|
||||
version = "0.9.112"
|
||||
@@ -1294,6 +1303,7 @@ checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"openssl-src",
|
||||
"pkg-config",
|
||||
"vcpkg",
|
||||
]
|
||||
|
||||
@@ -7,6 +7,10 @@ license = "MIT"
|
||||
authors = ["Affaan Mustafa <me@affaanmustafa.com>"]
|
||||
repository = "https://github.com/affaan-m/everything-claude-code"
|
||||
|
||||
[features]
|
||||
default = ["vendored-openssl"]
|
||||
vendored-openssl = ["git2/vendored-openssl"]
|
||||
|
||||
[dependencies]
|
||||
# TUI
|
||||
ratatui = { version = "0.30", features = ["crossterm_0_28"] }
|
||||
@@ -19,7 +23,7 @@ tokio = { version = "1", features = ["full"] }
|
||||
rusqlite = { version = "0.32", features = ["bundled"] }
|
||||
|
||||
# Git integration
|
||||
git2 = "0.20"
|
||||
git2 = { version = "0.20", features = ["ssh"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
|
||||
914
ecc_dashboard.py
Normal file
914
ecc_dashboard.py
Normal file
@@ -0,0 +1,914 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ECC Dashboard - Everything Claude Code GUI
|
||||
Cross-platform TkInter application for managing ECC components
|
||||
"""
|
||||
|
||||
import tkinter as tk
|
||||
from tkinter import ttk, scrolledtext, messagebox
|
||||
import os
|
||||
import json
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
# ============================================================================
|
||||
# DATA LOADERS - Load ECC data from the project
|
||||
# ============================================================================
|
||||
|
||||
def get_project_path() -> str:
    """Return the ECC project root: the directory containing this script."""
    script_path = os.path.abspath(__file__)
    return os.path.dirname(script_path)
|
||||
|
||||
def load_agents(project_path: str) -> List[Dict]:
    """Parse the agent catalog out of AGENTS.md.

    Reads the markdown table headed ``| Agent | Purpose | When to Use |`` and
    returns one dict per data row with keys ``name``, ``purpose`` and
    ``when_to_use``. Falls back to a built-in default list when the file is
    missing or no table rows could be parsed.
    """
    agents_file = os.path.join(project_path, "AGENTS.md")
    agents = []

    if os.path.exists(agents_file):
        with open(agents_file, 'r', encoding='utf-8') as f:
            content = f.read()

        # Parse agent table from AGENTS.md
        in_table = False
        for line in content.split('\n'):
            if '| Agent | Purpose | When to Use |' in line:
                in_table = True
                continue
            if not in_table:
                continue
            if not line.startswith('|'):
                # A non-table line terminates the table; stop collecting rows
                # (the old parser kept scanning and could pick up later tables).
                in_table = False
                continue
            parts = [p.strip() for p in line.split('|')]
            if len(parts) < 4 or not parts[1] or parts[1] == 'Agent':
                continue
            if not parts[1].strip('-: '):
                # Skip the markdown alignment row (e.g. |---|---|---|), which
                # the old parser wrongly recorded as an agent named '---'.
                continue
            agents.append({
                'name': parts[1],
                'purpose': parts[2],
                'when_to_use': parts[3],
            })

    # Fallback default agents if file not found or the table was empty
    if not agents:
        agents = [
            {'name': 'planner', 'purpose': 'Implementation planning', 'when_to_use': 'Complex features, refactoring'},
            {'name': 'architect', 'purpose': 'System design and scalability', 'when_to_use': 'Architectural decisions'},
            {'name': 'tdd-guide', 'purpose': 'Test-driven development', 'when_to_use': 'New features, bug fixes'},
            {'name': 'code-reviewer', 'purpose': 'Code quality and maintainability', 'when_to_use': 'After writing/modifying code'},
            {'name': 'security-reviewer', 'purpose': 'Vulnerability detection', 'when_to_use': 'Before commits, sensitive code'},
            {'name': 'build-error-resolver', 'purpose': 'Fix build/type errors', 'when_to_use': 'When build fails'},
            {'name': 'e2e-runner', 'purpose': 'End-to-end Playwright testing', 'when_to_use': 'Critical user flows'},
            {'name': 'refactor-cleaner', 'purpose': 'Dead code cleanup', 'when_to_use': 'Code maintenance'},
            {'name': 'doc-updater', 'purpose': 'Documentation and codemaps', 'when_to_use': 'Updating docs'},
            {'name': 'go-reviewer', 'purpose': 'Go code review', 'when_to_use': 'Go projects'},
            {'name': 'python-reviewer', 'purpose': 'Python code review', 'when_to_use': 'Python projects'},
            {'name': 'typescript-reviewer', 'purpose': 'TypeScript/JavaScript code review', 'when_to_use': 'TypeScript projects'},
            {'name': 'rust-reviewer', 'purpose': 'Rust code review', 'when_to_use': 'Rust projects'},
            {'name': 'java-reviewer', 'purpose': 'Java and Spring Boot code review', 'when_to_use': 'Java projects'},
            {'name': 'kotlin-reviewer', 'purpose': 'Kotlin code review', 'when_to_use': 'Kotlin projects'},
            {'name': 'cpp-reviewer', 'purpose': 'C/C++ code review', 'when_to_use': 'C/C++ projects'},
            {'name': 'database-reviewer', 'purpose': 'PostgreSQL/Supabase specialist', 'when_to_use': 'Database work'},
            {'name': 'loop-operator', 'purpose': 'Autonomous loop execution', 'when_to_use': 'Run loops safely'},
            {'name': 'harness-optimizer', 'purpose': 'Harness config tuning', 'when_to_use': 'Reliability, cost, throughput'},
        ]

    return agents
|
||||
|
||||
def load_skills(project_path: str) -> List[Dict]:
    """Load skills from the skills/ directory.

    Each immediate subdirectory is one skill; its description is taken from
    the first H1 heading or first non-heading line of its SKILL.md, falling
    back to a title-cased version of the directory name. Returns dicts with
    keys ``name``, ``description``, ``category`` and ``path``. A built-in
    default list is used when nothing is found on disk.
    """
    # Keyword -> category rules, checked in order; first match wins
    # (preserves the precedence of the original if/elif chain).
    category_rules = [
        (('python', 'django'), 'Python'),
        (('golang', 'go-'), 'Go'),
        (('frontend', 'react'), 'Frontend'),
        (('backend', 'api'), 'Backend'),
        (('security',), 'Security'),
        (('testing', 'tdd'), 'Testing'),
        (('docker', 'deployment'), 'DevOps'),
        (('swift', 'ios'), 'iOS'),
        (('java', 'spring'), 'Java'),
        (('rust',), 'Rust'),
    ]

    skills_dir = os.path.join(project_path, "skills")
    skills = []

    if os.path.exists(skills_dir):
        for item in os.listdir(skills_dir):
            skill_path = os.path.join(skills_dir, item)
            if not os.path.isdir(skill_path):
                continue

            skill_file = os.path.join(skill_path, "SKILL.md")
            description = item.replace('-', ' ').title()

            if os.path.exists(skill_file):
                try:
                    with open(skill_file, 'r', encoding='utf-8') as f:
                        content = f.read()
                except (OSError, UnicodeDecodeError):
                    # Unreadable SKILL.md: keep the name-derived description.
                    # (The old bare `except:` also swallowed KeyboardInterrupt.)
                    content = ''
                # First body line or first H1 heading becomes the description.
                for line in content.split('\n'):
                    if line.strip() and not line.startswith('#'):
                        description = line.strip()[:100]
                        break
                    if line.startswith('# '):
                        description = line[2:].strip()[:100]
                        break

            # Determine category from keywords in the skill name.
            item_lower = item.lower()
            category = "General"
            for keywords, cat in category_rules:
                if any(k in item_lower for k in keywords):
                    category = cat
                    break

            skills.append({
                'name': item,
                'description': description,
                'category': category,
                'path': skill_path,
            })

    # Fallback if the directory doesn't exist or is empty. Each entry now
    # carries a 'path' key: on_skill_select reads skill['path'], which raised
    # KeyError whenever this fallback list was active.
    if not skills:
        skills = [
            {'name': 'tdd-workflow', 'description': 'Test-driven development workflow', 'category': 'Testing', 'path': ''},
            {'name': 'coding-standards', 'description': 'Baseline coding conventions', 'category': 'General', 'path': ''},
            {'name': 'security-review', 'description': 'Security checklist and patterns', 'category': 'Security', 'path': ''},
            {'name': 'frontend-patterns', 'description': 'React and Next.js patterns', 'category': 'Frontend', 'path': ''},
            {'name': 'backend-patterns', 'description': 'API and database patterns', 'category': 'Backend', 'path': ''},
            {'name': 'api-design', 'description': 'REST API design patterns', 'category': 'Backend', 'path': ''},
            {'name': 'docker-patterns', 'description': 'Docker and container patterns', 'category': 'DevOps', 'path': ''},
            {'name': 'e2e-testing', 'description': 'Playwright E2E testing patterns', 'category': 'Testing', 'path': ''},
            {'name': 'verification-loop', 'description': 'Build, test, lint verification', 'category': 'General', 'path': ''},
            {'name': 'python-patterns', 'description': 'Python idioms and best practices', 'category': 'Python', 'path': ''},
            {'name': 'golang-patterns', 'description': 'Go idioms and best practices', 'category': 'Go', 'path': ''},
            {'name': 'django-patterns', 'description': 'Django patterns and best practices', 'category': 'Python', 'path': ''},
            {'name': 'springboot-patterns', 'description': 'Java Spring Boot patterns', 'category': 'Java', 'path': ''},
            {'name': 'laravel-patterns', 'description': 'Laravel architecture patterns', 'category': 'PHP', 'path': ''},
        ]

    return skills
|
||||
|
||||
def load_commands(project_path: str) -> List[Dict]:
    """Load slash commands from the commands/ directory.

    Every ``*.md`` file is one command; its description is the file's first
    H1 heading, falling back to a title-cased version of the file name.
    Returns dicts with keys ``name`` and ``description``; a built-in default
    list is used when nothing is found.
    """
    commands_dir = os.path.join(project_path, "commands")
    commands = []

    if os.path.exists(commands_dir):
        # Sort for a stable, platform-independent display order
        # (os.listdir order is arbitrary).
        for item in sorted(os.listdir(commands_dir)):
            if not item.endswith('.md'):
                continue
            cmd_name = item[:-3]
            description = ""

            try:
                with open(os.path.join(commands_dir, item), 'r', encoding='utf-8') as f:
                    for line in f:
                        if line.startswith('# '):
                            description = line[2:].strip()
                            break
            except (OSError, UnicodeDecodeError):
                # Unreadable file: fall back to the name-derived description.
                # (The old bare `except:` also swallowed KeyboardInterrupt.)
                pass

            commands.append({
                'name': cmd_name,
                'description': description or cmd_name.replace('-', ' ').title(),
            })

    # Fallback commands
    if not commands:
        commands = [
            {'name': 'plan', 'description': 'Create implementation plan'},
            {'name': 'tdd', 'description': 'Test-driven development workflow'},
            {'name': 'code-review', 'description': 'Review code for quality and security'},
            {'name': 'build-fix', 'description': 'Fix build and TypeScript errors'},
            {'name': 'e2e', 'description': 'Generate and run E2E tests'},
            {'name': 'refactor-clean', 'description': 'Remove dead code'},
            {'name': 'verify', 'description': 'Run verification loop'},
            {'name': 'eval', 'description': 'Run evaluation against criteria'},
            {'name': 'security', 'description': 'Run comprehensive security review'},
            {'name': 'test-coverage', 'description': 'Analyze test coverage'},
            {'name': 'update-docs', 'description': 'Update documentation'},
            {'name': 'setup-pm', 'description': 'Configure package manager'},
            {'name': 'go-review', 'description': 'Go code review'},
            {'name': 'go-test', 'description': 'Go TDD workflow'},
            {'name': 'python-review', 'description': 'Python code review'},
        ]

    return commands
|
||||
|
||||
def load_rules(project_path: str) -> List[Dict]:
    """Load rule files from the rules/ directory.

    Each subdirectory is a rule group ('common' or a language name); every
    ``*.md`` file inside becomes one rule dict with keys ``name``,
    ``language`` and ``path``. Falls back to a built-in list when nothing
    is found on disk.
    """
    rules_dir = os.path.join(project_path, "rules")
    rules = []

    if os.path.exists(rules_dir):
        for item in os.listdir(rules_dir):
            item_path = os.path.join(rules_dir, item)
            if not os.path.isdir(item_path):
                continue
            # 'common'.title() == 'Common', so a single loop covers both the
            # common rules and the language-specific ones. (The original had
            # two byte-identical branches for the two cases.)
            for file in os.listdir(item_path):
                if file.endswith('.md'):
                    rules.append({
                        'name': file[:-3],
                        'language': item.title(),
                        'path': os.path.join(item_path, file),
                    })

    # Fallback rules
    if not rules:
        rules = [
            {'name': 'coding-style', 'language': 'Common', 'path': ''},
            {'name': 'git-workflow', 'language': 'Common', 'path': ''},
            {'name': 'testing', 'language': 'Common', 'path': ''},
            {'name': 'performance', 'language': 'Common', 'path': ''},
            {'name': 'patterns', 'language': 'Common', 'path': ''},
            {'name': 'security', 'language': 'Common', 'path': ''},
            {'name': 'typescript', 'language': 'TypeScript', 'path': ''},
            {'name': 'python', 'language': 'Python', 'path': ''},
            {'name': 'golang', 'language': 'Go', 'path': ''},
            {'name': 'swift', 'language': 'Swift', 'path': ''},
            {'name': 'php', 'language': 'PHP', 'path': ''},
        ]

    return rules
|
||||
|
||||
# ============================================================================
|
||||
# MAIN APPLICATION
|
||||
# ============================================================================
|
||||
|
||||
class ECCDashboard(tk.Tk):
|
||||
"""Main ECC Dashboard Application"""
|
||||
|
||||
def __init__(self):
    """Build the main window: load catalog data, then set up styles and widgets."""
    super().__init__()

    self.project_path = get_project_path()
    self.title("ECC Dashboard - Everything Claude Code")

    # 'zoomed' is only supported by Windows (and some X11 builds); other
    # platforms raise TclError, which previously crashed this "cross-platform"
    # app at startup. Fall back to a fixed geometry there.
    try:
        self.state('zoomed')
    except tk.TclError:
        self.geometry('1200x800')

    # Window/taskbar icon. Resolve relative to the project root rather than
    # the CWD, so the logo is found no matter where the app is launched from.
    try:
        icon_path = os.path.join(self.project_path, 'assets', 'images', 'ecc-logo.png')
        self.icon_image = tk.PhotoImage(file=icon_path)
        self.iconphoto(True, self.icon_image)
    except tk.TclError:
        # Missing or unsupported image: run without a custom icon.
        pass

    self.minsize(800, 600)

    # Load data
    self.agents = load_agents(self.project_path)
    self.skills = load_skills(self.project_path)
    self.commands = load_commands(self.project_path)
    self.rules = load_rules(self.project_path)

    # Settings
    self.settings = {
        'project_path': self.project_path,
        'theme': 'light'
    }

    # Setup UI
    self.setup_styles()
    self.create_widgets()

    # Center window
    self.center_window()
|
||||
|
||||
def setup_styles(self):
    """Apply the ttk 'clam' theme and configure the styles of the widgets we use."""
    style = ttk.Style()
    style.theme_use('clam')

    # (style name, configure kwargs) pairs: notebook tabs, tree rows/headers,
    # and buttons all share the same Arial-based look.
    style_configs = [
        ('TNotebook', {'background': '#f0f0f0'}),
        ('TNotebook.Tab', {'padding': [10, 5], 'font': ('Arial', 10)}),
        ('Treeview', {'font': ('Arial', 10), 'rowheight': 25}),
        ('Treeview.Heading', {'font': ('Arial', 10, 'bold')}),
        ('TButton', {'font': ('Arial', 10), 'padding': 5}),
    ]
    for style_name, options in style_configs:
        style.configure(style_name, **options)

    # Highlight the currently selected notebook tab.
    style.map('TNotebook.Tab', background=[('selected', '#ffffff')])
|
||||
|
||||
def center_window(self):
    """Reposition the window so it sits centered on the screen."""
    # Flush pending geometry changes so winfo_* report real sizes.
    self.update_idletasks()
    win_w = self.winfo_width()
    win_h = self.winfo_height()
    # Same integer arithmetic as before: half-screen minus half-window.
    offset_x = (self.winfo_screenwidth() // 2) - (win_w // 2)
    offset_y = (self.winfo_screenheight() // 2) - (win_h // 2)
    self.geometry(f'{win_w}x{win_h}+{offset_x}+{offset_y}')
|
||||
|
||||
def create_widgets(self):
    """Create the whole UI: header (logo/title), notebook tabs and status bar."""
    # Main container
    main_frame = ttk.Frame(self)
    main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

    # Header: logo, title, version label
    header_frame = ttk.Frame(main_frame)
    header_frame.pack(fill=tk.X, pady=(0, 10))

    # Resolve the logo relative to the project root so it loads regardless
    # of the current working directory (the original used a CWD-relative path).
    try:
        logo_path = os.path.join(self.project_path, 'assets', 'images', 'ecc-logo.png')
        self.logo_image = tk.PhotoImage(file=logo_path)
        self.logo_image = self.logo_image.subsample(2, 2)  # shrink to half size
        ttk.Label(header_frame, image=self.logo_image).pack(side=tk.LEFT, padx=(0, 10))
    except tk.TclError:
        # Missing/unreadable logo: the header simply has no image.
        pass

    self.title_label = ttk.Label(header_frame, text="ECC Dashboard", font=('Open Sans', 18, 'bold'))
    self.title_label.pack(side=tk.LEFT)
    self.version_label = ttk.Label(header_frame, text="v1.10.0", font=('Open Sans', 10), foreground='gray')
    self.version_label.pack(side=tk.LEFT, padx=(10, 0))

    # Notebook (tabs)
    self.notebook = ttk.Notebook(main_frame)
    self.notebook.pack(fill=tk.BOTH, expand=True)

    # Create tabs (order here is the on-screen tab order)
    self.create_agents_tab()
    self.create_skills_tab()
    self.create_commands_tab()
    self.create_rules_tab()
    self.create_settings_tab()

    # Status bar with catalog counts
    status_frame = ttk.Frame(main_frame)
    status_frame.pack(fill=tk.X, pady=(10, 0))

    self.status_label = ttk.Label(status_frame,
        text=f"Ready | Agents: {len(self.agents)} | Skills: {len(self.skills)} | Commands: {len(self.commands)}",
        font=('Arial', 9), foreground='gray')
    self.status_label.pack(side=tk.LEFT)
|
||||
|
||||
# =========================================================================
|
||||
# AGENTS TAB
|
||||
# =========================================================================
|
||||
|
||||
def create_agents_tab(self):
    """Create the Agents tab: search bar, agent list, and a details pane.

    Side effects: creates and stores self.agent_search, self.agent_count_label,
    self.agent_tree and self.agent_details, and adds the tab to self.notebook.
    """
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Agents ({len(self.agents)})")

    # Search bar
    search_frame = ttk.Frame(frame)
    search_frame.pack(fill=tk.X, padx=10, pady=10)

    ttk.Label(search_frame, text="Search:").pack(side=tk.LEFT)
    self.agent_search = ttk.Entry(search_frame, width=30)
    self.agent_search.pack(side=tk.LEFT, padx=5)
    # Re-filter the list on every keystroke.
    self.agent_search.bind('<KeyRelease>', self.filter_agents)

    # Live count of the rows currently shown (updated by filter_agents).
    ttk.Label(search_frame, text="Count:").pack(side=tk.LEFT, padx=(20, 0))
    self.agent_count_label = ttk.Label(search_frame, text=str(len(self.agents)))
    self.agent_count_label.pack(side=tk.LEFT)

    # Split pane: list + details
    paned = ttk.PanedWindow(frame, orient=tk.HORIZONTAL)
    paned.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))

    # Agent list (left pane, twice the weight of the details pane)
    list_frame = ttk.Frame(paned)
    paned.add(list_frame, weight=2)

    columns = ('name', 'purpose')
    self.agent_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.agent_tree.heading('#0', text='#')  # '#0' is the built-in tree column, used for row numbers
    self.agent_tree.heading('name', text='Agent Name')
    self.agent_tree.heading('purpose', text='Purpose')
    self.agent_tree.column('#0', width=40)
    self.agent_tree.column('name', width=180)
    self.agent_tree.column('purpose', width=250)

    self.agent_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)

    # Scrollbar
    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.agent_tree.yview)
    self.agent_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)

    # Details panel (right pane)
    details_frame = ttk.Frame(paned)
    paned.add(details_frame, weight=1)

    ttk.Label(details_frame, text="Details", font=('Arial', 11, 'bold')).pack(anchor=tk.W, pady=5)

    self.agent_details = scrolledtext.ScrolledText(details_frame, wrap=tk.WORD, height=15)
    self.agent_details.pack(fill=tk.BOTH, expand=True)

    # Bind selection -> details pane update
    self.agent_tree.bind('<<TreeviewSelect>>', self.on_agent_select)

    # Populate list with the full, unfiltered agent set
    self.populate_agents(self.agents)
|
||||
|
||||
def populate_agents(self, agents: List[Dict]):
    """Replace the rows of the agent tree with the given agents."""
    # Clear out whatever rows are currently displayed.
    for row_id in self.agent_tree.get_children():
        self.agent_tree.delete(row_id)

    # Insert one row per agent, numbered from 1.
    row_number = 0
    for agent in agents:
        row_number += 1
        self.agent_tree.insert(
            '', tk.END,
            text=str(row_number),
            values=(agent['name'], agent['purpose']),
        )
|
||||
|
||||
def filter_agents(self, event=None):
    """Re-populate the agent list to match the search box contents (case-insensitive)."""
    query = self.agent_search.get().lower()

    matches = self.agents
    if query:
        matches = [
            agent for agent in self.agents
            if query in agent['name'].lower() or query in agent['purpose'].lower()
        ]

    self.populate_agents(matches)
    self.agent_count_label.config(text=str(len(matches)))
|
||||
|
||||
def on_agent_select(self, event):
    """Show details for the agent currently selected in the tree."""
    selected = self.agent_tree.selection()
    if not selected:
        return

    # Look the agent back up in the full list by name (the tree may be filtered).
    selected_name = self.agent_tree.item(selected[0])['values'][0]
    agent = next((a for a in self.agents if a['name'] == selected_name), None)
    if not agent:
        return

    details = (
        f"Agent: {agent['name']}\n"
        "\n"
        f"Purpose: {agent['purpose']}\n"
        "\n"
        f"When to Use: {agent['when_to_use']}\n"
        "\n"
        "---\n"
        "Usage in Claude Code:\n"
        f"Use the /{agent['name']} command or invoke via agent delegation."
    )
    self.agent_details.delete('1.0', tk.END)
    self.agent_details.insert('1.0', details)
|
||||
|
||||
# =========================================================================
|
||||
# SKILLS TAB
|
||||
# =========================================================================
|
||||
|
||||
def create_skills_tab(self):
    """Create the Skills tab: search box, category filter, skill list, details pane.

    Side effects: creates and stores self.skill_search, self.skill_category,
    self.skill_count_label, self.skill_tree and self.skill_details, and adds
    the tab to self.notebook.
    """
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Skills ({len(self.skills)})")

    # Search and filter row
    filter_frame = ttk.Frame(frame)
    filter_frame.pack(fill=tk.X, padx=10, pady=10)

    ttk.Label(filter_frame, text="Search:").pack(side=tk.LEFT)
    self.skill_search = ttk.Entry(filter_frame, width=25)
    self.skill_search.pack(side=tk.LEFT, padx=5)
    # Re-filter on every keystroke.
    self.skill_search.bind('<KeyRelease>', self.filter_skills)

    # Category dropdown, prefixed with 'All' to disable category filtering.
    ttk.Label(filter_frame, text="Category:").pack(side=tk.LEFT, padx=(20, 0))
    self.skill_category = ttk.Combobox(filter_frame, values=['All'] + self.get_categories(), width=15)
    self.skill_category.set('All')
    self.skill_category.pack(side=tk.LEFT, padx=5)
    self.skill_category.bind('<<ComboboxSelected>>', self.filter_skills)

    # Live count of rows currently shown (updated by filter_skills).
    ttk.Label(filter_frame, text="Count:").pack(side=tk.LEFT, padx=(20, 0))
    self.skill_count_label = ttk.Label(filter_frame, text=str(len(self.skills)))
    self.skill_count_label.pack(side=tk.LEFT)

    # Split pane: list + details
    paned = ttk.PanedWindow(frame, orient=tk.HORIZONTAL)
    paned.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))

    # Skill list (left pane)
    list_frame = ttk.Frame(paned)
    paned.add(list_frame, weight=1)

    columns = ('name', 'category', 'description')
    self.skill_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.skill_tree.heading('#0', text='#')  # '#0' tree column holds row numbers
    self.skill_tree.heading('name', text='Skill Name')
    self.skill_tree.heading('category', text='Category')
    self.skill_tree.heading('description', text='Description')

    self.skill_tree.column('#0', width=40)
    self.skill_tree.column('name', width=180)
    self.skill_tree.column('category', width=100)
    self.skill_tree.column('description', width=300)

    self.skill_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)

    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.skill_tree.yview)
    self.skill_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)

    # Details pane (right)
    details_frame = ttk.Frame(paned)
    paned.add(details_frame, weight=1)

    ttk.Label(details_frame, text="Description", font=('Arial', 11, 'bold')).pack(anchor=tk.W, pady=5)

    self.skill_details = scrolledtext.ScrolledText(details_frame, wrap=tk.WORD, height=15)
    self.skill_details.pack(fill=tk.BOTH, expand=True)

    # Selection -> details pane update
    self.skill_tree.bind('<<TreeviewSelect>>', self.on_skill_select)

    # Populate with the full, unfiltered skill set
    self.populate_skills(self.skills)
|
||||
|
||||
def get_categories(self) -> List[str]:
    """Return the distinct skill categories, sorted alphabetically."""
    distinct = {skill['category'] for skill in self.skills}
    return sorted(distinct)
|
||||
|
||||
def populate_skills(self, skills: List[Dict]):
    """Refill the skills tree with the given skill records."""
    # Drop every row from the previous population/filter pass.
    for row_id in self.skill_tree.get_children():
        self.skill_tree.delete(row_id)

    # Re-insert, numbering rows from 1 in the '#0' column.
    for row_number, skill in enumerate(skills, start=1):
        self.skill_tree.insert(
            '', tk.END, text=str(row_number),
            values=(skill['name'], skill['category'], skill['description']),
        )
|
||||
|
||||
def filter_skills(self, event=None):
    """Apply the search box and category dropdown to the skills list."""
    query = self.skill_search.get().lower()
    chosen = self.skill_category.get()

    matches = self.skills
    if chosen != 'All':
        matches = [s for s in matches if s['category'] == chosen]
    if query:
        matches = [
            s for s in matches
            if query in s['name'].lower() or query in s['description'].lower()
        ]

    self.populate_skills(matches)
    self.skill_count_label.config(text=str(len(matches)))
|
||||
|
||||
def on_skill_select(self, event):
    """Show the details panel for the skill selected in the tree.

    Args:
        event: The <<TreeviewSelect>> event (only used as a trigger).
    """
    selection = self.skill_tree.selection()
    if not selection:
        return

    item = self.skill_tree.item(selection[0])
    # Tk coerces all-digit Treeview values to int, so normalize back to
    # str before comparing against the loaded skill names.
    skill_name = str(item['values'][0])

    skill = next((s for s in self.skills if s['name'] == skill_name), None)
    if skill is None:
        return

    details = f"""Skill: {skill['name']}

Category: {skill['category']}

Description: {skill['description']}

Path: {skill['path']}

---
Usage: This skill is automatically activated when working with related technologies."""
    self.skill_details.delete('1.0', tk.END)
    self.skill_details.insert('1.0', details)
|
||||
|
||||
# =========================================================================
|
||||
# COMMANDS TAB
|
||||
# =========================================================================
|
||||
|
||||
def create_commands_tab(self):
    """Create the Commands tab listing the project's slash commands."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Commands ({len(self.commands)})")

    # Info header explaining how the commands are used.
    info_frame = ttk.Frame(frame)
    info_frame.pack(fill=tk.X, padx=10, pady=10)

    ttk.Label(info_frame, text="Slash Commands for Claude Code:",
              font=('Arial', 10, 'bold')).pack(anchor=tk.W)
    ttk.Label(info_frame, text="Use these commands in Claude Code by typing /command_name",
              foreground='gray').pack(anchor=tk.W)

    # Commands list (numbered tree with name + description columns).
    list_frame = ttk.Frame(frame)
    list_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))

    columns = ('name', 'description')
    self.command_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.command_tree.heading('#0', text='#')
    self.command_tree.heading('name', text='Command')
    self.command_tree.heading('description', text='Description')

    self.command_tree.column('#0', width=40)
    self.command_tree.column('name', width=150)
    self.command_tree.column('description', width=400)

    # Tree packed LEFT before the scrollbar so the scrollbar keeps the
    # right edge slot.
    self.command_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)

    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.command_tree.yview)
    self.command_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)

    # Populate once; this tab has no filtering, so no repopulate helper.
    for i, cmd in enumerate(self.commands, 1):
        self.command_tree.insert('', tk.END, text=str(i),
                                 values=('/' + cmd['name'], cmd['description']))
|
||||
|
||||
# =========================================================================
|
||||
# RULES TAB
|
||||
# =========================================================================
|
||||
|
||||
def create_rules_tab(self):
    """Create the Rules tab: a language filter plus the rule list."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text=f"Rules ({len(self.rules)})")

    # Info header.
    info_frame = ttk.Frame(frame)
    info_frame.pack(fill=tk.X, padx=10, pady=10)

    ttk.Label(info_frame, text="Coding Rules by Language:",
              font=('Arial', 10, 'bold')).pack(anchor=tk.W)
    ttk.Label(info_frame, text="These rules are automatically applied in Claude Code",
              foreground='gray').pack(anchor=tk.W)

    # Language filter, seeded with 'All' plus every language found in
    # the loaded rules; selecting re-filters via filter_rules.
    filter_frame = ttk.Frame(frame)
    filter_frame.pack(fill=tk.X, padx=10, pady=5)

    ttk.Label(filter_frame, text="Language:").pack(side=tk.LEFT)
    self.rules_language = ttk.Combobox(filter_frame,
                                       values=['All'] + self.get_rule_languages(),
                                       width=15)
    self.rules_language.set('All')
    self.rules_language.pack(side=tk.LEFT, padx=5)
    self.rules_language.bind('<<ComboboxSelected>>', self.filter_rules)

    # Rules list (numbered tree with name + language columns).
    list_frame = ttk.Frame(frame)
    list_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=(0, 10))

    columns = ('name', 'language')
    self.rules_tree = ttk.Treeview(list_frame, columns=columns, show='tree headings')
    self.rules_tree.heading('#0', text='#')
    self.rules_tree.heading('name', text='Rule Name')
    self.rules_tree.heading('language', text='Language')

    self.rules_tree.column('#0', width=40)
    self.rules_tree.column('name', width=250)
    self.rules_tree.column('language', width=100)

    self.rules_tree.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)

    scrollbar = ttk.Scrollbar(list_frame, orient=tk.VERTICAL, command=self.rules_tree.yview)
    self.rules_tree.configure(yscrollcommand=scrollbar.set)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)

    # Initial, unfiltered population.
    self.populate_rules(self.rules)
|
||||
|
||||
def get_rule_languages(self) -> List[str]:
    """Return the sorted list of distinct rule languages."""
    return sorted({rule['language'] for rule in self.rules})
|
||||
|
||||
def populate_rules(self, rules: List[Dict]):
    """Refill the rules tree with the given rule records."""
    # Remove all rows from the previous population/filter pass.
    for row_id in self.rules_tree.get_children():
        self.rules_tree.delete(row_id)

    # Re-insert, numbering rows from 1 in the '#0' column.
    for row_number, rule in enumerate(rules, start=1):
        self.rules_tree.insert(
            '', tk.END, text=str(row_number),
            values=(rule['name'], rule['language']),
        )
|
||||
|
||||
def filter_rules(self, event=None):
    """Show only the rules matching the selected language filter."""
    chosen = self.rules_language.get()
    subset = (
        self.rules
        if chosen == 'All'
        else [rule for rule in self.rules if rule['language'] == chosen]
    )
    self.populate_rules(subset)
|
||||
|
||||
# =========================================================================
|
||||
# SETTINGS TAB
|
||||
# =========================================================================
|
||||
|
||||
def create_settings_tab(self):
    """Create the Settings tab: project path, appearance, actions, about."""
    frame = ttk.Frame(self.notebook)
    self.notebook.add(frame, text="Settings")

    # Project path entry + browse button.
    path_frame = ttk.LabelFrame(frame, text="Project Path", padding=10)
    path_frame.pack(fill=tk.X, padx=10, pady=10)

    self.path_entry = ttk.Entry(path_frame, width=60)
    self.path_entry.insert(0, self.project_path)
    self.path_entry.pack(side=tk.LEFT, fill=tk.X, expand=True)

    ttk.Button(path_frame, text="Browse...", command=self.browse_path).pack(side=tk.LEFT, padx=5)

    # Theme radio buttons; switching re-applies the theme immediately.
    theme_frame = ttk.LabelFrame(frame, text="Appearance", padding=10)
    theme_frame.pack(fill=tk.X, padx=10, pady=10)

    ttk.Label(theme_frame, text="Theme:").pack(anchor=tk.W)
    self.theme_var = tk.StringVar(value='light')
    light_rb = ttk.Radiobutton(theme_frame, text="Light", variable=self.theme_var,
                               value='light', command=self.apply_theme)
    light_rb.pack(anchor=tk.W)
    dark_rb = ttk.Radiobutton(theme_frame, text="Dark", variable=self.theme_var,
                              value='dark', command=self.apply_theme)
    dark_rb.pack(anchor=tk.W)

    # Font family/size pickers; both re-apply the theme on selection.
    font_frame = ttk.LabelFrame(frame, text="Font", padding=10)
    font_frame.pack(fill=tk.X, padx=10, pady=10)

    ttk.Label(font_frame, text="Font Family:").pack(anchor=tk.W)
    self.font_var = tk.StringVar(value='Open Sans')

    fonts = ['Open Sans', 'Arial', 'Helvetica', 'Times New Roman', 'Courier New', 'Verdana', 'Georgia', 'Tahoma', 'Trebuchet MS']
    self.font_combo = ttk.Combobox(font_frame, textvariable=self.font_var, values=fonts, state='readonly')
    self.font_combo.pack(anchor=tk.W, fill=tk.X, pady=(5, 0))
    self.font_combo.bind('<<ComboboxSelected>>', lambda e: self.apply_theme())

    ttk.Label(font_frame, text="Font Size:").pack(anchor=tk.W, pady=(10, 0))
    self.size_var = tk.StringVar(value='10')
    sizes = ['8', '9', '10', '11', '12', '14', '16', '18', '20']
    self.size_combo = ttk.Combobox(font_frame, textvariable=self.size_var, values=sizes, state='readonly', width=10)
    self.size_combo.pack(anchor=tk.W, pady=(5, 0))
    self.size_combo.bind('<<ComboboxSelected>>', lambda e: self.apply_theme())

    # Quick Actions: shortcuts that delegate to the handler methods.
    actions_frame = ttk.LabelFrame(frame, text="Quick Actions", padding=10)
    actions_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10)

    ttk.Button(actions_frame, text="Open Project in Terminal",
               command=self.open_terminal).pack(fill=tk.X, pady=2)
    ttk.Button(actions_frame, text="Open README",
               command=self.open_readme).pack(fill=tk.X, pady=2)
    ttk.Button(actions_frame, text="Open AGENTS.md",
               command=self.open_agents).pack(fill=tk.X, pady=2)
    ttk.Button(actions_frame, text="Refresh Data",
               command=self.refresh_data).pack(fill=tk.X, pady=2)

    # Static about box.
    about_frame = ttk.LabelFrame(frame, text="About", padding=10)
    about_frame.pack(fill=tk.X, padx=10, pady=10)

    about_text = """ECC Dashboard v1.0.0
Everything Claude Code GUI

A cross-platform desktop application for
managing and exploring ECC components.

Version: 1.10.0
Project: github.com/affaan-m/everything-claude-code"""

    ttk.Label(about_frame, text=about_text, justify=tk.LEFT).pack(anchor=tk.W)
|
||||
|
||||
def browse_path(self):
    """Prompt for a project directory and put it in the path entry."""
    from tkinter import filedialog

    chosen = filedialog.askdirectory(initialdir=self.project_path)
    if not chosen:
        return
    self.path_entry.delete(0, tk.END)
    self.path_entry.insert(0, chosen)
|
||||
|
||||
def open_terminal(self):
    """Open a system terminal at the configured project path.

    Best effort per platform: Windows cmd, macOS Terminal.app, otherwise
    the Debian-style x-terminal-emulator alternative.
    """
    import subprocess
    import sys

    path = self.path_entry.get()
    if sys.platform == 'win32':
        # 'start' needs a shell; /k keeps the new window open on the path.
        subprocess.Popen(['cmd', '/c', 'start', 'cmd', '/k', f'cd /d "{path}"'])
    elif sys.platform == 'darwin':
        subprocess.Popen(['open', '-a', 'Terminal', path])
    else:
        # 'cd' is a shell builtin, not an executable, so it cannot be the
        # -e command; start the emulator with its working directory set.
        subprocess.Popen(['x-terminal-emulator'], cwd=path)
|
||||
|
||||
def open_readme(self):
    """Open the project's README.md with the platform's default app."""
    import subprocess
    import sys

    path = os.path.join(self.path_entry.get(), 'README.md')
    if not os.path.exists(path):
        messagebox.showerror("Error", "README.md not found")
        return

    if sys.platform == 'win32':
        # 'start' is a cmd builtin, not an executable, so
        # Popen(['start', path]) raises FileNotFoundError on Windows;
        # os.startfile is the native equivalent.
        os.startfile(path)
    elif sys.platform == 'darwin':
        subprocess.Popen(['open', path])  # macOS has no xdg-open
    else:
        subprocess.Popen(['xdg-open', path])
|
||||
|
||||
def open_agents(self):
    """Open the project's AGENTS.md with the platform's default app."""
    import subprocess
    import sys

    path = os.path.join(self.path_entry.get(), 'AGENTS.md')
    if not os.path.exists(path):
        messagebox.showerror("Error", "AGENTS.md not found")
        return

    if sys.platform == 'win32':
        # 'start' is a cmd builtin, not an executable, so
        # Popen(['start', path]) raises FileNotFoundError on Windows;
        # os.startfile is the native equivalent.
        os.startfile(path)
    elif sys.platform == 'darwin':
        subprocess.Popen(['open', path])  # macOS has no xdg-open
    else:
        subprocess.Popen(['xdg-open', path])
|
||||
|
||||
def refresh_data(self):
    """Reload every component list from disk and refresh the UI."""
    self.project_path = self.path_entry.get()
    self.agents = load_agents(self.project_path)
    self.skills = load_skills(self.project_path)
    self.commands = load_commands(self.project_path)
    self.rules = load_rules(self.project_path)

    # Refresh each tab caption with the new counts (tab order is fixed:
    # Agents, Skills, Commands, Rules).
    tab_data = [
        ('Agents', self.agents),
        ('Skills', self.skills),
        ('Commands', self.commands),
        ('Rules', self.rules),
    ]
    for tab_index, (caption, items) in enumerate(tab_data):
        self.notebook.tab(tab_index, text=f"{caption} ({len(items)})")

    # Repopulate the list views (agents and skills only, as before;
    # the commands/rules trees are not repopulated here).
    self.populate_agents(self.agents)
    self.populate_skills(self.skills)

    # Status bar summary.
    self.status_label.config(
        text=f"Ready | Agents: {len(self.agents)} | Skills: {len(self.skills)} | Commands: {len(self.commands)}"
    )

    messagebox.showinfo("Success", "Data refreshed successfully!")
|
||||
|
||||
def apply_theme(self):
    """Apply the chosen theme, font family and font size to the whole UI.

    Reconfigures the ttk styles, the two header labels, and then walks
    the widget tree to recolor classic-Tk widgets that accept a
    -background option.
    """
    theme = self.theme_var.get()
    font_family = self.font_var.get()
    font_size = int(self.size_var.get())
    font_tuple = (font_family, font_size)

    if theme == 'dark':
        bg_color = '#2b2b2b'
        fg_color = '#ffffff'
        entry_bg = '#3c3c3c'
        frame_bg = '#2b2b2b'
        select_bg = '#0f5a9e'
    else:
        bg_color = '#f0f0f0'
        fg_color = '#000000'
        entry_bg = '#ffffff'
        frame_bg = '#f0f0f0'
        select_bg = '#e0e0e0'

    self.configure(background=bg_color)

    style = ttk.Style()
    style.configure('.', background=bg_color, foreground=fg_color, font=font_tuple)
    style.configure('TFrame', background=bg_color, font=font_tuple)
    style.configure('TLabel', background=bg_color, foreground=fg_color, font=font_tuple)
    style.configure('TNotebook', background=bg_color, font=font_tuple)
    style.configure('TNotebook.Tab', background=frame_bg, foreground=fg_color, font=font_tuple)
    style.map('TNotebook.Tab', background=[('selected', select_bg)])
    style.configure('Treeview', background=entry_bg, foreground=fg_color, fieldbackground=entry_bg, font=font_tuple)
    style.configure('Treeview.Heading', background=frame_bg, foreground=fg_color, font=font_tuple)
    style.configure('TEntry', fieldbackground=entry_bg, foreground=fg_color, font=font_tuple)
    style.configure('TButton', background=frame_bg, foreground=fg_color, font=font_tuple)

    # Header labels keep their fixed sizes but follow the family choice.
    self.title_label.configure(font=(font_family, 18, 'bold'))
    self.version_label.configure(font=(font_family, 10))

    def update_widget_colors(widget):
        """Recursively recolor classic-Tk widgets; ttk widgets refuse."""
        # ttk widgets raise TclError for -background; only that error is
        # expected here, so don't swallow everything with a bare except.
        try:
            widget.configure(background=bg_color)
        except tk.TclError:
            pass
        for child in widget.winfo_children():
            update_widget_colors(child)

    update_widget_colors(self)

    self.update()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# MAIN
|
||||
# ============================================================================
|
||||
|
||||
def main():
    """Launch the ECC dashboard application and run its event loop."""
    dashboard = ECCDashboard()
    dashboard.mainloop()


if __name__ == "__main__":
    main()
|
||||
@@ -249,7 +249,8 @@
|
||||
"test": "node scripts/ci/check-unicode-safety.js && node scripts/ci/validate-agents.js && node scripts/ci/validate-commands.js && node scripts/ci/validate-rules.js && node scripts/ci/validate-skills.js && node scripts/ci/validate-hooks.js && node scripts/ci/validate-install-manifests.js && node scripts/ci/validate-no-personal-paths.js && npm run catalog:check && node tests/run-all.js",
|
||||
"coverage": "c8 --all --include=\"scripts/**/*.js\" --check-coverage --lines 80 --functions 80 --branches 80 --statements 80 --reporter=text --reporter=lcov node tests/run-all.js",
|
||||
"build:opencode": "node scripts/build-opencode.js",
|
||||
"prepack": "npm run build:opencode"
|
||||
"prepack": "npm run build:opencode",
|
||||
"dashboard": "python3 ./ecc_dashboard.py"
|
||||
},
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^2.2.5",
|
||||
@@ -270,4 +271,4 @@
|
||||
"node": ">=18"
|
||||
},
|
||||
"packageManager": "yarn@4.9.2+sha512.1fc009bc09d13cfd0e19efa44cbfc2b9cf6ca61482725eb35bbc5e257e093ebf4130db6dfe15d604ff4b79efd8e1e8e99b25fa7d0a6197c9f9826358d4d65c3c"
|
||||
}
|
||||
}
|
||||
78
pyproject.toml
Normal file
78
pyproject.toml
Normal file
@@ -0,0 +1,78 @@
|
||||
[project]
|
||||
name = "llm-abstraction"
|
||||
version = "0.1.0"
|
||||
description = "Provider-agnostic LLM abstraction layer"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.11"
|
||||
license = {text = "MIT"}
|
||||
authors = [
|
||||
{name = "Affaan Mustafa", email = "affaan@example.com"}
|
||||
]
|
||||
keywords = ["llm", "openai", "anthropic", "ollama", "ai"]
|
||||
classifiers = [
|
||||
"Development Status :: 3 - Alpha",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
]
|
||||
|
||||
dependencies = [
|
||||
"anthropic>=0.25.0",
|
||||
"openai>=1.30.0",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
dev = [
|
||||
"pytest>=8.0",
|
||||
"pytest-asyncio>=0.23",
|
||||
"pytest-cov>=4.1",
|
||||
"pytest-mock>=3.12",
|
||||
"ruff>=0.4",
|
||||
"mypy>=1.10",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/affaan-m/everything-claude-code"
|
||||
Repository = "https://github.com/affaan-m/everything-claude-code"
|
||||
|
||||
[project.scripts]
|
||||
llm-select = "llm.cli.selector:main"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[tool.hatch.build.targets.wheel]
|
||||
packages = ["src/llm"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests"]
|
||||
asyncio_mode = "auto"
|
||||
filterwarnings = ["ignore::DeprecationWarning"]
|
||||
|
||||
[tool.coverage.run]
|
||||
source = ["src/llm"]
|
||||
branch = true
|
||||
|
||||
[tool.coverage.report]
|
||||
exclude_lines = [
|
||||
"pragma: no cover",
|
||||
"if TYPE_CHECKING:",
|
||||
"raise NotImplementedError",
|
||||
]
|
||||
|
||||
[tool.ruff]
|
||||
src-path = ["src"]
|
||||
target-version = "py311"
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = ["E", "F", "I", "N", "W", "UP"]
|
||||
ignore = ["E501"]
|
||||
|
||||
[tool.mypy]
|
||||
python_version = "3.11"
|
||||
src_paths = ["src"]
|
||||
warn_return_any = true
|
||||
warn_unused_ignores = true
|
||||
33
src/llm/__init__.py
Normal file
33
src/llm/__init__.py
Normal file
@@ -0,0 +1,33 @@
|
||||
"""
|
||||
LLM Abstraction Layer
|
||||
|
||||
Provider-agnostic interface for multiple LLM backends.
|
||||
"""
|
||||
|
||||
from llm.core.interface import LLMProvider
|
||||
from llm.core.types import LLMInput, LLMOutput, Message, ToolCall, ToolDefinition, ToolResult
|
||||
from llm.providers import get_provider
|
||||
from llm.tools import ToolExecutor, ToolRegistry
|
||||
from llm.cli.selector import interactive_select
|
||||
|
||||
__version__ = "0.1.0"
|
||||
|
||||
__all__ = (
|
||||
"LLMInput",
|
||||
"LLMOutput",
|
||||
"LLMProvider",
|
||||
"Message",
|
||||
"ToolCall",
|
||||
"ToolDefinition",
|
||||
"ToolResult",
|
||||
"ToolExecutor",
|
||||
"ToolRegistry",
|
||||
"get_provider",
|
||||
"interactive_select",
|
||||
)
|
||||
|
||||
|
||||
def gui() -> None:
    """Entry point that launches the interactive provider selector."""
    from llm.cli.selector import main as selector_main

    selector_main()
|
||||
|
||||
7
src/llm/__main__.py
Normal file
7
src/llm/__main__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Entry point for llm CLI."""
|
||||
|
||||
from llm.cli.selector import main
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
0
src/llm/cli/__init__.py
Normal file
0
src/llm/cli/__init__.py
Normal file
154
src/llm/cli/selector.py
Normal file
154
src/llm/cli/selector.py
Normal file
@@ -0,0 +1,154 @@
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class Color(str, Enum):
    """ANSI escape codes for simple terminal styling.

    str-mixin enum so members interpolate into f-strings as their
    escape-code values.
    """

    RESET = "\033[0m"
    BOLD = "\033[1m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    BLUE = "\033[94m"
    CYAN = "\033[96m"
|
||||
|
||||
def print_banner() -> None:
    """Print the cyan box-drawing banner shown at selector start-up."""
    banner = f"""{Color.CYAN}
╔═══════════════════════════════════════════╗
║ LLM Provider Selector ║
║ Provider-agnostic AI interactions ║
╚═══════════════════════════════════════════╝{Color.RESET}"""
    print(banner)
|
||||
|
||||
|
||||
def print_providers(providers: list[tuple[str, str]]) -> None:
    """Print a numbered, colorized menu of (name, description) providers."""
    print(f"\n{Color.BOLD}Available Providers:{Color.RESET}\n")
    for number, (name, desc) in enumerate(providers, start=1):
        print(f" {Color.GREEN}{number}{Color.RESET}. {Color.BOLD}{name}{Color.RESET} - {desc}")
|
||||
|
||||
|
||||
def select_provider(providers: list[tuple[str, str]]) -> str | None:
    """Interactively pick a provider; return its name, or None if cancelled.

    Empty input cancels; out-of-range or non-numeric input re-prompts.
    """
    if not providers:
        print("No providers available.")
        return None

    print_providers(providers)

    while True:
        raw = input(f"\n{Color.YELLOW}Select provider (1-{len(providers)}): {Color.RESET}").strip()
        if not raw:
            return None
        try:
            index = int(raw) - 1
        except ValueError:
            print(f"{Color.YELLOW}Please enter a number.{Color.RESET}")
            continue
        if 0 <= index < len(providers):
            return providers[index][0]
        print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}")
|
||||
|
||||
|
||||
def select_model(models: list[tuple[str, str]]) -> str | None:
    """Interactively pick a model; return its name, or None if cancelled.

    Empty input cancels; out-of-range or non-numeric input re-prompts.
    """
    if not models:
        print("No models available.")
        return None

    print(f"\n{Color.BOLD}Available Models:{Color.RESET}\n")
    for number, (name, desc) in enumerate(models, start=1):
        print(f" {Color.GREEN}{number}{Color.RESET}. {Color.BOLD}{name}{Color.RESET} - {desc}")

    while True:
        raw = input(f"\n{Color.YELLOW}Select model (1-{len(models)}): {Color.RESET}").strip()
        if not raw:
            return None
        try:
            index = int(raw) - 1
        except ValueError:
            print(f"{Color.YELLOW}Please enter a number.{Color.RESET}")
            continue
        if 0 <= index < len(models):
            return models[index][0]
        print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}")
|
||||
|
||||
|
||||
def save_config(provider: str, model: str, persist: bool = False) -> None:
    """Write the chosen provider/model to ``.llm.env`` in dotenv format.

    Args:
        provider: Provider key, e.g. "claude".
        model: Model identifier for that provider.
        persist: Also export the choice into this process's environment.
    """
    config = f"LLM_PROVIDER={provider}\nLLM_MODEL={model}\n"
    env_file = ".llm.env"

    # Explicit encoding: the platform default is not always UTF-8.
    with open(env_file, "w", encoding="utf-8") as f:
        f.write(config)

    print(f"\n{Color.GREEN}✓{Color.RESET} Config saved to {Color.CYAN}{env_file}{Color.RESET}")

    if persist:
        os.environ["LLM_PROVIDER"] = provider
        os.environ["LLM_MODEL"] = model
        print(f"{Color.GREEN}✓{Color.RESET} Config loaded to current session")
|
||||
|
||||
|
||||
def interactive_select(
    providers: list[tuple[str, str]] | None = None,
    models_per_provider: dict[str, list[tuple[str, str]]] | None = None,
    persist: bool = False,
) -> tuple[str, str] | None:
    """Run the interactive provider-then-model selection flow.

    Args:
        providers: (name, description) menu entries; defaults to the
            built-in claude/openai/ollama set.
        models_per_provider: model menu keyed by provider name; defaults
            to the built-in catalog below.
        persist: forwarded to save_config — also export the choice into
            this process's environment.

    Returns:
        The chosen (provider, model) pair, or None if the user cancelled
        at either prompt.
    """
    print_banner()

    # Built-in provider menu when the caller does not supply one.
    if providers is None:
        providers = [
            ("claude", "Anthropic Claude ( Sonnet, Opus, Haiku)"),
            ("openai", "OpenAI GPT (4o, 4o-mini, 3.5-turbo)"),
            ("ollama", "Local Ollama models"),
        ]

    # Built-in model catalog when the caller does not supply one.
    if models_per_provider is None:
        models_per_provider = {
            "claude": [
                ("claude-opus-4-5", "Claude Opus 4.5 - Most capable"),
                ("claude-sonnet-4-7", "Claude Sonnet 4.7 - Balanced"),
                ("claude-haiku-4-7", "Claude Haiku 4.7 - Fast"),
            ],
            "openai": [
                ("gpt-4o", "GPT-4o - Most capable"),
                ("gpt-4o-mini", "GPT-4o-mini - Fast & affordable"),
                ("gpt-4-turbo", "GPT-4 Turbo - Legacy powerful"),
                ("gpt-3.5-turbo", "GPT-3.5 - Legacy fast"),
            ],
            "ollama": [
                ("llama3.2", "Llama 3.2 - General purpose"),
                ("mistral", "Mistral - Fast & efficient"),
                ("codellama", "CodeLlama - Code specialized"),
            ],
        }

    provider = select_provider(providers)
    if not provider:
        return None

    # A provider missing from the catalog yields an empty menu, which
    # select_model reports and treats as a cancellation.
    models = models_per_provider.get(provider, [])
    model = select_model(models)
    if not model:
        return None

    print(f"\n{Color.GREEN}Selected: {Color.BOLD}{provider}{Color.RESET} / {Color.BOLD}{model}{Color.RESET}")

    save_config(provider, model, persist)

    return (provider, model)
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: run the selector and echo export commands."""
    selection = interactive_select(persist=True)

    if selection is None:
        print("\nSelection cancelled.")
        sys.exit(0)

    provider, model = selection
    print(f"\n{Color.GREEN}Ready to use!{Color.RESET}")
    print(f" export LLM_PROVIDER={provider}")
    print(f" export LLM_MODEL={model}")


if __name__ == "__main__":
    main()
|
||||
1
src/llm/core/__init__.py
Normal file
1
src/llm/core/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Core module for LLM abstraction layer."""
|
||||
60
src/llm/core/interface.py
Normal file
60
src/llm/core/interface.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""LLM Provider interface definition."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any
|
||||
|
||||
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType
|
||||
|
||||
|
||||
class LLMProvider(ABC):
    """Abstract base class every concrete LLM backend must implement."""

    # Which backend this class wraps; set by concrete subclasses.
    provider_type: ProviderType

    @abstractmethod
    def generate(self, input: LLMInput) -> LLMOutput:
        """Run one completion for *input* and return the normalized output."""
        ...

    @abstractmethod
    def list_models(self) -> list[ModelInfo]:
        """Enumerate the models this provider can serve."""
        ...

    @abstractmethod
    def validate_config(self) -> bool:
        """Return True when configuration/credentials permit API calls."""
        ...

    def supports_tools(self) -> bool:
        # Default: providers are assumed to support tool calling.
        return True

    def supports_vision(self) -> bool:
        # Default: vision is opt-in per provider.
        return False

    def get_default_model(self) -> str:
        # Not abstract, but effectively required: raises until overridden.
        raise NotImplementedError(f"{self.__class__.__name__} must implement get_default_model")
|
||||
|
||||
|
||||
class LLMError(Exception):
    """Base exception for provider failures, carrying structured context."""

    def __init__(
        self,
        message: str,
        provider: ProviderType | None = None,
        code: str | None = None,
        details: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(message)
        self.message = message        # human-readable description
        self.provider = provider      # backend that raised, if known
        self.code = code              # provider-specific error code
        self.details = details or {}  # extra payload; never None
|
||||
|
||||
|
||||
class AuthenticationError(LLMError):
    """Invalid or missing credentials."""
    ...


class RateLimitError(LLMError):
    """The provider throttled the request."""
    ...


class ContextLengthError(LLMError):
    """The prompt exceeded the model's context window."""
    ...


class ModelNotFoundError(LLMError):
    """The requested model is unknown to the provider."""
    ...


class ToolExecutionError(LLMError):
    """A tool invoked on behalf of the model failed."""
    ...
|
||||
146
src/llm/core/types.py
Normal file
146
src/llm/core/types.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""Core type definitions for LLM abstraction layer."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
|
||||
class Role(str, Enum):
    """Chat message roles, matching the common provider wire format."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"
|
||||
|
||||
|
||||
class ProviderType(str, Enum):
    """Identifiers of the supported LLM backends."""

    CLAUDE = "claude"
    OPENAI = "openai"
    OLLAMA = "ollama"
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Message:
    """One chat message in a provider-agnostic conversation."""

    role: Role                                # speaker of this message
    content: str                              # message text
    name: str | None = None                   # optional participant name
    tool_call_id: str | None = None           # links a TOOL reply to its call
    tool_calls: list[ToolCall] | None = None  # calls issued by the assistant

    def to_dict(self) -> dict[str, Any]:
        """Serialize to the wire-format dict; optional fields only when set."""
        payload: dict[str, Any] = {"role": self.role.value, "content": self.content}
        if self.name:
            payload["name"] = self.name
        if self.tool_call_id:
            payload["tool_call_id"] = self.tool_call_id
        if self.tool_calls:
            payload["tool_calls"] = [
                {"id": call.id, "function": {"name": call.name, "arguments": call.arguments}}
                for call in self.tool_calls
            ]
        return payload
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ToolDefinition:
    """Declaration of a callable tool exposed to the model."""

    name: str                   # tool identifier
    description: str            # what the tool does, shown to the model
    parameters: dict[str, Any]  # JSON-schema of the arguments
    strict: bool = True         # enforce strict schema validation

    def to_dict(self) -> dict[str, Any]:
        """Serialize to the provider-neutral tool-spec dict."""
        return dict(
            name=self.name,
            description=self.description,
            parameters=self.parameters,
            strict=self.strict,
        )
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ToolCall:
    """A tool invocation requested by the model."""

    id: str                    # provider-assigned call id
    name: str                  # tool to invoke
    arguments: dict[str, Any]  # parsed call arguments
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ToolResult:
    """The outcome of executing one ToolCall, fed back to the model."""

    tool_call_id: str      # id of the ToolCall this answers
    content: str           # textual result (or error text)
    is_error: bool = False # True when the tool failed
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class LLMInput:
    """Normalized request payload accepted by every provider adapter."""

    messages: list[Message]                    # conversation so far
    model: str | None = None                   # override the default model
    temperature: float = 1.0
    max_tokens: int | None = None              # None = provider default
    tools: list[ToolDefinition] | None = None  # tools offered to the model
    stream: bool = False
    metadata: dict[str, Any] = field(default_factory=dict)  # provider extras

    def to_dict(self) -> dict[str, Any]:
        """Serialize; *metadata* entries are merged last and may override."""
        payload: dict[str, Any] = {
            "messages": [message.to_dict() for message in self.messages],
            "temperature": self.temperature,
            "stream": self.stream,
        }
        if self.model:
            payload["model"] = self.model
        if self.max_tokens is not None:
            payload["max_tokens"] = self.max_tokens
        if self.tools:
            payload["tools"] = [tool.to_dict() for tool in self.tools]
        return payload | self.metadata
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class LLMOutput:
    """Normalized completion returned by every provider adapter."""

    content: str                              # assistant text (may be empty)
    tool_calls: list[ToolCall] | None = None  # requested tool invocations
    model: str | None = None                  # model that produced this
    usage: dict[str, int] | None = None       # token accounting, if provided
    stop_reason: str | None = None            # why generation stopped
    metadata: dict[str, Any] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        """True when the model asked for at least one tool invocation."""
        return bool(self.tool_calls)

    def to_dict(self) -> dict[str, Any]:
        """Serialize; *metadata* entries are merged last and may override."""
        payload: dict[str, Any] = {"content": self.content}
        if self.tool_calls:
            payload["tool_calls"] = [
                {"id": call.id, "name": call.name, "arguments": call.arguments}
                for call in self.tool_calls
            ]
        # Optional scalar fields are emitted only when truthy, matching
        # the message serialization convention used elsewhere.
        for attr in ("model", "usage", "stop_reason"):
            value = getattr(self, attr)
            if value:
                payload[attr] = value
        return payload | self.metadata
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ModelInfo:
    """Static capability record for one model of one provider."""

    name: str
    provider: ProviderType
    supports_tools: bool = True
    supports_vision: bool = False
    max_tokens: int | None = None       # max generation length, if known
    context_window: int | None = None   # total context size, if known

    def to_dict(self) -> dict[str, Any]:
        """Serialize, flattening the provider enum to its string value."""
        return dict(
            name=self.name,
            provider=self.provider.value,
            supports_tools=self.supports_tools,
            supports_vision=self.supports_vision,
            max_tokens=self.max_tokens,
            context_window=self.context_window,
        )
|
||||
13
src/llm/prompt/__init__.py
Normal file
13
src/llm/prompt/__init__.py
Normal file
@@ -0,0 +1,13 @@
|
||||
"""Prompt module for prompt building and normalization."""
|
||||
|
||||
from llm.prompt.builder import PromptBuilder, adapt_messages_for_provider, get_provider_builder
|
||||
from llm.prompt.templates import TEMPLATES, get_template, get_template_or_default
|
||||
|
||||
__all__ = (
|
||||
"PromptBuilder",
|
||||
"TEMPLATES",
|
||||
"adapt_messages_for_provider",
|
||||
"get_provider_builder",
|
||||
"get_template",
|
||||
"get_template_or_default",
|
||||
)
|
||||
102
src/llm/prompt/builder.py
Normal file
102
src/llm/prompt/builder.py
Normal file
@@ -0,0 +1,102 @@
|
||||
"""Prompt builder for normalizing prompts across providers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
from llm.core.types import LLMInput, Message, Role, ToolDefinition
|
||||
from llm.providers.claude import ClaudeProvider
|
||||
from llm.providers.openai import OpenAIProvider
|
||||
from llm.providers.ollama import OllamaProvider
|
||||
|
||||
|
||||
@dataclass
class PromptConfig:
    """Configuration knobs consumed by PromptBuilder."""

    # Optional system text placed ahead of any caller-supplied system message.
    system_template: str | None = None
    # NOTE(review): not read by PromptBuilder.build in this file — confirm intended use.
    user_template: str | None = None
    # When True, tool descriptions are appended to the system message as text.
    include_tools_in_system: bool = True
    # Provider tool-call style; values used elsewhere in this file:
    # "native", "anthropic", "openai", "text".
    tool_format: str = "native"
|
||||
|
||||
|
||||
class PromptBuilder:
    """Normalizes a message list, merging configured system text and tool docs."""

    def __init__(self, config: PromptConfig | None = None) -> None:
        self.config = config or PromptConfig()

    def build(self, messages: list[Message], tools: list[ToolDefinition] | None = None) -> list[Message]:
        """Return messages with a single combined system message prepended when needed.

        An empty input yields an empty list. An existing leading system message is
        merged with the configured template and (optionally) the tool descriptions.
        """
        if not messages:
            return []

        parts: list[str] = []
        if self.config.system_template:
            parts.append(self.config.system_template)
        if tools and self.config.include_tools_in_system:
            parts.append(f"\n\n## Available Tools\n{self._format_tools(tools)}")

        normalized: list[Message] = []
        if messages[0].role == Role.SYSTEM:
            # Caller's system text takes precedence: it goes first in the merge.
            parts.insert(0, messages[0].content)
            normalized.append(Message(role=Role.SYSTEM, content="\n\n".join(parts)))
            normalized.extend(messages[1:])
        else:
            if parts:
                normalized.append(Message(role=Role.SYSTEM, content="\n\n".join(parts)))
            normalized.extend(messages)
        return normalized

    def _format_tools(self, tools: list[ToolDefinition]) -> str:
        """Render tool definitions as markdown-style sections."""
        chunks: list[str] = []
        for tool in tools:
            chunks.append(f"### {tool.name}")
            chunks.append(tool.description)
            if tool.parameters:
                chunks.append("Parameters:")
                chunks.append(self._format_parameters(tool.parameters))
        return "\n".join(chunks)

    def _format_parameters(self, params: dict[str, Any]) -> str:
        """Render a JSON-schema-like parameter dict as bullet lines.

        Falls back to str(params) when there is no "properties" key or it is empty.
        """
        if "properties" not in params:
            return str(params)
        required = params.get("required", [])
        bullets: list[str] = []
        for key, spec in params["properties"].items():
            mark = "(required)" if key in required else "(optional)"
            bullets.append(f"  - {key}: {spec.get('type', 'any')} {mark} - {spec.get('description', '')}")
        return "\n".join(bullets) if bullets else str(params)
|
||||
|
||||
|
||||
# Per-provider PromptConfig overrides, keyed by lowercase provider name.
# Providers with native tool-calling APIs keep tools out of the system prompt;
# Ollama gets tools rendered as plain text instead.
_PROVIDER_TEMPLATE_MAP: dict[str, dict[str, Any]] = {
    "claude": {
        "include_tools_in_system": False,
        "tool_format": "anthropic",
    },
    "openai": {
        "include_tools_in_system": False,
        "tool_format": "openai",
    },
    "ollama": {
        "include_tools_in_system": True,
        "tool_format": "text",
    },
}
|
||||
|
||||
|
||||
def get_provider_builder(provider_name: str) -> PromptBuilder:
    """Return a PromptBuilder preconfigured for *provider_name* (case-insensitive).

    Unknown names fall back to the default PromptConfig.
    """
    overrides = _PROVIDER_TEMPLATE_MAP.get(provider_name.lower(), {})
    return PromptBuilder(PromptConfig(**overrides))
|
||||
|
||||
|
||||
def adapt_messages_for_provider(
    messages: list[Message],
    provider: str,
    tools: list[ToolDefinition] | None = None,
) -> list[Message]:
    """Normalize *messages* using the named provider's default prompt configuration."""
    return get_provider_builder(provider).build(messages, tools)
|
||||
1
src/llm/prompt/templates/__init__.py
Normal file
1
src/llm/prompt/templates/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Templates module for provider-specific prompt templates
|
||||
14
src/llm/providers/__init__.py
Normal file
14
src/llm/providers/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
"""Provider adapters for multiple LLM backends."""
|
||||
|
||||
from llm.providers.claude import ClaudeProvider
|
||||
from llm.providers.openai import OpenAIProvider
|
||||
from llm.providers.ollama import OllamaProvider
|
||||
from llm.providers.resolver import get_provider, register_provider
|
||||
|
||||
__all__ = (
|
||||
"ClaudeProvider",
|
||||
"OpenAIProvider",
|
||||
"OllamaProvider",
|
||||
"get_provider",
|
||||
"register_provider",
|
||||
)
|
||||
105
src/llm/providers/claude.py
Normal file
105
src/llm/providers/claude.py
Normal file
@@ -0,0 +1,105 @@
|
||||
"""Claude provider adapter."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
from anthropic import Anthropic
|
||||
|
||||
from llm.core.interface import (
|
||||
AuthenticationError,
|
||||
ContextLengthError,
|
||||
LLMProvider,
|
||||
RateLimitError,
|
||||
)
|
||||
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall
|
||||
|
||||
|
||||
class ClaudeProvider(LLMProvider):
    """Provider adapter around the Anthropic Messages API."""

    provider_type = ProviderType.CLAUDE

    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        # Falls back to the ANTHROPIC_API_KEY environment variable when no key is given.
        self.client = Anthropic(api_key=api_key or os.environ.get("ANTHROPIC_API_KEY"), base_url=base_url)
        # Static catalog; not fetched from the API.
        self._models = [
            ModelInfo(
                name="claude-opus-4-5",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-sonnet-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-haiku-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=200000,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        """Send one Messages API request and map the reply to LLMOutput.

        Raises AuthenticationError / RateLimitError / ContextLengthError when the
        underlying error message matches; re-raises anything else unchanged.
        """
        try:
            params: dict[str, Any] = {
                "model": input.model or "claude-sonnet-4-7",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
            }
            # max_tokens is mandatory for the Anthropic API.
            params["max_tokens"] = input.max_tokens if input.max_tokens else 8192
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]

            response = self.client.messages.create(**params)

            # A response may interleave text and tool_use content blocks. The
            # previous code inspected only content[0]: it crashed on `.text`
            # when the first block was a tool_use, dropped tool calls that
            # followed a text block, and its getattr(block.input, "__dict__", {})
            # always produced {} because the SDK's `input` is a plain dict.
            text_parts: list[str] = []
            calls: list[ToolCall] = []
            for block in response.content or []:
                block_type = getattr(block, "type", None)
                if block_type == "text":
                    text_parts.append(block.text)
                elif block_type == "tool_use":
                    raw_args = getattr(block, "input", None) or {}
                    if not isinstance(raw_args, dict):
                        raw_args = dict(getattr(raw_args, "__dict__", {}))
                    calls.append(
                        ToolCall(
                            id=getattr(block, "id", ""),
                            name=getattr(block, "name", ""),
                            arguments=dict(raw_args),
                        )
                    )

            return LLMOutput(
                content="".join(text_parts),
                tool_calls=calls or None,
                model=response.model,
                usage={
                    "input_tokens": response.usage.input_tokens,
                    "output_tokens": response.usage.output_tokens,
                },
                stop_reason=response.stop_reason,
            )
        except Exception as e:
            msg = str(e)
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.CLAUDE) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.CLAUDE) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.CLAUDE) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the static model catalog."""
        return self._models.copy()

    def validate_config(self) -> bool:
        """True when the client has an API key; does not call the API."""
        return bool(self.client.api_key)

    def get_default_model(self) -> str:
        """Model used when LLMInput.model is unset."""
        return "claude-sonnet-4-7"
|
||||
112
src/llm/providers/ollama.py
Normal file
112
src/llm/providers/ollama.py
Normal file
@@ -0,0 +1,112 @@
|
||||
"""Ollama provider adapter for local models."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
from llm.core.interface import (
|
||||
AuthenticationError,
|
||||
ContextLengthError,
|
||||
LLMProvider,
|
||||
RateLimitError,
|
||||
)
|
||||
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall
|
||||
|
||||
|
||||
class OllamaProvider(LLMProvider):
    """Provider adapter that talks to a local Ollama server over its HTTP API."""

    provider_type = ProviderType.OLLAMA

    def __init__(
        self,
        base_url: str | None = None,
        default_model: str | None = None,
    ) -> None:
        # Environment fallbacks: OLLAMA_BASE_URL (local server) and OLLAMA_MODEL.
        self.base_url = base_url or os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
        self.default_model = default_model or os.environ.get("OLLAMA_MODEL", "llama3.2")
        # Static catalog; not queried from the server. NOTE(review): context
        # windows depend on the local model configuration — confirm.
        self._models = [
            ModelInfo(
                name="llama3.2",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="mistral",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=8192,
            ),
            ModelInfo(
                name="codellama",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=16384,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        """Send one non-streaming /api/chat request and map the reply to LLMOutput.

        Raises AuthenticationError / RateLimitError / ContextLengthError when the
        underlying error message matches; re-raises anything else unchanged.
        Note: input.tools is not forwarded — the catalog marks these models as
        supports_tools=False.
        """
        import urllib.request
        import json

        try:
            url = f"{self.base_url}/api/chat"
            model = input.model or self.default_model

            payload: dict[str, Any] = {
                "model": model,
                "messages": [msg.to_dict() for msg in input.messages],
                "stream": False,
            }
            # Only forward a non-default temperature via Ollama's "options" object.
            if input.temperature != 1.0:
                payload["options"] = {"temperature": input.temperature}

            data = json.dumps(payload).encode("utf-8")
            req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"})

            with urllib.request.urlopen(req, timeout=60) as response:
                result = json.loads(response.read().decode("utf-8"))

            content = result.get("message", {}).get("content", "")

            # Map any OpenAI-style tool_calls entries in the response message.
            tool_calls = None
            if result.get("message", {}).get("tool_calls"):
                tool_calls = [
                    ToolCall(
                        id=tc.get("id", ""),
                        name=tc.get("function", {}).get("name", ""),
                        arguments=tc.get("function", {}).get("arguments", {}),
                    )
                    for tc in result["message"]["tool_calls"]
                ]

            return LLMOutput(
                content=content,
                tool_calls=tool_calls,
                model=model,
                stop_reason=result.get("done_reason"),
            )
        except Exception as e:
            msg = str(e)
            # Connection failures are deliberately surfaced as AuthenticationError
            # here; NOTE(review): a dedicated connection error type may be clearer.
            if "401" in msg or "connection" in msg.lower():
                raise AuthenticationError(f"Ollama connection failed: {msg}", provider=ProviderType.OLLAMA) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OLLAMA) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OLLAMA) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the static model catalog."""
        return self._models.copy()

    def validate_config(self) -> bool:
        """True when a base URL is configured; does not probe the server."""
        return bool(self.base_url)

    def get_default_model(self) -> str:
        """Model used when LLMInput.model is unset."""
        return self.default_model
|
||||
114
src/llm/providers/openai.py
Normal file
114
src/llm/providers/openai.py
Normal file
@@ -0,0 +1,114 @@
|
||||
"""OpenAI provider adapter."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
from openai import OpenAI
|
||||
|
||||
from llm.core.interface import (
|
||||
AuthenticationError,
|
||||
ContextLengthError,
|
||||
LLMProvider,
|
||||
RateLimitError,
|
||||
)
|
||||
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall
|
||||
|
||||
|
||||
class OpenAIProvider(LLMProvider):
    """Provider adapter around the OpenAI chat completions API."""

    provider_type = ProviderType.OPENAI

    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        # Falls back to the OPENAI_API_KEY environment variable when no key is given.
        self.client = OpenAI(api_key=api_key or os.environ.get("OPENAI_API_KEY"), base_url=base_url)
        # Static catalog; not fetched from the API.
        self._models = [
            ModelInfo(
                name=model_name,
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=vision,
                max_tokens=4096,
                context_window=window,
            )
            for model_name, vision, window in (
                ("gpt-4o", True, 128000),
                ("gpt-4o-mini", True, 128000),
                ("gpt-4-turbo", True, 128000),
                ("gpt-3.5-turbo", False, 16385),
            )
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        """Send one chat completions request and map the reply to LLMOutput.

        Raises AuthenticationError / RateLimitError / ContextLengthError when the
        underlying error message matches; re-raises anything else unchanged.
        """
        try:
            request: dict[str, Any] = {
                "model": input.model or "gpt-4o-mini",
                "messages": [m.to_dict() for m in input.messages],
                "temperature": input.temperature,
            }
            if input.max_tokens:
                request["max_tokens"] = input.max_tokens
            if input.tools:
                request["tools"] = [t.to_dict() for t in input.tools]

            response = self.client.chat.completions.create(**request)
            choice = response.choices[0]
            message = choice.message

            calls = None
            if message.tool_calls:
                calls = []
                for tc in message.tool_calls:
                    # Arguments arrive as a JSON string; empty string means no args.
                    args = json.loads(tc.function.arguments) if tc.function.arguments else {}
                    calls.append(ToolCall(id=tc.id or "", name=tc.function.name, arguments=args))

            return LLMOutput(
                content=message.content or "",
                tool_calls=calls,
                model=response.model,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                },
                stop_reason=choice.finish_reason,
            )
        except Exception as e:
            msg = str(e)
            lowered = msg.lower()
            if "401" in msg or "authentication" in lowered:
                raise AuthenticationError(msg, provider=ProviderType.OPENAI) from e
            if "429" in msg or "rate_limit" in lowered:
                raise RateLimitError(msg, provider=ProviderType.OPENAI) from e
            if "context" in lowered and "length" in lowered:
                raise ContextLengthError(msg, provider=ProviderType.OPENAI) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        """Return a copy of the static model catalog."""
        return self._models.copy()

    def validate_config(self) -> bool:
        """True when the client has an API key; does not call the API."""
        return bool(self.client.api_key)

    def get_default_model(self) -> str:
        """Model used when LLMInput.model is unset."""
        return "gpt-4o-mini"
|
||||
39
src/llm/providers/resolver.py
Normal file
39
src/llm/providers/resolver.py
Normal file
@@ -0,0 +1,39 @@
|
||||
"""Provider factory and resolver."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
|
||||
from llm.core.interface import LLMProvider
|
||||
from llm.core.types import ProviderType
|
||||
from llm.providers.claude import ClaudeProvider
|
||||
from llm.providers.openai import OpenAIProvider
|
||||
from llm.providers.ollama import OllamaProvider
|
||||
|
||||
|
||||
# Registry of provider classes, keyed by ProviderType. Extended at runtime via
# register_provider(); consumed by get_provider().
_PROVIDER_MAP: dict[ProviderType, type[LLMProvider]] = {
    ProviderType.CLAUDE: ClaudeProvider,
    ProviderType.OPENAI: OpenAIProvider,
    ProviderType.OLLAMA: OllamaProvider,
}
|
||||
|
||||
|
||||
def get_provider(provider_type: ProviderType | str | None = None, **kwargs: str) -> LLMProvider:
    """Instantiate the provider for *provider_type*.

    Args:
        provider_type: A ProviderType, its string value, or None to read the
            LLM_PROVIDER environment variable (default "claude").
        **kwargs: Forwarded verbatim to the provider class constructor.

    Returns:
        A new LLMProvider instance.

    Raises:
        ValueError: For an unrecognized provider name or an unregistered type.
    """
    if provider_type is None:
        provider_type = os.environ.get("LLM_PROVIDER", "claude").lower()

    if isinstance(provider_type, str):
        try:
            provider_type = ProviderType(provider_type)
        except ValueError as e:
            # Chain the original error (B904) so the root cause stays visible.
            raise ValueError(
                f"Unknown provider type: {provider_type}. Valid types: {[p.value for p in ProviderType]}"
            ) from e

    provider_cls = _PROVIDER_MAP.get(provider_type)
    if not provider_cls:
        raise ValueError(f"No provider registered for type: {provider_type}")

    return provider_cls(**kwargs)
|
||||
|
||||
|
||||
def register_provider(provider_type: ProviderType, provider_cls: type[LLMProvider]) -> None:
    """Register (or replace) the provider class used for *provider_type*."""
    _PROVIDER_MAP.update({provider_type: provider_cls})
|
||||
9
src/llm/tools/__init__.py
Normal file
9
src/llm/tools/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""Tools module for tool/function calling abstraction."""
|
||||
|
||||
from llm.tools.executor import ReActAgent, ToolExecutor, ToolRegistry
|
||||
|
||||
__all__ = (
|
||||
"ReActAgent",
|
||||
"ToolExecutor",
|
||||
"ToolRegistry",
|
||||
)
|
||||
116
src/llm/tools/executor.py
Normal file
116
src/llm/tools/executor.py
Normal file
@@ -0,0 +1,116 @@
|
||||
"""Tool executor for handling tool calls from LLM responses."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Callable
|
||||
|
||||
from llm.core.interface import ToolExecutionError
|
||||
from llm.core.types import LLMInput, LLMOutput, Message, Role, ToolCall, ToolDefinition, ToolResult
|
||||
|
||||
|
||||
ToolFunc = Callable[..., Any]
|
||||
|
||||
|
||||
class ToolRegistry:
    """Stores tool callables alongside their ToolDefinition metadata, keyed by name."""

    def __init__(self) -> None:
        self._tools: dict[str, ToolFunc] = {}
        self._definitions: dict[str, ToolDefinition] = {}

    def register(self, definition: ToolDefinition, func: ToolFunc) -> None:
        """Register (or overwrite) *func* under its definition's name."""
        name = definition.name
        self._tools[name] = func
        self._definitions[name] = definition

    def get(self, name: str) -> ToolFunc | None:
        """Return the callable registered under *name*, or None."""
        return self._tools.get(name)

    def get_definition(self, name: str) -> ToolDefinition | None:
        """Return the ToolDefinition registered under *name*, or None."""
        return self._definitions.get(name)

    def list_tools(self) -> list[ToolDefinition]:
        """Return all registered definitions in insertion order."""
        return list(self._definitions.values())

    def has(self, name: str) -> bool:
        """True when a tool named *name* is registered."""
        return name in self._tools
|
||||
|
||||
|
||||
class ToolExecutor:
    """Runs ToolCalls against a ToolRegistry, turning failures into error results."""

    def __init__(self, registry: ToolRegistry | None = None) -> None:
        self.registry = registry or ToolRegistry()

    def execute(self, tool_call: ToolCall) -> ToolResult:
        """Execute one tool call.

        Never raises: an unknown tool or an exception from the tool callable is
        returned as a ToolResult with is_error=True.
        """
        func = self.registry.get(tool_call.name)
        if func is None:
            return ToolResult(
                tool_call_id=tool_call.id,
                content=f"Error: Tool '{tool_call.name}' not found",
                is_error=True,
            )

        try:
            raw = func(**tool_call.arguments)
            # Non-string results are stringified so content is always text.
            text = raw if isinstance(raw, str) else str(raw)
            return ToolResult(tool_call_id=tool_call.id, content=text)
        except Exception as e:
            return ToolResult(
                tool_call_id=tool_call.id,
                content=f"Error executing {tool_call.name}: {e}",
                is_error=True,
            )

    def execute_all(self, tool_calls: list[ToolCall]) -> list[ToolResult]:
        """Execute every call sequentially, preserving order."""
        return [self.execute(call) for call in tool_calls]
|
||||
|
||||
|
||||
class ReActAgent:
    """ReAct-style loop: generate, execute any tool calls, feed results back."""

    def __init__(
        self,
        provider: Any,
        executor: ToolExecutor,
        max_iterations: int = 10,
    ) -> None:
        self.provider = provider
        self.executor = executor
        self.max_iterations = max_iterations

    async def run(self, input: LLMInput) -> LLMOutput:
        """Drive the provider until it stops requesting tools or the budget runs out.

        Returns the provider's final output, or a sentinel LLMOutput with
        stop_reason "max_iterations" when the iteration budget is exhausted.
        """
        history = list(input.messages)
        tools = input.tools or []

        for _ in range(self.max_iterations):
            step_input = LLMInput(
                messages=history,
                model=input.model,
                temperature=input.temperature,
                max_tokens=input.max_tokens,
                tools=tools,
            )

            output = self.provider.generate(step_input)
            if not output.has_tool_calls:
                return output

            # Record the assistant turn (including its tool calls) before the results.
            history.append(
                Message(
                    role=Role.ASSISTANT,
                    content=output.content or "",
                    tool_calls=output.tool_calls,
                )
            )

            for tool_result in self.executor.execute_all(output.tool_calls):
                history.append(
                    Message(
                        role=Role.TOOL,
                        content=tool_result.content,
                        tool_call_id=tool_result.tool_call_id,
                    )
                )

        return LLMOutput(
            content="Max iterations reached",
            stop_reason="max_iterations",
        )
|
||||
0
tests/__init__.py
Normal file
0
tests/__init__.py
Normal file
4
tests/conftest.py
Normal file
4
tests/conftest.py
Normal file
@@ -0,0 +1,4 @@
|
||||
import sys
from pathlib import Path

# Make the src/ layout (the `llm` package) importable without installing the project.
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
|
||||
69
tests/test_builder.py
Normal file
69
tests/test_builder.py
Normal file
@@ -0,0 +1,69 @@
|
||||
import pytest
|
||||
from llm.core.types import LLMInput, Message, Role, ToolDefinition
|
||||
from llm.prompt import PromptBuilder, adapt_messages_for_provider
|
||||
from llm.prompt.builder import PromptConfig
|
||||
|
||||
|
||||
class TestPromptBuilder:
    """Tests for PromptBuilder.build."""

    def test_build_without_system(self):
        messages = [Message(role=Role.USER, content="Hello")]
        builder = PromptBuilder()
        result = builder.build(messages)

        assert len(result) == 1
        assert result[0].role == Role.USER

    def test_build_with_system(self):
        messages = [
            Message(role=Role.SYSTEM, content="You are helpful."),
            Message(role=Role.USER, content="Hello"),
        ]
        builder = PromptBuilder()
        result = builder.build(messages)

        assert len(result) == 2
        assert result[0].role == Role.SYSTEM

    def test_build_adds_system_from_config(self):
        # Bug fix: this test was defined twice; the first copy (silently
        # shadowed) passed system_template= directly to PromptBuilder, which
        # only accepts a PromptConfig. Keep the single working version.
        messages = [Message(role=Role.USER, content="Hello")]
        builder = PromptBuilder(config=PromptConfig(system_template="You are a pirate."))
        result = builder.build(messages)

        assert len(result) == 2
        assert "pirate" in result[0].content

    def test_build_with_tools(self):
        messages = [Message(role=Role.USER, content="Search for something")]
        tools = [
            ToolDefinition(name="search", description="Search the web", parameters={}),
        ]
        # Bug fix: PromptBuilder(include_tools_in_system=True) raised TypeError;
        # the flag lives on PromptConfig.
        builder = PromptBuilder(config=PromptConfig(include_tools_in_system=True))
        result = builder.build(messages, tools)

        assert len(result) == 2
        assert "search" in result[0].content
        assert "Available Tools" in result[0].content
|
||||
|
||||
|
||||
class TestAdaptMessagesForProvider:
    """adapt_messages_for_provider returns a usable list for every known provider."""

    def _adapt(self, provider_name):
        return adapt_messages_for_provider([Message(role=Role.USER, content="Hello")], provider_name)

    def test_adapt_for_claude(self):
        assert len(self._adapt("claude")) == 1

    def test_adapt_for_openai(self):
        assert len(self._adapt("openai")) == 1

    def test_adapt_for_ollama(self):
        assert len(self._adapt("ollama")) == 1
|
||||
86
tests/test_executor.py
Normal file
86
tests/test_executor.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import pytest
|
||||
from llm.core.types import ToolCall, ToolDefinition, ToolResult
|
||||
from llm.tools import ToolExecutor, ToolRegistry
|
||||
|
||||
|
||||
class TestToolRegistry:
    """Registration and lookup behavior of ToolRegistry."""

    def test_register_and_get(self):
        registry = ToolRegistry()

        def dummy_func() -> str:
            return "result"

        definition = ToolDefinition(
            name="dummy",
            description="A dummy tool",
            parameters={"type": "object"},
        )
        registry.register(definition, dummy_func)

        assert registry.has("dummy") is True
        assert registry.get("dummy") is dummy_func
        assert registry.get_definition("dummy") == definition

    def test_list_tools(self):
        registry = ToolRegistry()
        definition = ToolDefinition(name="test", description="Test", parameters={})
        registry.register(definition, lambda: None)

        listed = registry.list_tools()
        assert len(listed) == 1
        assert listed[0].name == "test"
|
||||
|
||||
|
||||
class TestToolExecutor:
    """Execution paths of ToolExecutor: success, unknown tool, batch."""

    def test_execute_success(self):
        registry = ToolRegistry()

        def search(query: str) -> str:
            return f"Results for: {query}"

        definition = ToolDefinition(
            name="search",
            description="Search",
            parameters={"type": "object", "properties": {"query": {"type": "string"}}},
        )
        registry.register(definition, search)

        outcome = ToolExecutor(registry).execute(
            ToolCall(id="1", name="search", arguments={"query": "test"})
        )

        assert outcome.tool_call_id == "1"
        assert outcome.content == "Results for: test"
        assert outcome.is_error is False

    def test_execute_unknown_tool(self):
        executor = ToolExecutor(ToolRegistry())

        outcome = executor.execute(ToolCall(id="1", name="unknown", arguments={}))

        assert outcome.is_error is True
        assert "not found" in outcome.content

    def test_execute_all(self):
        registry = ToolRegistry()

        def tool1() -> str:
            return "result1"

        def tool2() -> str:
            return "result2"

        registry.register(ToolDefinition(name="t1", description="", parameters={}), tool1)
        registry.register(ToolDefinition(name="t2", description="", parameters={}), tool2)

        calls = [
            ToolCall(id="1", name="t1", arguments={}),
            ToolCall(id="2", name="t2", arguments={}),
        ]
        outcomes = ToolExecutor(registry).execute_all(calls)

        assert len(outcomes) == 2
        assert outcomes[0].content == "result1"
        assert outcomes[1].content == "result2"
|
||||
28
tests/test_resolver.py
Normal file
28
tests/test_resolver.py
Normal file
@@ -0,0 +1,28 @@
|
||||
import pytest
|
||||
from llm.core.types import ProviderType
|
||||
from llm.providers import ClaudeProvider, OpenAIProvider, OllamaProvider, get_provider
|
||||
|
||||
|
||||
class TestGetProvider:
    """Factory behavior of get_provider for each supported backend."""

    def test_get_claude_provider(self):
        instance = get_provider("claude")
        assert isinstance(instance, ClaudeProvider)
        assert instance.provider_type == ProviderType.CLAUDE

    def test_get_openai_provider(self):
        instance = get_provider("openai")
        assert isinstance(instance, OpenAIProvider)
        assert instance.provider_type == ProviderType.OPENAI

    def test_get_ollama_provider(self):
        instance = get_provider("ollama")
        assert isinstance(instance, OllamaProvider)
        assert instance.provider_type == ProviderType.OLLAMA

    def test_get_provider_by_enum(self):
        assert isinstance(get_provider(ProviderType.CLAUDE), ClaudeProvider)

    def test_invalid_provider_raises(self):
        with pytest.raises(ValueError, match="Unknown provider type"):
            get_provider("invalid")
|
||||
117
tests/test_types.py
Normal file
117
tests/test_types.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import pytest
|
||||
from llm.core.types import (
|
||||
LLMInput,
|
||||
LLMOutput,
|
||||
Message,
|
||||
ModelInfo,
|
||||
ProviderType,
|
||||
Role,
|
||||
ToolCall,
|
||||
ToolDefinition,
|
||||
ToolResult,
|
||||
)
|
||||
|
||||
|
||||
class TestRole:
    """Role enum string values."""

    def test_role_values(self):
        expected = {
            Role.SYSTEM: "system",
            Role.USER: "user",
            Role.ASSISTANT: "assistant",
            Role.TOOL: "tool",
        }
        for role, value in expected.items():
            assert role.value == value
|
||||
|
||||
|
||||
class TestProviderType:
    """ProviderType enum string values."""

    def test_provider_values(self):
        expected = {
            ProviderType.CLAUDE: "claude",
            ProviderType.OPENAI: "openai",
            ProviderType.OLLAMA: "ollama",
        }
        for provider, value in expected.items():
            assert provider.value == value
|
||||
|
||||
|
||||
class TestMessage:
    """Construction and serialization of Message."""

    def test_create_message(self):
        message = Message(role=Role.USER, content="Hello")
        assert message.role == Role.USER
        assert message.content == "Hello"
        assert message.name is None
        assert message.tool_call_id is None

    def test_message_to_dict(self):
        payload = Message(role=Role.USER, content="Hello", name="test").to_dict()
        assert payload["role"] == "user"
        assert payload["content"] == "Hello"
        assert payload["name"] == "test"
|
||||
|
||||
|
||||
class TestToolDefinition:
    """Construction and serialization of ToolDefinition."""

    def test_create_tool(self):
        definition = ToolDefinition(
            name="search",
            description="Search the web",
            parameters={"type": "object", "properties": {}},
        )
        assert definition.name == "search"
        assert definition.strict is True

    def test_tool_to_dict(self):
        payload = ToolDefinition(
            name="search",
            description="Search",
            parameters={"type": "object"},
        ).to_dict()
        assert payload["name"] == "search"
        assert payload["strict"] is True
|
||||
|
||||
|
||||
class TestToolCall:
    """Construction of ToolCall."""

    def test_create_tool_call(self):
        call = ToolCall(id="1", name="search", arguments={"query": "test"})
        assert call.id == "1"
        assert call.name == "search"
        assert call.arguments == {"query": "test"}
|
||||
|
||||
|
||||
class TestToolResult:
    """Construction of ToolResult."""

    def test_create_tool_result(self):
        outcome = ToolResult(tool_call_id="1", content="result")
        assert outcome.tool_call_id == "1"
        assert outcome.is_error is False
|
||||
|
||||
|
||||
class TestLLMInput:
    """Construction and serialization of LLMInput."""

    def test_create_input(self):
        request = LLMInput(
            messages=[Message(role=Role.USER, content="Hello")],
            temperature=0.7,
        )
        assert len(request.messages) == 1
        assert request.temperature == 0.7

    def test_input_to_dict(self):
        request = LLMInput(messages=[Message(role=Role.USER, content="Hello")])
        payload = request.to_dict()
        assert "messages" in payload
        assert payload["temperature"] == 1.0
|
||||
|
||||
|
||||
class TestLLMOutput:
    """Construction of LLMOutput and the has_tool_calls flag."""

    def test_create_output(self):
        reply = LLMOutput(content="Hello!")
        assert reply.content == "Hello!"
        assert reply.has_tool_calls is False

    def test_output_with_tool_calls(self):
        call = ToolCall(id="1", name="search", arguments={})
        reply = LLMOutput(content="", tool_calls=[call])
        assert reply.has_tool_calls is True
|
||||
|
||||
|
||||
class TestModelInfo:
    """Default field values of ModelInfo."""

    def test_create_model_info(self):
        info = ModelInfo(name="gpt-4", provider=ProviderType.OPENAI)
        assert info.name == "gpt-4"
        assert info.supports_tools is True
        assert info.supports_vision is False
|
||||
Reference in New Issue
Block a user