From f53a89ff88275b15da2d0480595891aa0c6c8233 Mon Sep 17 00:00:00 2001 From: Anish Date: Sun, 12 Apr 2026 12:16:49 +0530 Subject: [PATCH] GUI Created, Dark Mode Created, npm script added, styling changed, Decoupled from Claude made more open source. --- .opencode/package-lock.json | 3 + .opencode/package.json | 5 +- README.md | 18 ++++ ecc2/Cargo.lock | 10 ++ ecc2/Cargo.toml | 4 +- pyproject.toml | 84 +++++++++++++++ src/llm/__init__.py | 33 ++++++ src/llm/__main__.py | 7 ++ src/llm/cli/__init__.py | 0 src/llm/cli/selector.py | 154 +++++++++++++++++++++++++++ src/llm/core/__init__.py | 1 + src/llm/core/interface.py | 60 +++++++++++ src/llm/core/types.py | 140 ++++++++++++++++++++++++ src/llm/prompt/__init__.py | 13 +++ src/llm/prompt/builder.py | 101 ++++++++++++++++++ src/llm/prompt/templates/__init__.py | 1 + src/llm/providers/__init__.py | 14 +++ src/llm/providers/claude.py | 103 ++++++++++++++++++ src/llm/providers/ollama.py | 112 +++++++++++++++++++ src/llm/providers/openai.py | 113 ++++++++++++++++++++ src/llm/providers/resolver.py | 39 +++++++ src/llm/tools/__init__.py | 9 ++ src/llm/tools/executor.py | 108 +++++++++++++++++++ tests/__init__.py | 0 tests/conftest.py | 4 + tests/test_builder.py | 61 +++++++++++ tests/test_executor.py | 86 +++++++++++++++ tests/test_resolver.py | 28 +++++ tests/test_types.py | 117 ++++++++++++++++++++ 29 files changed, 1425 insertions(+), 3 deletions(-) create mode 100644 pyproject.toml create mode 100644 src/llm/__init__.py create mode 100644 src/llm/__main__.py create mode 100644 src/llm/cli/__init__.py create mode 100644 src/llm/cli/selector.py create mode 100644 src/llm/core/__init__.py create mode 100644 src/llm/core/interface.py create mode 100644 src/llm/core/types.py create mode 100644 src/llm/prompt/__init__.py create mode 100644 src/llm/prompt/builder.py create mode 100644 src/llm/prompt/templates/__init__.py create mode 100644 src/llm/providers/__init__.py create mode 100644 src/llm/providers/claude.py create mode 
100644 src/llm/providers/ollama.py create mode 100644 src/llm/providers/openai.py create mode 100644 src/llm/providers/resolver.py create mode 100644 src/llm/tools/__init__.py create mode 100644 src/llm/tools/executor.py create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/test_builder.py create mode 100644 tests/test_executor.py create mode 100644 tests/test_resolver.py create mode 100644 tests/test_types.py diff --git a/.opencode/package-lock.json b/.opencode/package-lock.json index 7c6c07c5..b8c43897 100644 --- a/.opencode/package-lock.json +++ b/.opencode/package-lock.json @@ -8,6 +8,9 @@ "name": "ecc-universal", "version": "1.10.0", "license": "MIT", + "dependencies": { + "@opencode-ai/plugin": "1.3.17" + }, "devDependencies": { "@opencode-ai/plugin": "^1.0.0", "@types/node": "^20.0.0", diff --git a/.opencode/package.json b/.opencode/package.json index 92736bcd..fb691006 100644 --- a/.opencode/package.json +++ b/.opencode/package.json @@ -66,5 +66,8 @@ }, "engines": { "node": ">=18.0.0" + }, + "dependencies": { + "@opencode-ai/plugin": "1.4.3" } -} +} \ No newline at end of file diff --git a/README.md b/README.md index 45fc6568..3135b209 100644 --- a/README.md +++ b/README.md @@ -84,6 +84,7 @@ This repo is the raw code only. The guides explain everything. ### v1.10.0 — Surface Refresh, Operator Workflows, and ECC 2.0 Alpha (Apr 2026) +- **Dashboard GUI** — New Tkinter-based desktop application (`ecc_dashboard.py` or `npm run dashboard`) with dark/light theme toggle, font customization, and project logo in header and taskbar. - **Public surface synced to the live repo** — metadata, catalog counts, plugin manifests, and install-facing docs now match the actual OSS surface: 38 agents, 156 skills, and 72 legacy command shims. 
- **Operator and outbound workflow expansion** — `brand-voice`, `social-graph-ranker`, `connections-optimizer`, `customer-billing-ops`, `ecc-tools-cost-audit`, `google-workspace-ops`, `project-flow-ops`, and `workspace-surface-audit` round out the operator lane. - **Media and launch tooling** — `manim-video`, `remotion-video-creation`, and upgraded social publishing surfaces make technical explainers and launch content part of the same system. @@ -240,6 +241,23 @@ For manual install instructions see the README in the `rules/` folder. When copy **That's it!** You now have access to 47 agents, 181 skills, and 79 legacy command shims. +### Dashboard GUI + +Launch the desktop dashboard to visually explore ECC components: + +```bash +npm run dashboard +# or +python3 ./ecc_dashboard.py +``` + +**Features:** +- Tabbed interface: Agents, Skills, Commands, Rules, Settings +- Dark/Light theme toggle +- Font customization (family & size) +- Project logo in header and taskbar +- Search and filter across all components + ### Multi-model commands require additional setup > WARNING: `multi-*` commands are **not** covered by the base plugin/rules install above. 
diff --git a/ecc2/Cargo.lock b/ecc2/Cargo.lock index ff240c32..9d0e631f 100644 --- a/ecc2/Cargo.lock +++ b/ecc2/Cargo.lock @@ -1286,6 +1286,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.6.0+3.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e8cbfd3a4a8c8f089147fd7aaa33cf8c7450c4d09f8f80698a0cf093abeff4" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.112" @@ -1294,6 +1303,7 @@ checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] diff --git a/ecc2/Cargo.toml b/ecc2/Cargo.toml index ea8d9733..9ef3aee3 100644 --- a/ecc2/Cargo.toml +++ b/ecc2/Cargo.toml @@ -18,8 +18,8 @@ tokio = { version = "1", features = ["full"] } # State store rusqlite = { version = "0.32", features = ["bundled"] } -# Git integration -git2 = "0.20" +# Git integration - use vendored openssl to avoid system dependency +git2 = { version = "0.20", features = ["ssh", "vendored-openssl"] } # Serialization serde = { version = "1", features = ["derive"] } diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..34778aef --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,84 @@ +[project] +name = "llm-abstraction" +version = "0.1.0" +description = "Provider-agnostic LLM abstraction layer" +readme = "README.md" +requires-python = ">=3.11" +license = {text = "MIT"} +authors = [ + {name = "Affaan Mustafa", email = "affaan@example.com"} +] +keywords = ["llm", "openai", "anthropic", "ollama", "ai"] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", +] + 
+dependencies = [ + "anthropic>=0.25.0", + "openai>=1.30.0", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0", + "pytest-asyncio>=0.23", + "pytest-cov>=4.1", + "ruff>=0.4", + "mypy>=1.10", + # (removed duplicate "ruff>=0.4" entry) +] +test = [ + "pytest>=8.0", + "pytest-asyncio>=0.23", + "pytest-cov>=4.1", + "pytest-mock>=3.12", +] + +[project.urls] +Homepage = "https://github.com/affaan-m/everything-claude-code" +Repository = "https://github.com/affaan-m/everything-claude-code" + +[project.scripts] +llm-select = "llm.cli.selector:main" + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/llm"] + +[tool.pytest.ini_options] +testpaths = ["tests"] +asyncio_mode = "auto" +filterwarnings = ["ignore::DeprecationWarning"] + +[tool.coverage.run] +source = ["src/llm"] +branch = true + +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", + "if TYPE_CHECKING:", + "raise NotImplementedError", +] + +[tool.ruff] +src = ["src"] +target-version = "py311" + +[tool.ruff.lint] +select = ["E", "F", "I", "N", "W", "UP"] +ignore = ["E501"] + +[tool.mypy] +python_version = "3.11" +mypy_path = ["src"] +warn_return_any = true +warn_unused_ignores = true diff --git a/src/llm/__init__.py b/src/llm/__init__.py new file mode 100644 index 00000000..3b25554b --- /dev/null +++ b/src/llm/__init__.py @@ -0,0 +1,33 @@ +""" +LLM Abstraction Layer + +Provider-agnostic interface for multiple LLM backends. 
+""" + +from llm.core.interface import LLMProvider +from llm.core.types import LLMInput, LLMOutput, Message, ToolCall, ToolDefinition, ToolResult +from llm.providers import get_provider +from llm.tools import ToolExecutor, ToolRegistry +from llm.cli.selector import interactive_select + +__version__ = "0.1.0" + +__all__ = [ + "LLMProvider", + "LLMInput", + "LLMOutput", + "Message", + "get_provider", + "ToolCall", + "ToolDefinition", + "ToolResult", + "ToolExecutor", + "ToolRegistry", + "interactive_select", +] + + +def gui() -> None: + from llm.gui.selector import main + main() + diff --git a/src/llm/__main__.py b/src/llm/__main__.py new file mode 100644 index 00000000..370f7ed3 --- /dev/null +++ b/src/llm/__main__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 +"""Entry point for llm CLI.""" + +from llm.cli.selector import main + +if __name__ == "__main__": + main() diff --git a/src/llm/cli/__init__.py b/src/llm/cli/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/llm/cli/selector.py b/src/llm/cli/selector.py new file mode 100644 index 00000000..4419b2bc --- /dev/null +++ b/src/llm/cli/selector.py @@ -0,0 +1,154 @@ + + +from __future__ import annotations + +import os +import sys +from enum import Enum + + +class Color(str, Enum): + RESET = "\033[0m" + BOLD = "\033[1m" + GREEN = "\033[92m" + YELLOW = "\033[93m" + BLUE = "\033[94m" + CYAN = "\033[96m" + + +def print_banner() -> None: + banner = f"""{Color.CYAN} +╔═══════════════════════════════════════════╗ +║ LLM Provider Selector ║ +║ Provider-agnostic AI interactions ║ +╚═══════════════════════════════════════════╝{Color.RESET}""" + print(banner) + + +def print_providers(providers: list[tuple[str, str]]) -> None: + print(f"\n{Color.BOLD}Available Providers:{Color.RESET}\n") + for i, (name, desc) in enumerate(providers, 1): + print(f" {Color.GREEN}{i}{Color.RESET}. 
{Color.BOLD}{name}{Color.RESET} - {desc}") + + +def select_provider(providers: list[tuple[str, str]]) -> str | None: + if not providers: + print("No providers available.") + return None + + print_providers(providers) + + while True: + try: + choice = input(f"\n{Color.YELLOW}Select provider (1-{len(providers)}): {Color.RESET}").strip() + if not choice: + return None + idx = int(choice) - 1 + if 0 <= idx < len(providers): + return providers[idx][0] + print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}") + except ValueError: + print(f"{Color.YELLOW}Please enter a number.{Color.RESET}") + + +def select_model(models: list[tuple[str, str]]) -> str | None: + if not models: + print("No models available.") + return None + + print(f"\n{Color.BOLD}Available Models:{Color.RESET}\n") + for i, (name, desc) in enumerate(models, 1): + print(f" {Color.GREEN}{i}{Color.RESET}. {Color.BOLD}{name}{Color.RESET} - {desc}") + + while True: + try: + choice = input(f"\n{Color.YELLOW}Select model (1-{len(models)}): {Color.RESET}").strip() + if not choice: + return None + idx = int(choice) - 1 + if 0 <= idx < len(models): + return models[idx][0] + print(f"{Color.YELLOW}Invalid selection. 
Try again.{Color.RESET}") + except ValueError: + print(f"{Color.YELLOW}Please enter a number.{Color.RESET}") + + +def save_config(provider: str, model: str, persist: bool = False) -> None: + config = f"LLM_PROVIDER={provider}\nLLM_MODEL={model}\n" + env_file = ".llm.env" + + with open(env_file, "w") as f: + f.write(config) + + print(f"\n{Color.GREEN}✓{Color.RESET} Config saved to {Color.CYAN}{env_file}{Color.RESET}") + + if persist: + os.environ["LLM_PROVIDER"] = provider + os.environ["LLM_MODEL"] = model + print(f"{Color.GREEN}✓{Color.RESET} Config loaded to current session") + + +def interactive_select( + providers: list[tuple[str, str]] | None = None, + models_per_provider: dict[str, list[tuple[str, str]]] | None = None, + persist: bool = False, +) -> tuple[str, str] | None: + print_banner() + + if providers is None: + providers = [ + ("claude", "Anthropic Claude ( Sonnet, Opus, Haiku)"), + ("openai", "OpenAI GPT (4o, 4o-mini, 3.5-turbo)"), + ("ollama", "Local Ollama models"), + ] + + if models_per_provider is None: + models_per_provider = { + "claude": [ + ("claude-opus-4-5", "Claude Opus 4.5 - Most capable"), + ("claude-sonnet-4-7", "Claude Sonnet 4.7 - Balanced"), + ("claude-haiku-4-7", "Claude Haiku 4.7 - Fast"), + ], + "openai": [ + ("gpt-4o", "GPT-4o - Most capable"), + ("gpt-4o-mini", "GPT-4o-mini - Fast & affordable"), + ("gpt-4-turbo", "GPT-4 Turbo - Legacy powerful"), + ("gpt-3.5-turbo", "GPT-3.5 - Legacy fast"), + ], + "ollama": [ + ("llama3.2", "Llama 3.2 - General purpose"), + ("mistral", "Mistral - Fast & efficient"), + ("codellama", "CodeLlama - Code specialized"), + ], + } + + provider = select_provider(providers) + if not provider: + return None + + models = models_per_provider.get(provider, []) + model = select_model(models) + if not model: + return None + + print(f"\n{Color.GREEN}Selected: {Color.BOLD}{provider}{Color.RESET} / {Color.BOLD}{model}{Color.RESET}") + + save_config(provider, model, persist) + + return (provider, model) + + +def 
main() -> None: + result = interactive_select(persist=True) + + if result: + print(f"\n{Color.GREEN}Ready to use!{Color.RESET}") + print(f" export LLM_PROVIDER={result[0]}") + print(f" export LLM_MODEL={result[1]}") + else: + print("\nSelection cancelled.") + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/src/llm/core/__init__.py b/src/llm/core/__init__.py new file mode 100644 index 00000000..a6fe6cc1 --- /dev/null +++ b/src/llm/core/__init__.py @@ -0,0 +1 @@ +"""Core module for LLM abstraction layer.""" diff --git a/src/llm/core/interface.py b/src/llm/core/interface.py new file mode 100644 index 00000000..ea1d9114 --- /dev/null +++ b/src/llm/core/interface.py @@ -0,0 +1,60 @@ +"""LLM Provider interface definition.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any + +from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType + + +class LLMProvider(ABC): + provider_type: ProviderType + + @abstractmethod + def generate(self, input: LLMInput) -> LLMOutput: ... + + @abstractmethod + def list_models(self) -> list[ModelInfo]: ... + + @abstractmethod + def validate_config(self) -> bool: ... + + def supports_tools(self) -> bool: + return True + + def supports_vision(self) -> bool: + return False + + def get_default_model(self) -> str: + raise NotImplementedError(f"{self.__class__.__name__} must implement get_default_model") + + +class LLMError(Exception): + def __init__( + self, + message: str, + provider: ProviderType | None = None, + code: str | None = None, + details: dict[str, Any] | None = None, + ) -> None: + super().__init__(message) + self.message = message + self.provider = provider + self.code = code + self.details = details or {} + + +class AuthenticationError(LLMError): ... + + +class RateLimitError(LLMError): ... + + +class ContextLengthError(LLMError): ... + + +class ModelNotFoundError(LLMError): ... + + +class ToolExecutionError(LLMError): ... 
diff --git a/src/llm/core/types.py b/src/llm/core/types.py new file mode 100644 index 00000000..902fd28a --- /dev/null +++ b/src/llm/core/types.py @@ -0,0 +1,140 @@ +"""Core type definitions for LLM abstraction layer.""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any + + +class Role(str, Enum): + SYSTEM = "system" + USER = "user" + ASSISTANT = "assistant" + TOOL = "tool" + + +class ProviderType(str, Enum): + CLAUDE = "claude" + OPENAI = "openai" + OLLAMA = "ollama" + + +@dataclass(frozen=True) +class Message: + role: Role + content: str + name: str | None = None + tool_call_id: str | None = None + + def to_dict(self) -> dict[str, Any]: + result: dict[str, Any] = {"role": self.role.value, "content": self.content} + if self.name: + result["name"] = self.name + if self.tool_call_id: + result["tool_call_id"] = self.tool_call_id + return result + + +@dataclass(frozen=True) +class ToolDefinition: + name: str + description: str + parameters: dict[str, Any] + strict: bool = True + + def to_dict(self) -> dict[str, Any]: + return { + "name": self.name, + "description": self.description, + "parameters": self.parameters, + "strict": self.strict, + } + + +@dataclass(frozen=True) +class ToolCall: + id: str + name: str + arguments: dict[str, Any] + + +@dataclass(frozen=True) +class ToolResult: + tool_call_id: str + content: str + is_error: bool = False + + +@dataclass(frozen=True) +class LLMInput: + messages: list[Message] + model: str | None = None + temperature: float = 1.0 + max_tokens: int | None = None + tools: list[ToolDefinition] | None = None + stream: bool = False + metadata: dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> dict[str, Any]: + result: dict[str, Any] = { + "messages": [msg.to_dict() for msg in self.messages], + "temperature": self.temperature, + "stream": self.stream, + } + if self.model: + result["model"] = self.model + if self.max_tokens is not None: + 
result["max_tokens"] = self.max_tokens + if self.tools: + result["tools"] = [tool.to_dict() for tool in self.tools] + return result | self.metadata + + +@dataclass(frozen=True) +class LLMOutput: + content: str + tool_calls: list[ToolCall] | None = None + model: str | None = None + usage: dict[str, int] | None = None + stop_reason: str | None = None + metadata: dict[str, Any] = field(default_factory=dict) + + @property + def has_tool_calls(self) -> bool: + return bool(self.tool_calls) + + def to_dict(self) -> dict[str, Any]: + result: dict[str, Any] = {"content": self.content} + if self.tool_calls: + result["tool_calls"] = [ + {"id": tc.id, "name": tc.name, "arguments": tc.arguments} + for tc in self.tool_calls + ] + if self.model: + result["model"] = self.model + if self.usage: + result["usage"] = self.usage + if self.stop_reason: + result["stop_reason"] = self.stop_reason + return result | self.metadata + + +@dataclass(frozen=True) +class ModelInfo: + name: str + provider: ProviderType + supports_tools: bool = True + supports_vision: bool = False + max_tokens: int | None = None + context_window: int | None = None + + def to_dict(self) -> dict[str, Any]: + return { + "name": self.name, + "provider": self.provider.value, + "supports_tools": self.supports_tools, + "supports_vision": self.supports_vision, + "max_tokens": self.max_tokens, + "context_window": self.context_window, + } diff --git a/src/llm/prompt/__init__.py b/src/llm/prompt/__init__.py new file mode 100644 index 00000000..6ae8f90a --- /dev/null +++ b/src/llm/prompt/__init__.py @@ -0,0 +1,13 @@ +"""Prompt module for prompt building and normalization.""" + +from llm.prompt.builder import PromptBuilder, adapt_messages_for_provider, get_provider_builder +from llm.prompt.templates import TEMPLATES, get_template, get_template_or_default + +__all__ = [ + "PromptBuilder", + "adapt_messages_for_provider", + "get_provider_builder", + "TEMPLATES", + "get_template", + "get_template_or_default", +] diff --git 
a/src/llm/prompt/builder.py b/src/llm/prompt/builder.py new file mode 100644 index 00000000..4d588e27 --- /dev/null +++ b/src/llm/prompt/builder.py @@ -0,0 +1,101 @@ +"""Prompt builder for normalizing prompts across providers.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any + +from llm.core.types import LLMInput, Message, Role, ToolDefinition +from llm.providers.claude import ClaudeProvider +from llm.providers.openai import OpenAIProvider +from llm.providers.ollama import OllamaProvider + + +@dataclass +class PromptConfig: + system_template: str | None = None + user_template: str | None = None + include_tools_in_system: bool = True + tool_format: str = "native" + + +class PromptBuilder: + def __init__(self, config: PromptConfig | None = None) -> None: + self.config = config or PromptConfig() + + def build(self, messages: list[Message], tools: list[ToolDefinition] | None = None) -> list[Message]: + if not messages: + return [] + + result: list[Message] = [] + system_parts: list[str] = [] + + if self.config.system_template: + system_parts.append(self.config.system_template) + + if tools and self.config.include_tools_in_system: + tools_desc = self._format_tools(tools) + system_parts.append(f"\n\n## Available Tools\n{tools_desc}") + + if messages[0].role == Role.SYSTEM: + system_parts.insert(0, messages[0].content) + result.extend(messages[1:]) + else: + if system_parts: + result.insert(0, Message(role=Role.SYSTEM, content="\n\n".join(system_parts))) + result.extend(messages) + + return result + + def _format_tools(self, tools: list[ToolDefinition]) -> str: + lines = [] + for tool in tools: + lines.append(f"### {tool.name}") + lines.append(tool.description) + if tool.parameters: + lines.append("Parameters:") + lines.append(self._format_parameters(tool.parameters)) + return "\n".join(lines) + + def _format_parameters(self, params: dict[str, Any]) -> str: + if "properties" not in params: + return str(params) + lines = 
[] + required = params.get("required", []) + for name, spec in params["properties"].items(): + prop_type = spec.get("type", "any") + desc = spec.get("description", "") + required_mark = "(required)" if name in required else "(optional)" + lines.append(f" - {name}: {prop_type} {required_mark} - {desc}") + return "\n".join(lines) if lines else str(params) + + +_PROVIDER_TEMPLATE_MAP: dict[str, dict[str, Any]] = { + "claude": { + "include_tools_in_system": False, + "tool_format": "anthropic", + }, + "openai": { + "include_tools_in_system": False, + "tool_format": "openai", + }, + "ollama": { + "include_tools_in_system": True, + "tool_format": "text", + }, +} + + +def get_provider_builder(provider_name: str) -> PromptBuilder: + config_dict = _PROVIDER_TEMPLATE_MAP.get(provider_name.lower(), {}) + config = PromptConfig(**config_dict) + return PromptBuilder(config) + + +def adapt_messages_for_provider( + messages: list[Message], + provider: str, + tools: list[ToolDefinition] | None = None, +) -> list[Message]: + builder = get_provider_builder(provider) + return builder.build(messages, tools) diff --git a/src/llm/prompt/templates/__init__.py b/src/llm/prompt/templates/__init__.py new file mode 100644 index 00000000..a69aa75b --- /dev/null +++ b/src/llm/prompt/templates/__init__.py @@ -0,0 +1 @@ +# Templates module for provider-specific prompt templates diff --git a/src/llm/providers/__init__.py b/src/llm/providers/__init__.py new file mode 100644 index 00000000..6194c3a2 --- /dev/null +++ b/src/llm/providers/__init__.py @@ -0,0 +1,14 @@ +"""Provider adapters for multiple LLM backends.""" + +from llm.providers.claude import ClaudeProvider +from llm.providers.openai import OpenAIProvider +from llm.providers.ollama import OllamaProvider +from llm.providers.resolver import get_provider, register_provider + +__all__ = [ + "ClaudeProvider", + "OpenAIProvider", + "OllamaProvider", + "get_provider", + "register_provider", +] diff --git a/src/llm/providers/claude.py 
b/src/llm/providers/claude.py new file mode 100644 index 00000000..cb41ed4b --- /dev/null +++ b/src/llm/providers/claude.py @@ -0,0 +1,103 @@ +"""Claude provider adapter.""" + +from __future__ import annotations + +import os +from typing import Any + +from anthropic import Anthropic + +from llm.core.interface import ( + AuthenticationError, + ContextLengthError, + LLMProvider, + RateLimitError, +) +from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall + + +class ClaudeProvider(LLMProvider): + provider_type = ProviderType.CLAUDE + + def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None: + self.client = Anthropic(api_key=api_key or os.environ.get("ANTHROPIC_API_KEY"), base_url=base_url) + self._models = [ + ModelInfo( + name="claude-opus-4-5", + provider=ProviderType.CLAUDE, + supports_tools=True, + supports_vision=True, + max_tokens=8192, + context_window=200000, + ), + ModelInfo( + name="claude-sonnet-4-7", + provider=ProviderType.CLAUDE, + supports_tools=True, + supports_vision=True, + max_tokens=8192, + context_window=200000, + ), + ModelInfo( + name="claude-haiku-4-7", + provider=ProviderType.CLAUDE, + supports_tools=True, + supports_vision=False, + max_tokens=4096, + context_window=200000, + ), + ] + + def generate(self, input: LLMInput) -> LLMOutput: + try: + params: dict[str, Any] = { + "model": input.model or "claude-sonnet-4-7", + "messages": [msg.to_dict() for msg in input.messages], + "temperature": input.temperature, + } + if input.max_tokens: + params["max_tokens"] = input.max_tokens + if input.tools: + params["tools"] = [tool.to_dict() for tool in input.tools] + + response = self.client.messages.create(**params) + + tool_calls = None + if response.content and hasattr(response.content[0], "type"): + if response.content[0].type == "tool_use": + tool_calls = [ + ToolCall( + id=getattr(response.content[0], "id", ""), + name=getattr(response.content[0], "name", ""), + 
arguments=dict(response.content[0].input or {}),  # Anthropic tool_use .input is already a dict; getattr(..., "__dict__") always returned {} + ) + ] + + return LLMOutput( + content=getattr(response.content[0], "text", "") if response.content else "",  # first block may be tool_use, which has no .text + tool_calls=tool_calls, + model=response.model, + usage={ + "input_tokens": response.usage.input_tokens, + "output_tokens": response.usage.output_tokens, + }, + stop_reason=response.stop_reason, + ) + except Exception as e: + msg = str(e) + if "401" in msg or "authentication" in msg.lower(): + raise AuthenticationError(msg, provider=ProviderType.CLAUDE) from e + if "429" in msg or "rate_limit" in msg.lower(): + raise RateLimitError(msg, provider=ProviderType.CLAUDE) from e + if "context" in msg.lower() and "length" in msg.lower(): + raise ContextLengthError(msg, provider=ProviderType.CLAUDE) from e + raise + + def list_models(self) -> list[ModelInfo]: + return self._models.copy() + + def validate_config(self) -> bool: + return bool(self.client.api_key) + + def get_default_model(self) -> str: + return "claude-sonnet-4-7" diff --git a/src/llm/providers/ollama.py b/src/llm/providers/ollama.py new file mode 100644 index 00000000..56ee6eef --- /dev/null +++ b/src/llm/providers/ollama.py @@ -0,0 +1,112 @@ +"""Ollama provider adapter for local models.""" + +from __future__ import annotations + +import os +from typing import Any + +from llm.core.interface import ( + AuthenticationError, + ContextLengthError, + LLMProvider, + RateLimitError, +) +from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall + + +class OllamaProvider(LLMProvider): + provider_type = ProviderType.OLLAMA + + def __init__( + self, + base_url: str | None = None, + default_model: str | None = None, + ) -> None: + self.base_url = base_url or os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434") + self.default_model = default_model or os.environ.get("OLLAMA_MODEL", "llama3.2") + self._models = [ + ModelInfo( + name="llama3.2", + provider=ProviderType.OLLAMA, + supports_tools=False, + supports_vision=False, + 
max_tokens=4096, + context_window=128000, + ), + ModelInfo( + name="mistral", + provider=ProviderType.OLLAMA, + supports_tools=False, + supports_vision=False, + max_tokens=4096, + context_window=8192, + ), + ModelInfo( + name="codellama", + provider=ProviderType.OLLAMA, + supports_tools=False, + supports_vision=False, + max_tokens=4096, + context_window=16384, + ), + ] + + def generate(self, input: LLMInput) -> LLMOutput: + import urllib.request + import json + + try: + url = f"{self.base_url}/api/chat" + model = input.model or self.default_model + + payload: dict[str, Any] = { + "model": model, + "messages": [msg.to_dict() for msg in input.messages], + "stream": False, + } + if input.temperature != 1.0: + payload["options"] = {"temperature": input.temperature} + + data = json.dumps(payload).encode("utf-8") + req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"}) + + with urllib.request.urlopen(req, timeout=60) as response: + result = json.loads(response.read().decode("utf-8")) + + content = result.get("message", {}).get("content", "") + + tool_calls = None + if result.get("message", {}).get("tool_calls"): + tool_calls = [ + ToolCall( + id=tc.get("id", ""), + name=tc.get("function", {}).get("name", ""), + arguments=tc.get("function", {}).get("arguments", {}), + ) + for tc in result["message"]["tool_calls"] + ] + + return LLMOutput( + content=content, + tool_calls=tool_calls, + model=model, + stop_reason=result.get("done_reason"), + ) + except Exception as e: + msg = str(e) + if "401" in msg or "connection" in msg.lower(): + raise AuthenticationError(f"Ollama connection failed: {msg}", provider=ProviderType.OLLAMA) from e + if "429" in msg or "rate_limit" in msg.lower(): + raise RateLimitError(msg, provider=ProviderType.OLLAMA) from e + if "context" in msg.lower() and "length" in msg.lower(): + raise ContextLengthError(msg, provider=ProviderType.OLLAMA) from e + raise + + def list_models(self) -> list[ModelInfo]: + return 
self._models.copy() + + def validate_config(self) -> bool: + return bool(self.base_url) + + def get_default_model(self) -> str: + return self.default_model diff --git a/src/llm/providers/openai.py b/src/llm/providers/openai.py new file mode 100644 index 00000000..ebbcf78d --- /dev/null +++ b/src/llm/providers/openai.py @@ -0,0 +1,113 @@ +"""OpenAI provider adapter.""" + +from __future__ import annotations + +import os +from typing import Any + +from openai import OpenAI + +from llm.core.interface import ( + AuthenticationError, + ContextLengthError, + LLMProvider, + RateLimitError, +) +from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall + + +class OpenAIProvider(LLMProvider): + provider_type = ProviderType.OPENAI + + def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None: + self.client = OpenAI(api_key=api_key or os.environ.get("OPENAI_API_KEY"), base_url=base_url) + self._models = [ + ModelInfo( + name="gpt-4o", + provider=ProviderType.OPENAI, + supports_tools=True, + supports_vision=True, + max_tokens=4096, + context_window=128000, + ), + ModelInfo( + name="gpt-4o-mini", + provider=ProviderType.OPENAI, + supports_tools=True, + supports_vision=True, + max_tokens=4096, + context_window=128000, + ), + ModelInfo( + name="gpt-4-turbo", + provider=ProviderType.OPENAI, + supports_tools=True, + supports_vision=True, + max_tokens=4096, + context_window=128000, + ), + ModelInfo( + name="gpt-3.5-turbo", + provider=ProviderType.OPENAI, + supports_tools=True, + supports_vision=False, + max_tokens=4096, + context_window=16385, + ), + ] + + def generate(self, input: LLMInput) -> LLMOutput: + try: + params: dict[str, Any] = { + "model": input.model or "gpt-4o-mini", + "messages": [msg.to_dict() for msg in input.messages], + "temperature": input.temperature, + } + if input.max_tokens: + params["max_tokens"] = input.max_tokens + if input.tools: + params["tools"] = [tool.to_dict() for tool in input.tools] + + 
response = self.client.chat.completions.create(**params) + choice = response.choices[0] + + tool_calls = None + if choice.message.tool_calls: + tool_calls = [ + ToolCall( + id=tc.id or "", + name=tc.function.name, + arguments={} if tc.function.arguments == "" else tc.function.arguments, + ) + for tc in choice.message.tool_calls + ] + + return LLMOutput( + content=choice.message.content or "", + tool_calls=tool_calls, + model=response.model, + usage={ + "prompt_tokens": response.usage.prompt_tokens, + "completion_tokens": response.usage.completion_tokens, + "total_tokens": response.usage.total_tokens, + }, + stop_reason=choice.finish_reason, + ) + except Exception as e: + msg = str(e) + if "401" in msg or "authentication" in msg.lower(): + raise AuthenticationError(msg, provider=ProviderType.OPENAI) from e + if "429" in msg or "rate_limit" in msg.lower(): + raise RateLimitError(msg, provider=ProviderType.OPENAI) from e + if "context" in msg.lower() and "length" in msg.lower(): + raise ContextLengthError(msg, provider=ProviderType.OPENAI) from e + raise + + def list_models(self) -> list[ModelInfo]: + return self._models.copy() + + def validate_config(self) -> bool: + return bool(self.client.api_key) + + def get_default_model(self) -> str: + return "gpt-4o-mini" diff --git a/src/llm/providers/resolver.py b/src/llm/providers/resolver.py new file mode 100644 index 00000000..655c8775 --- /dev/null +++ b/src/llm/providers/resolver.py @@ -0,0 +1,39 @@ +"""Provider factory and resolver.""" + +from __future__ import annotations + +import os + +from llm.core.interface import LLMProvider +from llm.core.types import ProviderType +from llm.providers.claude import ClaudeProvider +from llm.providers.openai import OpenAIProvider +from llm.providers.ollama import OllamaProvider + + +_PROVIDER_MAP: dict[ProviderType, type[LLMProvider]] = { + ProviderType.CLAUDE: ClaudeProvider, + ProviderType.OPENAI: OpenAIProvider, + ProviderType.OLLAMA: OllamaProvider, +} + + +def 
get_provider(provider_type: ProviderType | str | None = None, **kwargs: str) -> LLMProvider: + if provider_type is None: + provider_type = os.environ.get("LLM_PROVIDER", "claude").lower() + + if isinstance(provider_type, str): + try: + provider_type = ProviderType(provider_type) + except ValueError: + raise ValueError(f"Unknown provider type: {provider_type}. Valid types: {[p.value for p in ProviderType]}") + + provider_cls = _PROVIDER_MAP.get(provider_type) + if not provider_cls: + raise ValueError(f"No provider registered for type: {provider_type}") + + return provider_cls(**kwargs) + + +def register_provider(provider_type: ProviderType, provider_cls: type[LLMProvider]) -> None: + _PROVIDER_MAP[provider_type] = provider_cls diff --git a/src/llm/tools/__init__.py b/src/llm/tools/__init__.py new file mode 100644 index 00000000..cb2c5b7a --- /dev/null +++ b/src/llm/tools/__init__.py @@ -0,0 +1,9 @@ +"""Tools module for tool/function calling abstraction.""" + +from llm.tools.executor import ReActAgent, ToolExecutor, ToolRegistry + +__all__ = [ + "ToolRegistry", + "ToolExecutor", + "ReActAgent", +] diff --git a/src/llm/tools/executor.py b/src/llm/tools/executor.py new file mode 100644 index 00000000..8cd134e0 --- /dev/null +++ b/src/llm/tools/executor.py @@ -0,0 +1,108 @@ +"""Tool executor for handling tool calls from LLM responses.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Any, Callable + +from llm.core.interface import ToolExecutionError +from llm.core.types import LLMInput, LLMOutput, Message, Role, ToolCall, ToolDefinition, ToolResult + + +ToolFunc = Callable[..., Any] + + +class ToolRegistry: + def __init__(self) -> None: + self._tools: dict[str, ToolFunc] = {} + self._definitions: dict[str, ToolDefinition] = {} + + def register(self, definition: ToolDefinition, func: ToolFunc) -> None: + self._tools[definition.name] = func + self._definitions[definition.name] = definition + + def get(self, name: str) -> 
ToolFunc | None: + return self._tools.get(name) + + def get_definition(self, name: str) -> ToolDefinition | None: + return self._definitions.get(name) + + def list_tools(self) -> list[ToolDefinition]: + return list(self._definitions.values()) + + def has(self, name: str) -> bool: + return name in self._tools + + +class ToolExecutor: + def __init__(self, registry: ToolRegistry | None = None) -> None: + self.registry = registry or ToolRegistry() + + def execute(self, tool_call: ToolCall) -> ToolResult: + func = self.registry.get(tool_call.name) + if not func: + return ToolResult( + tool_call_id=tool_call.id, + content=f"Error: Tool '{tool_call.name}' not found", + is_error=True, + ) + + try: + result = func(**tool_call.arguments) + content = result if isinstance(result, str) else str(result) + return ToolResult(tool_call_id=tool_call.id, content=content) + except Exception as e: + return ToolResult( + tool_call_id=tool_call.id, + content=f"Error executing {tool_call.name}: {e}", + is_error=True, + ) + + def execute_all(self, tool_calls: list[ToolCall]) -> list[ToolResult]: + return [self.execute(tc) for tc in tool_calls] + + +class ReActAgent: + def __init__( + self, + provider: Any, + executor: ToolExecutor, + max_iterations: int = 10, + ) -> None: + self.provider = provider + self.executor = executor + self.max_iterations = max_iterations + + async def run(self, input: LLMInput) -> LLMOutput: + messages = list(input.messages) + tools = input.tools or [] + + for _ in range(self.max_iterations): + input_copy = LLMInput( + messages=messages, + model=input.model, + temperature=input.temperature, + max_tokens=input.max_tokens, + tools=tools, + ) + + output = self.provider.generate(input_copy) + + if not output.has_tool_calls: + return output + + results = self.executor.execute_all(output.tool_calls) + + for result in results: + messages.append( + Message( + role=Role.TOOL, + content=result.content, + tool_call_id=result.tool_call_id, + ) + ) + + return LLMOutput( + 
content="Max iterations reached", + stop_reason="max_iterations", + ) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..8d9f8989 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,4 @@ +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent / "src")) diff --git a/tests/test_builder.py b/tests/test_builder.py new file mode 100644 index 00000000..4ba4c0e1 --- /dev/null +++ b/tests/test_builder.py @@ -0,0 +1,61 @@ +import pytest +from llm.core.types import LLMInput, Message, Role, ToolDefinition +from llm.prompt import PromptBuilder, adapt_messages_for_provider + + +class TestPromptBuilder: + def test_build_without_system(self): + messages = [Message(role=Role.USER, content="Hello")] + builder = PromptBuilder() + result = builder.build(messages) + + assert len(result) == 1 + assert result[0].role == Role.USER + + def test_build_with_system(self): + messages = [ + Message(role=Role.SYSTEM, content="You are helpful."), + Message(role=Role.USER, content="Hello"), + ] + builder = PromptBuilder() + result = builder.build(messages) + + assert len(result) == 2 + assert result[0].role == Role.SYSTEM + + def test_build_adds_system_from_config(self): + messages = [Message(role=Role.USER, content="Hello")] + builder = PromptBuilder(system_template="You are a pirate.") + result = builder.build(messages) + + assert len(result) == 2 + assert "pirate" in result[0].content + + def test_build_with_tools(self): + messages = [Message(role=Role.USER, content="Search for something")] + tools = [ + ToolDefinition(name="search", description="Search the web", parameters={}), + ] + builder = PromptBuilder(include_tools_in_system=True) + result = builder.build(messages, tools) + + assert len(result) == 2 + assert "search" in result[0].content + assert "Available Tools" in result[0].content + + +class 
TestAdaptMessagesForProvider: + def test_adapt_for_claude(self): + messages = [Message(role=Role.USER, content="Hello")] + result = adapt_messages_for_provider(messages, "claude") + assert len(result) == 1 + + def test_adapt_for_openai(self): + messages = [Message(role=Role.USER, content="Hello")] + result = adapt_messages_for_provider(messages, "openai") + assert len(result) == 1 + + def test_adapt_for_ollama(self): + messages = [Message(role=Role.USER, content="Hello")] + result = adapt_messages_for_provider(messages, "ollama") + assert len(result) == 1 diff --git a/tests/test_executor.py b/tests/test_executor.py new file mode 100644 index 00000000..07f8fe92 --- /dev/null +++ b/tests/test_executor.py @@ -0,0 +1,86 @@ +import pytest +from llm.core.types import ToolCall, ToolDefinition, ToolResult +from llm.tools import ToolExecutor, ToolRegistry + + +class TestToolRegistry: + def test_register_and_get(self): + registry = ToolRegistry() + + def dummy_func() -> str: + return "result" + + tool_def = ToolDefinition( + name="dummy", + description="A dummy tool", + parameters={"type": "object"}, + ) + registry.register(tool_def, dummy_func) + + assert registry.has("dummy") is True + assert registry.get("dummy") is dummy_func + assert registry.get_definition("dummy") == tool_def + + def test_list_tools(self): + registry = ToolRegistry() + tool_def = ToolDefinition(name="test", description="Test", parameters={}) + registry.register(tool_def, lambda: None) + + tools = registry.list_tools() + assert len(tools) == 1 + assert tools[0].name == "test" + + +class TestToolExecutor: + def test_execute_success(self): + registry = ToolRegistry() + + def search(query: str) -> str: + return f"Results for: {query}" + + registry.register( + ToolDefinition( + name="search", + description="Search", + parameters={"type": "object", "properties": {"query": {"type": "string"}}}, + ), + search, + ) + + executor = ToolExecutor(registry) + result = executor.execute(ToolCall(id="1", 
name="search", arguments={"query": "test"})) + + assert result.tool_call_id == "1" + assert result.content == "Results for: test" + assert result.is_error is False + + def test_execute_unknown_tool(self): + registry = ToolRegistry() + executor = ToolExecutor(registry) + + result = executor.execute(ToolCall(id="1", name="unknown", arguments={})) + + assert result.is_error is True + assert "not found" in result.content + + def test_execute_all(self): + registry = ToolRegistry() + + def tool1() -> str: + return "result1" + + def tool2() -> str: + return "result2" + + registry.register(ToolDefinition(name="t1", description="", parameters={}), tool1) + registry.register(ToolDefinition(name="t2", description="", parameters={}), tool2) + + executor = ToolExecutor(registry) + results = executor.execute_all([ + ToolCall(id="1", name="t1", arguments={}), + ToolCall(id="2", name="t2", arguments={}), + ]) + + assert len(results) == 2 + assert results[0].content == "result1" + assert results[1].content == "result2" diff --git a/tests/test_resolver.py b/tests/test_resolver.py new file mode 100644 index 00000000..6676c20e --- /dev/null +++ b/tests/test_resolver.py @@ -0,0 +1,28 @@ +import pytest +from llm.core.types import ProviderType +from llm.providers import ClaudeProvider, OpenAIProvider, OllamaProvider, get_provider + + +class TestGetProvider: + def test_get_claude_provider(self): + provider = get_provider("claude") + assert isinstance(provider, ClaudeProvider) + assert provider.provider_type == ProviderType.CLAUDE + + def test_get_openai_provider(self): + provider = get_provider("openai") + assert isinstance(provider, OpenAIProvider) + assert provider.provider_type == ProviderType.OPENAI + + def test_get_ollama_provider(self): + provider = get_provider("ollama") + assert isinstance(provider, OllamaProvider) + assert provider.provider_type == ProviderType.OLLAMA + + def test_get_provider_by_enum(self): + provider = get_provider(ProviderType.CLAUDE) + assert 
isinstance(provider, ClaudeProvider) + + def test_invalid_provider_raises(self): + with pytest.raises(ValueError, match="Unknown provider type"): + get_provider("invalid") diff --git a/tests/test_types.py b/tests/test_types.py new file mode 100644 index 00000000..a24b6119 --- /dev/null +++ b/tests/test_types.py @@ -0,0 +1,117 @@ +import pytest +from llm.core.types import ( + LLMInput, + LLMOutput, + Message, + ModelInfo, + ProviderType, + Role, + ToolCall, + ToolDefinition, + ToolResult, +) + + +class TestRole: + def test_role_values(self): + assert Role.SYSTEM.value == "system" + assert Role.USER.value == "user" + assert Role.ASSISTANT.value == "assistant" + assert Role.TOOL.value == "tool" + + +class TestProviderType: + def test_provider_values(self): + assert ProviderType.CLAUDE.value == "claude" + assert ProviderType.OPENAI.value == "openai" + assert ProviderType.OLLAMA.value == "ollama" + + +class TestMessage: + def test_create_message(self): + msg = Message(role=Role.USER, content="Hello") + assert msg.role == Role.USER + assert msg.content == "Hello" + assert msg.name is None + assert msg.tool_call_id is None + + def test_message_to_dict(self): + msg = Message(role=Role.USER, content="Hello", name="test") + result = msg.to_dict() + assert result["role"] == "user" + assert result["content"] == "Hello" + assert result["name"] == "test" + + +class TestToolDefinition: + def test_create_tool(self): + tool = ToolDefinition( + name="search", + description="Search the web", + parameters={"type": "object", "properties": {}}, + ) + assert tool.name == "search" + assert tool.strict is True + + def test_tool_to_dict(self): + tool = ToolDefinition( + name="search", + description="Search", + parameters={"type": "object"}, + ) + result = tool.to_dict() + assert result["name"] == "search" + assert result["strict"] is True + + +class TestToolCall: + def test_create_tool_call(self): + tc = ToolCall(id="1", name="search", arguments={"query": "test"}) + assert tc.id == "1" + 
assert tc.name == "search" + assert tc.arguments == {"query": "test"} + + +class TestToolResult: + def test_create_tool_result(self): + result = ToolResult(tool_call_id="1", content="result") + assert result.tool_call_id == "1" + assert result.is_error is False + + +class TestLLMInput: + def test_create_input(self): + messages = [Message(role=Role.USER, content="Hello")] + input_obj = LLMInput(messages=messages, temperature=0.7) + assert len(input_obj.messages) == 1 + assert input_obj.temperature == 0.7 + + def test_input_to_dict(self): + messages = [Message(role=Role.USER, content="Hello")] + input_obj = LLMInput(messages=messages) + result = input_obj.to_dict() + assert "messages" in result + assert result["temperature"] == 1.0 + + +class TestLLMOutput: + def test_create_output(self): + output = LLMOutput(content="Hello!") + assert output.content == "Hello!" + assert output.has_tool_calls is False + + def test_output_with_tool_calls(self): + tc = ToolCall(id="1", name="search", arguments={}) + output = LLMOutput(content="", tool_calls=[tc]) + assert output.has_tool_calls is True + + +class TestModelInfo: + def test_create_model_info(self): + info = ModelInfo( + name="gpt-4", + provider=ProviderType.OPENAI, + ) + assert info.name == "gpt-4" + assert info.supports_tools is True + assert info.supports_vision is False