mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-04-14 13:53:29 +08:00
GUI created, dark mode added, npm script added, styling updated; decoupled from Claude to make the project more open source.
33  src/llm/__init__.py  Normal file
@@ -0,0 +1,33 @@
"""
LLM Abstraction Layer

Provider-agnostic interface for multiple LLM backends.
"""

from llm.core.interface import LLMProvider
from llm.core.types import LLMInput, LLMOutput, Message, ToolCall, ToolDefinition, ToolResult
from llm.providers import get_provider
from llm.tools import ToolExecutor, ToolRegistry
from llm.cli.selector import interactive_select

__version__ = "0.1.0"

__all__ = [
    "LLMProvider",
    "LLMInput",
    "LLMOutput",
    "Message",
    "get_provider",
    "ToolCall",
    "ToolDefinition",
    "ToolResult",
    "ToolExecutor",
    "ToolRegistry",
    "interactive_select",
]


def gui() -> None:
    from llm.gui.selector import main

    main()
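For orientation, a minimal usage sketch of the public API exported above (not part of the commit). It assumes the package is importable as llm and that an API key for the chosen provider is set in the environment:

from llm import LLMInput, Message, get_provider
from llm.core.types import Role

# Resolve a provider by name; "openai" here reads OPENAI_API_KEY from the environment.
provider = get_provider("openai")

output = provider.generate(
    LLMInput(
        messages=[Message(role=Role.USER, content="Summarize what an abstraction layer is.")],
        max_tokens=256,
    )
)
print(output.content)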
7  src/llm/__main__.py  Normal file
@@ -0,0 +1,7 @@
#!/usr/bin/env python3
"""Entry point for llm CLI."""

from llm.cli.selector import main

if __name__ == "__main__":
    main()
0  src/llm/cli/__init__.py  Normal file
154  src/llm/cli/selector.py  Normal file
@@ -0,0 +1,154 @@
from __future__ import annotations

import os
import sys
from enum import Enum


class Color(str, Enum):
    RESET = "\033[0m"
    BOLD = "\033[1m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    BLUE = "\033[94m"
    CYAN = "\033[96m"


def print_banner() -> None:
    banner = f"""{Color.CYAN}
╔═══════════════════════════════════════════╗
║           LLM Provider Selector           ║
║     Provider-agnostic AI interactions     ║
╚═══════════════════════════════════════════╝{Color.RESET}"""
    print(banner)


def print_providers(providers: list[tuple[str, str]]) -> None:
    print(f"\n{Color.BOLD}Available Providers:{Color.RESET}\n")
    for i, (name, desc) in enumerate(providers, 1):
        print(f"  {Color.GREEN}{i}{Color.RESET}. {Color.BOLD}{name}{Color.RESET} - {desc}")


def select_provider(providers: list[tuple[str, str]]) -> str | None:
    if not providers:
        print("No providers available.")
        return None

    print_providers(providers)

    while True:
        try:
            choice = input(f"\n{Color.YELLOW}Select provider (1-{len(providers)}): {Color.RESET}").strip()
            if not choice:
                return None
            idx = int(choice) - 1
            if 0 <= idx < len(providers):
                return providers[idx][0]
            print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}")
        except ValueError:
            print(f"{Color.YELLOW}Please enter a number.{Color.RESET}")


def select_model(models: list[tuple[str, str]]) -> str | None:
    if not models:
        print("No models available.")
        return None

    print(f"\n{Color.BOLD}Available Models:{Color.RESET}\n")
    for i, (name, desc) in enumerate(models, 1):
        print(f"  {Color.GREEN}{i}{Color.RESET}. {Color.BOLD}{name}{Color.RESET} - {desc}")

    while True:
        try:
            choice = input(f"\n{Color.YELLOW}Select model (1-{len(models)}): {Color.RESET}").strip()
            if not choice:
                return None
            idx = int(choice) - 1
            if 0 <= idx < len(models):
                return models[idx][0]
            print(f"{Color.YELLOW}Invalid selection. Try again.{Color.RESET}")
        except ValueError:
            print(f"{Color.YELLOW}Please enter a number.{Color.RESET}")


def save_config(provider: str, model: str, persist: bool = False) -> None:
    config = f"LLM_PROVIDER={provider}\nLLM_MODEL={model}\n"
    env_file = ".llm.env"

    with open(env_file, "w") as f:
        f.write(config)

    print(f"\n{Color.GREEN}✓{Color.RESET} Config saved to {Color.CYAN}{env_file}{Color.RESET}")

    if persist:
        os.environ["LLM_PROVIDER"] = provider
        os.environ["LLM_MODEL"] = model
        print(f"{Color.GREEN}✓{Color.RESET} Config loaded into current session")


def interactive_select(
    providers: list[tuple[str, str]] | None = None,
    models_per_provider: dict[str, list[tuple[str, str]]] | None = None,
    persist: bool = False,
) -> tuple[str, str] | None:
    print_banner()

    if providers is None:
        providers = [
            ("claude", "Anthropic Claude (Sonnet, Opus, Haiku)"),
            ("openai", "OpenAI GPT (4o, 4o-mini, 3.5-turbo)"),
            ("ollama", "Local Ollama models"),
        ]

    if models_per_provider is None:
        models_per_provider = {
            "claude": [
                ("claude-opus-4-5", "Claude Opus 4.5 - Most capable"),
                ("claude-sonnet-4-7", "Claude Sonnet 4.7 - Balanced"),
                ("claude-haiku-4-7", "Claude Haiku 4.7 - Fast"),
            ],
            "openai": [
                ("gpt-4o", "GPT-4o - Most capable"),
                ("gpt-4o-mini", "GPT-4o-mini - Fast & affordable"),
                ("gpt-4-turbo", "GPT-4 Turbo - Legacy powerful"),
                ("gpt-3.5-turbo", "GPT-3.5 - Legacy fast"),
            ],
            "ollama": [
                ("llama3.2", "Llama 3.2 - General purpose"),
                ("mistral", "Mistral - Fast & efficient"),
                ("codellama", "CodeLlama - Code specialized"),
            ],
        }

    provider = select_provider(providers)
    if not provider:
        return None

    models = models_per_provider.get(provider, [])
    model = select_model(models)
    if not model:
        return None

    print(f"\n{Color.GREEN}Selected: {Color.BOLD}{provider}{Color.RESET} / {Color.BOLD}{model}{Color.RESET}")

    save_config(provider, model, persist)

    return (provider, model)


def main() -> None:
    result = interactive_select(persist=True)

    if result:
        print(f"\n{Color.GREEN}Ready to use!{Color.RESET}")
        print(f"  export LLM_PROVIDER={result[0]}")
        print(f"  export LLM_MODEL={result[1]}")
    else:
        print("\nSelection cancelled.")
        sys.exit(0)


if __name__ == "__main__":
    main()
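As a usage sketch (an assumed invocation, not part of the diff), the selector can be fed custom menus instead of the built-in defaults; the "qwen2.5" entry below is purely illustrative:

from llm.cli.selector import interactive_select

# Offer only local Ollama models; entries are (value, description) pairs.
selection = interactive_select(
    providers=[("ollama", "Local Ollama models")],
    models_per_provider={"ollama": [("llama3.2", "Llama 3.2"), ("qwen2.5", "Qwen 2.5 (hypothetical entry)")]},
    persist=True,
)
if selection:
    provider, model = selection
    print(f"Using {provider}/{model}")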
1  src/llm/core/__init__.py  Normal file
@@ -0,0 +1 @@
"""Core module for LLM abstraction layer."""
60  src/llm/core/interface.py  Normal file
@@ -0,0 +1,60 @@
"""LLM Provider interface definition."""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any

from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType


class LLMProvider(ABC):
    provider_type: ProviderType

    @abstractmethod
    def generate(self, input: LLMInput) -> LLMOutput: ...

    @abstractmethod
    def list_models(self) -> list[ModelInfo]: ...

    @abstractmethod
    def validate_config(self) -> bool: ...

    def supports_tools(self) -> bool:
        return True

    def supports_vision(self) -> bool:
        return False

    def get_default_model(self) -> str:
        raise NotImplementedError(f"{self.__class__.__name__} must implement get_default_model")


class LLMError(Exception):
    def __init__(
        self,
        message: str,
        provider: ProviderType | None = None,
        code: str | None = None,
        details: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(message)
        self.message = message
        self.provider = provider
        self.code = code
        self.details = details or {}


class AuthenticationError(LLMError): ...


class RateLimitError(LLMError): ...


class ContextLengthError(LLMError): ...


class ModelNotFoundError(LLMError): ...


class ToolExecutionError(LLMError): ...
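To illustrate the contract above, a minimal sketch of a custom provider that satisfies the three abstract methods; the EchoProvider backend is invented for this example and is not part of the commit:

from llm.core.interface import LLMProvider
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType


class EchoProvider(LLMProvider):
    """Toy provider that echoes the last user message; illustrative only."""

    provider_type = ProviderType.OLLAMA  # reuse an existing enum member for the sketch

    def generate(self, input: LLMInput) -> LLMOutput:
        last = input.messages[-1].content if input.messages else ""
        return LLMOutput(content=f"echo: {last}", model="echo-1")

    def list_models(self) -> list[ModelInfo]:
        return [ModelInfo(name="echo-1", provider=self.provider_type)]

    def validate_config(self) -> bool:
        return True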
140  src/llm/core/types.py  Normal file
@@ -0,0 +1,140 @@
"""Core type definitions for LLM abstraction layer."""

from __future__ import annotations

from dataclasses import dataclass, field
from enum import Enum
from typing import Any


class Role(str, Enum):
    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"


class ProviderType(str, Enum):
    CLAUDE = "claude"
    OPENAI = "openai"
    OLLAMA = "ollama"


@dataclass(frozen=True)
class Message:
    role: Role
    content: str
    name: str | None = None
    tool_call_id: str | None = None

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {"role": self.role.value, "content": self.content}
        if self.name:
            result["name"] = self.name
        if self.tool_call_id:
            result["tool_call_id"] = self.tool_call_id
        return result


@dataclass(frozen=True)
class ToolDefinition:
    name: str
    description: str
    parameters: dict[str, Any]
    strict: bool = True

    def to_dict(self) -> dict[str, Any]:
        return {
            "name": self.name,
            "description": self.description,
            "parameters": self.parameters,
            "strict": self.strict,
        }


@dataclass(frozen=True)
class ToolCall:
    id: str
    name: str
    arguments: dict[str, Any]


@dataclass(frozen=True)
class ToolResult:
    tool_call_id: str
    content: str
    is_error: bool = False


@dataclass(frozen=True)
class LLMInput:
    messages: list[Message]
    model: str | None = None
    temperature: float = 1.0
    max_tokens: int | None = None
    tools: list[ToolDefinition] | None = None
    stream: bool = False
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {
            "messages": [msg.to_dict() for msg in self.messages],
            "temperature": self.temperature,
            "stream": self.stream,
        }
        if self.model:
            result["model"] = self.model
        if self.max_tokens is not None:
            result["max_tokens"] = self.max_tokens
        if self.tools:
            result["tools"] = [tool.to_dict() for tool in self.tools]
        return result | self.metadata


@dataclass(frozen=True)
class LLMOutput:
    content: str
    tool_calls: list[ToolCall] | None = None
    model: str | None = None
    usage: dict[str, int] | None = None
    stop_reason: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        return bool(self.tool_calls)

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {"content": self.content}
        if self.tool_calls:
            result["tool_calls"] = [
                {"id": tc.id, "name": tc.name, "arguments": tc.arguments}
                for tc in self.tool_calls
            ]
        if self.model:
            result["model"] = self.model
        if self.usage:
            result["usage"] = self.usage
        if self.stop_reason:
            result["stop_reason"] = self.stop_reason
        return result | self.metadata


@dataclass(frozen=True)
class ModelInfo:
    name: str
    provider: ProviderType
    supports_tools: bool = True
    supports_vision: bool = False
    max_tokens: int | None = None
    context_window: int | None = None

    def to_dict(self) -> dict[str, Any]:
        return {
            "name": self.name,
            "provider": self.provider.value,
            "supports_tools": self.supports_tools,
            "supports_vision": self.supports_vision,
            "max_tokens": self.max_tokens,
            "context_window": self.context_window,
        }
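A short sketch of how these dataclasses compose into a request payload; the get_weather tool is invented for illustration:

from llm.core.types import LLMInput, Message, Role, ToolDefinition

weather_tool = ToolDefinition(
    name="get_weather",
    description="Look up current weather for a city.",
    parameters={
        "type": "object",
        "properties": {"city": {"type": "string", "description": "City name"}},
        "required": ["city"],
    },
)

request = LLMInput(
    messages=[
        Message(role=Role.SYSTEM, content="You are a concise assistant."),
        Message(role=Role.USER, content="What's the weather in Oslo?"),
    ],
    tools=[weather_tool],
    max_tokens=512,
)

print(request.to_dict())  # plain dict, ready to adapt for a provider call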
13  src/llm/prompt/__init__.py  Normal file
@@ -0,0 +1,13 @@
"""Prompt module for prompt building and normalization."""

from llm.prompt.builder import PromptBuilder, adapt_messages_for_provider, get_provider_builder
from llm.prompt.templates import TEMPLATES, get_template, get_template_or_default

__all__ = [
    "PromptBuilder",
    "adapt_messages_for_provider",
    "get_provider_builder",
    "TEMPLATES",
    "get_template",
    "get_template_or_default",
]
101  src/llm/prompt/builder.py  Normal file
@@ -0,0 +1,101 @@
"""Prompt builder for normalizing prompts across providers."""

from __future__ import annotations

from dataclasses import dataclass
from typing import Any

from llm.core.types import Message, Role, ToolDefinition


@dataclass
class PromptConfig:
    system_template: str | None = None
    user_template: str | None = None
    include_tools_in_system: bool = True
    tool_format: str = "native"


class PromptBuilder:
    def __init__(self, config: PromptConfig | None = None) -> None:
        self.config = config or PromptConfig()

    def build(self, messages: list[Message], tools: list[ToolDefinition] | None = None) -> list[Message]:
        if not messages:
            return []

        result: list[Message] = []
        system_parts: list[str] = []

        if self.config.system_template:
            system_parts.append(self.config.system_template)

        if tools and self.config.include_tools_in_system:
            tools_desc = self._format_tools(tools)
            system_parts.append(f"\n\n## Available Tools\n{tools_desc}")

        # Fold an existing leading system message into the combined system prompt
        # so it is not dropped when a template or tool description is added.
        if messages[0].role == Role.SYSTEM:
            system_parts.insert(0, messages[0].content)
            remaining = messages[1:]
        else:
            remaining = list(messages)

        if system_parts:
            result.append(Message(role=Role.SYSTEM, content="\n\n".join(system_parts)))
        result.extend(remaining)

        return result

    def _format_tools(self, tools: list[ToolDefinition]) -> str:
        lines = []
        for tool in tools:
            lines.append(f"### {tool.name}")
            lines.append(tool.description)
            if tool.parameters:
                lines.append("Parameters:")
                lines.append(self._format_parameters(tool.parameters))
        return "\n".join(lines)

    def _format_parameters(self, params: dict[str, Any]) -> str:
        if "properties" not in params:
            return str(params)
        lines = []
        required = params.get("required", [])
        for name, spec in params["properties"].items():
            prop_type = spec.get("type", "any")
            desc = spec.get("description", "")
            required_mark = "(required)" if name in required else "(optional)"
            lines.append(f"  - {name}: {prop_type} {required_mark} - {desc}")
        return "\n".join(lines) if lines else str(params)


_PROVIDER_TEMPLATE_MAP: dict[str, dict[str, Any]] = {
    "claude": {
        "include_tools_in_system": False,
        "tool_format": "anthropic",
    },
    "openai": {
        "include_tools_in_system": False,
        "tool_format": "openai",
    },
    "ollama": {
        "include_tools_in_system": True,
        "tool_format": "text",
    },
}


def get_provider_builder(provider_name: str) -> PromptBuilder:
    config_dict = _PROVIDER_TEMPLATE_MAP.get(provider_name.lower(), {})
    config = PromptConfig(**config_dict)
    return PromptBuilder(config)


def adapt_messages_for_provider(
    messages: list[Message],
    provider: str,
    tools: list[ToolDefinition] | None = None,
) -> list[Message]:
    builder = get_provider_builder(provider)
    return builder.build(messages, tools)
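As a usage sketch, adapting the same conversation for a backend that wants tools described in the system prompt (ollama config) versus one that relies on native tool support (claude config); the search tool is illustrative only:

from llm.core.types import Message, Role, ToolDefinition
from llm.prompt.builder import adapt_messages_for_provider

search_tool = ToolDefinition(
    name="search",
    description="Search a document index.",
    parameters={"type": "object", "properties": {"query": {"type": "string"}}, "required": ["query"]},
)

chat = [Message(role=Role.USER, content="Find the release notes for v0.1.0")]

# The ollama config injects a textual tool description as a system message;
# the claude and openai configs leave tools to the provider's native mechanism.
for target in ("ollama", "claude"):
    adapted = adapt_messages_for_provider(chat, target, tools=[search_tool])
    print(target, [m.role.value for m in adapted])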
1  src/llm/prompt/templates/__init__.py  Normal file
@@ -0,0 +1 @@
# Templates module for provider-specific prompt templates
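Note that llm/prompt/__init__.py above imports TEMPLATES, get_template, and get_template_or_default from this package, while the diff adds only the comment line. A minimal sketch of what the module would need to export for that import to resolve; the names come from the import in the __init__, but the template contents here are assumptions, not part of the commit:

# src/llm/prompt/templates/__init__.py (sketch, assumed shape)
"""Templates module for provider-specific prompt templates."""

TEMPLATES: dict[str, str] = {
    # Placeholder entry; real templates would live here or in sibling modules.
    "default": "You are a helpful assistant.",
}


def get_template(name: str) -> str:
    return TEMPLATES[name]


def get_template_or_default(name: str, default: str = TEMPLATES["default"]) -> str:
    return TEMPLATES.get(name, default)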
14  src/llm/providers/__init__.py  Normal file
@@ -0,0 +1,14 @@
"""Provider adapters for multiple LLM backends."""

from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider
from llm.providers.resolver import get_provider, register_provider

__all__ = [
    "ClaudeProvider",
    "OpenAIProvider",
    "OllamaProvider",
    "get_provider",
    "register_provider",
]
103  src/llm/providers/claude.py  Normal file
@@ -0,0 +1,103 @@
"""Claude provider adapter."""

from __future__ import annotations

import os
from typing import Any

from anthropic import Anthropic

from llm.core.interface import (
    AuthenticationError,
    ContextLengthError,
    LLMProvider,
    RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType, ToolCall


class ClaudeProvider(LLMProvider):
    provider_type = ProviderType.CLAUDE

    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        self.client = Anthropic(api_key=api_key or os.environ.get("ANTHROPIC_API_KEY"), base_url=base_url)
        self._models = [
            ModelInfo(
                name="claude-opus-4-5",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-sonnet-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-haiku-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=200000,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        try:
            params: dict[str, Any] = {
                "model": input.model or "claude-sonnet-4-7",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
                # The Anthropic Messages API requires max_tokens, so fall back to a default.
                "max_tokens": input.max_tokens or 4096,
            }
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]

            response = self.client.messages.create(**params)

            # A response may interleave text and tool_use blocks; collect both.
            text_parts: list[str] = []
            tool_calls: list[ToolCall] | None = None
            for block in response.content:
                block_type = getattr(block, "type", None)
                if block_type == "text":
                    text_parts.append(block.text)
                elif block_type == "tool_use":
                    tool_calls = tool_calls or []
                    tool_calls.append(
                        ToolCall(
                            id=getattr(block, "id", ""),
                            name=getattr(block, "name", ""),
                            arguments=dict(getattr(block, "input", {}) or {}),
                        )
                    )

            return LLMOutput(
                content="".join(text_parts),
                tool_calls=tool_calls,
                model=response.model,
                usage={
                    "input_tokens": response.usage.input_tokens,
                    "output_tokens": response.usage.output_tokens,
                },
                stop_reason=response.stop_reason,
            )
        except Exception as e:
            msg = str(e)
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.CLAUDE) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.CLAUDE) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.CLAUDE) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        return self._models.copy()

    def validate_config(self) -> bool:
        return bool(self.client.api_key)

    def get_default_model(self) -> str:
        return "claude-sonnet-4-7"
112  src/llm/providers/ollama.py  Normal file
@@ -0,0 +1,112 @@
"""Ollama provider adapter for local models."""

from __future__ import annotations

import os
from typing import Any

from llm.core.interface import (
    AuthenticationError,
    ContextLengthError,
    LLMProvider,
    RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall


class OllamaProvider(LLMProvider):
    provider_type = ProviderType.OLLAMA

    def __init__(
        self,
        base_url: str | None = None,
        default_model: str | None = None,
    ) -> None:
        self.base_url = base_url or os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
        self.default_model = default_model or os.environ.get("OLLAMA_MODEL", "llama3.2")
        self._models = [
            ModelInfo(
                name="llama3.2",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="mistral",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=8192,
            ),
            ModelInfo(
                name="codellama",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=16384,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        import json
        import urllib.request

        try:
            url = f"{self.base_url}/api/chat"
            model = input.model or self.default_model

            payload: dict[str, Any] = {
                "model": model,
                "messages": [msg.to_dict() for msg in input.messages],
                "stream": False,
            }
            if input.temperature != 1.0:
                payload["options"] = {"temperature": input.temperature}

            data = json.dumps(payload).encode("utf-8")
            req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"})

            with urllib.request.urlopen(req, timeout=60) as response:
                result = json.loads(response.read().decode("utf-8"))

            content = result.get("message", {}).get("content", "")

            tool_calls = None
            if result.get("message", {}).get("tool_calls"):
                tool_calls = [
                    ToolCall(
                        id=tc.get("id", ""),
                        name=tc.get("function", {}).get("name", ""),
                        arguments=tc.get("function", {}).get("arguments", {}),
                    )
                    for tc in result["message"]["tool_calls"]
                ]

            return LLMOutput(
                content=content,
                tool_calls=tool_calls,
                model=model,
                stop_reason=result.get("done_reason"),
            )
        except Exception as e:
            msg = str(e)
            if "401" in msg or "connection" in msg.lower():
                raise AuthenticationError(f"Ollama connection failed: {msg}", provider=ProviderType.OLLAMA) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OLLAMA) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OLLAMA) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        return self._models.copy()

    def validate_config(self) -> bool:
        return bool(self.base_url)

    def get_default_model(self) -> str:
        return self.default_model
113  src/llm/providers/openai.py  Normal file
@@ -0,0 +1,113 @@
"""OpenAI provider adapter."""

from __future__ import annotations

import json
import os
from typing import Any

from openai import OpenAI

from llm.core.interface import (
    AuthenticationError,
    ContextLengthError,
    LLMProvider,
    RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, Message, ModelInfo, ProviderType, ToolCall


class OpenAIProvider(LLMProvider):
    provider_type = ProviderType.OPENAI

    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        self.client = OpenAI(api_key=api_key or os.environ.get("OPENAI_API_KEY"), base_url=base_url)
        self._models = [
            ModelInfo(
                name="gpt-4o",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-4o-mini",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-4-turbo",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-3.5-turbo",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=16385,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        try:
            params: dict[str, Any] = {
                "model": input.model or "gpt-4o-mini",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
            }
            if input.max_tokens:
                params["max_tokens"] = input.max_tokens
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]

            response = self.client.chat.completions.create(**params)
            choice = response.choices[0]

            tool_calls = None
            if choice.message.tool_calls:
                tool_calls = [
                    ToolCall(
                        id=tc.id or "",
                        name=tc.function.name,
                        # The API returns arguments as a JSON string; decode it into a dict.
                        arguments=json.loads(tc.function.arguments) if tc.function.arguments else {},
                    )
                    for tc in choice.message.tool_calls
                ]

            return LLMOutput(
                content=choice.message.content or "",
                tool_calls=tool_calls,
                model=response.model,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                },
                stop_reason=choice.finish_reason,
            )
        except Exception as e:
            msg = str(e)
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.OPENAI) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OPENAI) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OPENAI) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        return self._models.copy()

    def validate_config(self) -> bool:
        return bool(self.client.api_key)

    def get_default_model(self) -> str:
        return "gpt-4o-mini"
39  src/llm/providers/resolver.py  Normal file
@@ -0,0 +1,39 @@
"""Provider factory and resolver."""

from __future__ import annotations

import os

from llm.core.interface import LLMProvider
from llm.core.types import ProviderType
from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider


_PROVIDER_MAP: dict[ProviderType, type[LLMProvider]] = {
    ProviderType.CLAUDE: ClaudeProvider,
    ProviderType.OPENAI: OpenAIProvider,
    ProviderType.OLLAMA: OllamaProvider,
}


def get_provider(provider_type: ProviderType | str | None = None, **kwargs: str) -> LLMProvider:
    if provider_type is None:
        provider_type = os.environ.get("LLM_PROVIDER", "claude").lower()

    if isinstance(provider_type, str):
        try:
            provider_type = ProviderType(provider_type)
        except ValueError:
            raise ValueError(f"Unknown provider type: {provider_type}. Valid types: {[p.value for p in ProviderType]}")

    provider_cls = _PROVIDER_MAP.get(provider_type)
    if not provider_cls:
        raise ValueError(f"No provider registered for type: {provider_type}")

    return provider_cls(**kwargs)


def register_provider(provider_type: ProviderType, provider_cls: type[LLMProvider]) -> None:
    _PROVIDER_MAP[provider_type] = provider_cls
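A usage sketch of the factory: environment-driven resolution plus overriding an entry via register_provider. The TracingOllamaProvider subclass is invented here to show the override; adding an entirely new backend would also require extending the ProviderType enum, since get_provider validates names against it:

import os

from llm.core.types import ProviderType
from llm.providers.ollama import OllamaProvider
from llm.providers.resolver import get_provider, register_provider

# Falls back to "claude" when LLM_PROVIDER is unset.
os.environ["LLM_PROVIDER"] = "ollama"
provider = get_provider()
print(provider.get_default_model())


class TracingOllamaProvider(OllamaProvider):
    """Illustrative subclass that logs before delegating to the real adapter."""

    def generate(self, input):
        print("calling ollama:", input.model or self.default_model)
        return super().generate(input)


# Swap the class bound to an existing ProviderType.
register_provider(ProviderType.OLLAMA, TracingOllamaProvider)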
9  src/llm/tools/__init__.py  Normal file
@@ -0,0 +1,9 @@
"""Tools module for tool/function calling abstraction."""

from llm.tools.executor import ReActAgent, ToolExecutor, ToolRegistry

__all__ = [
    "ToolRegistry",
    "ToolExecutor",
    "ReActAgent",
]
108  src/llm/tools/executor.py  Normal file
@@ -0,0 +1,108 @@
"""Tool executor for handling tool calls from LLM responses."""

from __future__ import annotations

from typing import Any, Callable

from llm.core.types import LLMInput, LLMOutput, Message, Role, ToolCall, ToolDefinition, ToolResult


ToolFunc = Callable[..., Any]


class ToolRegistry:
    def __init__(self) -> None:
        self._tools: dict[str, ToolFunc] = {}
        self._definitions: dict[str, ToolDefinition] = {}

    def register(self, definition: ToolDefinition, func: ToolFunc) -> None:
        self._tools[definition.name] = func
        self._definitions[definition.name] = definition

    def get(self, name: str) -> ToolFunc | None:
        return self._tools.get(name)

    def get_definition(self, name: str) -> ToolDefinition | None:
        return self._definitions.get(name)

    def list_tools(self) -> list[ToolDefinition]:
        return list(self._definitions.values())

    def has(self, name: str) -> bool:
        return name in self._tools


class ToolExecutor:
    def __init__(self, registry: ToolRegistry | None = None) -> None:
        self.registry = registry or ToolRegistry()

    def execute(self, tool_call: ToolCall) -> ToolResult:
        func = self.registry.get(tool_call.name)
        if not func:
            return ToolResult(
                tool_call_id=tool_call.id,
                content=f"Error: Tool '{tool_call.name}' not found",
                is_error=True,
            )

        try:
            result = func(**tool_call.arguments)
            content = result if isinstance(result, str) else str(result)
            return ToolResult(tool_call_id=tool_call.id, content=content)
        except Exception as e:
            return ToolResult(
                tool_call_id=tool_call.id,
                content=f"Error executing {tool_call.name}: {e}",
                is_error=True,
            )

    def execute_all(self, tool_calls: list[ToolCall]) -> list[ToolResult]:
        return [self.execute(tc) for tc in tool_calls]


class ReActAgent:
    def __init__(
        self,
        provider: Any,
        executor: ToolExecutor,
        max_iterations: int = 10,
    ) -> None:
        self.provider = provider
        self.executor = executor
        self.max_iterations = max_iterations

    async def run(self, input: LLMInput) -> LLMOutput:
        messages = list(input.messages)
        tools = input.tools or []

        for _ in range(self.max_iterations):
            input_copy = LLMInput(
                messages=messages,
                model=input.model,
                temperature=input.temperature,
                max_tokens=input.max_tokens,
                tools=tools,
            )

            output = self.provider.generate(input_copy)

            if not output.has_tool_calls:
                return output

            # Record the assistant turn before the tool results so the next request
            # sees the model's reply; Message cannot carry structured tool calls, so
            # only the text content is preserved here.
            messages.append(Message(role=Role.ASSISTANT, content=output.content))

            results = self.executor.execute_all(output.tool_calls)

            for result in results:
                messages.append(
                    Message(
                        role=Role.TOOL,
                        content=result.content,
                        tool_call_id=result.tool_call_id,
                    )
                )

        return LLMOutput(
            content="Max iterations reached",
            stop_reason="max_iterations",
        )
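Finally, a sketch that wires the registry, executor, and agent together; the add tool and the provider choice are illustrative, and since ReActAgent.run is a coroutine it is driven with asyncio:

import asyncio

from llm.core.types import LLMInput, Message, Role, ToolDefinition
from llm.providers import get_provider
from llm.tools import ReActAgent, ToolExecutor, ToolRegistry

registry = ToolRegistry()
registry.register(
    ToolDefinition(
        name="add",
        description="Add two integers.",
        parameters={
            "type": "object",
            "properties": {"a": {"type": "integer"}, "b": {"type": "integer"}},
            "required": ["a", "b"],
        },
    ),
    lambda a, b: str(a + b),
)

agent = ReActAgent(provider=get_provider("openai"), executor=ToolExecutor(registry))

request = LLMInput(
    messages=[Message(role=Role.USER, content="What is 17 + 25? Use the add tool.")],
    tools=registry.list_tools(),
)

result = asyncio.run(agent.run(request))
print(result.content)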