mirror of
https://github.com/affaan-m/everything-claude-code.git
synced 2026-04-15 22:43:28 +08:00
GUI created, dark mode created, npm script added, styling changed. Decoupled from Claude so the project is provider-agnostic and more open.
src/llm/providers/__init__.py (Normal file, 14 lines)
@@ -0,0 +1,14 @@
"""Provider adapters for multiple LLM backends."""

from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider
from llm.providers.resolver import get_provider, register_provider

__all__ = [
    "ClaudeProvider",
    "OpenAIProvider",
    "OllamaProvider",
    "get_provider",
    "register_provider",
]
src/llm/providers/claude.py (Normal file, 103 lines)
@@ -0,0 +1,103 @@
"""Claude provider adapter."""

from __future__ import annotations

import os
from typing import Any

from anthropic import Anthropic

from llm.core.interface import (
    AuthenticationError,
    ContextLengthError,
    LLMProvider,
    RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType, ToolCall


class ClaudeProvider(LLMProvider):
    provider_type = ProviderType.CLAUDE

    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        self.client = Anthropic(api_key=api_key or os.environ.get("ANTHROPIC_API_KEY"), base_url=base_url)
        self._models = [
            ModelInfo(
                name="claude-opus-4-5",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-sonnet-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=True,
                max_tokens=8192,
                context_window=200000,
            ),
            ModelInfo(
                name="claude-haiku-4-7",
                provider=ProviderType.CLAUDE,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=200000,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        try:
            params: dict[str, Any] = {
                "model": input.model or "claude-sonnet-4-7",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
                # The Messages API requires max_tokens, so fall back to a
                # default instead of omitting it when the caller leaves it unset.
                "max_tokens": input.max_tokens or 8192,
            }
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]

            response = self.client.messages.create(**params)

            first_block = response.content[0] if response.content else None

            tool_calls = None
            if first_block is not None and getattr(first_block, "type", None) == "tool_use":
                # A tool_use block carries its arguments as a plain dict in `input`.
                tool_calls = [
                    ToolCall(
                        id=getattr(first_block, "id", ""),
                        name=getattr(first_block, "name", ""),
                        arguments=dict(first_block.input or {}),
                    )
                ]

            return LLMOutput(
                # A tool_use block has no `.text`, so guard the attribute access.
                content=getattr(first_block, "text", ""),
                tool_calls=tool_calls,
                model=response.model,
                usage={
                    "input_tokens": response.usage.input_tokens,
                    "output_tokens": response.usage.output_tokens,
                },
                stop_reason=response.stop_reason,
            )
        except Exception as e:
            msg = str(e)
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.CLAUDE) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.CLAUDE) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.CLAUDE) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        return self._models.copy()

    def validate_config(self) -> bool:
        return bool(self.client.api_key)

    def get_default_model(self) -> str:
        return "claude-sonnet-4-7"
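A minimal smoke-test sketch for the adapter above. The Message and LLMInput constructors are not part of this diff, so the keyword fields below (role/content, messages/model/temperature) are assumptions inferred from how generate() reads them:

from llm.core.types import LLMInput, Message
from llm.providers.claude import ClaudeProvider

# Assumes ANTHROPIC_API_KEY is set in the environment (or pass api_key=...).
provider = ClaudeProvider()
output = provider.generate(
    LLMInput(
        messages=[Message(role="user", content="Say hello in one word.")],
        model="claude-sonnet-4-7",  # any name from the ModelInfo list above
        temperature=0.7,
    )
)
print(output.content, output.usage)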
src/llm/providers/ollama.py (Normal file, 112 lines)
@@ -0,0 +1,112 @@
"""Ollama provider adapter for local models."""

from __future__ import annotations

import json
import os
import urllib.request
from typing import Any

from llm.core.interface import (
    AuthenticationError,
    ContextLengthError,
    LLMProvider,
    RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType, ToolCall


class OllamaProvider(LLMProvider):
    provider_type = ProviderType.OLLAMA

    def __init__(
        self,
        base_url: str | None = None,
        default_model: str | None = None,
    ) -> None:
        self.base_url = base_url or os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
        self.default_model = default_model or os.environ.get("OLLAMA_MODEL", "llama3.2")
        self._models = [
            ModelInfo(
                name="llama3.2",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="mistral",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=8192,
            ),
            ModelInfo(
                name="codellama",
                provider=ProviderType.OLLAMA,
                supports_tools=False,
                supports_vision=False,
                max_tokens=4096,
                context_window=16384,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        try:
            url = f"{self.base_url}/api/chat"
            model = input.model or self.default_model

            payload: dict[str, Any] = {
                "model": model,
                "messages": [msg.to_dict() for msg in input.messages],
                "stream": False,  # request one JSON response rather than a stream
            }
            if input.temperature != 1.0:
                payload["options"] = {"temperature": input.temperature}

            data = json.dumps(payload).encode("utf-8")
            # Request defaults to POST whenever a body is supplied.
            req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"})

            with urllib.request.urlopen(req, timeout=60) as response:
                result = json.loads(response.read().decode("utf-8"))

            content = result.get("message", {}).get("content", "")

            tool_calls = None
            if result.get("message", {}).get("tool_calls"):
                tool_calls = [
                    ToolCall(
                        id=tc.get("id", ""),
                        name=tc.get("function", {}).get("name", ""),
                        arguments=tc.get("function", {}).get("arguments", {}),
                    )
                    for tc in result["message"]["tool_calls"]
                ]

            return LLMOutput(
                content=content,
                tool_calls=tool_calls,
                model=model,
                stop_reason=result.get("done_reason"),
            )
        except Exception as e:
            msg = str(e)
            # An unreachable local server surfaces as a connection error; it is
            # mapped to AuthenticationError so callers see a configuration-level failure.
            if "401" in msg or "connection" in msg.lower():
                raise AuthenticationError(f"Ollama connection failed: {msg}", provider=ProviderType.OLLAMA) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OLLAMA) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OLLAMA) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        return self._models.copy()

    def validate_config(self) -> bool:
        return bool(self.base_url)

    def get_default_model(self) -> str:
        return self.default_model
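Because this adapter speaks plain HTTP, exercising it only needs a running local daemon, not an API key. A sketch under the same assumed LLMInput/Message shapes as above:

from llm.core.types import LLMInput, Message
from llm.providers.ollama import OllamaProvider

# Assumes `ollama serve` is running and the model has been pulled
# (e.g. `ollama pull llama3.2`).
provider = OllamaProvider()  # honors OLLAMA_BASE_URL and OLLAMA_MODEL if set
output = provider.generate(
    LLMInput(
        messages=[Message(role="user", content="ping")],
        model="llama3.2",
        temperature=1.0,  # 1.0 leaves the server's default options untouched
    )
)
print(output.model, output.stop_reason)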
src/llm/providers/openai.py (Normal file, 113 lines)
@@ -0,0 +1,113 @@
"""OpenAI provider adapter."""

from __future__ import annotations

import json
import os
from typing import Any

from openai import OpenAI

from llm.core.interface import (
    AuthenticationError,
    ContextLengthError,
    LLMProvider,
    RateLimitError,
)
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType, ToolCall


class OpenAIProvider(LLMProvider):
    provider_type = ProviderType.OPENAI

    def __init__(self, api_key: str | None = None, base_url: str | None = None) -> None:
        self.client = OpenAI(api_key=api_key or os.environ.get("OPENAI_API_KEY"), base_url=base_url)
        self._models = [
            ModelInfo(
                name="gpt-4o",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-4o-mini",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-4-turbo",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=True,
                max_tokens=4096,
                context_window=128000,
            ),
            ModelInfo(
                name="gpt-3.5-turbo",
                provider=ProviderType.OPENAI,
                supports_tools=True,
                supports_vision=False,
                max_tokens=4096,
                context_window=16385,
            ),
        ]

    def generate(self, input: LLMInput) -> LLMOutput:
        try:
            params: dict[str, Any] = {
                "model": input.model or "gpt-4o-mini",
                "messages": [msg.to_dict() for msg in input.messages],
                "temperature": input.temperature,
            }
            if input.max_tokens:
                params["max_tokens"] = input.max_tokens
            if input.tools:
                params["tools"] = [tool.to_dict() for tool in input.tools]

            response = self.client.chat.completions.create(**params)
            choice = response.choices[0]

            tool_calls = None
            if choice.message.tool_calls:
                tool_calls = [
                    ToolCall(
                        id=tc.id or "",
                        name=tc.function.name,
                        # The SDK returns arguments as a JSON-encoded string;
                        # decode it so downstream code always sees a dict.
                        arguments=json.loads(tc.function.arguments) if tc.function.arguments else {},
                    )
                    for tc in choice.message.tool_calls
                ]

            return LLMOutput(
                content=choice.message.content or "",
                tool_calls=tool_calls,
                model=response.model,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens,
                },
                stop_reason=choice.finish_reason,
            )
        except Exception as e:
            msg = str(e)
            if "401" in msg or "authentication" in msg.lower():
                raise AuthenticationError(msg, provider=ProviderType.OPENAI) from e
            if "429" in msg or "rate_limit" in msg.lower():
                raise RateLimitError(msg, provider=ProviderType.OPENAI) from e
            if "context" in msg.lower() and "length" in msg.lower():
                raise ContextLengthError(msg, provider=ProviderType.OPENAI) from e
            raise

    def list_models(self) -> list[ModelInfo]:
        return self._models.copy()

    def validate_config(self) -> bool:
        return bool(self.client.api_key)

    def get_default_model(self) -> str:
        return "gpt-4o-mini"
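Since __init__ forwards base_url to the OpenAI client, the same adapter can point at any OpenAI-compatible endpoint, which is part of what the decoupling buys. A sketch (the local URL is illustrative, and the LLMInput/Message shapes are the same assumptions as above):

from llm.core.types import LLMInput, Message
from llm.providers.openai import OpenAIProvider

# Stock OpenAI: reads OPENAI_API_KEY from the environment.
provider = OpenAIProvider()
# Any OpenAI-compatible server, e.g. a local inference proxy (hypothetical URL).
local = OpenAIProvider(api_key="not-needed", base_url="http://localhost:8000/v1")

output = provider.generate(
    LLMInput(
        messages=[Message(role="user", content="hello")],
        model="gpt-4o-mini",
        temperature=0.2,
    )
)
print(output.stop_reason, output.usage["total_tokens"])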
src/llm/providers/resolver.py (Normal file, 39 lines)
@@ -0,0 +1,39 @@
"""Provider factory and resolver."""

from __future__ import annotations

import os

from llm.core.interface import LLMProvider
from llm.core.types import ProviderType
from llm.providers.claude import ClaudeProvider
from llm.providers.openai import OpenAIProvider
from llm.providers.ollama import OllamaProvider


_PROVIDER_MAP: dict[ProviderType, type[LLMProvider]] = {
    ProviderType.CLAUDE: ClaudeProvider,
    ProviderType.OPENAI: OpenAIProvider,
    ProviderType.OLLAMA: OllamaProvider,
}


def get_provider(provider_type: ProviderType | str | None = None, **kwargs: str) -> LLMProvider:
    if provider_type is None:
        provider_type = os.environ.get("LLM_PROVIDER", "claude").lower()

    if isinstance(provider_type, str):
        try:
            provider_type = ProviderType(provider_type)
        except ValueError:
            # Suppress the chained ValueError; the message already explains it.
            raise ValueError(
                f"Unknown provider type: {provider_type}. Valid types: {[p.value for p in ProviderType]}"
            ) from None

    provider_cls = _PROVIDER_MAP.get(provider_type)
    if not provider_cls:
        raise ValueError(f"No provider registered for type: {provider_type}")

    return provider_cls(**kwargs)


def register_provider(provider_type: ProviderType, provider_cls: type[LLMProvider]) -> None:
    _PROVIDER_MAP[provider_type] = provider_cls
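The resolver is where the "decoupled from Claude" part of the commit message lands: callers pick a backend by name or via the LLM_PROVIDER environment variable, and outside backends can be registered without editing this file. A sketch of both paths; MyProvider is hypothetical, and registering it assumes ProviderType can represent the new backend:

import os
from llm.providers import get_provider, register_provider

os.environ["LLM_PROVIDER"] = "ollama"
default = get_provider()          # resolved from the environment variable
claude = get_provider("claude")   # or selected explicitly by name

# Extension point for backends outside this repo (hypothetical):
# class MyProvider(LLMProvider): ...
# register_provider(ProviderType.MY_BACKEND, MyProvider)
print(type(default).__name__, type(claude).__name__)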