Add LLM core abstraction layer: provider interface, error hierarchy, and shared request/response types

This commit is contained in:
Anish
2026-04-12 12:34:45 +05:30
parent 7fc44c91b8
commit d39a8a049a
29 changed files with 1410 additions and 5 deletions

1
src/llm/core/__init__.py Normal file
View File

@@ -0,0 +1 @@
"""Core module for LLM abstraction layer."""

60
src/llm/core/interface.py Normal file
View File

@@ -0,0 +1,60 @@
"""LLM Provider interface definition."""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any
from llm.core.types import LLMInput, LLMOutput, ModelInfo, ProviderType
class LLMProvider(ABC):
    """Abstract base class that every concrete LLM backend implements.

    Subclasses must provide ``generate``, ``list_models`` and
    ``validate_config``; the capability probes below ship conservative
    defaults that providers may override.
    """

    # Identifies which backend this provider talks to.
    provider_type: ProviderType

    @abstractmethod
    def generate(self, input: LLMInput) -> LLMOutput:
        """Produce an ``LLMOutput`` for the given ``LLMInput``."""

    @abstractmethod
    def list_models(self) -> list[ModelInfo]:
        """Return the models this provider can serve."""

    @abstractmethod
    def validate_config(self) -> bool:
        """Return ``True`` when the provider's configuration is usable."""

    def supports_tools(self) -> bool:
        """Whether tool/function calling is available (default: yes)."""
        return True

    def supports_vision(self) -> bool:
        """Whether image input is available (default: no)."""
        return False

    def get_default_model(self) -> str:
        """Model name used when the caller does not pick one explicitly."""
        raise NotImplementedError(
            f"{type(self).__name__} must implement get_default_model"
        )
class LLMError(Exception):
    """Base exception for every provider-raised failure.

    Carries optional context alongside the human-readable message: the
    originating provider, a short machine-readable code, and a free-form
    details mapping.
    """

    def __init__(
        self,
        message: str,
        provider: ProviderType | None = None,
        code: str | None = None,
        details: dict[str, Any] | None = None,
    ) -> None:
        super().__init__(message)
        # Mirror the message as an attribute for structured access.
        self.message, self.provider, self.code = message, provider, code
        # A falsy details value (None or empty) normalizes to a fresh dict,
        # so self.details is always a mapping.
        self.details = details or {}
class AuthenticationError(LLMError):
    """Provider credentials were missing, invalid, or rejected."""


class RateLimitError(LLMError):
    """The provider throttled the request."""


class ContextLengthError(LLMError):
    """The prompt exceeded the model's context window."""


class ModelNotFoundError(LLMError):
    """The requested model is unknown to the provider."""


class ToolExecutionError(LLMError):
    """A tool invocation requested by the model failed."""

140
src/llm/core/types.py Normal file
View File

@@ -0,0 +1,140 @@
"""Core type definitions for LLM abstraction layer."""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import Enum
from typing import Any
class Role(str, Enum):
    """Author role of a chat message (string-valued for easy serialization)."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"
class ProviderType(str, Enum):
    """Supported LLM backends (string-valued for easy serialization)."""

    CLAUDE = "claude"
    OPENAI = "openai"
    OLLAMA = "ollama"
@dataclass(frozen=True)
class Message:
    """One chat message exchanged with an LLM."""

    role: Role
    content: str
    name: str | None = None          # optional participant/tool name
    tool_call_id: str | None = None  # id of the tool call this message answers

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a provider-style payload dict.

        Optional fields are included only when set. ``None`` is the
        declared "absent" sentinel (the fields are typed ``str | None``),
        so the checks use ``is not None`` — a truthiness check would
        silently drop a valid empty string.
        """
        result: dict[str, Any] = {"role": self.role.value, "content": self.content}
        if self.name is not None:
            result["name"] = self.name
        if self.tool_call_id is not None:
            result["tool_call_id"] = self.tool_call_id
        return result
@dataclass(frozen=True)
class ToolDefinition:
    """Declaration of a callable tool exposed to the model."""

    name: str
    description: str
    # Parameter spec; presumably a JSON-Schema-style object — confirm per provider.
    parameters: dict[str, Any]
    strict: bool = True

    def to_dict(self) -> dict[str, Any]:
        """Serialize all four fields into a plain dict."""
        return dict(
            name=self.name,
            description=self.description,
            parameters=self.parameters,
            strict=self.strict,
        )
@dataclass(frozen=True)
class ToolCall:
    """A tool invocation requested by the model in its response."""

    # Provider-assigned identifier; presumably echoed back as
    # ToolResult.tool_call_id — confirm with provider adapters.
    id: str
    # Name of the tool to invoke.
    name: str
    # Parsed arguments for the call.
    arguments: dict[str, Any]
@dataclass(frozen=True)
class ToolResult:
    """Outcome of executing a model-requested tool call."""

    # Matches the id of the ToolCall this result answers.
    tool_call_id: str
    # Tool output serialized as text.
    content: str
    # True when the tool execution failed.
    is_error: bool = False
@dataclass(frozen=True)
class LLMInput:
    """Normalized request payload sent to any LLM provider."""

    messages: list[Message]
    model: str | None = None
    temperature: float = 1.0
    max_tokens: int | None = None
    tools: list[ToolDefinition] | None = None
    stream: bool = False
    metadata: dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict.

        Optional fields are emitted only when set; ``metadata`` entries
        are merged last and therefore override base keys on collision.
        """
        payload: dict[str, Any] = {
            "messages": [message.to_dict() for message in self.messages],
            "temperature": self.temperature,
            "stream": self.stream,
        }
        if self.model:
            payload["model"] = self.model
        if self.max_tokens is not None:
            payload["max_tokens"] = self.max_tokens
        if self.tools:
            payload["tools"] = [definition.to_dict() for definition in self.tools]
        return {**payload, **self.metadata}
@dataclass(frozen=True)
class LLMOutput:
    """Normalized response returned by any LLM provider."""

    content: str
    tool_calls: list[ToolCall] | None = None
    model: str | None = None
    usage: dict[str, int] | None = None
    stop_reason: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)

    @property
    def has_tool_calls(self) -> bool:
        """True when the response contains at least one tool call."""
        return self.tool_calls is not None and len(self.tool_calls) > 0

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a plain dict.

        Optional fields are emitted only when set; ``metadata`` entries
        are merged last and therefore override base keys on collision.
        """
        payload: dict[str, Any] = {"content": self.content}
        if self.tool_calls:
            payload["tool_calls"] = [
                {"id": call.id, "name": call.name, "arguments": call.arguments}
                for call in self.tool_calls
            ]
        if self.model:
            payload["model"] = self.model
        if self.usage:
            payload["usage"] = self.usage
        if self.stop_reason:
            payload["stop_reason"] = self.stop_reason
        return {**payload, **self.metadata}
@dataclass(frozen=True)
class ModelInfo:
    """Static description of one model a provider can serve."""

    name: str
    provider: ProviderType
    supports_tools: bool = True
    supports_vision: bool = False
    max_tokens: int | None = None      # None when the limit is unknown
    context_window: int | None = None  # None when the limit is unknown

    def to_dict(self) -> dict[str, Any]:
        """Serialize every field; unknown limits stay ``None``."""
        return dict(
            name=self.name,
            provider=self.provider.value,
            supports_tools=self.supports_tools,
            supports_vision=self.supports_vision,
            max_tokens=self.max_tokens,
            context_window=self.context_window,
        )