"""
|
|
providers/base.py — Abstract base class for AI providers.
|
|
|
|
The interface is designed for aide's tool-use agent loop:
|
|
- Tool schemas are in aide's internal format (Anthropic-native)
|
|
- Providers are responsible for translating to their wire format
|
|
- Responses are normalised into a common ProviderResponse
|
|
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any


@dataclass
class ToolCallResult:
    """A single tool call requested by the model."""

    id: str  # Unique ID for this call (echoed back in tool result messages)
    name: str  # Tool name, e.g. "caldav" or "email:send"
    arguments: dict[str, Any]  # Parsed JSON arguments, keyed by schema property name
|
|
|
|
|
|
@dataclass
class UsageStats:
    """Token usage reported by a provider for a single call.

    Both counts default to 0 so a response can be constructed even when
    the provider omits usage information.
    """

    input_tokens: int = 0
    output_tokens: int = 0

    @property
    def total_tokens(self) -> int:
        """Combined input and output token count."""
        return self.input_tokens + self.output_tokens

    def __add__(self, other: UsageStats) -> UsageStats:
        """Return element-wise summed usage.

        Lets callers accumulate usage across multiple provider calls
        (e.g. over the iterations of an agent loop) with plain `+`.
        """
        if not isinstance(other, UsageStats):
            return NotImplemented
        return UsageStats(
            input_tokens=self.input_tokens + other.input_tokens,
            output_tokens=self.output_tokens + other.output_tokens,
        )
|
|
|
|
|
|
@dataclass
class ProviderResponse:
    """Normalised response from any provider."""

    text: str | None  # Text content (may be empty/None when tool calls are present)
    tool_calls: list[ToolCallResult] = field(default_factory=list)  # Tool invocations requested by the model
    usage: UsageStats = field(default_factory=UsageStats)  # Token accounting for this call
    finish_reason: str = "stop"  # "stop", "tool_use", "max_tokens", "error"
    model: str = ""  # Model ID that served the request (empty if unknown)
    images: list[str] = field(default_factory=list)  # base64 data URLs from image-gen models
|
|
|
|
|
|
class AIProvider(ABC):
    """
    Abstract base for AI providers.

    Tool schema format (aide-internal / Anthropic-native):

        {
            "name": "tool_name",
            "description": "What this tool does",
            "input_schema": {
                "type": "object",
                "properties": { ... },
                "required": [...]
            }
        }

    Providers translate this to their own wire format internally.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable provider name, e.g. 'Anthropic' or 'OpenRouter'."""

    @property
    @abstractmethod
    def default_model(self) -> str:
        """Default model ID to use when none is specified."""

    @abstractmethod
    def chat(
        self,
        messages: list[dict],
        tools: list[dict] | None = None,
        system: str = "",
        model: str = "",
        max_tokens: int = 4096,
    ) -> ProviderResponse:
        """
        Synchronous chat completion.

        Args:
            messages: Conversation history in OpenAI-style format
                (role/content pairs, plus tool_call and tool_result messages)
            tools: List of tool schemas in aide-internal format (may be None)
            system: System prompt text
            model: Model ID (uses default_model if empty)
            max_tokens: Max tokens in response

        Returns:
            Normalised ProviderResponse
        """

    @abstractmethod
    async def chat_async(
        self,
        messages: list[dict],
        tools: list[dict] | None = None,
        system: str = "",
        model: str = "",
        max_tokens: int = 4096,
    ) -> ProviderResponse:
        """Async variant of chat(). Used by the FastAPI agent loop.

        Takes the same arguments as chat() and returns the same
        normalised ProviderResponse.
        """