Bug fixes; added model info screen with pricing info.

This commit is contained in:
2026-02-05 14:43:37 +01:00
parent 06a3c898d3
commit a47b1c95a2
6 changed files with 234 additions and 0 deletions

View File

@@ -141,6 +141,8 @@ class AIClient:
system_prompt: Optional[str] = None,
online: bool = False,
transforms: Optional[List[str]] = None,
enable_web_search: bool = False,
web_search_config: Optional[Dict[str, Any]] = None,
) -> Union[ChatResponse, Iterator[StreamChunk]]:
"""
Send a chat request.
@@ -156,6 +158,8 @@ class AIClient:
system_prompt: System prompt to prepend
online: Whether to enable online mode
transforms: List of transforms (e.g., ["middle-out"])
enable_web_search: Enable native web search (Anthropic only)
web_search_config: Web search configuration (Anthropic only)
Returns:
ChatResponse for non-streaming, Iterator[StreamChunk] for streaming
@@ -226,6 +230,8 @@ class AIClient:
tools=tools,
tool_choice=tool_choice,
transforms=transforms,
enable_web_search=enable_web_search,
web_search_config=web_search_config or {},
)
def chat_with_tools(

View File

@@ -23,6 +23,7 @@ from oai.tui.screens import (
CreditsScreen,
HelpScreen,
InputDialog,
ModelInfoScreen,
ModelSelectorScreen,
StatsScreen,
)
@@ -302,6 +303,17 @@ class oAIChatApp(App):
await self.push_screen(CreditsScreen(self.session.client))
return
if cmd_word == "info":
# Show model info modal
if self.session.selected_model:
provider_name = self.session.client.provider_name
await self.push_screen(ModelInfoScreen(self.session.selected_model, provider_name))
else:
chat_display = self.query_one(ChatDisplay)
error_widget = SystemMessageWidget("❌ No model selected. Use /model to select a model.")
await chat_display.add_message(error_widget)
return
if cmd_word == "clear":
chat_display = self.query_one(ChatDisplay)
chat_display.clear_messages()

View File

@@ -6,6 +6,7 @@ from oai.tui.screens.conversation_selector import ConversationSelectorScreen
from oai.tui.screens.credits_screen import CreditsScreen
from oai.tui.screens.dialogs import AlertDialog, ConfirmDialog, InputDialog
from oai.tui.screens.help_screen import HelpScreen
from oai.tui.screens.model_info_screen import ModelInfoScreen
from oai.tui.screens.model_selector import ModelSelectorScreen
from oai.tui.screens.stats_screen import StatsScreen
@@ -18,6 +19,7 @@ __all__ = [
"CreditsScreen",
"InputDialog",
"HelpScreen",
"ModelInfoScreen",
"ModelSelectorScreen",
"StatsScreen",
]

View File

@@ -68,6 +68,7 @@ class CommandsScreen(ModalScreen[None]):
[green]/help[/] - Show help screen with keyboard shortcuts
[green]/commands[/] - Show this commands reference
[green]/model[/] - Open model selector (or press F2)
[green]/info[/] - Show detailed information about current model
[green]/stats[/] - Show session statistics (or press Ctrl+S)
[green]/credits[/] - Check account credits (OpenRouter) or view console link
[green]/clear[/] - Clear chat display

View File

@@ -0,0 +1,212 @@
"""Model information screen for oAI TUI."""
from typing import Any, Dict, Optional
from textual.app import ComposeResult
from textual.containers import Container, Vertical, VerticalScroll
from textual.screen import ModalScreen
from textual.widgets import Button, Static
class ModelInfoScreen(ModalScreen[None]):
    """Modal screen displaying detailed information about a model.

    Renders basic metadata, pricing (normalized to USD per million tokens),
    input/output modalities, feature support, and provider-specific limits
    for the currently selected model. Dismissed with the Close button,
    Escape, or Enter.
    """

    DEFAULT_CSS = """
    ModelInfoScreen {
        align: center middle;
    }
    ModelInfoScreen > Container {
        width: 80;
        height: auto;
        max-height: 90%;
        background: #1e1e1e;
        border: solid #555555;
    }
    ModelInfoScreen .header {
        dock: top;
        width: 100%;
        height: auto;
        background: #2d2d2d;
        color: #cccccc;
        padding: 0 2;
    }
    ModelInfoScreen .content {
        width: 100%;
        height: auto;
        max-height: 30;
        background: #1e1e1e;
        padding: 2;
        color: #cccccc;
        overflow-y: auto;
    }
    ModelInfoScreen .footer {
        dock: bottom;
        width: 100%;
        height: auto;
        background: #2d2d2d;
        padding: 1 2;
        align: center middle;
    }
    """

    def __init__(self, model_data: Dict[str, Any], provider_name: str) -> None:
        """Initialize the screen.

        Args:
            model_data: Raw model record from the provider API (keys such as
                "id", "name", "context_length", "pricing", "architecture",
                "supported_parameters" are read if present).
            provider_name: Name of the active provider (e.g. "openrouter",
                "anthropic", "ollama"); selects provider-specific sections.
        """
        super().__init__()
        self.model_data = model_data
        self.provider_name = provider_name

    def compose(self) -> ComposeResult:
        """Compose the modal layout: header, scrollable body, footer button."""
        with Container():
            yield Static("[bold]Model Information[/]", classes="header")
            with VerticalScroll(classes="content"):
                yield Static(self._get_model_info_text(), markup=True)
            with Vertical(classes="footer"):
                yield Button("Close", id="close", variant="primary")

    def _get_model_info_text(self) -> str:
        """Generate the Rich-markup text describing the model.

        Returns:
            A newline-joined string with basic info, pricing, capabilities,
            feature support, and provider-specific details.
        """
        lines = []

        # Header
        model_name = self.model_data.get("name", "Unknown")
        model_id = self.model_data.get("id", "Unknown")
        lines.append(f"[bold cyan]═══ {model_name.upper()} ═══[/]\n")

        # Basic Information
        lines.append("[bold yellow]Basic Information[/]")
        lines.append(f"[bold]Model ID:[/] {model_id}")
        lines.append(f"[bold]Model Name:[/] {model_name}")
        lines.append(f"[bold]Provider:[/] {self.provider_name}")

        # Context Length — guard against an explicit None in the payload,
        # which would make the `> 0` comparison raise TypeError.
        context_length = self.model_data.get("context_length") or 0
        if context_length > 0:
            lines.append(f"[bold]Context Length:[/] {context_length:,} tokens")
        lines.append("")

        # Pricing Information - Always show section
        lines.append("[bold yellow]Pricing[/]")
        pricing = self.model_data.get("pricing", {})
        if pricing and (pricing.get("prompt") or pricing.get("completion")):
            # Provider reports per-token USD prices (often as strings);
            # normalize to price per million tokens.
            prompt_price = float(pricing.get("prompt", 0)) * 1_000_000
            completion_price = float(pricing.get("completion", 0)) * 1_000_000
            if prompt_price > 0:
                lines.append(f"[bold]Input Price:[/] [green]${prompt_price:.2f}[/] per million tokens")
            else:
                lines.append("[bold]Input Price:[/] [dim]N/A[/]")
            if completion_price > 0:
                lines.append(f"[bold]Output Price:[/] [green]${completion_price:.2f}[/] per million tokens")
            else:
                lines.append("[bold]Output Price:[/] [dim]N/A[/]")
            # Approximate cost for a typical conversation when both prices exist.
            if prompt_price > 0 and completion_price > 0:
                typical_cost = (prompt_price * 1000 / 1_000_000) + (completion_price * 2000 / 1_000_000)
                lines.append(f"[dim]Typical chat (1k in, 2k out): ~${typical_cost:.4f}[/]")
        else:
            # No pricing data available
            if self.provider_name == "ollama":
                lines.append("[bold]Input Price:[/] [green]Free (local)[/]")
                lines.append("[bold]Output Price:[/] [green]Free (local)[/]")
                lines.append("[dim]Running locally - no API costs[/]")
            else:
                lines.append("[bold]Input Price:[/] [dim]N/A[/]")
                lines.append("[bold]Output Price:[/] [dim]N/A[/]")
        lines.append("")

        # Capabilities
        lines.append("[bold yellow]Capabilities[/]")
        arch = self.model_data.get("architecture", {})
        input_modalities = arch.get("input_modalities", [])
        output_modalities = arch.get("output_modalities", [])
        supported_params = self.model_data.get("supported_parameters", [])

        # Input/Output Modalities (default to plain text when unreported)
        if input_modalities:
            input_str = ", ".join(input_modalities)
            lines.append(f"[bold]Input Modalities:[/] {input_str}")
        else:
            lines.append("[bold]Input Modalities:[/] text")
        if output_modalities:
            output_str = ", ".join(output_modalities)
            lines.append(f"[bold]Output Modalities:[/] {output_str}")
        else:
            lines.append("[bold]Output Modalities:[/] text")
        lines.append("")

        # Feature Support
        lines.append("[bold yellow]Feature Support[/]")

        # Image support
        has_image = "image" in input_modalities
        image_icon = "[green]✓[/]" if has_image else "[red]✗[/]"
        lines.append(f"[bold]Image Input:[/] {image_icon} {'Supported' if has_image else 'Not supported'}")

        # Tool/Function calling
        has_tools = "tools" in supported_params
        tools_icon = "[green]✓[/]" if has_tools else "[red]✗[/]"
        lines.append(f"[bold]Function Calling:[/] {tools_icon} {'Supported' if has_tools else 'Not supported'}")

        # Streaming / Temperature: only trust supported_parameters when the
        # provider actually reports the list; otherwise assume support (most
        # models do). The previous `... or True` was always True, so these
        # rows could never show "Not supported".
        has_streaming = "stream" in supported_params if supported_params else True
        stream_icon = "[green]✓[/]" if has_streaming else "[red]✗[/]"
        lines.append(f"[bold]Streaming:[/] {stream_icon} {'Supported' if has_streaming else 'Not supported'}")

        has_temp = "temperature" in supported_params if supported_params else True
        temp_icon = "[green]✓[/]" if has_temp else "[red]✗[/]"
        lines.append(f"[bold]Temperature:[/] {temp_icon} {'Supported' if has_temp else 'Not supported'}")
        lines.append("")

        # Additional Information (provider-specific)
        if self.provider_name == "openrouter":
            lines.append("[bold yellow]OpenRouter Specific[/]")
            # Top provider limits
            top_provider = self.model_data.get("top_provider", {})
            if top_provider:
                max_completion = top_provider.get("max_completion_tokens")
                if max_completion:
                    lines.append(f"[bold]Max Completion:[/] {max_completion:,} tokens")
            # Per-request limits — OpenRouter returns these counts as strings;
            # coerce to int before applying the thousands-separator format,
            # which raises ValueError on str.
            per_request = self.model_data.get("per_request_limits")
            if per_request:
                if per_request.get("prompt_tokens"):
                    lines.append(f"[bold]Max Prompt:[/] {int(per_request['prompt_tokens']):,} tokens")
                if per_request.get("completion_tokens"):
                    lines.append(f"[bold]Max Completion:[/] {int(per_request['completion_tokens']):,} tokens")
        elif self.provider_name == "ollama":
            lines.append("[bold yellow]Ollama Specific[/]")
            lines.append("[dim]Running locally on your machine[/]")
            lines.append(f"[dim]Model file: {model_id}[/]")

        lines.append("\n[dim]Tip: Use /model to switch models[/]")
        return "\n".join(lines)

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Dismiss the modal on any button press (only Close exists)."""
        self.dismiss()

    def on_key(self, event) -> None:
        """Dismiss the modal on Escape or Enter."""
        if event.key in ("escape", "enter"):
            self.dismiss()

View File

@@ -61,6 +61,7 @@ class CommandDropdown(VerticalScroll):
("/help", "Show help screen"),
("/commands", "Show all commands"),
("/model", "Select AI model"),
("/info", "Show detailed model information"),
("/provider", "Switch AI provider"),
("/provider openrouter", "Switch to OpenRouter"),
("/provider anthropic", "Switch to Anthropic (Claude)"),