Bug fixes. Added a model info screen with pricing info
This commit is contained in:
212
oai/tui/screens/model_info_screen.py
Normal file
212
oai/tui/screens/model_info_screen.py
Normal file
@@ -0,0 +1,212 @@
|
||||
"""Model information screen for oAI TUI."""
|
||||
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
from textual.app import ComposeResult
|
||||
from textual.containers import Container, Vertical, VerticalScroll
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, Static
|
||||
|
||||
|
||||
class ModelInfoScreen(ModalScreen[None]):
    """Modal screen displaying detailed model information.

    Renders basic metadata, pricing, modalities, feature support, and
    provider-specific limits for a single model as Rich markup inside a
    scrollable modal dialog. Dismissed by the Close button, Escape, or Enter.
    """

    DEFAULT_CSS = """
    ModelInfoScreen {
        align: center middle;
    }

    ModelInfoScreen > Container {
        width: 80;
        height: auto;
        max-height: 90%;
        background: #1e1e1e;
        border: solid #555555;
    }

    ModelInfoScreen .header {
        dock: top;
        width: 100%;
        height: auto;
        background: #2d2d2d;
        color: #cccccc;
        padding: 0 2;
    }

    ModelInfoScreen .content {
        width: 100%;
        height: auto;
        max-height: 30;
        background: #1e1e1e;
        padding: 2;
        color: #cccccc;
        overflow-y: auto;
    }

    ModelInfoScreen .footer {
        dock: bottom;
        width: 100%;
        height: auto;
        background: #2d2d2d;
        padding: 1 2;
        align: center middle;
    }
    """

    def __init__(self, model_data: Dict[str, Any], provider_name: str) -> None:
        """Store the model record and provider name for rendering.

        Args:
            model_data: Raw model metadata dict; key shape varies by provider
                (e.g. OpenRouter vs. Ollama) — TODO confirm against callers.
            provider_name: Provider identifier, e.g. "openrouter" or "ollama".
        """
        super().__init__()
        self.model_data = model_data
        self.provider_name = provider_name

    def compose(self) -> ComposeResult:
        """Compose the modal layout: header, scrollable body, footer."""
        with Container():
            yield Static("[bold]Model Information[/]", classes="header")
            with VerticalScroll(classes="content"):
                yield Static(self._get_model_info_text(), markup=True)
            with Vertical(classes="footer"):
                yield Button("Close", id="close", variant="primary")

    def _get_model_info_text(self) -> str:
        """Generate the full Rich-markup information text for the model."""
        model_name = self.model_data.get("name", "Unknown")
        model_id = self.model_data.get("id", "Unknown")

        lines = [f"[bold cyan]═══ {model_name.upper()} ═══[/]\n"]
        lines.extend(self._basic_info_lines(model_name, model_id))
        lines.append("")
        lines.extend(self._pricing_lines())
        lines.append("")
        lines.extend(self._capability_lines())
        lines.append("")
        lines.extend(self._feature_lines())
        lines.append("")
        lines.extend(self._provider_lines(model_id))
        lines.append("\n[dim]Tip: Use /model to switch models[/]")
        return "\n".join(lines)

    def _basic_info_lines(self, model_name: str, model_id: str) -> list:
        """Lines for the basic-information section."""
        lines = [
            "[bold yellow]Basic Information[/]",
            f"[bold]Model ID:[/] {model_id}",
            f"[bold]Model Name:[/] {model_name}",
            f"[bold]Provider:[/] {self.provider_name}",
        ]
        # `or 0` guards against the key being present with an explicit None,
        # which would make the `> 0` comparison raise a TypeError.
        context_length = self.model_data.get("context_length") or 0
        if context_length > 0:
            lines.append(f"[bold]Context Length:[/] {context_length:,} tokens")
        return lines

    def _pricing_lines(self) -> list:
        """Lines for the pricing section (always shown)."""
        lines = ["[bold yellow]Pricing[/]"]
        pricing = self.model_data.get("pricing") or {}

        if pricing and (pricing.get("prompt") or pricing.get("completion")):
            # Prices arrive as per-token amounts (strings or numbers); scale
            # to dollars per million tokens. `or 0` guards explicit None.
            prompt_price = float(pricing.get("prompt") or 0) * 1_000_000
            completion_price = float(pricing.get("completion") or 0) * 1_000_000

            if prompt_price > 0:
                lines.append(f"[bold]Input Price:[/] [green]${prompt_price:.2f}[/] per million tokens")
            else:
                lines.append("[bold]Input Price:[/] [dim]N/A[/]")

            if completion_price > 0:
                lines.append(f"[bold]Output Price:[/] [green]${completion_price:.2f}[/] per million tokens")
            else:
                lines.append("[bold]Output Price:[/] [dim]N/A[/]")

            # Rough cost estimate for a typical chat (1k prompt + 2k completion).
            if prompt_price > 0 and completion_price > 0:
                typical_cost = (prompt_price * 1000 / 1_000_000) + (completion_price * 2000 / 1_000_000)
                lines.append(f"[dim]Typical chat (1k in, 2k out): ~${typical_cost:.4f}[/]")
        elif self.provider_name == "ollama":
            # Local models incur no per-token API cost.
            lines.append("[bold]Input Price:[/] [green]Free (local)[/]")
            lines.append("[bold]Output Price:[/] [green]Free (local)[/]")
            lines.append("[dim]Running locally - no API costs[/]")
        else:
            lines.append("[bold]Input Price:[/] [dim]N/A[/]")
            lines.append("[bold]Output Price:[/] [dim]N/A[/]")
        return lines

    def _capability_lines(self) -> list:
        """Lines for the input/output modality section."""
        arch = self.model_data.get("architecture") or {}
        input_modalities = arch.get("input_modalities") or []
        output_modalities = arch.get("output_modalities") or []

        # Fall back to plain "text" when the provider reports no modalities.
        input_str = ", ".join(input_modalities) if input_modalities else "text"
        output_str = ", ".join(output_modalities) if output_modalities else "text"
        return [
            "[bold yellow]Capabilities[/]",
            f"[bold]Input Modalities:[/] {input_str}",
            f"[bold]Output Modalities:[/] {output_str}",
        ]

    def _feature_lines(self) -> list:
        """Lines for the feature-support section."""
        arch = self.model_data.get("architecture") or {}
        input_modalities = arch.get("input_modalities") or []
        supported_params = self.model_data.get("supported_parameters") or []

        def row(label: str, supported: bool) -> str:
            # One ✓/✗ status line per feature.
            icon = "[green]✓[/]" if supported else "[red]✗[/]"
            status = "Supported" if supported else "Not supported"
            return f"[bold]{label}:[/] {icon} {status}"

        # BUG FIX: the original used `"stream" in supported_params or True`
        # (and likewise for temperature), which is always True, so the
        # "Not supported" branch was unreachable. Trust the published
        # parameter list when present; assume support only when the provider
        # publishes no list at all (most models support both).
        has_streaming = "stream" in supported_params if supported_params else True
        has_temp = "temperature" in supported_params if supported_params else True

        return [
            "[bold yellow]Feature Support[/]",
            row("Image Input", "image" in input_modalities),
            row("Function Calling", "tools" in supported_params),
            row("Streaming", has_streaming),
            row("Temperature", has_temp),
        ]

    def _provider_lines(self, model_id: str) -> list:
        """Provider-specific lines; empty for unrecognized providers."""
        lines: list = []
        if self.provider_name == "openrouter":
            lines.append("[bold yellow]OpenRouter Specific[/]")

            top_provider = self.model_data.get("top_provider") or {}
            max_completion = top_provider.get("max_completion_tokens")
            if max_completion:
                # int() normalizes numeric strings; `:,` on a str raises.
                lines.append(f"[bold]Max Completion:[/] {int(max_completion):,} tokens")

            per_request = self.model_data.get("per_request_limits")
            if per_request:
                # Per-request limits may be numeric strings — normalize
                # before applying thousands formatting.
                if per_request.get("prompt_tokens"):
                    lines.append(f"[bold]Max Prompt:[/] {int(per_request['prompt_tokens']):,} tokens")
                if per_request.get("completion_tokens"):
                    lines.append(f"[bold]Max Completion:[/] {int(per_request['completion_tokens']):,} tokens")
        elif self.provider_name == "ollama":
            lines.append("[bold yellow]Ollama Specific[/]")
            lines.append("[dim]Running locally on your machine[/]")
            lines.append(f"[dim]Model file: {model_id}[/]")
        return lines

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle button press (the only button is Close) by dismissing."""
        self.dismiss()

    def on_key(self, event: Any) -> None:
        """Dismiss the screen on Escape or Enter."""
        if event.key in ("escape", "enter"):
            self.dismiss()
|
||||
Reference in New Issue
Block a user