More info in model listings (online support shown). To use / at the start of a query, use //. Plus some other changes.

2025-12-21 19:21:14 +01:00
parent 229ffdf51a
commit 36a412138d
2 changed files with 75 additions and 40 deletions

.gitignore (vendored): 3 changed lines

@@ -30,3 +30,6 @@ diagnose.py
*.xml
build*
*.spec
compiled/
images/oai-iOS-Default-1024x1024@1x.png
images/oai.icon/

oai.py: 110 changed lines

@@ -47,6 +47,13 @@ cache_dir.mkdir(parents=True, exist_ok=True)
# Rich console for chat UI (separate from logging)
console = Console()
# Valid commands list for validation
VALID_COMMANDS = {
'/retry', '/online', '/memory', '/paste', '/export', '/save', '/load',
'/delete', '/list', '/prev', '/next', '/stats', '/middleout', '/reset',
'/info', '/model', '/maxtoken', '/system', '/config', '/credits', '/clear', '/cl', '/help'
}
# Supported code file extensions
SUPPORTED_CODE_EXTENSIONS = {
'.py', '.js', '.ts', '.cs', '.java', '.c', '.cpp', '.h', '.hpp',
@@ -84,7 +91,7 @@ app_logger.setLevel(logging.INFO)
# DB configuration
database = config_dir / 'oai_config.db'
DB_FILE = str(database)
-version = '1.8'
+version = '1.9'
def create_table_if_not_exists():
"""Ensure the config and conversation_sessions tables exist."""
@@ -163,6 +170,29 @@ def estimate_cost(input_tokens: int, output_tokens: int) -> float:
"""Estimate cost in USD based on token counts."""
return (input_tokens * MODEL_PRICING['input'] / 1_000_000) + (output_tokens * MODEL_PRICING['output'] / 1_000_000)
def has_web_search_capability(model: Dict[str, Any]) -> bool:
"""Check if model supports web search based on supported_parameters."""
supported_params = model.get("supported_parameters", [])
# Web search is typically indicated by 'tools' parameter support
return "tools" in supported_params
def has_image_capability(model: Dict[str, Any]) -> bool:
"""Check if model supports image input based on input modalities."""
architecture = model.get("architecture", {})
input_modalities = architecture.get("input_modalities", [])
return "image" in input_modalities
def supports_online_mode(model: Dict[str, Any]) -> bool:
"""Check if model supports :online suffix for web search."""
# Models that support tools parameter can use :online
return has_web_search_capability(model)
def get_effective_model_id(base_model_id: str, online_enabled: bool) -> str:
"""Get the effective model ID with :online suffix if enabled."""
if online_enabled and not base_model_id.endswith(':online'):
return f"{base_model_id}:online"
return base_model_id
def export_as_markdown(session_history: List[Dict[str, str]], session_system_prompt: str = "") -> str:
"""Export conversation history as Markdown."""
lines = ["# Conversation Export", ""]
@@ -349,29 +379,6 @@ def clear_screen():
except:
print("\n" * 100)
def has_web_search_capability(model: Dict[str, Any]) -> bool:
"""Check if model supports web search based on supported_parameters."""
supported_params = model.get("supported_parameters", [])
# Web search is typically indicated by 'tools' parameter support
return "tools" in supported_params
def has_image_capability(model: Dict[str, Any]) -> bool:
"""Check if model supports image input based on input modalities."""
architecture = model.get("architecture", {})
input_modalities = architecture.get("input_modalities", [])
return "image" in input_modalities
def supports_online_mode(model: Dict[str, Any]) -> bool:
"""Check if model supports :online suffix for web search."""
# Models that support tools parameter can use :online
return has_web_search_capability(model)
def get_effective_model_id(base_model_id: str, online_enabled: bool) -> str:
"""Get the effective model ID with :online suffix if enabled."""
if online_enabled and not base_model_id.endswith(':online'):
return f"{base_model_id}:online"
return base_model_id
def display_paginated_table(table: Table, title: str):
"""Display a table with pagination support using Rich console for colored output, repeating header on each page."""
# Get terminal height (subtract some lines for prompt and margins)
@@ -573,6 +580,24 @@ def chat():
while True:
try:
user_input = session.prompt("You> ", auto_suggest=AutoSuggestFromHistory()).strip()
# Handle // escape sequence - convert to single / and treat as regular text
if user_input.startswith("//"):
user_input = user_input[1:] # Remove first slash, keep the rest
# Don't process as command, jump to message processing
# Check for unknown commands (starts with / but not a valid command)
elif user_input.startswith("/") and user_input.lower() not in ["exit", "quit", "bye"]:
# Extract command (first word after /)
command_word = user_input.split()[0].lower() if user_input.split() else user_input.lower()
# Check if it's a valid command or partial match
if not any(command_word.startswith(cmd) for cmd in VALID_COMMANDS):
console.print(f"[bold red]Unknown command: {command_word}[/]")
console.print("[bold yellow]Type /help to see all available commands.[/]")
app_logger.warning(f"Unknown command attempted: {command_word}")
continue
if user_input.lower() in ["exit", "quit", "bye"]:
total_tokens = total_input_tokens + total_output_tokens
app_logger.info(f"Session ended. Total messages: {message_count}, Total tokens: {total_tokens}, Total cost: ${total_cost:.4f}") # Log session summary
@@ -962,6 +987,7 @@ def chat():
table.add_row("Name", model_to_show["name"])
table.add_row("Description", model_to_show.get("description", "N/A"))
table.add_row("Context Length", str(model_to_show.get("context_length", "N/A")))
table.add_row("Online Support", "Yes" if supports_online_mode(model_to_show) else "No")
table.add_row("Pricing - Prompt ($/M tokens)", pricing.get("prompt", "N/A"))
table.add_row("Pricing - Completion ($/M tokens)", pricing.get("completion", "N/A"))
table.add_row("Pricing - Request ($)", pricing.get("request", "N/A"))
@@ -969,7 +995,6 @@ def chat():
table.add_row("Input Modalities", ", ".join(architecture.get("input_modalities", [])) or "None")
table.add_row("Output Modalities", ", ".join(architecture.get("output_modalities", [])) or "None")
table.add_row("Supported Parameters", supported_params)
table.add_row("Online Mode Support", "Yes" if supports_online_mode(model_to_show) else "No")
table.add_row("Top Provider Context Length", str(top_provider.get("context_length", "N/A")))
table.add_row("Max Completion Tokens", str(top_provider.get("max_completion_tokens", "N/A")))
table.add_row("Moderated", "Yes" if top_provider.get("is_moderated", False) else "No")
@@ -977,7 +1002,7 @@ def chat():
console.print(Panel(table, title=f"[bold green]Model Info: {model_to_show['name']}[/]", title_align="left"))
continue
-# Model selection with colored checkmarks (removed Web column)
+# Model selection with Image and Online columns
elif user_input.startswith("/model"):
app_logger.info("User initiated model selection")
args = user_input[7:].strip()
@@ -989,11 +1014,12 @@ def chat():
console.print(f"[bold red]No models match '{search_term}'. Try '/model'.[/]")
continue
-# Create table with colored checkmarks (removed Web column)
+# Create table with Image and Online columns
-table = Table("No.", "Name", "ID", "Image", show_header=True, header_style="bold magenta")
+table = Table("No.", "Name", "ID", "Image", "Online", show_header=True, header_style="bold magenta")
for i, model in enumerate(filtered_models, 1):
image_support = "[green]✓[/green]" if has_image_capability(model) else "[red]✗[/red]"
-table.add_row(str(i), model["name"], model["id"], image_support)
+online_support = "[green]✓[/green]" if supports_online_mode(model) else "[red]✗[/red]"
+table.add_row(str(i), model["name"], model["id"], image_support, online_support)
# Use pagination for the table
title = f"[bold green]Available Models ({'All' if not search_term else f'Search: {search_term}'})[/]"
@@ -1012,7 +1038,7 @@ def chat():
console.print("[dim yellow]Note: Online mode auto-disabled when changing models.[/]")
console.print(f"[bold cyan]Selected: {selected_model['name']} ({selected_model['id']})[/]")
if supports_online_mode(selected_model):
console.print("[dim green]This model supports online mode. Use '/online on' to enable web search.[/]")
app_logger.info(f"Model selected: {selected_model['name']} ({selected_model['id']})")
break
console.print("[bold red]Invalid choice. Try again.[/]")
@@ -1137,11 +1163,12 @@ def chat():
console.print(f"[bold red]No models match '{search_term}'. Try without search.[/]")
continue
-# Create table with colored checkmarks (removed Web column)
+# Create table with Image and Online columns
-table = Table("No.", "Name", "ID", "Image", show_header=True, header_style="bold magenta")
+table = Table("No.", "Name", "ID", "Image", "Online", show_header=True, header_style="bold magenta")
for i, model in enumerate(filtered_models, 1):
image_support = "[green]✓[/green]" if has_image_capability(model) else "[red]✗[/red]"
-table.add_row(str(i), model["name"], model["id"], image_support)
+online_support = "[green]✓[/green]" if supports_online_mode(model) else "[red]✗[/red]"
+table.add_row(str(i), model["name"], model["id"], image_support, online_support)
# Use pagination for the table
title = f"[bold green]Available Models for Default ({'All' if not search_term else f'Search: {search_term}'})[/]"
@@ -1210,7 +1237,7 @@ def chat():
console.print("[bold red]Unable to fetch credits. Check your API key or network.[/]")
continue
-if user_input.lower() == "/clear":
+if user_input.lower() == "/clear" or user_input.lower() == "/cl":
clear_screen()
DEFAULT_MODEL_ID = get_config('default_model')
token_value = session_max_token if session_max_token != 0 else " Not set"
@@ -1230,9 +1257,9 @@
""
)
help_table.add_row(
-"/clear",
+"/clear or /cl",
"Clear the terminal screen for a clean interface. You can also use the keycombo [bold]ctrl+l[/]",
-"/clear"
+"/clear\n/cl"
)
help_table.add_row(
"/help",
@@ -1288,7 +1315,7 @@ def chat():
)
help_table.add_row(
"/model [search]",
-"Select or change the current model for the session. Supports searching by name or ID. Shows image capabilities.",
+"Select or change the current model for the session. Shows image and online capabilities. Supports searching by name or ID.",
"/model\n/model gpt"
)
@@ -1320,7 +1347,7 @@ def chat():
)
help_table.add_row(
"/config model [search]",
-"Set default model that loads on startup. Doesn't change current session model. Shows image capabilities.",
+"Set default model that loads on startup. Shows image and online capabilities. Doesn't change current session model.",
"/config model gpt"
)
help_table.add_row(
@@ -1421,6 +1448,11 @@ def chat():
"Use /paste to send clipboard content (plain text/code) to AI.",
"/paste\n/paste Explain this"
)
help_table.add_row(
"// escape",
"Start message with // to send a literal / character (e.g., //command sends '/command' as text, not a command)",
"//help sends '/help' as text"
)
# ===== EXIT =====
help_table.add_row(
@@ -1438,7 +1470,7 @@ def chat():
help_table,
title="[bold cyan]oAI Chat Help (Version %s)[/]" % version,
title_align="center",
-subtitle="💡 Tip: Commands are case-insensitive • Memory ON by default (toggle with /memory) • Visit: https://iurl.no/oai",
+subtitle="💡 Tip: Commands are case-insensitive • Memory ON by default (toggle with /memory) • Use // to escape / at start of input • Visit: https://iurl.no/oai",
subtitle_align="center",
border_style="cyan"
))
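
For reference, below is a minimal standalone sketch of the new input handling added in oai.py above (the // escape, the VALID_COMMANDS check, and the unknown-command warning). VALID_COMMANDS and the prefix-match test are copied from the diff; the handle_input() helper and the asserts are hypothetical illustrations only, since in oai.py this logic runs inline inside chat()'s input loop and prints via the Rich console.

# Sketch (not part of the commit): illustrates the // escape and unknown-command check.
from typing import Tuple

VALID_COMMANDS = {
    '/retry', '/online', '/memory', '/paste', '/export', '/save', '/load',
    '/delete', '/list', '/prev', '/next', '/stats', '/middleout', '/reset',
    '/info', '/model', '/maxtoken', '/system', '/config', '/credits', '/clear', '/cl', '/help'
}

def handle_input(user_input: str) -> Tuple[str, str]:
    """Hypothetical helper mirroring the inline logic in chat()'s input loop."""
    user_input = user_input.strip()
    # "//" escape: drop the first slash and treat the remainder as plain message text.
    if user_input.startswith("//"):
        return ("text", user_input[1:])
    # Any other leading "/" must prefix-match a known command, else it is rejected.
    if user_input.startswith("/"):
        command_word = user_input.split()[0].lower() if user_input.split() else user_input.lower()
        if not any(command_word.startswith(cmd) for cmd in VALID_COMMANDS):
            return ("unknown", command_word)
        return ("command", command_word)
    return ("text", user_input)

assert handle_input("//help") == ("text", "/help")   # literal "/help" sent as a message
assert handle_input("/cl")[0] == "command"           # new alias for /clear
assert handle_input("/foo")[0] == "unknown"          # would print the "Type /help" hint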