4 Commits
1.7 ... 1.9

3 changed files with 300 additions and 22 deletions

9
.gitignore vendored
View File

@@ -24,3 +24,12 @@ Pipfile.lock # Consider if you want to include or exclude
*~.xib *~.xib
README.md.old README.md.old
oai.zip oai.zip
.note
diagnose.py
*.log
*.xml
build*
*.spec
compiled/
images/oai-iOS-Default-1024x1024@1x.png
images/oai.icon/

View File

@@ -77,6 +77,10 @@ alias oai='python3 <path to your file>'
On first run, you will be prompted to enter your OpenRouter API key. On first run, you will be prompted to enter your OpenRouter API key.
### 6. Use Binaries
You can also just download the supplied binary for either Mac with Mx (M1, M2 etc) `oai_mac_arm64.zip` and follow [#3](https://gitlab.pm/rune/oai#3-copy-to-path). Or download for Linux (64bit) `oai_linux_x86_64.zip` and also follow [#3](https://gitlab.pm/rune/oai#3-copy-to-path).
## Usage ## Usage
### Starting the Application ### Starting the Application

307
oai.py
View File

@@ -35,16 +35,25 @@ APP_URL = "https://iurl.no/oai"
# Paths # Paths
home = Path.home() home = Path.home()
config_dir = home / '.config' / 'oai' config_dir = home / '.config' / 'oai'
cache_dir = home / '.cache' / 'oai'
history_file = config_dir / 'history.txt' # Persistent input history file history_file = config_dir / 'history.txt' # Persistent input history file
database = config_dir / 'oai_config.db' database = config_dir / 'oai_config.db'
log_file = config_dir / 'oai.log' log_file = config_dir / 'oai.log'
# Create dirs if needed # Create dirs if needed
config_dir.mkdir(parents=True, exist_ok=True) config_dir.mkdir(parents=True, exist_ok=True)
cache_dir.mkdir(parents=True, exist_ok=True)
# Rich console for chat UI (separate from logging) # Rich console for chat UI (separate from logging)
console = Console() console = Console()
# Commands recognised by the chat loop; any other '/'-prefixed input is rejected.
VALID_COMMANDS = {
    '/cl', '/clear', '/config', '/credits', '/delete', '/export',
    '/help', '/info', '/list', '/load', '/maxtoken', '/memory',
    '/middleout', '/model', '/next', '/online', '/paste', '/prev',
    '/reset', '/retry', '/save', '/stats', '/system',
}
# Supported code file extensions # Supported code file extensions
SUPPORTED_CODE_EXTENSIONS = { SUPPORTED_CODE_EXTENSIONS = {
'.py', '.js', '.ts', '.cs', '.java', '.c', '.cpp', '.h', '.hpp', '.py', '.js', '.ts', '.cs', '.java', '.c', '.cpp', '.h', '.hpp',
@@ -82,7 +91,7 @@ app_logger.setLevel(logging.INFO)
# DB configuration # DB configuration
database = config_dir / 'oai_config.db' database = config_dir / 'oai_config.db'
DB_FILE = str(database) DB_FILE = str(database)
version = '1.7' version = '1.9'
def create_table_if_not_exists(): def create_table_if_not_exists():
"""Ensure the config and conversation_sessions tables exist.""" """Ensure the config and conversation_sessions tables exist."""
@@ -161,6 +170,29 @@ def estimate_cost(input_tokens: int, output_tokens: int) -> float:
"""Estimate cost in USD based on token counts.""" """Estimate cost in USD based on token counts."""
return (input_tokens * MODEL_PRICING['input'] / 1_000_000) + (output_tokens * MODEL_PRICING['output'] / 1_000_000) return (input_tokens * MODEL_PRICING['input'] / 1_000_000) + (output_tokens * MODEL_PRICING['output'] / 1_000_000)
def has_web_search_capability(model: Dict[str, Any]) -> bool:
    """Return True when the model advertises web-search support.

    Web search availability is inferred from the presence of 'tools'
    in the model's supported_parameters list.
    """
    return "tools" in model.get("supported_parameters", [])
def has_image_capability(model: Dict[str, Any]) -> bool:
    """Return True when the model accepts image input.

    Checks the 'input_modalities' list under the model's 'architecture' key.
    """
    modalities = model.get("architecture", {}).get("input_modalities", [])
    return "image" in modalities
def supports_online_mode(model: Dict[str, Any]) -> bool:
    """Return True when the model can use the ':online' suffix for web search.

    Online mode is available exactly for models that support the 'tools'
    parameter, so this simply delegates to has_web_search_capability().
    """
    return has_web_search_capability(model)
def get_effective_model_id(base_model_id: str, online_enabled: bool) -> str:
    """Return the model ID to send to the API.

    Appends the ':online' suffix when online mode is active and the ID
    does not already carry it; otherwise returns the ID unchanged.
    """
    already_online = base_model_id.endswith(':online')
    if not online_enabled or already_online:
        return base_model_id
    return f"{base_model_id}:online"
def export_as_markdown(session_history: List[Dict[str, str]], session_system_prompt: str = "") -> str: def export_as_markdown(session_history: List[Dict[str, str]], session_system_prompt: str = "") -> str:
"""Export conversation history as Markdown.""" """Export conversation history as Markdown."""
lines = ["# Conversation Export", ""] lines = ["# Conversation Export", ""]
@@ -347,6 +379,143 @@ def clear_screen():
except: except:
print("\n" * 100) print("\n" * 100)
def display_paginated_table(table: Table, title: str):
    """Render a Rich table with pagination, repeating the header on each page.

    Args:
        table: The Rich ``Table`` to display.
        title: Page/panel title (may contain Rich markup).

    If the rendered table fits in the terminal it is printed inside a Panel
    in one go. Otherwise it is split into pages: the header rows are
    re-printed at the top of every page, and the user advances with SPACE
    (any other key stops pagination).

    Fixes vs. previous revision:
    - The horizontal-separator check previously tested membership of empty
      strings ('' in s is always True), so header detection matched the
      first line after the header text unconditionally; the intended
      box-drawing characters are restored.
    - ``lines_per_page`` is clamped to >= 1 so a very short terminal cannot
      produce a non-advancing (infinite) pagination loop.
    - Bare ``except:`` on terminal-size lookup narrowed to ``OSError``.
    """
    # Usable height: leave room for the title line, prompt and margins.
    try:
        terminal_height = os.get_terminal_size().lines - 8
    except OSError:
        terminal_height = 20  # Fallback when stdout is not attached to a tty

    # Render the table into styled segments so colours survive pagination.
    segments = list(console.render(table))

    # Re-group the flat segment stream into per-line segment lists;
    # Rich emits '\n' as a standalone segment between rendered lines.
    current_line_segments = []
    all_lines = []
    for segment in segments:
        if segment.text == '\n':
            all_lines.append(current_line_segments)
            current_line_segments = []
        else:
            current_line_segments.append(segment)
    # Add last line if not terminated by a newline segment
    if current_line_segments:
        all_lines.append(current_line_segments)

    total_lines = len(all_lines)
    # Everything fits on one screen: plain panel, no pagination needed.
    if total_lines <= terminal_height:
        console.print(Panel(table, title=title, title_align="left"))
        return

    # Locate the end of the header block: the first horizontal-rule line
    # that follows a line rendered with the header style (bold/magenta).
    header_end_index = 0
    found_header_text = False
    for i, line_segments in enumerate(all_lines):
        has_header_style = any(
            seg.style and ('bold' in str(seg.style) or 'magenta' in str(seg.style))
            for seg in line_segments
        )
        if has_header_style:
            found_header_text = True
        # After the header text, the next line made of box-drawing rule
        # characters is the header/body separator.
        if found_header_text and i > 0:
            line_text = ''.join(seg.text for seg in line_segments)
            if any(char in line_text for char in ('─', '━', '╌', '┄', '┈', '═')):
                header_end_index = i
                break

    if header_end_index > 0:
        header_lines = all_lines[:header_end_index + 1]  # Include the separator
        data_lines = all_lines[header_end_index + 1:]
    else:
        # Fallback: assume the first 3 lines are top border, header, rule.
        split_at = min(3, len(all_lines))
        header_lines = all_lines[:split_at]
        data_lines = all_lines[split_at:]

    # Always show at least one data row per page so the loop below is
    # guaranteed to make progress even on tiny terminals.
    lines_per_page = max(1, terminal_height - len(header_lines))

    current_line = 0
    page_number = 1
    while current_line < len(data_lines):
        # Fresh screen for each page
        clear_screen()
        console.print(f"[bold cyan]{title} (Page {page_number})[/]")

        # Repeat the header at the top of every page, preserving styles.
        for line_segments in header_lines:
            for segment in line_segments:
                console.print(segment.text, style=segment.style, end="")
            console.print()  # New line after each row

        end_line = min(current_line + lines_per_page, len(data_lines))
        for line_segments in data_lines[current_line:end_line]:
            for segment in line_segments:
                console.print(segment.text, style=segment.style, end="")
            console.print()  # New line after each row

        current_line = end_line
        page_number += 1

        if current_line >= len(data_lines):
            break  # Last page shown; nothing to wait for.

        console.print(f"\n[dim yellow]--- Press SPACE for next page, or any other key to finish (Page {page_number - 1}, showing {end_line}/{len(data_lines)} data rows) ---[/dim yellow]")
        try:
            import sys
            import tty
            import termios
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)  # Raw mode: read one keypress without Enter
                char = sys.stdin.read(1)
                if char != ' ':
                    break
            finally:
                # Always restore cooked terminal settings
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        except Exception:
            # Windows (no termios) or non-tty stdin: fall back to line input.
            # An empty line (just Enter) advances; anything else stops.
            input_char = input().strip()
            if input_char != '':
                break
@app.command() @app.command()
def chat(): def chat():
global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN, COST_WARNING_THRESHOLD global API_KEY, OPENROUTER_BASE_URL, STREAM_ENABLED, MAX_TOKEN, COST_WARNING_THRESHOLD
@@ -362,6 +531,7 @@ def chat():
conversation_memory_enabled = True # Memory ON by default conversation_memory_enabled = True # Memory ON by default
memory_start_index = 0 # Track when memory was last enabled memory_start_index = 0 # Track when memory was last enabled
saved_conversations_cache = [] # Cache for /list results to use with /load by number saved_conversations_cache = [] # Cache for /list results to use with /load by number
online_mode_enabled = False # Online mode (web search) disabled by default
app_logger.info("Starting new chat session with memory enabled") # Log session start app_logger.info("Starting new chat session with memory enabled") # Log session start
@@ -410,6 +580,24 @@ def chat():
while True: while True:
try: try:
user_input = session.prompt("You> ", auto_suggest=AutoSuggestFromHistory()).strip() user_input = session.prompt("You> ", auto_suggest=AutoSuggestFromHistory()).strip()
# Handle // escape sequence - convert to single / and treat as regular text
if user_input.startswith("//"):
user_input = user_input[1:] # Remove first slash, keep the rest
# Don't process as command, jump to message processing
# Check for unknown commands (starts with / but not a valid command)
elif user_input.startswith("/") and user_input.lower() not in ["exit", "quit", "bye"]:
# Extract command (first word after /)
command_word = user_input.split()[0].lower() if user_input.split() else user_input.lower()
# Check if it's a valid command or partial match
if not any(command_word.startswith(cmd) for cmd in VALID_COMMANDS):
console.print(f"[bold red]Unknown command: {command_word}[/]")
console.print("[bold yellow]Type /help to see all available commands.[/]")
app_logger.warning(f"Unknown command attempted: {command_word}")
continue
if user_input.lower() in ["exit", "quit", "bye"]: if user_input.lower() in ["exit", "quit", "bye"]:
total_tokens = total_input_tokens + total_output_tokens total_tokens = total_input_tokens + total_output_tokens
app_logger.info(f"Session ended. Total messages: {message_count}, Total tokens: {total_tokens}, Total cost: ${total_cost:.4f}") # Log session summary app_logger.info(f"Session ended. Total messages: {message_count}, Total tokens: {total_tokens}, Total cost: ${total_cost:.4f}") # Log session summary
@@ -426,6 +614,39 @@ def chat():
console.print("[bold green]Retrying last prompt...[/]") console.print("[bold green]Retrying last prompt...[/]")
app_logger.info(f"Retrying prompt: {last_prompt[:100]}...") app_logger.info(f"Retrying prompt: {last_prompt[:100]}...")
user_input = last_prompt user_input = last_prompt
elif user_input.lower().startswith("/online"):
args = user_input[8:].strip()
if not args:
status = "enabled" if online_mode_enabled else "disabled"
console.print(f"[bold blue]Online mode (web search) {status}.[/]")
if selected_model:
if supports_online_mode(selected_model):
console.print(f"[dim green]Current model '{selected_model['name']}' supports online mode.[/]")
else:
console.print(f"[dim yellow]Current model '{selected_model['name']}' does not support online mode.[/]")
continue
if args.lower() == "on":
if not selected_model:
console.print("[bold red]No model selected. Select a model first with '/model'.[/]")
continue
if not supports_online_mode(selected_model):
console.print(f"[bold red]Model '{selected_model['name']}' does not support online mode (web search).[/]")
console.print("[dim yellow]Online mode requires models with 'tools' parameter support.[/]")
app_logger.warning(f"Online mode activation failed - model {selected_model['id']} doesn't support it")
continue
online_mode_enabled = True
console.print("[bold green]Online mode enabled. Model will use web search capabilities.[/]")
console.print(f"[dim blue]Effective model ID: {get_effective_model_id(selected_model['id'], True)}[/]")
app_logger.info(f"Online mode enabled for model {selected_model['id']}")
elif args.lower() == "off":
online_mode_enabled = False
console.print("[bold green]Online mode disabled. Model will not use web search.[/]")
if selected_model:
console.print(f"[dim blue]Effective model ID: {selected_model['id']}[/]")
app_logger.info("Online mode disabled")
else:
console.print("[bold yellow]Usage: /online on|off (or /online to view status)[/]")
continue
elif user_input.lower().startswith("/memory"): elif user_input.lower().startswith("/memory"):
args = user_input[8:].strip() args = user_input[8:].strip()
if not args: if not args:
@@ -755,7 +976,7 @@ def chat():
console.print(f"[bold red]Model '{args}' not found.[/]") console.print(f"[bold red]Model '{args}' not found.[/]")
continue continue
# Display model info (unchanged) # Display model info
pricing = model_to_show.get("pricing", {}) pricing = model_to_show.get("pricing", {})
architecture = model_to_show.get("architecture", {}) architecture = model_to_show.get("architecture", {})
supported_params = ", ".join(model_to_show.get("supported_parameters", [])) or "None" supported_params = ", ".join(model_to_show.get("supported_parameters", [])) or "None"
@@ -766,6 +987,7 @@ def chat():
table.add_row("Name", model_to_show["name"]) table.add_row("Name", model_to_show["name"])
table.add_row("Description", model_to_show.get("description", "N/A")) table.add_row("Description", model_to_show.get("description", "N/A"))
table.add_row("Context Length", str(model_to_show.get("context_length", "N/A"))) table.add_row("Context Length", str(model_to_show.get("context_length", "N/A")))
table.add_row("Online Support", "Yes" if supports_online_mode(model_to_show) else "No")
table.add_row("Pricing - Prompt ($/M tokens)", pricing.get("prompt", "N/A")) table.add_row("Pricing - Prompt ($/M tokens)", pricing.get("prompt", "N/A"))
table.add_row("Pricing - Completion ($/M tokens)", pricing.get("completion", "N/A")) table.add_row("Pricing - Completion ($/M tokens)", pricing.get("completion", "N/A"))
table.add_row("Pricing - Request ($)", pricing.get("request", "N/A")) table.add_row("Pricing - Request ($)", pricing.get("request", "N/A"))
@@ -780,7 +1002,7 @@ def chat():
console.print(Panel(table, title=f"[bold green]Model Info: {model_to_show['name']}[/]", title_align="left")) console.print(Panel(table, title=f"[bold green]Model Info: {model_to_show['name']}[/]", title_align="left"))
continue continue
# Model selection (unchanged but with logging) # Model selection with Image and Online columns
elif user_input.startswith("/model"): elif user_input.startswith("/model"):
app_logger.info("User initiated model selection") app_logger.info("User initiated model selection")
args = user_input[7:].strip() args = user_input[7:].strip()
@@ -791,10 +1013,18 @@ def chat():
if not filtered_models: if not filtered_models:
console.print(f"[bold red]No models match '{search_term}'. Try '/model'.[/]") console.print(f"[bold red]No models match '{search_term}'. Try '/model'.[/]")
continue continue
table = Table("No.", "Name", "ID", show_header=True, header_style="bold magenta")
# Create table with Image and Online columns
table = Table("No.", "Name", "ID", "Image", "Online", show_header=True, header_style="bold magenta")
for i, model in enumerate(filtered_models, 1): for i, model in enumerate(filtered_models, 1):
table.add_row(str(i), model["name"], model["id"]) image_support = "[green]✓[/green]" if has_image_capability(model) else "[red]✗[/red]"
console.print(Panel(table, title=f"[bold green]Available Models ({'All' if not search_term else f'Search: {search_term}'})[/]", title_align="left")) online_support = "[green]✓[/green]" if supports_online_mode(model) else "[red]✗[/red]"
table.add_row(str(i), model["name"], model["id"], image_support, online_support)
# Use pagination for the table
title = f"[bold green]Available Models ({'All' if not search_term else f'Search: {search_term}'})[/]"
display_paginated_table(table, title)
while True: while True:
try: try:
choice = int(typer.prompt("Enter model number (or 0 to cancel)")) choice = int(typer.prompt("Enter model number (or 0 to cancel)"))
@@ -802,7 +1032,13 @@ def chat():
break break
if 1 <= choice <= len(filtered_models): if 1 <= choice <= len(filtered_models):
selected_model = filtered_models[choice - 1] selected_model = filtered_models[choice - 1]
# Disable online mode when switching models (user must re-enable)
if online_mode_enabled:
online_mode_enabled = False
console.print("[dim yellow]Note: Online mode auto-disabled when changing models.[/]")
console.print(f"[bold cyan]Selected: {selected_model['name']} ({selected_model['id']})[/]") console.print(f"[bold cyan]Selected: {selected_model['name']} ({selected_model['id']})[/]")
if supports_online_mode(selected_model):
console.print("[dim green]✓ This model supports online mode. Use '/online on' to enable web search.[/]")
app_logger.info(f"Model selected: {selected_model['name']} ({selected_model['id']})") app_logger.info(f"Model selected: {selected_model['name']} ({selected_model['id']})")
break break
console.print("[bold red]Invalid choice. Try again.[/]") console.print("[bold red]Invalid choice. Try again.[/]")
@@ -926,10 +1162,18 @@ def chat():
if not filtered_models: if not filtered_models:
console.print(f"[bold red]No models match '{search_term}'. Try without search.[/]") console.print(f"[bold red]No models match '{search_term}'. Try without search.[/]")
continue continue
table = Table("No.", "Name", "ID", show_header=True, header_style="bold magenta")
# Create table with Image and Online columns
table = Table("No.", "Name", "ID", "Image", "Online", show_header=True, header_style="bold magenta")
for i, model in enumerate(filtered_models, 1): for i, model in enumerate(filtered_models, 1):
table.add_row(str(i), model["name"], model["id"]) image_support = "[green]✓[/green]" if has_image_capability(model) else "[red]✗[/red]"
console.print(Panel(table, title=f"[bold green]Available Models for Default ({'All' if not search_term else f'Search: {search_term}'})[/]", title_align="left")) online_support = "[green]✓[/green]" if supports_online_mode(model) else "[red]✗[/red]"
table.add_row(str(i), model["name"], model["id"], image_support, online_support)
# Use pagination for the table
title = f"[bold green]Available Models for Default ({'All' if not search_term else f'Search: {search_term}'})[/]"
display_paginated_table(table, title)
while True: while True:
try: try:
choice = int(typer.prompt("Enter model number (or 0 to cancel)")) choice = int(typer.prompt("Enter model number (or 0 to cancel)"))
@@ -956,6 +1200,7 @@ def chat():
table.add_row("Streaming", "Enabled" if STREAM_ENABLED == "on" else "Disabled") table.add_row("Streaming", "Enabled" if STREAM_ENABLED == "on" else "Disabled")
table.add_row("Default Model", DEFAULT_MODEL_ID or "[Not set]") table.add_row("Default Model", DEFAULT_MODEL_ID or "[Not set]")
table.add_row("Current Model", "[Not set]" if selected_model is None else str(selected_model["name"])) table.add_row("Current Model", "[Not set]" if selected_model is None else str(selected_model["name"]))
table.add_row("Online Mode", "Enabled" if online_mode_enabled else "Disabled")
table.add_row("Max Token", str(MAX_TOKEN)) table.add_row("Max Token", str(MAX_TOKEN))
table.add_row("Session Token", "[Not set]" if session_max_token == 0 else str(session_max_token)) table.add_row("Session Token", "[Not set]" if session_max_token == 0 else str(session_max_token))
table.add_row("Session System Prompt", session_system_prompt or "[Not set]") table.add_row("Session System Prompt", session_system_prompt or "[Not set]")
@@ -992,12 +1237,14 @@ def chat():
console.print("[bold red]Unable to fetch credits. Check your API key or network.[/]") console.print("[bold red]Unable to fetch credits. Check your API key or network.[/]")
continue continue
if user_input.lower() == "/clear": if user_input.lower() == "/clear" or user_input.lower() == "/cl":
clear_screen() clear_screen()
DEFAULT_MODEL_ID = get_config('default_model') DEFAULT_MODEL_ID = get_config('default_model')
token_value = session_max_token if session_max_token != 0 else " Not set" token_value = session_max_token if session_max_token != 0 else " Not set"
console.print(f"[bold cyan]Token limits: Max= {MAX_TOKEN}, Session={token_value}[/]") console.print(f"[bold cyan]Token limits: Max= {MAX_TOKEN}, Session={token_value}[/]")
console.print("[bold blue]Active model[/] [bold red]%s[/]" %(str(selected_model["name"]) if selected_model else "None")) console.print("[bold blue]Active model[/] [bold red]%s[/]" %(str(selected_model["name"]) if selected_model else "None"))
if online_mode_enabled:
console.print("[bold cyan]Online mode: Enabled (web search active)[/]")
continue continue
if user_input.lower() == "/help": if user_input.lower() == "/help":
@@ -1010,9 +1257,9 @@ def chat():
"" ""
) )
help_table.add_row( help_table.add_row(
"/clear", "/clear or /cl",
"Clear the terminal screen for a clean interface. You can also use the keycombo [bold]ctrl+l[/]", "Clear the terminal screen for a clean interface. You can also use the keycombo [bold]ctrl+l[/]",
"/clear" "/clear\n/cl"
) )
help_table.add_row( help_table.add_row(
"/help", "/help",
@@ -1029,6 +1276,11 @@ def chat():
"View the next response in history.", "View the next response in history.",
"/next" "/next"
) )
help_table.add_row(
"/online [on|off]",
"Enable/disable online mode (web search) for current model. Only works with models that support tools.",
"/online on\n/online off"
)
help_table.add_row( help_table.add_row(
"/paste [prompt]", "/paste [prompt]",
"Paste plain text/code from clipboard and send to AI. Optional prompt can be added.", "Paste plain text/code from clipboard and send to AI. Optional prompt can be added.",
@@ -1058,12 +1310,12 @@ def chat():
) )
help_table.add_row( help_table.add_row(
"/info [model_id]", "/info [model_id]",
"Display detailed info (pricing, modalities, context length, etc.) for current or specified model.", "Display detailed info (pricing, modalities, context length, online support, etc.) for current or specified model.",
"/info\n/info gpt-4o" "/info\n/info gpt-4o"
) )
help_table.add_row( help_table.add_row(
"/model [search]", "/model [search]",
"Select or change the current model for the session. Supports searching by name or ID.", "Select or change the current model for the session. Shows image and online capabilities. Supports searching by name or ID.",
"/model\n/model gpt" "/model\n/model gpt"
) )
@@ -1095,7 +1347,7 @@ def chat():
) )
help_table.add_row( help_table.add_row(
"/config model [search]", "/config model [search]",
"Set default model that loads on startup. Doesn't change current session model.", "Set default model that loads on startup. Shows image and online capabilities. Doesn't change current session model.",
"/config model gpt" "/config model gpt"
) )
help_table.add_row( help_table.add_row(
@@ -1196,6 +1448,11 @@ def chat():
"Use /paste to send clipboard content (plain text/code) to AI.", "Use /paste to send clipboard content (plain text/code) to AI.",
"/paste\n/paste Explain this" "/paste\n/paste Explain this"
) )
help_table.add_row(
"// escape",
"Start message with // to send a literal / character (e.g., //command sends '/command' as text, not a command)",
"//help sends '/help' as text"
)
# ===== EXIT ===== # ===== EXIT =====
help_table.add_row( help_table.add_row(
@@ -1213,7 +1470,7 @@ def chat():
help_table, help_table,
title="[bold cyan]oAI Chat Help (Version %s)[/]" % version, title="[bold cyan]oAI Chat Help (Version %s)[/]" % version,
title_align="center", title_align="center",
subtitle="💡 Tip: Commands are case-insensitive • Memory ON by default (toggle with /memory) • Visit: https://iurl.no/oai", subtitle="💡 Tip: Commands are case-insensitive • Memory ON by default (toggle with /memory) • Use // to escape / at start of input • Visit: https://iurl.no/oai",
subtitle_align="center", subtitle_align="center",
border_style="cyan" border_style="cyan"
)) ))
@@ -1324,9 +1581,12 @@ def chat():
# Add current user message # Add current user message
api_messages.append({"role": "user", "content": message_content}) api_messages.append({"role": "user", "content": message_content})
# Get effective model ID with :online suffix if enabled
effective_model_id = get_effective_model_id(selected_model["id"], online_mode_enabled)
# Build API params with app identification headers (using http_headers) # Build API params with app identification headers (using http_headers)
api_params = { api_params = {
"model": selected_model["id"], "model": effective_model_id,
"messages": api_messages, "messages": api_messages,
"stream": STREAM_ENABLED == "on", "stream": STREAM_ENABLED == "on",
"http_headers": { "http_headers": {
@@ -1343,12 +1603,15 @@ def chat():
file_count = len(file_attachments) file_count = len(file_attachments)
history_messages_count = len(session_history) - memory_start_index if conversation_memory_enabled else 0 history_messages_count = len(session_history) - memory_start_index if conversation_memory_enabled else 0
memory_status = "ON" if conversation_memory_enabled else "OFF" memory_status = "ON" if conversation_memory_enabled else "OFF"
app_logger.info(f"API Request: Model '{selected_model['id']}', Prompt length: {len(text_part)} chars, {file_count} file(s) attached, Memory: {memory_status}, History sent: {history_messages_count} messages, Transforms: middle-out {'enabled' if middle_out_enabled else 'disabled'}, App: {APP_NAME} ({APP_URL}).") online_status = "ON" if online_mode_enabled else "OFF"
app_logger.info(f"API Request: Model '{effective_model_id}' (Online: {online_status}), Prompt length: {len(text_part)} chars, {file_count} file(s) attached, Memory: {memory_status}, History sent: {history_messages_count} messages, Transforms: middle-out {'enabled' if middle_out_enabled else 'disabled'}, App: {APP_NAME} ({APP_URL}).")
# Send and handle response with metrics and timing # Send and handle response with metrics and timing
is_streaming = STREAM_ENABLED == "on" is_streaming = STREAM_ENABLED == "on"
if is_streaming: if is_streaming:
console.print("[bold green]Streaming response...[/] [dim](Press Ctrl+C to cancel)[/]") console.print("[bold green]Streaming response...[/] [dim](Press Ctrl+C to cancel)[/]")
if online_mode_enabled:
console.print("[dim cyan]🌐 Online mode active - model has web search access[/]")
console.print("") # Add spacing before response console.print("") # Add spacing before response
else: else:
console.print("[bold green]Thinking...[/]", end="\r") console.print("[bold green]Thinking...[/]", end="\r")
@@ -1356,7 +1619,7 @@ def chat():
start_time = time.time() # Start timing request start_time = time.time() # Start timing request
try: try:
response = client.chat.send(**api_params) response = client.chat.send(**api_params)
app_logger.info(f"API call successful for model '{selected_model['id']}'") app_logger.info(f"API call successful for model '{effective_model_id}'")
except Exception as e: except Exception as e:
console.print(f"[bold red]Error sending request: {e}[/]") console.print(f"[bold red]Error sending request: {e}[/]")
app_logger.error(f"API Error: {type(e).__name__}: {e}") app_logger.error(f"API Error: {type(e).__name__}: {e}")
@@ -1413,7 +1676,7 @@ def chat():
message_count += 1 message_count += 1
# Log response metrics # Log response metrics
app_logger.info(f"Response: Tokens - I:{input_tokens} O:{output_tokens} T:{input_tokens + output_tokens}, Cost: ${msg_cost:.4f}, Time: {response_time:.2f}s") app_logger.info(f"Response: Tokens - I:{input_tokens} O:{output_tokens} T:{input_tokens + output_tokens}, Cost: ${msg_cost:.4f}, Time: {response_time:.2f}s, Online: {online_mode_enabled}")
# Per-message metrics display with context info # Per-message metrics display with context info
if conversation_memory_enabled: if conversation_memory_enabled:
@@ -1421,7 +1684,9 @@ def chat():
context_info = f", Context: {context_count} msg(s)" if context_count > 1 else "" context_info = f", Context: {context_count} msg(s)" if context_count > 1 else ""
else: else:
context_info = ", Memory: OFF" context_info = ", Memory: OFF"
console.print(f"\n[dim blue]📊 Metrics: {input_tokens + output_tokens} tokens | ${msg_cost:.4f} | {response_time:.2f}s{context_info} | Session: {total_input_tokens + total_output_tokens} tokens | ${total_cost:.4f}[/]")
online_info = " 🌐" if online_mode_enabled else ""
console.print(f"\n[dim blue]📊 Metrics: {input_tokens + output_tokens} tokens | ${msg_cost:.4f} | {response_time:.2f}s{context_info}{online_info} | Session: {total_input_tokens + total_output_tokens} tokens | ${total_cost:.4f}[/]")
# Cost and credit alerts # Cost and credit alerts
warnings = [] warnings = []