Bug fixes for v3.0.0

2026-02-06 09:48:37 +01:00
parent a47b1c95a2
commit 603d42b7ff
9 changed files with 280 additions and 119 deletions

View File

@@ -139,7 +139,6 @@ class HelpCommand(Command):
("/retry", "Resend the last prompt.", "/retry"), ("/retry", "Resend the last prompt.", "/retry"),
("/memory", "Toggle conversation memory.", "/memory on"), ("/memory", "Toggle conversation memory.", "/memory on"),
("/online", "Toggle online mode (web search).", "/online on"), ("/online", "Toggle online mode (web search).", "/online on"),
("/paste", "Paste from clipboard with optional prompt.", "/paste Explain"),
]), ]),
("[bold cyan]━━━ NAVIGATION ━━━[/]", [ ("[bold cyan]━━━ NAVIGATION ━━━[/]", [
("/prev", "View previous response in history.", "/prev"), ("/prev", "View previous response in history.", "/prev"),
@@ -150,6 +149,7 @@ class HelpCommand(Command):
("/model", "Select AI model.", "/model gpt"), ("/model", "Select AI model.", "/model gpt"),
("/info", "Show model information.", "/info"), ("/info", "Show model information.", "/info"),
("/config", "View or change settings.", "/config stream on"), ("/config", "View or change settings.", "/config stream on"),
("/config log", "Set log level.", "/config log debug"),
("/maxtoken", "Set session token limit.", "/maxtoken 2000"), ("/maxtoken", "Set session token limit.", "/maxtoken 2000"),
("/system", "Set system prompt.", "/system You are an expert"), ("/system", "Set system prompt.", "/system You are an expert"),
]), ]),
@@ -1022,13 +1022,20 @@ class ConfigCommand(Command):
             else:
                 pass
-        elif setting == "loglevel":
+        elif setting in ["loglevel", "log"]:
             valid_levels = ["debug", "info", "warning", "error", "critical"]
-            if value and value.lower() in valid_levels:
-                settings.set_log_level(value.lower())
-                print_success(f"Log level set to: {value.lower()}")
-            else:
-                print_info(f"Valid levels: {', '.join(valid_levels)}")
+            if value:
+                if value.lower() in valid_levels:
+                    settings.set_log_level(value.lower())
+                    message = f"Log level set to: {value.lower()}"
+                    return CommandResult.success(message=message)
+                else:
+                    message = f"Invalid log level. Valid levels: {', '.join(valid_levels)}"
+                    return CommandResult.error(message)
+            else:
+                # Show current log level
+                message = f"Current log level: {settings.log_level}\nValid levels: {', '.join(valid_levels)}"
+                return CommandResult.success(message=message)
         else:
             pass
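
The new branch gives /config log three outcomes: set a valid level, reject an invalid one, or report the current level when called with no argument. A minimal standalone sketch of that resolution logic (the function name and the (ok, message) return shape are illustrative, not part of the codebase; only the messages mirror the diff above):

    VALID_LEVELS = ["debug", "info", "warning", "error", "critical"]

    def resolve_log_level(value: str | None, current_level: str) -> tuple[bool, str]:
        """Mirror the /config log branch: return (ok, message)."""
        if not value:
            # No argument: show the current level and the accepted values
            return True, f"Current log level: {current_level}\nValid levels: {', '.join(VALID_LEVELS)}"
        if value.lower() in VALID_LEVELS:
            return True, f"Log level set to: {value.lower()}"
        return False, f"Invalid log level. Valid levels: {', '.join(VALID_LEVELS)}"

    # resolve_log_level("debug", "info")   -> (True, "Log level set to: debug")
    # resolve_log_level("verbose", "info") -> (False, "Invalid log level. ...")
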
@@ -1600,32 +1607,15 @@ class PasteCommand(Command):
     @property
     def help(self) -> CommandHelp:
         return CommandHelp(
-            description="Paste from clipboard and send to AI.",
-            usage="/paste [prompt]",
+            description="[Disabled] Use Cmd+V (macOS) or Ctrl+V (Linux/Windows) to paste directly.",
+            usage="/paste",
+            notes="This command is disabled. Use keyboard shortcuts to paste instead.",
         )

     def execute(self, args: str, context: CommandContext) -> CommandResult:
-        try:
-            import pyperclip
-            content = pyperclip.paste()
-        except ImportError:
-            message = "pyperclip not installed"
-            return CommandResult.error(message)
-        except Exception as e:
-            message = f"Failed to access clipboard: {e}"
-            return CommandResult.error(str(e))
-        if not content:
-            message = "Clipboard is empty"
-            return CommandResult.error(message)
-        # Build the prompt
-        if args:
-            full_prompt = f"{args}\n\n```\n{content}\n```"
-        else:
-            full_prompt = content
-        return CommandResult.success(data={"paste_prompt": full_prompt})
+        # Disabled - use Cmd+V (macOS) or Ctrl+V (Linux/Windows) instead
+        message = "💡 Tip: Use Cmd+V (macOS) or Ctrl+V (Linux/Windows) to paste directly"
+        return CommandResult.success(message=message)


 class ModelCommand(Command):
     """Select AI model."""

View File

@@ -221,18 +221,24 @@ class AIClient:
f"messages={len(chat_messages)}, stream={stream}" f"messages={len(chat_messages)}, stream={stream}"
) )
return self.provider.chat( # Build provider chat parameters
model=model_id, chat_params = {
messages=chat_messages, "model": model_id,
stream=stream, "messages": chat_messages,
max_tokens=max_tokens, "stream": stream,
temperature=temperature, "max_tokens": max_tokens,
tools=tools, "temperature": temperature,
tool_choice=tool_choice, "tools": tools,
transforms=transforms, "tool_choice": tool_choice,
enable_web_search=enable_web_search, "transforms": transforms,
web_search_config=web_search_config or {}, }
)
# Only pass web search params to Anthropic provider
if self.provider_name == "anthropic":
chat_params["enable_web_search"] = enable_web_search
chat_params["web_search_config"] = web_search_config or {}
return self.provider.chat(**chat_params)
def chat_with_tools( def chat_with_tools(
self, self,
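
The likely reason for gating the web-search keys on the provider name: forwarding keyword arguments that a provider's chat() does not declare would raise TypeError, while building the dict conditionally keeps the call valid for every provider. A self-contained sketch with hypothetical provider classes (not the project's real ones):

    class OpenRouterLike:
        def chat(self, model, messages, stream=False):
            return "ok"  # no web-search parameters in the signature

    class AnthropicLike:
        def chat(self, model, messages, stream=False,
                 enable_web_search=False, web_search_config=None):
            return "ok (web search supported)"

    def call(provider, provider_name, enable_web_search=False, web_search_config=None):
        chat_params = {"model": "m", "messages": [], "stream": False}
        if provider_name == "anthropic":
            # Only providers that understand these keys receive them
            chat_params["enable_web_search"] = enable_web_search
            chat_params["web_search_config"] = web_search_config or {}
        return provider.chat(**chat_params)

    call(OpenRouterLike(), "openrouter", enable_web_search=True)  # ok: extras never forwarded
    call(AnthropicLike(), "anthropic", enable_web_search=True)    # ok: provider accepts them
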

View File

@@ -517,15 +517,21 @@ class ChatSession:
         Returns:
             Tuple of (full_text, usage)
         """
-        response = self.client.chat(
-            messages=messages,
-            model=model_id,
-            stream=True,
-            max_tokens=max_tokens,
-            transforms=transforms,
-            enable_web_search=enable_web_search,
-            web_search_config=web_search_config or {},
-        )
+        # Build chat parameters
+        chat_params = {
+            "messages": messages,
+            "model": model_id,
+            "stream": True,
+            "max_tokens": max_tokens,
+            "transforms": transforms,
+        }
+
+        # Only pass web search params to Anthropic provider
+        if self.client.provider_name == "anthropic":
+            chat_params["enable_web_search"] = enable_web_search
+            chat_params["web_search_config"] = web_search_config or {}
+
+        response = self.client.chat(**chat_params)

         if isinstance(response, ChatResponse):
             return response.content or "", response.usage
@@ -647,8 +653,9 @@ class ChatSession:
             ):
                 yield chunk
         else:
-            # Non-streaming request
-            response = self.client.chat(
+            # Non-streaming request - run in thread to avoid blocking event loop
+            response = await asyncio.to_thread(
+                self.client.chat,
                 messages=messages,
                 model=model_id,
                 stream=False,
@@ -691,7 +698,9 @@ class ChatSession:
         api_messages = list(messages)

         while loop_count < max_loops:
-            response = self.client.chat(
+            # Run in thread to avoid blocking event loop
+            response = await asyncio.to_thread(
+                self.client.chat,
                 messages=api_messages,
                 model=model_id,
                 stream=False,
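
asyncio.to_thread (standard library, Python 3.9+) runs the blocking provider call in a worker thread and awaits its result, so the Textual event loop keeps handling key presses and repaints while the request is in flight. A minimal sketch of the pattern, with a stand-in for the blocking client call:

    import asyncio
    import time

    def blocking_chat() -> str:
        time.sleep(2)  # stands in for a slow network round trip
        return "response"

    async def handler() -> None:
        # The event loop stays free while the call runs in a worker thread
        result = await asyncio.to_thread(blocking_chat)
        print(result)

    asyncio.run(handler())
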
@@ -837,18 +846,28 @@ class ChatSession:
         Yields:
             StreamChunk objects
         """
-        response = self.client.chat(
-            messages=messages,
-            model=model_id,
-            stream=True,
-            max_tokens=max_tokens,
-            transforms=transforms,
-            enable_web_search=enable_web_search,
-            web_search_config=web_search_config or {},
-        )
+        # Build chat parameters
+        chat_params = {
+            "messages": messages,
+            "model": model_id,
+            "stream": True,
+            "max_tokens": max_tokens,
+            "transforms": transforms,
+        }
+
+        # Only pass web search params to Anthropic provider
+        if self.client.provider_name == "anthropic":
+            chat_params["enable_web_search"] = enable_web_search
+            chat_params["web_search_config"] = web_search_config or {}
+
+        # For streaming, call directly (generator yields control naturally)
+        # For non-streaming, we'll detect it and run in thread
+        if chat_params.get("stream", True):
+            # Streaming - call directly, iteration will yield control
+            response = self.client.chat(**chat_params)

             if isinstance(response, ChatResponse):
-                # Non-streaming response
+                # Provider returned non-streaming despite stream=True
                 chunk = StreamChunk(
                     id="",
                     delta_content=response.content,
@@ -858,12 +877,24 @@ class ChatSession:
                 yield chunk
                 return

-            # Stream the response
+            # Stream the response - yield control between chunks
             for chunk in response:
+                await asyncio.sleep(0)  # Yield control to event loop
                 if chunk.error:
                     yield StreamChunk(id="", delta_content=None, usage=None, error=chunk.error)
                     break
                 yield chunk
+        else:
+            # Non-streaming - run in thread to avoid blocking
+            response = await asyncio.to_thread(self.client.chat, **chat_params)
+            if isinstance(response, ChatResponse):
+                chunk = StreamChunk(
+                    id="",
+                    delta_content=response.content,
+                    usage=response.usage,
+                    error=None,
+                )
+                yield chunk

     # ========== END ASYNC METHODS ==========
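
The streaming branch iterates what appears to be a synchronous generator inside an async method, so without the added await asyncio.sleep(0) the loop would not yield to the event loop between chunks. A small sketch of that cooperative-yield pattern, independent of the project's chunk types:

    import asyncio
    from typing import AsyncIterator, Iterable

    async def relay(chunks: Iterable[str]) -> AsyncIterator[str]:
        for chunk in chunks:        # a synchronous iterator, e.g. an SSE stream
            await asyncio.sleep(0)  # hand control back so the UI can process events
            yield chunk

    async def main() -> None:
        async for piece in relay(["Hel", "lo", "!"]):
            print(piece, end="")

    asyncio.run(main())
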

View File

@@ -7,7 +7,7 @@ from typing import Optional
 import pyperclip
 from textual.app import App, ComposeResult
-from textual.widgets import Input
+from textual.widgets import TextArea

 from oai import __version__
 from oai.commands.registry import CommandStatus, registry
@@ -36,6 +36,7 @@ from oai.tui.widgets import (
     SystemMessageWidget,
     UserMessageWidget,
 )
+from oai.tui.widgets.input_bar import ChatTextArea
 from oai.tui.widgets.command_dropdown import CommandDropdown
@@ -66,6 +67,8 @@ class oAIChatApp(App):
         self.input_history: list[str] = []
         self.history_index: int = -1
         self._navigating_history: bool = False
+        self._cancel_generation: bool = False
+        self._is_generating: bool = False

     def compose(self) -> ComposeResult:
         """Compose the TUI layout."""
@@ -80,6 +83,9 @@ class oAIChatApp(App):
     def on_mount(self) -> None:
         """Handle app mount."""
+        # Load input history from file
+        self._load_input_history()
+
         # Focus the input
         input_bar = self.query_one(InputBar)
         chat_input = input_bar.get_input()
@@ -97,11 +103,47 @@ class oAIChatApp(App):
         if self.session.online_enabled:
             input_bar.update_online_mode(True)

+    def _load_input_history(self) -> None:
+        """Load input history from history.txt file."""
+        from oai.constants import HISTORY_FILE
+
+        try:
+            if HISTORY_FILE.exists():
+                with open(HISTORY_FILE, "r", encoding="utf-8") as f:
+                    # Load all non-empty lines and unescape newlines
+                    self.input_history = [
+                        line.strip().replace("\\n", "\n")
+                        for line in f
+                        if line.strip()
+                    ]
+        except Exception as e:
+            self.logger.error(f"Failed to load input history: {e}")
+
+    def _save_input_to_history(self, user_input: str) -> None:
+        """Append input to history.txt file."""
+        from oai.constants import HISTORY_FILE
+
+        try:
+            # Escape newlines so multiline inputs stay as one history entry
+            escaped_input = user_input.replace("\n", "\\n")
+            with open(HISTORY_FILE, "a", encoding="utf-8") as f:
+                f.write(f"{escaped_input}\n")
+        except Exception as e:
+            self.logger.error(f"Failed to save input to history: {e}")
+
     def on_key(self, event) -> None:
         """Handle global keyboard shortcuts."""
         # Debug: Show what key was pressed
         # self.notify(f"Key pressed: {event.key}", severity="information")

+        # Handle Escape to cancel generation
+        if event.key == "escape" and self._is_generating:
+            self._cancel_generation = True
+            self.notify("⏹️ Stopping generation...", severity="warning")
+            event.prevent_default()
+            event.stop()
+            return
+
         # Don't handle keys if a modal screen is open (let the modal handle them)
         if len(self.screen_stack) > 1:
             return
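
The two helpers above rely on a simple escape scheme so multiline inputs survive a line-oriented history file: each newline is written as the two characters backslash-n and restored on load. A round-trip sketch of just that transformation (an input that literally contains a backslash followed by n would also be unescaped on load; the scheme trades that edge case for simplicity):

    entry = "line one\nline two"

    saved = entry.replace("\n", "\\n")             # written to history.txt as one line
    restored = saved.strip().replace("\\n", "\n")  # read back by _load_input_history

    assert saved == "line one\\nline two"
    assert restored == entry
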
@@ -119,25 +161,29 @@ class oAIChatApp(App):
         if dropdown_visible:
             if event.key == "up":
                 event.prevent_default()
+                event.stop()
                 dropdown.move_selection_up()
                 return
             elif event.key == "down":
                 event.prevent_default()
+                event.stop()
                 dropdown.move_selection_down()
                 return
             elif event.key == "tab":
                 # Tab accepts the selected command and adds space for arguments
                 event.prevent_default()
+                event.stop()
                 selected = dropdown.get_selected_command()
                 if selected:
-                    chat_input.value = selected + " "
-                    chat_input.cursor_position = len(chat_input.value)
+                    chat_input.text = selected + " "
+                    chat_input.move_cursor_relative(rows=0, columns=len(selected) + 1)
                     dropdown.hide()
                 return
             elif event.key == "enter":
                 # Enter accepts the selected command
                 # If command needs more input, add space; otherwise submit
                 event.prevent_default()
+                event.stop()  # Stop propagation to prevent TextArea from processing
                 selected = dropdown.get_selected_command()
                 if selected:
                     # Commands that require additional arguments
@@ -155,13 +201,13 @@ class oAIChatApp(App):
                     if needs_input:
                         # Add space and wait for user to type more
-                        chat_input.value = selected + " "
-                        chat_input.cursor_position = len(chat_input.value)
+                        chat_input.text = selected + " "
+                        chat_input.move_cursor_relative(rows=0, columns=len(selected) + 1)
                         dropdown.hide()
                     else:
                         # Command is complete, submit it directly
                         dropdown.hide()
-                        chat_input.value = ""  # Clear immediately
+                        chat_input.clear()  # Clear immediately
                         # Process the command directly
                         async def submit_command():
                             await self._process_submitted_input(selected)
@@ -170,8 +216,14 @@ class oAIChatApp(App):
elif event.key == "escape": elif event.key == "escape":
# Escape closes dropdown # Escape closes dropdown
event.prevent_default() event.prevent_default()
event.stop()
dropdown.hide() dropdown.hide()
return return
# Escape to clear input (when dropdown not visible)
elif event.key == "escape":
event.prevent_default()
chat_input.clear()
return
# Otherwise, arrow keys navigate history # Otherwise, arrow keys navigate history
elif event.key == "up": elif event.key == "up":
event.prevent_default() event.prevent_default()
@@ -211,9 +263,9 @@ class oAIChatApp(App):
             event.prevent_default()
             self.action_copy_last_response()

-    def on_input_changed(self, event: Input.Changed) -> None:
-        """Handle input value changes to show/hide command dropdown."""
-        if event.input.id != "chat-input":
+    def on_text_area_changed(self, event: TextArea.Changed) -> None:
+        """Handle text area value changes to show/hide command dropdown."""
+        if event.text_area.id != "chat-input":
             return

         # Don't show dropdown when navigating history
@@ -221,7 +273,7 @@ class oAIChatApp(App):
             return

         dropdown = self.query_one(CommandDropdown)
-        value = event.value
+        value = event.text_area.text

         # Show dropdown if input starts with /
         if value.startswith("/") and not value.startswith("//"):
@@ -229,16 +281,18 @@ class oAIChatApp(App):
         else:
             dropdown.hide()

-    async def on_input_submitted(self, event: Input.Submitted) -> None:
-        """Handle input submission."""
+    async def on_chat_text_area_submit(self, event: ChatTextArea.Submit) -> None:
+        """Handle Enter key submission from ChatTextArea."""
         user_input = event.value.strip()

         if not user_input:
             return

-        # Clear input field immediately
-        event.input.value = ""
+        # Clear input field
+        input_bar = self.query_one(InputBar)
+        chat_input = input_bar.get_input()
+        chat_input.clear()

-        # Process the input (async, will wait for AI response)
+        # Process the input
         await self._process_submitted_input(user_input)

     async def _process_submitted_input(self, user_input: str) -> None:
@@ -254,9 +308,10 @@ class oAIChatApp(App):
         dropdown = self.query_one(CommandDropdown)
         dropdown.hide()

-        # Add to history
+        # Add to in-memory history and save to file
         self.input_history.append(user_input)
         self.history_index = -1
+        self._save_input_to_history(user_input)

         # Always show what the user typed
         chat_display = self.query_one(ChatDisplay)
@@ -495,8 +550,12 @@ class oAIChatApp(App):
         assistant_widget = AssistantMessageWidget(model_name, chat_display=chat_display)
         await chat_display.add_message(assistant_widget)

-        # Show loading indicator immediately
-        assistant_widget.set_content("_Thinking..._")
+        # Show loading indicator with cancellation hint
+        assistant_widget.set_content("_Thinking... (Press Esc to stop)_")
+
+        # Set generation flags
+        self._is_generating = True
+        self._cancel_generation = False

         try:
             # Stream response
@@ -505,8 +564,11 @@ class oAIChatApp(App):
                 stream=self.settings.stream_enabled,
             )

-            # Stream and collect response
-            full_text, usage = await assistant_widget.stream_response(response_iterator)
+            # Stream and collect response with cancellation support
+            full_text, usage = await assistant_widget.stream_response(
+                response_iterator,
+                cancel_check=lambda: self._cancel_generation
+            )

             # Add to history if we got a response
             if full_text:
@@ -539,8 +601,15 @@ class oAIChatApp(App):
             # Update footer
             self._update_footer()

+            # Check if generation was cancelled
+            if self._cancel_generation and full_text:
+                assistant_widget.set_content(full_text + "\n\n_[Generation stopped by user]_")
+
         except Exception as e:
             assistant_widget.set_content(f"❌ Error: {str(e)}")
+        finally:
+            self._is_generating = False
+            self._cancel_generation = False

     def _update_footer(self) -> None:
         """Update footer statistics."""
@@ -860,9 +929,8 @@ class oAIChatApp(App):
elif "retry_prompt" in data: elif "retry_prompt" in data:
await self.handle_message(data["retry_prompt"]) await self.handle_message(data["retry_prompt"])
# Paste prompt # Paste command is disabled - users should use Cmd+V/Ctrl+V instead
elif "paste_prompt" in data: # No special handling needed
await self.handle_message(data["paste_prompt"])
def _show_model_selector(self, search: str = "", set_as_default: bool = False) -> None: def _show_model_selector(self, search: str = "", set_as_default: bool = False) -> None:
"""Show the model selector screen.""" """Show the model selector screen."""
@@ -994,7 +1062,7 @@ class oAIChatApp(App):
             callback=handle_confirmation
         )

-    def _navigate_history_backward(self, input_widget: Input) -> None:
+    def _navigate_history_backward(self, input_widget: TextArea) -> None:
         """Navigate backward through input history (Up arrow)."""
         if not self.input_history:
             return
@@ -1011,14 +1079,14 @@ class oAIChatApp(App):
         # Update input with history item
         if 0 <= self.history_index < len(self.input_history):
-            input_widget.value = self.input_history[self.history_index]
+            input_widget.text = self.input_history[self.history_index]
             # Move cursor to end
-            input_widget.cursor_position = len(input_widget.value)
+            input_widget.move_cursor((999, 999))  # Move to end

         # Clear flag after a short delay
         self.set_timer(0.1, lambda: setattr(self, "_navigating_history", False))

-    def _navigate_history_forward(self, input_widget: Input) -> None:
+    def _navigate_history_forward(self, input_widget: TextArea) -> None:
         """Navigate forward through input history (Down arrow)."""
         if not self.input_history or self.history_index == -1:
             return
@@ -1029,12 +1097,12 @@ class oAIChatApp(App):
         # Move forward in history
         if self.history_index < len(self.input_history) - 1:
             self.history_index += 1
-            input_widget.value = self.input_history[self.history_index]
-            input_widget.cursor_position = len(input_widget.value)
+            input_widget.text = self.input_history[self.history_index]
+            input_widget.move_cursor((999, 999))  # Move to end
         else:
             # At the newest item, clear the input
             self.history_index = -1
-            input_widget.value = ""
+            input_widget.clear()

         # Clear flag after a short delay
         self.set_timer(0.1, lambda: setattr(self, "_navigating_history", False))

View File

@@ -111,6 +111,7 @@ class CommandsScreen(ModalScreen[None]):
 [green]/config model <id>[/] - Set default model
 [green]/config system <prompt>[/] - Set system prompt
 [green]/config maxtoken <num>[/] - Set token limit
+[green]/config log <level>[/] - Set log level (debug/info/warning/error/critical)

 [bold cyan]Memory & Context[/]

View File

@@ -64,6 +64,9 @@ class HelpScreen(ModalScreen[None]):
 [bold]F1[/] Show this help (Ctrl+H may not work)
 [bold]F2[/] Open model selector (Ctrl+M may not work)
 [bold]F3[/] Copy last AI response to clipboard
+[bold]Enter[/] Submit message
+[bold]Ctrl+Enter[/] Insert newline (for multiline messages)
+[bold]Esc[/] Stop/cancel AI response generation
 [bold]Ctrl+S[/] Show session statistics
 [bold]Ctrl+L[/] Clear chat display
 [bold]Ctrl+P[/] Show previous message
@@ -71,7 +74,7 @@ class HelpScreen(ModalScreen[None]):
 [bold]Ctrl+Y[/] Copy last AI response (alternative to F3)
 [bold]Ctrl+Q[/] Quit application
 [bold]Up/Down[/] Navigate input history
-[bold]ESC[/] Close dialogs
+[bold]ESC[/] Clear input / Close dialogs

 [dim]Note: Some Ctrl keys may be captured by your terminal[/]

 [bold cyan]═══ SLASH COMMANDS ═══[/]
@@ -89,6 +92,7 @@ class HelpScreen(ModalScreen[None]):
 /config stream on Enable streaming responses
 /system [prompt] Set session system prompt
 /maxtoken [n] Set session token limit
+/config log [level] Set log level (debug/info/warning/error/critical)

 [bold yellow]Conversation Management:[/]
 /save [name] Save current conversation

View File

@@ -98,6 +98,7 @@ class CommandDropdown(VerticalScroll):
("/config model", "Set default model"), ("/config model", "Set default model"),
("/config system", "Set system prompt"), ("/config system", "Set system prompt"),
("/config maxtoken", "Set token limit"), ("/config maxtoken", "Set token limit"),
("/config log", "Set log level (debug/info/warning/error)"),
("/system", "Set system prompt"), ("/system", "Set system prompt"),
("/maxtoken", "Set token limit"), ("/maxtoken", "Set token limit"),
("/retry", "Retry last prompt"), ("/retry", "Retry last prompt"),

View File

@@ -2,7 +2,53 @@
 from textual.app import ComposeResult
 from textual.containers import Horizontal
-from textual.widgets import Input, Static
+from textual.message import Message
+from textual.widgets import Static, TextArea
+
+
+class ChatTextArea(TextArea):
+    """Custom TextArea that sends submit message on Enter (unless dropdown is open)."""
+
+    class Submit(Message):
+        """Message sent when Enter is pressed."""
+
+        def __init__(self, value: str) -> None:
+            super().__init__()
+            self.value = value
+
+    def _on_key(self, event) -> None:
+        """Handle key events BEFORE TextArea processes them."""
+        # Check if command dropdown is visible
+        dropdown_visible = False
+        try:
+            from oai.tui.widgets.command_dropdown import CommandDropdown
+            dropdown = self.app.query_one(CommandDropdown)
+            dropdown_visible = dropdown.has_class("visible")
+        except:
+            pass
+
+        if event.key == "enter":
+            if dropdown_visible:
+                # Dropdown is open - prevent TextArea from inserting newline
+                # but let event bubble up to app for dropdown handling
+                event.prevent_default()
+                # Don't call stop() - let it bubble to app's on_key
+                # Don't call super - we don't want newline
+                return
+            else:
+                # Dropdown not visible - submit the message
+                event.prevent_default()
+                event.stop()
+                self.post_message(self.Submit(self.text))
+                return
+        elif event.key in ("ctrl+j", "ctrl+enter"):
+            # Insert newline on Ctrl+Enter
+            event.prevent_default()
+            event.stop()
+            self.insert("\n")
+            return
+
+        # For all other keys, let TextArea handle them normally
+        super()._on_key(event)


 class InputBar(Horizontal):
@@ -16,10 +62,9 @@ class InputBar(Horizontal):
     def compose(self) -> ComposeResult:
         """Compose the input bar."""
         yield Static(self._format_prefix(), id="input-prefix", classes="prefix-hidden" if not (self.mcp_status or self.online_mode) else "")
-        yield Input(
-            placeholder="Type a message or /command...",
-            id="chat-input"
-        )
+        text_area = ChatTextArea(id="chat-input")
+        text_area.show_line_numbers = False
+        yield text_area

     def _format_prefix(self) -> str:
         """Format the input prefix with status indicators."""
@@ -44,6 +89,6 @@ class InputBar(Horizontal):
         prefix = self.query_one("#input-prefix", Static)
         prefix.update(self._format_prefix())

-    def get_input(self) -> Input:
+    def get_input(self) -> ChatTextArea:
         """Get the input widget."""
-        return self.query_one("#chat-input", Input)
+        return self.query_one("#chat-input", ChatTextArea)

View File

@@ -35,7 +35,13 @@ class UserMessageWidget(Static):
     def compose(self) -> ComposeResult:
         """Compose the user message."""
-        yield Static(f"[bold green]You:[/] {self.content}")
+        yield Static(f"[bold green]You:[/] {self.content}", id="user-message-content")
+
+    def update_content(self, new_content: str) -> None:
+        """Update the message content."""
+        self.content = new_content
+        content_widget = self.query_one("#user-message-content", Static)
+        content_widget.update(f"[bold green]You:[/] {new_content}")


 class SystemMessageWidget(Static):
@@ -64,13 +70,22 @@ class AssistantMessageWidget(Static):
yield Static(f"[bold]{self.model_name}:[/]", id="assistant-label") yield Static(f"[bold]{self.model_name}:[/]", id="assistant-label")
yield RichLog(id="assistant-content", highlight=True, markup=True, wrap=True) yield RichLog(id="assistant-content", highlight=True, markup=True, wrap=True)
async def stream_response(self, response_iterator: AsyncIterator) -> Tuple[str, Any]: async def stream_response(self, response_iterator: AsyncIterator, cancel_check=None) -> Tuple[str, Any]:
"""Stream tokens progressively and return final text and usage.""" """Stream tokens progressively and return final text and usage.
Args:
response_iterator: Async iterator of response chunks
cancel_check: Optional callable that returns True if generation should be cancelled
"""
log = self.query_one("#assistant-content", RichLog) log = self.query_one("#assistant-content", RichLog)
self.full_text = "" self.full_text = ""
usage = None usage = None
async for chunk in response_iterator: async for chunk in response_iterator:
# Check for cancellation
if cancel_check and cancel_check():
break
if hasattr(chunk, "delta_content") and chunk.delta_content: if hasattr(chunk, "delta_content") and chunk.delta_content:
self.full_text += chunk.delta_content self.full_text += chunk.delta_content
log.clear() log.clear()
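
cancel_check is polled once per chunk, so pressing Esc takes effect at the next chunk boundary rather than interrupting the provider call itself. A self-contained sketch of the same pattern, with a fake stream and a flag standing in for the app's _cancel_generation attribute:

    import asyncio

    async def fake_stream():
        for part in ["Hel", "lo ", "world"]:
            await asyncio.sleep(0)
            yield part

    async def consume(chunks, cancel_check=None):
        text = ""
        async for chunk in chunks:
            if cancel_check and cancel_check():
                break  # stop at the next chunk boundary
            text += chunk
        return text

    class State:
        cancel = False  # flipped by the app when Esc is pressed

    state = State()
    print(asyncio.run(consume(fake_stream(), cancel_check=lambda: state.cancel)))
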