Bug fixes for v3.0.0
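
Summary of changes:

- /config now accepts "log" as an alias for "loglevel", validates the level, and reports the current level when no value is given.
- /paste is disabled in favor of native Cmd+V (macOS) / Ctrl+V (Linux/Windows) pasting; its help text now says so.
- AIClient and ChatSession build chat parameters in a dict and only pass web-search options to the Anthropic provider; non-streaming requests run in a worker thread via asyncio.to_thread and streaming yields control between chunks so the event loop is not blocked.
- The TUI input switches from a single-line Input to a multi-line ChatTextArea (Enter submits, Ctrl+Enter inserts a newline), input history is persisted to history.txt, and Esc cancels generation or clears the input.
- Help screens and the command dropdown are updated for the new shortcuts and /config log.
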
@@ -139,7 +139,6 @@ class HelpCommand(Command):
("/retry", "Resend the last prompt.", "/retry"),
("/memory", "Toggle conversation memory.", "/memory on"),
("/online", "Toggle online mode (web search).", "/online on"),
("/paste", "Paste from clipboard with optional prompt.", "/paste Explain"),
]),
("[bold cyan]━━━ NAVIGATION ━━━[/]", [
("/prev", "View previous response in history.", "/prev"),
@@ -150,6 +149,7 @@ class HelpCommand(Command):
("/model", "Select AI model.", "/model gpt"),
("/info", "Show model information.", "/info"),
("/config", "View or change settings.", "/config stream on"),
("/config log", "Set log level.", "/config log debug"),
("/maxtoken", "Set session token limit.", "/maxtoken 2000"),
("/system", "Set system prompt.", "/system You are an expert"),
]),
@@ -1022,13 +1022,20 @@ class ConfigCommand(Command):
else:
pass

elif setting == "loglevel":
elif setting in ["loglevel", "log"]:
valid_levels = ["debug", "info", "warning", "error", "critical"]
if value and value.lower() in valid_levels:
settings.set_log_level(value.lower())
print_success(f"Log level set to: {value.lower()}")
if value:
if value.lower() in valid_levels:
settings.set_log_level(value.lower())
message = f"✓ Log level set to: {value.lower()}"
return CommandResult.success(message=message)
else:
message = f"Invalid log level. Valid levels: {', '.join(valid_levels)}"
return CommandResult.error(message)
else:
print_info(f"Valid levels: {', '.join(valid_levels)}")
# Show current log level
message = f"Current log level: {settings.log_level}\nValid levels: {', '.join(valid_levels)}"
return CommandResult.success(message=message)

else:
pass
@@ -1600,32 +1607,15 @@ class PasteCommand(Command):
@property
def help(self) -> CommandHelp:
return CommandHelp(
description="Paste from clipboard and send to AI.",
usage="/paste [prompt]",
description="[Disabled] Use Cmd+V (macOS) or Ctrl+V (Linux/Windows) to paste directly.",
usage="/paste",
notes="This command is disabled. Use keyboard shortcuts to paste instead.",
)

def execute(self, args: str, context: CommandContext) -> CommandResult:
try:
import pyperclip
content = pyperclip.paste()
except ImportError:
message = "pyperclip not installed"
return CommandResult.error(message)
except Exception as e:
message = f"Failed to access clipboard: {e}"
return CommandResult.error(str(e))

if not content:
message = "Clipboard is empty"
return CommandResult.error(message)

# Build the prompt
if args:
full_prompt = f"{args}\n\n```\n{content}\n```"
else:
full_prompt = content

return CommandResult.success(data={"paste_prompt": full_prompt})
# Disabled - use Cmd+V (macOS) or Ctrl+V (Linux/Windows) instead
message = "💡 Tip: Use Cmd+V (macOS) or Ctrl+V (Linux/Windows) to paste directly"
return CommandResult.success(message=message)

class ModelCommand(Command):
"""Select AI model."""

@@ -221,18 +221,24 @@ class AIClient:
f"messages={len(chat_messages)}, stream={stream}"
)

return self.provider.chat(
model=model_id,
messages=chat_messages,
stream=stream,
max_tokens=max_tokens,
temperature=temperature,
tools=tools,
tool_choice=tool_choice,
transforms=transforms,
enable_web_search=enable_web_search,
web_search_config=web_search_config or {},
)
# Build provider chat parameters
chat_params = {
"model": model_id,
"messages": chat_messages,
"stream": stream,
"max_tokens": max_tokens,
"temperature": temperature,
"tools": tools,
"tool_choice": tool_choice,
"transforms": transforms,
}

# Only pass web search params to Anthropic provider
if self.provider_name == "anthropic":
chat_params["enable_web_search"] = enable_web_search
chat_params["web_search_config"] = web_search_config or {}

return self.provider.chat(**chat_params)

def chat_with_tools(
self,

@@ -517,15 +517,21 @@ class ChatSession:
Returns:
Tuple of (full_text, usage)
"""
response = self.client.chat(
messages=messages,
model=model_id,
stream=True,
max_tokens=max_tokens,
transforms=transforms,
enable_web_search=enable_web_search,
web_search_config=web_search_config or {},
)
# Build chat parameters
chat_params = {
"messages": messages,
"model": model_id,
"stream": True,
"max_tokens": max_tokens,
"transforms": transforms,
}

# Only pass web search params to Anthropic provider
if self.client.provider_name == "anthropic":
chat_params["enable_web_search"] = enable_web_search
chat_params["web_search_config"] = web_search_config or {}

response = self.client.chat(**chat_params)

if isinstance(response, ChatResponse):
return response.content or "", response.usage
@@ -647,8 +653,9 @@ class ChatSession:
):
yield chunk
else:
# Non-streaming request
response = self.client.chat(
# Non-streaming request - run in thread to avoid blocking event loop
response = await asyncio.to_thread(
self.client.chat,
messages=messages,
model=model_id,
stream=False,
@@ -691,7 +698,9 @@ class ChatSession:
api_messages = list(messages)

while loop_count < max_loops:
response = self.client.chat(
# Run in thread to avoid blocking event loop
response = await asyncio.to_thread(
self.client.chat,
messages=api_messages,
model=model_id,
stream=False,
@@ -837,33 +846,55 @@ class ChatSession:
Yields:
StreamChunk objects
"""
response = self.client.chat(
messages=messages,
model=model_id,
stream=True,
max_tokens=max_tokens,
transforms=transforms,
enable_web_search=enable_web_search,
web_search_config=web_search_config or {},
)
# Build chat parameters
chat_params = {
"messages": messages,
"model": model_id,
"stream": True,
"max_tokens": max_tokens,
"transforms": transforms,
}

if isinstance(response, ChatResponse):
# Non-streaming response
chunk = StreamChunk(
id="",
delta_content=response.content,
usage=response.usage,
error=None,
)
yield chunk
return
# Only pass web search params to Anthropic provider
if self.client.provider_name == "anthropic":
chat_params["enable_web_search"] = enable_web_search
chat_params["web_search_config"] = web_search_config or {}

# Stream the response
for chunk in response:
if chunk.error:
yield StreamChunk(id="", delta_content=None, usage=None, error=chunk.error)
break
yield chunk
# For streaming, call directly (generator yields control naturally)
# For non-streaming, we'll detect it and run in thread
if chat_params.get("stream", True):
# Streaming - call directly, iteration will yield control
response = self.client.chat(**chat_params)

if isinstance(response, ChatResponse):
# Provider returned non-streaming despite stream=True
chunk = StreamChunk(
id="",
delta_content=response.content,
usage=response.usage,
error=None,
)
yield chunk
return

# Stream the response - yield control between chunks
for chunk in response:
await asyncio.sleep(0)  # Yield control to event loop
if chunk.error:
yield StreamChunk(id="", delta_content=None, usage=None, error=chunk.error)
break
yield chunk
else:
# Non-streaming - run in thread to avoid blocking
response = await asyncio.to_thread(self.client.chat, **chat_params)
if isinstance(response, ChatResponse):
chunk = StreamChunk(
id="",
delta_content=response.content,
usage=response.usage,
error=None,
)
yield chunk

# ========== END ASYNC METHODS ==========

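For reference, a minimal sketch of the non-blocking pattern the ChatSession changes above adopt; this is illustrative code, not from the repository (blocking_chat, blocking_stream, and ask are invented names). Non-streaming provider calls run in a worker thread via asyncio.to_thread, while streaming loops hand control back to the event loop between chunks.

import asyncio
import time
from typing import Iterator


def blocking_chat(prompt: str) -> str:
    """Stand-in for a synchronous, non-streaming provider call."""
    time.sleep(0.1)  # simulate network latency
    return f"echo: {prompt}"


def blocking_stream(prompt: str) -> Iterator[str]:
    """Stand-in for a synchronous generator of streamed chunks."""
    for word in prompt.split():
        yield word


async def ask(prompt: str, stream: bool) -> str:
    if stream:
        parts = []
        for chunk in blocking_stream(prompt):
            await asyncio.sleep(0)  # hand control back to the event loop between chunks
            parts.append(chunk)
        return " ".join(parts)
    # Non-streaming: run the blocking call in a worker thread so the loop stays responsive
    return await asyncio.to_thread(blocking_chat, prompt)


if __name__ == "__main__":
    print(asyncio.run(ask("hello world", stream=False)))
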
oai/tui/app.py
@@ -7,7 +7,7 @@ from typing import Optional

import pyperclip
from textual.app import App, ComposeResult
from textual.widgets import Input
from textual.widgets import TextArea

from oai import __version__
from oai.commands.registry import CommandStatus, registry
@@ -36,6 +36,7 @@ from oai.tui.widgets import (
SystemMessageWidget,
UserMessageWidget,
)
from oai.tui.widgets.input_bar import ChatTextArea
from oai.tui.widgets.command_dropdown import CommandDropdown

@@ -66,6 +67,8 @@ class oAIChatApp(App):
self.input_history: list[str] = []
self.history_index: int = -1
self._navigating_history: bool = False
self._cancel_generation: bool = False
self._is_generating: bool = False

def compose(self) -> ComposeResult:
"""Compose the TUI layout."""
@@ -80,6 +83,9 @@ class oAIChatApp(App):

def on_mount(self) -> None:
"""Handle app mount."""
# Load input history from file
self._load_input_history()

# Focus the input
input_bar = self.query_one(InputBar)
chat_input = input_bar.get_input()
@@ -97,11 +103,47 @@ class oAIChatApp(App):
if self.session.online_enabled:
input_bar.update_online_mode(True)

def _load_input_history(self) -> None:
"""Load input history from history.txt file."""
from oai.constants import HISTORY_FILE

try:
if HISTORY_FILE.exists():
with open(HISTORY_FILE, "r", encoding="utf-8") as f:
# Load all non-empty lines and unescape newlines
self.input_history = [
line.strip().replace("\\n", "\n")
for line in f
if line.strip()
]
except Exception as e:
self.logger.error(f"Failed to load input history: {e}")

def _save_input_to_history(self, user_input: str) -> None:
"""Append input to history.txt file."""
from oai.constants import HISTORY_FILE

try:
# Escape newlines so multiline inputs stay as one history entry
escaped_input = user_input.replace("\n", "\\n")
with open(HISTORY_FILE, "a", encoding="utf-8") as f:
f.write(f"{escaped_input}\n")
except Exception as e:
self.logger.error(f"Failed to save input to history: {e}")

def on_key(self, event) -> None:
"""Handle global keyboard shortcuts."""
# Debug: Show what key was pressed
# self.notify(f"Key pressed: {event.key}", severity="information")

# Handle Escape to cancel generation
if event.key == "escape" and self._is_generating:
self._cancel_generation = True
self.notify("⏹️ Stopping generation...", severity="warning")
event.prevent_default()
event.stop()
return

# Don't handle keys if a modal screen is open (let the modal handle them)
if len(self.screen_stack) > 1:
return
@@ -119,25 +161,29 @@ class oAIChatApp(App):
if dropdown_visible:
if event.key == "up":
event.prevent_default()
event.stop()
dropdown.move_selection_up()
return
elif event.key == "down":
event.prevent_default()
event.stop()
dropdown.move_selection_down()
return
elif event.key == "tab":
# Tab accepts the selected command and adds space for arguments
event.prevent_default()
event.stop()
selected = dropdown.get_selected_command()
if selected:
chat_input.value = selected + " "
chat_input.cursor_position = len(chat_input.value)
chat_input.text = selected + " "
chat_input.move_cursor_relative(rows=0, columns=len(selected) + 1)
dropdown.hide()
return
elif event.key == "enter":
# Enter accepts the selected command
# If command needs more input, add space; otherwise submit
event.prevent_default()
event.stop()  # Stop propagation to prevent TextArea from processing
selected = dropdown.get_selected_command()
if selected:
# Commands that require additional arguments
@@ -155,13 +201,13 @@ class oAIChatApp(App):

if needs_input:
# Add space and wait for user to type more
chat_input.value = selected + " "
chat_input.cursor_position = len(chat_input.value)
chat_input.text = selected + " "
chat_input.move_cursor_relative(rows=0, columns=len(selected) + 1)
dropdown.hide()
else:
# Command is complete, submit it directly
dropdown.hide()
chat_input.value = ""  # Clear immediately
chat_input.clear()  # Clear immediately
# Process the command directly
async def submit_command():
await self._process_submitted_input(selected)
@@ -170,8 +216,14 @@ class oAIChatApp(App):
elif event.key == "escape":
# Escape closes dropdown
event.prevent_default()
event.stop()
dropdown.hide()
return
# Escape to clear input (when dropdown not visible)
elif event.key == "escape":
event.prevent_default()
chat_input.clear()
return
# Otherwise, arrow keys navigate history
elif event.key == "up":
event.prevent_default()
@@ -211,9 +263,9 @@ class oAIChatApp(App):
event.prevent_default()
self.action_copy_last_response()

def on_input_changed(self, event: Input.Changed) -> None:
"""Handle input value changes to show/hide command dropdown."""
if event.input.id != "chat-input":
def on_text_area_changed(self, event: TextArea.Changed) -> None:
"""Handle text area value changes to show/hide command dropdown."""
if event.text_area.id != "chat-input":
return

# Don't show dropdown when navigating history
@@ -221,7 +273,7 @@ class oAIChatApp(App):
return

dropdown = self.query_one(CommandDropdown)
value = event.value
value = event.text_area.text

# Show dropdown if input starts with /
if value.startswith("/") and not value.startswith("//"):
@@ -229,16 +281,18 @@ class oAIChatApp(App):
else:
dropdown.hide()

async def on_input_submitted(self, event: Input.Submitted) -> None:
"""Handle input submission."""
async def on_chat_text_area_submit(self, event: ChatTextArea.Submit) -> None:
"""Handle Enter key submission from ChatTextArea."""
user_input = event.value.strip()
if not user_input:
return

# Clear input field immediately
event.input.value = ""
# Clear input field
input_bar = self.query_one(InputBar)
chat_input = input_bar.get_input()
chat_input.clear()

# Process the input (async, will wait for AI response)
# Process the input
await self._process_submitted_input(user_input)

async def _process_submitted_input(self, user_input: str) -> None:
@@ -254,9 +308,10 @@ class oAIChatApp(App):
dropdown = self.query_one(CommandDropdown)
dropdown.hide()

# Add to history
# Add to in-memory history and save to file
self.input_history.append(user_input)
self.history_index = -1
self._save_input_to_history(user_input)

# Always show what the user typed
chat_display = self.query_one(ChatDisplay)
@@ -495,8 +550,12 @@ class oAIChatApp(App):
assistant_widget = AssistantMessageWidget(model_name, chat_display=chat_display)
await chat_display.add_message(assistant_widget)

# Show loading indicator immediately
assistant_widget.set_content("_Thinking..._")
# Show loading indicator with cancellation hint
assistant_widget.set_content("_Thinking... (Press Esc to stop)_")

# Set generation flags
self._is_generating = True
self._cancel_generation = False

try:
# Stream response
@@ -505,8 +564,11 @@ class oAIChatApp(App):
stream=self.settings.stream_enabled,
)

# Stream and collect response
full_text, usage = await assistant_widget.stream_response(response_iterator)
# Stream and collect response with cancellation support
full_text, usage = await assistant_widget.stream_response(
response_iterator,
cancel_check=lambda: self._cancel_generation
)

# Add to history if we got a response
if full_text:
@@ -539,8 +601,15 @@ class oAIChatApp(App):
# Update footer
self._update_footer()

# Check if generation was cancelled
if self._cancel_generation and full_text:
assistant_widget.set_content(full_text + "\n\n_[Generation stopped by user]_")

except Exception as e:
assistant_widget.set_content(f"❌ Error: {str(e)}")
finally:
self._is_generating = False
self._cancel_generation = False

def _update_footer(self) -> None:
"""Update footer statistics."""
@@ -860,9 +929,8 @@ class oAIChatApp(App):
elif "retry_prompt" in data:
await self.handle_message(data["retry_prompt"])

# Paste prompt
elif "paste_prompt" in data:
await self.handle_message(data["paste_prompt"])
# Paste command is disabled - users should use Cmd+V/Ctrl+V instead
# No special handling needed

def _show_model_selector(self, search: str = "", set_as_default: bool = False) -> None:
"""Show the model selector screen."""
@@ -994,7 +1062,7 @@ class oAIChatApp(App):
callback=handle_confirmation
)

def _navigate_history_backward(self, input_widget: Input) -> None:
def _navigate_history_backward(self, input_widget: TextArea) -> None:
"""Navigate backward through input history (Up arrow)."""
if not self.input_history:
return
@@ -1011,14 +1079,14 @@ class oAIChatApp(App):

# Update input with history item
if 0 <= self.history_index < len(self.input_history):
input_widget.value = self.input_history[self.history_index]
input_widget.text = self.input_history[self.history_index]
# Move cursor to end
input_widget.cursor_position = len(input_widget.value)
input_widget.move_cursor((999, 999))  # Move to end

# Clear flag after a short delay
self.set_timer(0.1, lambda: setattr(self, "_navigating_history", False))

def _navigate_history_forward(self, input_widget: Input) -> None:
def _navigate_history_forward(self, input_widget: TextArea) -> None:
"""Navigate forward through input history (Down arrow)."""
if not self.input_history or self.history_index == -1:
return
@@ -1029,12 +1097,12 @@ class oAIChatApp(App):
# Move forward in history
if self.history_index < len(self.input_history) - 1:
self.history_index += 1
input_widget.value = self.input_history[self.history_index]
input_widget.cursor_position = len(input_widget.value)
input_widget.text = self.input_history[self.history_index]
input_widget.move_cursor((999, 999))  # Move to end
else:
# At the newest item, clear the input
self.history_index = -1
input_widget.value = ""
input_widget.clear()

# Clear flag after a short delay
self.set_timer(0.1, lambda: setattr(self, "_navigating_history", False))

@@ -111,6 +111,7 @@ class CommandsScreen(ModalScreen[None]):
[green]/config model <id>[/] - Set default model
[green]/config system <prompt>[/] - Set system prompt
[green]/config maxtoken <num>[/] - Set token limit
[green]/config log <level>[/] - Set log level (debug/info/warning/error/critical)

[bold cyan]Memory & Context[/]

@@ -64,6 +64,9 @@ class HelpScreen(ModalScreen[None]):
[bold]F1[/] Show this help (Ctrl+H may not work)
[bold]F2[/] Open model selector (Ctrl+M may not work)
[bold]F3[/] Copy last AI response to clipboard
[bold]Enter[/] Submit message
[bold]Ctrl+Enter[/] Insert newline (for multiline messages)
[bold]Esc[/] Stop/cancel AI response generation
[bold]Ctrl+S[/] Show session statistics
[bold]Ctrl+L[/] Clear chat display
[bold]Ctrl+P[/] Show previous message
@@ -71,7 +74,7 @@ class HelpScreen(ModalScreen[None]):
[bold]Ctrl+Y[/] Copy last AI response (alternative to F3)
[bold]Ctrl+Q[/] Quit application
[bold]Up/Down[/] Navigate input history
[bold]ESC[/] Close dialogs
[bold]ESC[/] Clear input / Close dialogs
[dim]Note: Some Ctrl keys may be captured by your terminal[/]

[bold cyan]═══ SLASH COMMANDS ═══[/]
@@ -89,6 +92,7 @@ class HelpScreen(ModalScreen[None]):
/config stream on Enable streaming responses
/system [prompt] Set session system prompt
/maxtoken [n] Set session token limit
/config log [level] Set log level (debug/info/warning/error/critical)

[bold yellow]Conversation Management:[/]
/save [name] Save current conversation

@@ -98,6 +98,7 @@ class CommandDropdown(VerticalScroll):
("/config model", "Set default model"),
("/config system", "Set system prompt"),
("/config maxtoken", "Set token limit"),
("/config log", "Set log level (debug/info/warning/error)"),
("/system", "Set system prompt"),
("/maxtoken", "Set token limit"),
("/retry", "Retry last prompt"),

@@ -2,7 +2,53 @@

from textual.app import ComposeResult
from textual.containers import Horizontal
from textual.widgets import Input, Static
from textual.message import Message
from textual.widgets import Static, TextArea


class ChatTextArea(TextArea):
"""Custom TextArea that sends submit message on Enter (unless dropdown is open)."""

class Submit(Message):
"""Message sent when Enter is pressed."""
def __init__(self, value: str) -> None:
super().__init__()
self.value = value

def _on_key(self, event) -> None:
"""Handle key events BEFORE TextArea processes them."""
# Check if command dropdown is visible
dropdown_visible = False
try:
from oai.tui.widgets.command_dropdown import CommandDropdown
dropdown = self.app.query_one(CommandDropdown)
dropdown_visible = dropdown.has_class("visible")
except:
pass

if event.key == "enter":
if dropdown_visible:
# Dropdown is open - prevent TextArea from inserting newline
# but let event bubble up to app for dropdown handling
event.prevent_default()
# Don't call stop() - let it bubble to app's on_key
# Don't call super - we don't want newline
return
else:
# Dropdown not visible - submit the message
event.prevent_default()
event.stop()
self.post_message(self.Submit(self.text))
return
elif event.key in ("ctrl+j", "ctrl+enter"):
# Insert newline on Ctrl+Enter
event.prevent_default()
event.stop()
self.insert("\n")
return

# For all other keys, let TextArea handle them normally
super()._on_key(event)


class InputBar(Horizontal):
@@ -16,10 +62,9 @@ class InputBar(Horizontal):
def compose(self) -> ComposeResult:
"""Compose the input bar."""
yield Static(self._format_prefix(), id="input-prefix", classes="prefix-hidden" if not (self.mcp_status or self.online_mode) else "")
yield Input(
placeholder="Type a message or /command...",
id="chat-input"
)
text_area = ChatTextArea(id="chat-input")
text_area.show_line_numbers = False
yield text_area

def _format_prefix(self) -> str:
"""Format the input prefix with status indicators."""
@@ -44,6 +89,6 @@ class InputBar(Horizontal):
prefix = self.query_one("#input-prefix", Static)
prefix.update(self._format_prefix())

def get_input(self) -> Input:
def get_input(self) -> ChatTextArea:
"""Get the input widget."""
return self.query_one("#chat-input", Input)
return self.query_one("#chat-input", ChatTextArea)

@@ -35,7 +35,13 @@ class UserMessageWidget(Static):

def compose(self) -> ComposeResult:
"""Compose the user message."""
yield Static(f"[bold green]You:[/] {self.content}")
yield Static(f"[bold green]You:[/] {self.content}", id="user-message-content")

def update_content(self, new_content: str) -> None:
"""Update the message content."""
self.content = new_content
content_widget = self.query_one("#user-message-content", Static)
content_widget.update(f"[bold green]You:[/] {new_content}")


class SystemMessageWidget(Static):
@@ -64,13 +70,22 @@ class AssistantMessageWidget(Static):
yield Static(f"[bold]{self.model_name}:[/]", id="assistant-label")
yield RichLog(id="assistant-content", highlight=True, markup=True, wrap=True)

async def stream_response(self, response_iterator: AsyncIterator) -> Tuple[str, Any]:
"""Stream tokens progressively and return final text and usage."""
async def stream_response(self, response_iterator: AsyncIterator, cancel_check=None) -> Tuple[str, Any]:
"""Stream tokens progressively and return final text and usage.

Args:
response_iterator: Async iterator of response chunks
cancel_check: Optional callable that returns True if generation should be cancelled
"""
log = self.query_one("#assistant-content", RichLog)
self.full_text = ""
usage = None

async for chunk in response_iterator:
# Check for cancellation
if cancel_check and cancel_check():
break

if hasattr(chunk, "delta_content") and chunk.delta_content:
self.full_text += chunk.delta_content
log.clear()
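
For reference, a minimal sketch of the cancel_check pattern introduced above; this is illustrative code, not from the repository (collect, fake_stream, and demo are invented names). The consumer polls a callable between chunks and stops early once it returns True, which is how Esc interrupts generation in the TUI.

import asyncio
from typing import AsyncIterator, Callable, Optional


async def collect(
    chunks: AsyncIterator[str],
    cancel_check: Optional[Callable[[], bool]] = None,
) -> str:
    """Consume a streamed response, stopping early if cancel_check() turns True."""
    text = ""
    async for chunk in chunks:
        if cancel_check and cancel_check():
            break  # keep whatever arrived before cancellation
        text += chunk
    return text


async def demo() -> None:
    async def fake_stream() -> AsyncIterator[str]:
        for word in ("one ", "two ", "three "):
            await asyncio.sleep(0)
            yield word

    cancelled = False
    result = await collect(fake_stream(), cancel_check=lambda: cancelled)
    print(result)  # "one two three " - nothing set the flag in this demo


if __name__ == "__main__":
    asyncio.run(demo())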