# oai/oai/constants.py
"""
Application-wide constants for oAI.
This module contains all configuration constants, default values, and static
definitions used throughout the application. Centralizing these values makes
the codebase easier to maintain and configure.
"""
from pathlib import Path
from typing import Set, Dict, Any
import logging
# Import version from single source of truth
from oai import __version__
# =============================================================================
# APPLICATION METADATA
# =============================================================================
APP_NAME = "oAI"
# The version string is defined once in oai/__init__.py (single source of
# truth) and only re-exported here next to the other metadata constants.
APP_VERSION = __version__
APP_URL = "https://iurl.no/oai"
APP_DESCRIPTION = "Open AI Chat Client with Multi-Provider Support"
# =============================================================================
# FILE PATHS
# =============================================================================
# All persistent state lives under the user's home directory: configuration,
# history, database and log under ~/.config/oai, cached data under ~/.cache/oai.
HOME_DIR = Path.home()
CONFIG_DIR = HOME_DIR / ".config" / "oai"
CACHE_DIR = HOME_DIR / ".cache" / "oai"
HISTORY_FILE = CONFIG_DIR / "history.txt"
DATABASE_FILE = CONFIG_DIR / "oai_config.db"
LOG_FILE = CONFIG_DIR / "oai.log"
# =============================================================================
# API CONFIGURATION
# =============================================================================
# Defaults applied when the user has not configured anything yet.
DEFAULT_BASE_URL = "https://openrouter.ai/api/v1"
DEFAULT_STREAM_ENABLED = True
DEFAULT_MAX_TOKENS = 100_000
DEFAULT_ONLINE_MODE = False
# =============================================================================
# PROVIDER CONFIGURATION
# =============================================================================
# Canonical provider identifiers.
PROVIDER_OPENROUTER = "openrouter"
PROVIDER_ANTHROPIC = "anthropic"
PROVIDER_OPENAI = "openai"
PROVIDER_OLLAMA = "ollama"
VALID_PROVIDERS = [
    PROVIDER_OPENROUTER,
    PROVIDER_ANTHROPIC,
    PROVIDER_OPENAI,
    PROVIDER_OLLAMA,
]
# Base API endpoints per provider; the Ollama default assumes a local daemon.
ANTHROPIC_BASE_URL = "https://api.anthropic.com"
OPENAI_BASE_URL = "https://api.openai.com/v1"
OLLAMA_DEFAULT_URL = "http://localhost:11434"
# Provider used when none has been selected.
DEFAULT_PROVIDER = PROVIDER_OPENROUTER
# =============================================================================
# DEFAULT SYSTEM PROMPT
# =============================================================================
# System prompt applied when the user has not saved a custom default
# (/system clear reverts to this hardcoded text).
DEFAULT_SYSTEM_PROMPT = (
    "You are a knowledgeable and helpful AI assistant. "
    "Provide clear, accurate, and well-structured responses. "
    "Be concise yet thorough. "
    "When uncertain about something, acknowledge your limitations. "
    "For technical topics, include relevant details and examples when helpful."
)
# =============================================================================
# PRICING DEFAULTS (per million tokens)
# =============================================================================
# Fallback prices used when a model does not report its own pricing.
DEFAULT_INPUT_PRICE = 3.0
DEFAULT_OUTPUT_PRICE = 15.0
MODEL_PRICING: Dict[str, float] = {
    "input": DEFAULT_INPUT_PRICE,
    "output": DEFAULT_OUTPUT_PRICE,
}
# =============================================================================
# CREDIT ALERTS
# =============================================================================
LOW_CREDIT_RATIO = 0.1  # Warn once remaining credits drop below 10% of total
LOW_CREDIT_AMOUNT = 1.0  # Warn once remaining credits drop below $1.00
DEFAULT_COST_WARNING_THRESHOLD = 0.01  # Warn when one message costs more than this
COST_WARNING_THRESHOLD = DEFAULT_COST_WARNING_THRESHOLD  # Convenience alias
# =============================================================================
# LOGGING CONFIGURATION
# =============================================================================
DEFAULT_LOG_MAX_SIZE_MB = 10
DEFAULT_LOG_BACKUP_COUNT = 2
DEFAULT_LOG_LEVEL = "info"
# Maps user-facing level names to the stdlib logging constants of the same name.
VALID_LOG_LEVELS: Dict[str, int] = {
    name: getattr(logging, name.upper())
    for name in ("debug", "info", "warning", "error", "critical")
}
# =============================================================================
# FILE HANDLING
# =============================================================================
MAX_FILE_SIZE = 10 * 1024 * 1024  # Largest file we will read (10 MB)
CONTENT_TRUNCATION_THRESHOLD = 50 * 1024  # Truncate content beyond 50 KB
MAX_LIST_ITEMS = 1000  # Cap on entries in a directory listing
# Code file extensions eligible for syntax highlighting (kept sorted for easy
# scanning when adding new entries).
SUPPORTED_CODE_EXTENSIONS: Set[str] = {
    ".bat", ".c", ".cpp", ".cs", ".dart", ".elm", ".go", ".h", ".hpp",
    ".java", ".js", ".json", ".kt", ".kts", ".lua", ".md", ".php", ".pl",
    ".ps1", ".py", ".r", ".rb", ".ruby", ".scala", ".sh", ".swift", ".ts",
    ".txt", ".xml", ".yaml", ".yml",
}
# All allowed file extensions for attachment (superset of the highlighting
# set above; grouped by category).
# NOTE(review): several groups (archives, compiled binaries, ML weights) are
# not text — presumably other code decides how each attachment is read;
# verify at the call sites.
ALLOWED_FILE_EXTENSIONS: Set[str] = {
# Code files
".py", ".js", ".ts", ".jsx", ".tsx", ".vue", ".java", ".c", ".cpp", ".cc", ".cxx",
".h", ".hpp", ".hxx", ".rb", ".go", ".rs", ".swift", ".kt", ".kts", ".php",
".sh", ".bash", ".zsh", ".fish", ".bat", ".cmd", ".ps1",
# Data files
".json", ".csv", ".yaml", ".yml", ".toml", ".xml", ".sql", ".db", ".sqlite", ".sqlite3",
# Documents
".txt", ".md", ".log", ".conf", ".cfg", ".ini", ".env", ".properties",
# Images
".png", ".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".svg", ".ico",
# Archives
".zip", ".tar", ".gz", ".bz2", ".7z", ".rar", ".xz",
# Config files
".lock", ".gitignore", ".dockerignore", ".editorconfig", ".eslintrc",
".prettierrc", ".babelrc", ".nvmrc", ".npmrc",
# Binary/Compiled
".pyc", ".pyo", ".pyd", ".so", ".dll", ".dylib", ".exe", ".app",
".dmg", ".pkg", ".deb", ".rpm", ".apk", ".ipa",
# ML/AI
".pkl", ".pickle", ".joblib", ".npy", ".npz", ".safetensors", ".onnx",
".pt", ".pth", ".ckpt", ".pb", ".tflite", ".mlmodel", ".coreml", ".rknn",
# Data formats
".wasm", ".proto", ".graphql", ".graphqls", ".grpc", ".avro", ".parquet",
".orc", ".feather", ".arrow", ".hdf5", ".h5", ".mat", ".rdata", ".rds",
# Other
".pdf", ".class", ".jar", ".war",
}
# =============================================================================
# SECURITY CONFIGURATION
# =============================================================================
# Absolute paths that must never be accessed, grouped by operating system.
SYSTEM_DIRS_BLACKLIST: Set[str] = {
    # macOS
    "/System", "/Library", "/private", "/usr", "/bin", "/sbin",
    # Linux
    "/boot", "/dev", "/proc", "/sys", "/root",
    # Windows
    "C:\\Windows", "C:\\Program Files", "C:\\Program Files (x86)",
}
# Directory names excluded from recursive file operations.
SKIP_DIRECTORIES: Set[str] = {
    # Python: virtual environments, installed packages, caches
    ".venv", "venv", "env", "virtualenv", "site-packages", "dist-packages",
    "__pycache__", ".pytest_cache", ".mypy_cache",
    # JavaScript, version control, IDE metadata
    "node_modules", ".git", ".svn", ".idea", ".vscode",
    # Build artifacts
    "build", "dist", "eggs", ".eggs",
}
# =============================================================================
# DATABASE QUERIES - SQL SAFETY
# =============================================================================
MAX_QUERY_TIMEOUT = 5  # Seconds before a query is aborted
MAX_QUERY_RESULTS = 1000  # Hard cap on rows a query may return
DEFAULT_QUERY_LIMIT = 100  # Rows returned when no explicit limit is given
# SQL keywords that mutate or reconfigure the database; queries containing
# them are blocked.
DANGEROUS_SQL_KEYWORDS: Set[str] = set(
    "INSERT UPDATE DELETE DROP CREATE ALTER TRUNCATE REPLACE "
    "ATTACH DETACH PRAGMA VACUUM REINDEX".split()
)
# =============================================================================
# MCP CONFIGURATION
# =============================================================================
MAX_TOOL_LOOPS = 5  # Maximum tool-call iterations per request
# =============================================================================
# VALID COMMANDS
# =============================================================================
# Every slash command the application accepts, including aliases
# (e.g. /cl is the short form of /clear).
VALID_COMMANDS: Set[str] = set(
    "/retry /online /memory /paste /export /save /load /delete /list "
    "/prev /next /stats /middleout /reset /info /model /maxtoken /system "
    "/config /credits /clear /cl /help /mcp".split()
)
# =============================================================================
# COMMAND HELP DATABASE
# =============================================================================
# Maps each command (and bare help topics such as "mcp") to its help entry:
#   description: one-line summary of the command
#   usage:       syntax string shown to the user
#   examples:    list of (label, command) tuples; ("", "") pairs act as spacers
#   aliases:     optional list of alternate command names
#   notes:       free-form tips (may be a multi-line block, as for "mcp")
COMMAND_HELP: Dict[str, Dict[str, Any]] = {
"/clear": {
"aliases": ["/cl"],
"description": "Clear the terminal screen for a clean interface.",
"usage": "/clear\n/cl",
"examples": [
("Clear screen", "/clear"),
("Using short alias", "/cl"),
],
"notes": "You can also use the keyboard shortcut Ctrl+L.",
},
"/help": {
"description": "Display help information for commands.",
"usage": "/help [command|topic]",
"examples": [
("Show all commands", "/help"),
("Get help for a specific command", "/help /model"),
("Get detailed MCP help", "/help mcp"),
],
"notes": "Use /help without arguments to see the full command list.",
},
# Bare topic (no leading slash): long-form MCP guide shown by "/help mcp".
"mcp": {
"description": "Complete guide to MCP (Model Context Protocol).",
"usage": "See detailed examples below",
"examples": [],
"notes": """
MCP (Model Context Protocol) gives your AI assistant direct access to:
• Local files and folders (read, search, list)
• SQLite databases (inspect, search, query)
FILE MODE (default):
/mcp on Start MCP server
/mcp add ~/Documents Grant access to folder
/mcp list View all allowed folders
DATABASE MODE:
/mcp add db ~/app/data.db Add specific database
/mcp db list View all databases
/mcp db 1 Work with database #1
/mcp files Switch back to file mode
WRITE MODE (optional):
/mcp write on Enable file modifications
/mcp write off Disable write mode (back to read-only)
For command-specific help: /help /mcp
""",
},
"/mcp": {
"description": "Manage MCP for local file access and SQLite database querying.",
"usage": "/mcp <command> [args]",
"examples": [
("Enable MCP server", "/mcp on"),
("Disable MCP server", "/mcp off"),
("Show MCP status", "/mcp status"),
("", ""),
("━━━ FILE MODE ━━━", ""),
("Add folder for file access", "/mcp add ~/Documents"),
("Remove folder", "/mcp remove ~/Desktop"),
("List allowed folders", "/mcp list"),
("Enable write mode", "/mcp write on"),
("", ""),
("━━━ DATABASE MODE ━━━", ""),
("Add SQLite database", "/mcp add db ~/app/data.db"),
("List all databases", "/mcp db list"),
("Switch to database #1", "/mcp db 1"),
("Switch back to file mode", "/mcp files"),
],
"notes": "MCP allows AI to read local files and query SQLite databases.",
},
"/memory": {
"description": "Toggle conversation memory.",
"usage": "/memory [on|off]",
"examples": [
("Check current memory status", "/memory"),
("Enable conversation memory", "/memory on"),
("Disable memory (save costs)", "/memory off"),
],
"notes": "Memory is ON by default. Disabling saves tokens.",
},
"/online": {
"description": "Enable or disable online mode (web search).",
"usage": "/online [on|off]",
"examples": [
("Check online mode status", "/online"),
("Enable web search", "/online on"),
("Disable web search", "/online off"),
],
"notes": "Not all models support online mode.",
},
"/paste": {
"description": "Paste plain text from clipboard and send to the AI.",
"usage": "/paste [prompt]",
"examples": [
("Paste clipboard content", "/paste"),
("Paste with a question", "/paste Explain this code"),
],
"notes": "Only plain text is supported.",
},
"/retry": {
"description": "Resend the last prompt from conversation history.",
"usage": "/retry",
"examples": [("Retry last message", "/retry")],
"notes": "Requires at least one message in history.",
},
"/next": {
"description": "View the next response in conversation history.",
"usage": "/next",
"examples": [("Navigate to next response", "/next")],
"notes": "Use /prev to go backward.",
},
"/prev": {
"description": "View the previous response in conversation history.",
"usage": "/prev",
"examples": [("Navigate to previous response", "/prev")],
"notes": "Use /next to go forward.",
},
"/reset": {
"description": "Clear conversation history and reset system prompt.",
"usage": "/reset",
"examples": [("Reset conversation", "/reset")],
"notes": "Requires confirmation.",
},
"/info": {
"description": "Display detailed information about a model.",
"usage": "/info [model_id]",
"examples": [
("Show current model info", "/info"),
("Show specific model info", "/info gpt-4o"),
],
"notes": "Shows pricing, capabilities, and context length.",
},
"/model": {
"description": "Select or change the AI model.",
"usage": "/model [search_term]",
"examples": [
("List all models", "/model"),
("Search for GPT models", "/model gpt"),
("Search for Claude models", "/model claude"),
],
"notes": "Models are numbered for easy selection.",
},
"/config": {
"description": "View or modify application configuration.",
"usage": "/config [setting] [value]",
"examples": [
("View all settings", "/config"),
("Set API key", "/config api"),
("Set default model", "/config model"),
("Set system prompt", "/config system You are a helpful assistant"),
("Enable streaming", "/config stream on"),
],
"notes": "Available: api, url, model, system, stream, costwarning, maxtoken, online, loglevel.",
},
"/maxtoken": {
"description": "Set a temporary session token limit.",
"usage": "/maxtoken [value]",
"examples": [
("View current session limit", "/maxtoken"),
("Set session limit to 2000", "/maxtoken 2000"),
],
"notes": "Cannot exceed stored max token limit.",
},
"/system": {
"description": "Set or clear the session-level system prompt.",
"usage": "/system [prompt|clear|default <prompt>]",
"examples": [
("View current system prompt", "/system"),
("Set as Python expert", "/system You are a Python expert"),
("Multiline with newlines", r"/system You are an expert.\nBe clear and concise."),
("Save as default", "/system default You are a helpful assistant"),
("Revert to default", "/system clear"),
("Blank prompt", '/system ""'),
],
"notes": r"Use \n for newlines. /system clear reverts to hardcoded default.",
},
"/save": {
"description": "Save the current conversation history.",
"usage": "/save <name>",
"examples": [("Save conversation", "/save my_chat")],
"notes": "Saved conversations can be loaded later with /load.",
},
"/load": {
"description": "Load a saved conversation.",
"usage": "/load <name|number>",
"examples": [
("Load by name", "/load my_chat"),
("Load by number from /list", "/load 3"),
],
"notes": "Use /list to see numbered conversations.",
},
"/delete": {
"description": "Delete a saved conversation.",
"usage": "/delete <name|number>",
"examples": [("Delete by name", "/delete my_chat")],
"notes": "Requires confirmation. Cannot be undone.",
},
"/list": {
"description": "List all saved conversations.",
"usage": "/list",
"examples": [("Show saved conversations", "/list")],
"notes": "Conversations are numbered for use with /load and /delete.",
},
"/export": {
"description": "Export the current conversation to a file.",
"usage": "/export <format> <filename>",
"examples": [
("Export as Markdown", "/export md notes.md"),
("Export as JSON", "/export json conversation.json"),
("Export as HTML", "/export html report.html"),
],
"notes": "Available formats: md, json, html.",
},
"/stats": {
"description": "Display session statistics.",
"usage": "/stats",
"examples": [("View session statistics", "/stats")],
"notes": "Shows tokens, costs, and credits.",
},
"/credits": {
"description": "Display your OpenRouter account credits.",
"usage": "/credits",
"examples": [("Check credits", "/credits")],
"notes": "Shows total, used, and remaining credits.",
},
"/middleout": {
"description": "Toggle middle-out transform for long prompts.",
"usage": "/middleout [on|off]",
"examples": [
("Check status", "/middleout"),
("Enable compression", "/middleout on"),
],
"notes": "Compresses prompts exceeding context size.",
},
}