import os
import sys
import json
import sqlite3
import signal
import logging
import traceback
import readline
import glob as glob_module
from concurrent.futures import ThreadPoolExecutor

from pr.config import DB_PATH, LOG_FILE, DEFAULT_MODEL, DEFAULT_API_URL, MODEL_LIST_URL, HISTORY_FILE
from pr.ui import Colors, render_markdown
from pr.core.context import init_system_message, truncate_tool_result
from pr.core.api import call_api
from pr.tools import (
    http_fetch, run_command, run_command_interactive, read_file, write_file,
    list_directory, mkdir, chdir, getpwd, db_set, db_get, db_query,
    web_search, web_search_news, python_exec, index_source_directory,
    open_editor, editor_insert_text, editor_replace_text, editor_search,
    search_replace, close_editor, create_diff, apply_patch,
    tail_process, kill_process
)
from pr.tools.patch import display_file_diff
from pr.tools.filesystem import display_edit_summary, display_edit_timeline, clear_edit_tracker
from pr.tools.base import get_tools_definition
from pr.commands import handle_command
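
# The tool dispatch table in execute_tool_calls also references agent and
# knowledge-base helpers that are not imported above. The import below is an
# assumption: it presumes these helpers are exposed from pr.tools as well;
# adjust the module path if they live elsewhere in the package.
from pr.tools import (
    create_agent, list_agents, execute_agent_task, remove_agent,
    collaborate_agents, add_knowledge_entry, get_knowledge_entry,
    search_knowledge, get_knowledge_by_category, update_knowledge_importance,
    delete_knowledge_entry, get_knowledge_statistics,
)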

logger = logging.getLogger('pr')
logger.setLevel(logging.DEBUG)

file_handler = logging.FileHandler(LOG_FILE)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(file_handler)


class Assistant:
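    """Interactive AI assistant: holds the conversation state, dispatches
    model-requested tool calls, and persists key/value data and file versions
    in a local SQLite database."""
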
    def __init__(self, args):
        self.args = args
        self.messages = []
        self.verbose = args.verbose
        self.debug = getattr(args, 'debug', False)
        self.syntax_highlighting = not args.no_syntax

        if self.debug:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.DEBUG)
            console_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
            logger.addHandler(console_handler)
            logger.debug("Debug mode enabled")

        self.api_key = os.environ.get('OPENROUTER_API_KEY', '')
        self.model = args.model or os.environ.get('AI_MODEL', DEFAULT_MODEL)
        self.api_url = args.api_url or os.environ.get('API_URL', DEFAULT_API_URL)
        self.model_list_url = args.model_list_url or os.environ.get('MODEL_LIST_URL', MODEL_LIST_URL)
        self.use_tools = os.environ.get('USE_TOOLS', '1') == '1'
        self.strict_mode = os.environ.get('STRICT_MODE', '0') == '1'
        self.interrupt_count = 0
        self.python_globals = {}
        self.db_conn = None
        self.autonomous_mode = False
        self.autonomous_iterations = 0

        self.init_database()
        self.messages.append(init_system_message(args))

        # Enhanced features are optional; fall back gracefully if unavailable.
        try:
            from pr.core.enhanced_assistant import EnhancedAssistant
            self.enhanced = EnhancedAssistant(self)
            if self.debug:
                logger.debug("Enhanced assistant features initialized")
        except Exception as e:
            logger.warning(f"Could not initialize enhanced features: {e}")
            self.enhanced = None

    def init_database(self):
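        """Open the SQLite database at DB_PATH and create the kv_store and
        file_versions tables if needed; on failure, continue without
        persistence (self.db_conn stays None)."""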
        try:
            logger.debug(f"Initializing database at {DB_PATH}")
            self.db_conn = sqlite3.connect(DB_PATH, check_same_thread=False)
            cursor = self.db_conn.cursor()

            cursor.execute('''CREATE TABLE IF NOT EXISTS kv_store
                              (key TEXT PRIMARY KEY, value TEXT, timestamp REAL)''')

            cursor.execute('''CREATE TABLE IF NOT EXISTS file_versions
                              (id INTEGER PRIMARY KEY AUTOINCREMENT,
                               filepath TEXT, content TEXT, hash TEXT,
                               timestamp REAL, version INTEGER)''')

            self.db_conn.commit()
            logger.debug("Database initialized successfully")
        except Exception as e:
            logger.error(f"Database initialization error: {e}")
            self.db_conn = None

    def execute_tool_calls(self, tool_calls):
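        """Run the model's tool calls concurrently (up to 5 workers) and return
        a list of tool-role messages, one per call, containing truncated results
        or error payloads."""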
        results = []

        logger.debug(f"Executing {len(tool_calls)} tool call(s)")

        with ThreadPoolExecutor(max_workers=5) as executor:
            futures = []

            for tool_call in tool_calls:
                func_name = tool_call['function']['name']
                arguments = json.loads(tool_call['function']['arguments'])
                logger.debug(f"Tool call: {func_name} with arguments: {arguments}")

                func_map = {
                    'http_fetch': lambda **kw: http_fetch(**kw),
                    'run_command': lambda **kw: run_command(**kw),
                    'tail_process': lambda **kw: tail_process(**kw),
                    'kill_process': lambda **kw: kill_process(**kw),
                    'run_command_interactive': lambda **kw: run_command_interactive(**kw),
                    'read_file': lambda **kw: read_file(**kw, db_conn=self.db_conn),
                    'write_file': lambda **kw: write_file(**kw, db_conn=self.db_conn),
                    'list_directory': lambda **kw: list_directory(**kw),
                    'mkdir': lambda **kw: mkdir(**kw),
                    'chdir': lambda **kw: chdir(**kw),
                    'getpwd': lambda **kw: getpwd(**kw),
                    'db_set': lambda **kw: db_set(**kw, db_conn=self.db_conn),
                    'db_get': lambda **kw: db_get(**kw, db_conn=self.db_conn),
                    'db_query': lambda **kw: db_query(**kw, db_conn=self.db_conn),
                    'web_search': lambda **kw: web_search(**kw),
                    'web_search_news': lambda **kw: web_search_news(**kw),
                    'python_exec': lambda **kw: python_exec(**kw, python_globals=self.python_globals),
                    'index_source_directory': lambda **kw: index_source_directory(**kw),
                    'search_replace': lambda **kw: search_replace(**kw, db_conn=self.db_conn),
                    'open_editor': lambda **kw: open_editor(**kw),
                    'editor_insert_text': lambda **kw: editor_insert_text(**kw, db_conn=self.db_conn),
                    'editor_replace_text': lambda **kw: editor_replace_text(**kw, db_conn=self.db_conn),
                    'editor_search': lambda **kw: editor_search(**kw),
                    'close_editor': lambda **kw: close_editor(**kw),
                    'create_diff': lambda **kw: create_diff(**kw),
                    'apply_patch': lambda **kw: apply_patch(**kw, db_conn=self.db_conn),
                    'display_file_diff': lambda **kw: display_file_diff(**kw),
                    'display_edit_summary': lambda **kw: display_edit_summary(),
                    'display_edit_timeline': lambda **kw: display_edit_timeline(**kw),
                    'clear_edit_tracker': lambda **kw: clear_edit_tracker(),
                    # Agent and knowledge-base tools (see the import note near
                    # the top of the file).
                    'create_agent': lambda **kw: create_agent(**kw),
                    'list_agents': lambda **kw: list_agents(**kw),
                    'execute_agent_task': lambda **kw: execute_agent_task(**kw),
                    'remove_agent': lambda **kw: remove_agent(**kw),
                    'collaborate_agents': lambda **kw: collaborate_agents(**kw),
                    'add_knowledge_entry': lambda **kw: add_knowledge_entry(**kw),
                    'get_knowledge_entry': lambda **kw: get_knowledge_entry(**kw),
                    'search_knowledge': lambda **kw: search_knowledge(**kw),
                    'get_knowledge_by_category': lambda **kw: get_knowledge_by_category(**kw),
                    'update_knowledge_importance': lambda **kw: update_knowledge_importance(**kw),
                    'delete_knowledge_entry': lambda **kw: delete_knowledge_entry(**kw),
                    'get_knowledge_statistics': lambda **kw: get_knowledge_statistics(**kw),
                }

                if func_name in func_map:
                    future = executor.submit(func_map[func_name], **arguments)
                    futures.append((tool_call['id'], future))

            # Collect results; each tool call gets at most 30 seconds.
            for tool_id, future in futures:
                try:
                    result = future.result(timeout=30)
                    result = truncate_tool_result(result)
                    logger.debug(f"Tool result for {tool_id}: {str(result)[:200]}...")
                    results.append({
                        "tool_call_id": tool_id,
                        "role": "tool",
                        "content": json.dumps(result)
                    })
                except Exception as e:
                    logger.debug(f"Tool error for {tool_id}: {str(e)}")
                    error_msg = str(e)[:200] if len(str(e)) > 200 else str(e)
                    results.append({
                        "tool_call_id": tool_id,
                        "role": "tool",
                        "content": json.dumps({"status": "error", "error": error_msg})
                    })

        return results

    def process_response(self, response):
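        """Append the assistant message to the history, execute any requested
        tool calls, recurse on the follow-up completion, and finally render the
        assistant's text content as markdown."""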
        if 'error' in response:
            return f"Error: {response['error']}"

        if 'choices' not in response or not response['choices']:
            return "No response from API"

        message = response['choices'][0]['message']
        self.messages.append(message)

        if 'tool_calls' in message and message['tool_calls']:
            if self.verbose:
                print(f"{Colors.YELLOW}Executing tool calls...{Colors.RESET}")

            tool_results = self.execute_tool_calls(message['tool_calls'])

            for result in tool_results:
                self.messages.append(result)

            follow_up = call_api(
                self.messages, self.model, self.api_url, self.api_key,
                self.use_tools, get_tools_definition(), verbose=self.verbose
            )
            return self.process_response(follow_up)

        content = message.get('content', '')
        return render_markdown(content, self.syntax_highlighting)

    def signal_handler(self, signum, frame):
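        """Handle SIGINT: require a second Ctrl+C before exiting; in autonomous
        mode the second interrupt also drops out of autonomous operation."""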
        if self.autonomous_mode:
            self.interrupt_count += 1
            if self.interrupt_count >= 2:
                print(f"\n{Colors.RED}Force exiting autonomous mode...{Colors.RESET}")
                self.autonomous_mode = False
                sys.exit(0)
            else:
                print(f"\n{Colors.YELLOW}Press Ctrl+C again to force exit{Colors.RESET}")
            return

        self.interrupt_count += 1
        if self.interrupt_count >= 2:
            print(f"\n{Colors.RED}Exiting...{Colors.RESET}")
            self.cleanup()
            sys.exit(0)
        else:
            print(f"\n{Colors.YELLOW}Press Ctrl+C again to exit{Colors.RESET}")

    def setup_readline(self):
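        """Load persistent input history, register history saving at exit, and
        install a tab completer over built-in commands and filesystem paths."""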
        try:
            readline.read_history_file(HISTORY_FILE)
        except FileNotFoundError:
            pass

        readline.set_history_length(1000)

        import atexit
        atexit.register(readline.write_history_file, HISTORY_FILE)

        commands = ['exit', 'quit', 'help', 'reset', 'dump', 'verbose',
                    'models', 'tools', 'review', 'refactor', 'obfuscate', '/auto']

        def completer(text, state):
            options = [cmd for cmd in commands if cmd.startswith(text)]

            glob_pattern = os.path.expanduser(text) + '*'
            path_options = glob_module.glob(glob_pattern)
            path_options = [p + os.sep if os.path.isdir(p) else p for p in path_options]

            combined_options = sorted(list(set(options + path_options)))
            # combined_options.extend(self.commands)

            if state < len(combined_options):
                return combined_options[state]
            return None

        delims = readline.get_completer_delims()
        readline.set_completer_delims(delims.replace('/', ''))

        readline.set_completer(completer)
        readline.parse_and_bind('tab: complete')

    def run_repl(self):
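        """Interactive read-eval-print loop: dispatch built-in commands through
        handle_command and send everything else to the model."""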
        self.setup_readline()
        signal.signal(signal.SIGINT, self.signal_handler)

        print(f"{Colors.BOLD}r{Colors.RESET}")
        print("Type 'help' for commands or start chatting")

        while True:
            try:
                user_input = input(f"{Colors.BLUE}You>{Colors.RESET} ").strip()

                if not user_input:
                    continue

                cmd_result = handle_command(self, user_input)

                if cmd_result is False:
                    break
                elif cmd_result is True:
                    continue

                process_message(self, user_input)

            except EOFError:
                break
            except KeyboardInterrupt:
                self.signal_handler(None, None)
            except Exception as e:
                print(f"{Colors.RED}Error: {e}{Colors.RESET}")
                logger.error(f"REPL error: {e}\n{traceback.format_exc()}")

    def run_single(self):
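        """Non-interactive entry point: take the message from the command line
        or stdin and hand it to the autonomous mode runner."""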
        if self.args.message:
            message = self.args.message
        else:
            message = sys.stdin.read()

        from pr.autonomous.mode import run_autonomous_mode
        run_autonomous_mode(self, message)

    def cleanup(self):
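        """Release resources: enhanced features, terminal multiplexers, and the
        SQLite connection."""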
        if hasattr(self, 'enhanced') and self.enhanced:
            try:
                self.enhanced.cleanup()
            except Exception as e:
                logger.error(f"Error cleaning up enhanced features: {e}")

        try:
            from pr.multiplexer import cleanup_all_multiplexers
            cleanup_all_multiplexers()
        except Exception as e:
            logger.error(f"Error cleaning up multiplexers: {e}")

        if self.db_conn:
            self.db_conn.close()

    def run(self):
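        """Choose between the interactive REPL and single-message mode, making
        sure cleanup always runs."""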
        try:
            logger.debug(
                f"interactive={self.args.interactive}, message={self.args.message}, "
                f"isatty={sys.stdin.isatty()}"
            )
            if self.args.interactive or (not self.args.message and sys.stdin.isatty()):
                logger.debug("Starting interactive REPL")
                self.run_repl()
            else:
                logger.debug("Running in single-message mode")
                self.run_single()
        finally:
            self.cleanup()


def process_message(assistant, message):
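    """Append a user message to the conversation, call the model API, and print
    the rendered assistant reply."""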
    assistant.messages.append({"role": "user", "content": message})

    logger.debug(f"Processing user message: {message[:100]}...")
    logger.debug(f"Current message count: {len(assistant.messages)}")

    if assistant.verbose:
        print(f"{Colors.GRAY}Sending request to API...{Colors.RESET}")

    response = call_api(
        assistant.messages, assistant.model, assistant.api_url,
        assistant.api_key, assistant.use_tools, get_tools_definition(),
        verbose=assistant.verbose
    )
    result = assistant.process_response(response)

    print(f"\n{Colors.GREEN}r:{Colors.RESET} {result}\n")
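

# Minimal usage sketch (an assumption, not confirmed by this module): the real
# CLI wiring lives elsewhere in the pr package, but the Assistant can be driven
# by any namespace exposing the attributes referenced above.
#
#     import argparse
#
#     args = argparse.Namespace(
#         interactive=True, message=None, model=None, api_url=None,
#         model_list_url=None, verbose=True, debug=False, no_syntax=False,
#     )
#     Assistant(args).run()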