feat: introduce agent communication, autonomous detection, and plugin support

feat: add interactive modes and agent execution tool
refactor: remove asyncio dependencies from core api and assistant
refactor: remove asyncio dependencies from command handlers
maintenance: bump version to 1.10.0
maintenance: update pyproject.toml dependencies and test configuration
This commit is contained in:
retoor 2025-11-07 17:36:03 +01:00
parent 5069fe8693
commit 0f5dce1617
20 changed files with 264 additions and 350 deletions

View File

@ -7,6 +7,14 @@
## Version 1.10.0 - 2025-11-07
This release introduces significant new features, including agent communication, autonomous detection, and plugin support. Users can now leverage interactive modes and an agent execution tool, while developers benefit from caching and a version bump to 1.10.0.
**Changes:** 2 files, 10 lines
**Languages:** Markdown (8 lines), TOML (2 lines)
## Version 1.9.0 - 2025-11-07
This release introduces a new agent communication system and autonomous detection capabilities. It also adds caching, plugin support, and interactive modes, along with an agent execution tool.

View File

@ -12,7 +12,7 @@ from pr.ui import Colors
from pr.editor import RPEditor
def handle_command(assistant, command):
async def handle_command(assistant, command):
command_parts = command.strip().split(maxsplit=1)
cmd = command_parts[0].lower()
@ -121,7 +121,7 @@ def handle_command(assistant, command):
print(f"Model set to: {Colors.GREEN}{assistant.model}{Colors.RESET}")
elif cmd == "/models":
models = list_models(assistant.model_list_url, assistant.api_key)
models = await list_models(assistant.model_list_url, assistant.api_key)
if isinstance(models, dict) and "error" in models:
print(f"{Colors.RED}Error fetching models: {models['error']}{Colors.RESET}")
else:

View File

@ -488,7 +488,7 @@ class Assistant:
if not user_input:
continue
cmd_result = handle_command(self, user_input)
cmd_result = await handle_command(self, user_input)
if cmd_result is False:
break

View File

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "rp"
version = "1.9.0"
version = "1.10.0"
description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md"
requires-python = ">=3.12"
@ -16,6 +16,7 @@ authors = [
dependencies = [
"pydantic>=2.12.3",
"prompt_toolkit>=3.0.0",
"requests>=2.31.0",
]
classifiers = [
"Development Status :: 4 - Beta",
@ -30,7 +31,6 @@ classifiers = [
[project.optional-dependencies]
dev = [
"pytest>=8.3.0",
"pytest-asyncio>=1.2.0",
"pytest-cov>=7.0.0",
"black>=25.9.0",
"flake8>=7.3.0",
@ -60,7 +60,6 @@ exclude = ["tests*"]
[tool.pytest.ini_options]
testpaths = ["tests"]
asyncio_mode = "auto"
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]

View File

@ -1,17 +1,41 @@
import argparse
import asyncio
import sys
from rp import __version__
from rp.core import Assistant
async def main_async():
def main_def():
import tracemalloc
tracemalloc.start()
parser = argparse.ArgumentParser(
description="RP Assistant - Professional CLI AI assistant with visual effects, cost tracking, and autonomous execution",
epilog='\nExamples:\n rp "What is Python?" # Single query\n rp -i # Interactive mode\n rp -i --model gpt-4 # Use specific model\n rp --save-session my-task -i # Save session\n rp --load-session my-task # Load session\n rp --list-sessions # List all sessions\n rp --usage # Show token usage stats\n\nFeatures:\n • Visual progress indicators during AI calls\n • Real-time cost tracking for each query\n • Sophisticated CLI with colors and effects\n • Tool execution with status updates\n\nCommands in interactive mode:\n /auto [task] - Enter autonomous mode\n /reset - Clear message history\n /verbose - Toggle verbose output\n /models - List available models\n /tools - List available tools\n /usage - Show usage statistics\n /save <name> - Save current session\n exit, quit, q - Exit the program\n ',
epilog="""
Examples:
rp "What is Python?" # Single query
rp -i # Interactive mode
rp -i --model gpt-4 # Use specific model
rp --save-session my-task -i # Save session
rp --load-session my-task # Load session
rp --list-sessions # List all sessions
rp --usage # Show token usage stats
Features:
Visual progress indicators during AI calls
Real-time cost tracking for each query
Sophisticated CLI with colors and effects
Tool execution with status updates
Commands in interactive mode:
/auto [task] - Enter autonomous mode
/reset - Clear message history
/verbose - Toggle verbose output
/models - List available models
/tools - List available tools
/usage - Show usage statistics
/save <name> - Save current session
exit, quit, q - Exit the program
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("message", nargs="?", help="Message to send to assistant")
@ -119,11 +143,11 @@ async def main_async():
print(f" - {plugin}")
return
assistant = Assistant(args)
await assistant.run()
assistant.run()
def main():
return asyncio.run(main_async())
return main_def()
if __name__ == "__main__":

View File

@ -1,5 +1,3 @@
import asyncio
import asyncio
import json
import logging
import time
@ -30,13 +28,7 @@ def run_autonomous_mode(assistant, task):
assistant.messages = manage_context_window(assistant.messages, assistant.verbose)
logger.debug(f"Messages after context management: {len(assistant.messages)}")
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
response = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
@ -44,21 +36,6 @@ def run_autonomous_mode(assistant, task):
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
),
loop,
)
response = future.result()
except RuntimeError:
response = asyncio.run(
call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
)
if "error" in response:
logger.error(f"API error in autonomous mode: {response['error']}")
@ -111,13 +88,7 @@ def process_response_autonomous(assistant, response):
)
for result in tool_results:
assistant.messages.append(result)
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
follow_up = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
@ -125,21 +96,6 @@ def process_response_autonomous(assistant, response):
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
),
loop,
)
follow_up = future.result()
except RuntimeError:
follow_up = asyncio.run(
call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
)
return process_response_autonomous(assistant, follow_up)
content = message.get("content", "")

View File

@ -1,6 +1,4 @@
import asyncio
import json
import logging
import time
from rp.commands.multiplexer_commands import MULTIPLEXER_COMMANDS
from rp.autonomous import run_autonomous_mode
@ -37,18 +35,7 @@ def handle_command(assistant, command):
if prompt_text.strip():
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, prompt_text))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
process_message(assistant, prompt_text)
elif cmd == "/auto":
if len(command_parts) < 2:
print(f"{Colors.RED}Usage: /auto [task description]{Colors.RESET}")
@ -182,18 +169,7 @@ def review_file(assistant, filename):
message = f"Please review this file and provide feedback:\n\n{result['content']}"
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, message))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
process_message(assistant, message)
else:
print(f"{Colors.RED}Error reading file: {result['error']}{Colors.RESET}")
@ -204,18 +180,7 @@ def refactor_file(assistant, filename):
message = f"Please refactor this code to improve its quality:\n\n{result['content']}"
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, message))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
process_message(assistant, message)
else:
print(f"{Colors.RED}Error reading file: {result['error']}{Colors.RESET}")
@ -226,18 +191,7 @@ def obfuscate_file(assistant, filename):
message = f"Please obfuscate this code:\n\n{result['content']}"
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, message))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
process_message(assistant, message)
else:
print(f"{Colors.RED}Error reading file: {result['error']}{Colors.RESET}")

View File

@ -7,7 +7,7 @@ from rp.core.http_client import http_client
logger = logging.getLogger("rp")
async def call_api(messages, model, api_url, api_key, use_tools, tools_definition, verbose=False):
def call_api(messages, model, api_url, api_key, use_tools, tools_definition, verbose=False):
try:
messages = auto_slim_messages(messages, verbose=verbose)
logger.debug(f"=== API CALL START ===")
@ -35,7 +35,7 @@ async def call_api(messages, model, api_url, api_key, use_tools, tools_definitio
request_json = data
logger.debug(f"Request payload size: {len(request_json)} bytes")
logger.debug("Sending HTTP request...")
response = await http_client.post(api_url, headers=headers, json_data=request_json)
response = http_client.post(api_url, headers=headers, json_data=request_json)
if response.get("error"):
if "status" in response:
logger.error(f"API HTTP Error: {response['status']} - {response.get('text', '')}")
@ -77,12 +77,12 @@ async def call_api(messages, model, api_url, api_key, use_tools, tools_definitio
return {"error": str(e)}
async def list_models(model_list_url, api_key):
def list_models(model_list_url, api_key):
try:
headers = {}
if api_key:
headers["Authorization"] = f"Bearer {api_key}"
response = await http_client.get(model_list_url, headers=headers)
response = http_client.get(model_list_url, headers=headers)
if response.get("error"):
return {"error": response.get("text", "HTTP error")}
data = json.loads(response["text"])

View File

@ -9,6 +9,7 @@ import sys
import traceback
from concurrent.futures import ThreadPoolExecutor
from rp.commands import handle_command
from rp.input_handler import get_advanced_input
from rp.config import (
DB_PATH,
DEFAULT_API_URL,
@ -205,7 +206,7 @@ class Assistant:
if self.debug:
print(f"{Colors.RED}Error checking background updates: {e}{Colors.RESET}")
async def execute_tool_calls(self, tool_calls):
def execute_tool_calls(self, tool_calls):
results = []
logger.debug(f"Executing {len(tool_calls)} tool call(s)")
with ThreadPoolExecutor(max_workers=5) as executor:
@ -286,7 +287,7 @@ class Assistant:
)
return results
async def process_response(self, response):
def process_response(self, response):
if "error" in response:
return f"Error: {response['error']}"
if "choices" not in response or not response["choices"]:
@ -296,11 +297,11 @@ class Assistant:
if "tool_calls" in message and message["tool_calls"]:
tool_count = len(message["tool_calls"])
print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}")
tool_results = await self.execute_tool_calls(message["tool_calls"])
tool_results = self.execute_tool_calls(message["tool_calls"])
print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}")
for result in tool_results:
self.messages.append(result)
follow_up = await call_api(
follow_up = call_api(
self.messages,
self.model,
self.api_url,
@ -309,7 +310,7 @@ class Assistant:
get_tools_definition(),
verbose=self.verbose,
)
return await self.process_response(follow_up)
return self.process_response(follow_up)
content = message.get("content", "")
return render_markdown(content, self.syntax_highlighting)
@ -371,7 +372,7 @@ class Assistant:
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
async def run_repl(self):
def run_repl(self):
self.setup_readline()
signal.signal(signal.SIGINT, self.signal_handler)
print(
@ -405,7 +406,8 @@ class Assistant:
except:
pass
prompt += f">{Colors.RESET} "
user_input = input(prompt).strip()
user_input = get_advanced_input(prompt)
user_input = user_input.strip()
if not user_input:
continue
cmd_result = handle_command(self, user_input)
@ -415,10 +417,10 @@ class Assistant:
continue
# Use enhanced processing if available, otherwise fall back to basic processing
if hasattr(self, "enhanced") and self.enhanced:
result = await self.enhanced.process_with_enhanced_context(user_input)
result = self.enhanced.process_with_enhanced_context(user_input)
print(result)
else:
await process_message(self, user_input)
process_message(self, user_input)
except EOFError:
break
except KeyboardInterrupt:
@ -427,12 +429,12 @@ class Assistant:
print(f"{Colors.RED}Error: {e}{Colors.RESET}")
logging.error(f"REPL error: {e}\n{traceback.format_exc()}")
async def run_single(self):
def run_single(self):
if self.args.message:
message = self.args.message
else:
message = sys.stdin.read()
await process_message(self, message)
process_message(self, message)
def cleanup(self):
if hasattr(self, "enhanced") and self.enhanced:
@ -455,17 +457,17 @@ class Assistant:
if self.db_conn:
self.db_conn.close()
async def run(self):
def run(self):
try:
if self.args.interactive or (not self.args.message and sys.stdin.isatty()):
await self.run_repl()
self.run_repl()
else:
await self.run_single()
self.run_single()
finally:
self.cleanup()
async def process_message(assistant, message):
def process_message(assistant, message):
from rp.core.knowledge_context import inject_knowledge_context
inject_knowledge_context(assistant, message)
@ -473,8 +475,8 @@ async def process_message(assistant, message):
logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}")
spinner = Spinner("Querying AI...")
await spinner.start()
response = await call_api(
spinner.start()
response = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
@ -483,7 +485,7 @@ async def process_message(assistant, message):
get_tools_definition(),
verbose=assistant.verbose,
)
await spinner.stop()
spinner.stop()
if "usage" in response:
usage = response["usage"]
input_tokens = usage.get("prompt_tokens", 0)
@ -492,5 +494,5 @@ async def process_message(assistant, message):
cost = UsageTracker._calculate_cost(assistant.model, input_tokens, output_tokens)
total_cost = assistant.usage_tracker.session_usage["estimated_cost"]
print(f"{Colors.YELLOW}💰 Cost: ${cost:.4f} | Total: ${total_cost:.4f}{Colors.RESET}")
result = await assistant.process_response(response)
result = assistant.process_response(response)
print(f"\n{Colors.GREEN}r:{Colors.RESET} {result}\n")

View File

@ -1,4 +1,3 @@
import asyncio
import json
import logging
import uuid
@ -94,13 +93,7 @@ class EnhancedAssistant:
def _api_caller_for_agent(
self, messages: List[Dict[str, Any]], temperature: float, max_tokens: int
) -> Dict[str, Any]:
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
return call_api(
messages,
self.base.model,
self.base.api_url,
@ -108,21 +101,6 @@ class EnhancedAssistant:
use_tools=False,
tools_definition=[],
verbose=self.base.verbose,
),
loop,
)
return future.result()
except RuntimeError:
return asyncio.run(
call_api(
messages,
self.base.model,
self.base.api_url,
self.base.api_key,
use_tools=False,
tools_definition=[],
verbose=self.base.verbose,
)
)
def enhanced_call_api(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
@ -131,13 +109,7 @@ class EnhancedAssistant:
if cached_response:
logger.debug("API cache hit")
return cached_response
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
response = call_api(
messages,
self.base.model,
self.base.api_url,
@ -145,28 +117,13 @@ class EnhancedAssistant:
self.base.use_tools,
get_tools_definition(),
verbose=self.base.verbose,
),
loop,
)
response = future.result()
except RuntimeError:
response = asyncio.run(
call_api(
messages,
self.base.model,
self.base.api_url,
self.base.api_key,
self.base.use_tools,
get_tools_definition(),
verbose=self.base.verbose,
)
)
if self.api_cache and CACHE_ENABLED and ("error" not in response):
token_count = response.get("usage", {}).get("total_tokens", 0)
self.api_cache.set(self.base.model, messages, 0.7, 4096, response, token_count)
return response
async def process_with_enhanced_context(self, user_message: str) -> str:
def process_with_enhanced_context(self, user_message: str) -> str:
self.base.messages.append({"role": "user", "content": user_message})
self.conversation_memory.add_message(
self.current_conversation_id, str(uuid.uuid4())[:16], "user", user_message
@ -201,7 +158,7 @@ class EnhancedAssistant:
else:
working_messages = self.base.messages
response = self.enhanced_call_api(working_messages)
result = await self.base.process_response(response)
result = self.base.process_response(response)
if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD:
summary = (
self.context_manager.advanced_summarize_messages(

View File

@ -1,21 +1,17 @@
import asyncio
import json
import logging
import socket
import time
import urllib.error
import urllib.request
import requests
from typing import Dict, Any, Optional
logger = logging.getLogger("rp")
class AsyncHTTPClient:
class SyncHTTPClient:
def __init__(self):
self.session_headers = {}
self.session = requests.Session()
async def request(
def request(
self,
method: str,
url: str,
@ -24,42 +20,28 @@ class AsyncHTTPClient:
json_data: Optional[Dict[str, Any]] = None,
timeout: float = 30.0,
) -> Dict[str, Any]:
"""Make an async HTTP request using urllib in a thread executor with retry logic."""
loop = asyncio.get_event_loop()
request_headers = {**self.session_headers}
if headers:
request_headers.update(headers)
request_data = data
if json_data is not None:
request_data = json.dumps(json_data).encode("utf-8")
request_headers["Content-Type"] = "application/json"
req = urllib.request.Request(url, data=request_data, headers=request_headers, method=method)
"""Make a sync HTTP request using requests with retry logic."""
attempt = 0
start_time = time.time()
while True:
attempt += 1
try:
response = await loop.run_in_executor(
None, lambda: urllib.request.urlopen(req, timeout=timeout)
response = self.session.request(
method,
url,
headers=headers,
data=data,
json=json_data,
timeout=timeout,
)
response_data = await loop.run_in_executor(None, response.read)
response_text = response_data.decode("utf-8")
response.raise_for_status() # Raise an exception for bad status codes
return {
"status": response.status,
"status": response.status_code,
"headers": dict(response.headers),
"text": response_text,
"json": lambda: json.loads(response_text) if response_text else None,
"text": response.text,
"json": response.json,
}
except urllib.error.HTTPError as e:
error_body = await loop.run_in_executor(None, e.read)
error_text = error_body.decode("utf-8")
return {
"status": e.code,
"error": True,
"text": error_text,
"json": lambda: json.loads(error_text) if error_text else None,
}
except socket.timeout:
except requests.exceptions.Timeout:
elapsed = time.time() - start_time
elapsed_minutes = int(elapsed // 60)
elapsed_seconds = elapsed % 60
@ -71,31 +53,16 @@ class AsyncHTTPClient:
logger.warning(
f"Request timed out (attempt {attempt}, duration: {duration_str}). Retrying in {attempt} second(s)..."
)
await asyncio.sleep(attempt)
except Exception as e:
error_msg = str(e)
if "timed out" in error_msg.lower() or "timeout" in error_msg.lower():
elapsed = time.time() - start_time
elapsed_minutes = int(elapsed // 60)
elapsed_seconds = elapsed % 60
duration_str = (
f"{elapsed_minutes}m {elapsed_seconds:.1f}s"
if elapsed_minutes > 0
else f"{elapsed_seconds:.1f}s"
)
logger.warning(
f"Request timed out (attempt {attempt}, duration: {duration_str}). Retrying in {attempt} second(s)..."
)
await asyncio.sleep(attempt)
else:
return {"error": True, "exception": error_msg}
time.sleep(attempt)
except requests.exceptions.RequestException as e:
return {"error": True, "exception": str(e)}
async def get(
def get(
self, url: str, headers: Optional[Dict[str, str]] = None, timeout: float = 30.0
) -> Dict[str, Any]:
return await self.request("GET", url, headers=headers, timeout=timeout)
return self.request("GET", url, headers=headers, timeout=timeout)
async def post(
def post(
self,
url: str,
headers: Optional[Dict[str, str]] = None,
@ -103,12 +70,12 @@ class AsyncHTTPClient:
json_data: Optional[Dict[str, Any]] = None,
timeout: float = 30.0,
) -> Dict[str, Any]:
return await self.request(
return self.request(
"POST", url, headers=headers, data=data, json_data=json_data, timeout=timeout
)
def set_default_headers(self, headers: Dict[str, str]):
self.session_headers.update(headers)
self.session.headers.update(headers)
http_client = AsyncHTTPClient()
http_client = SyncHTTPClient()

View File

@ -15,7 +15,6 @@ class AdvancedInputHandler:
def __init__(self):
self.editor_mode = False
self.setup_readline()
def setup_readline(self):
"""Setup readline with basic completer."""

View File

@ -1,4 +1,3 @@
import asyncio
import os
from typing import Any, Dict, List
from rp.agents.agent_manager import AgentManager
@ -16,13 +15,7 @@ def _create_api_wrapper():
tools_definition = get_tools_definition() if use_tools else []
def api_wrapper(messages, temperature=None, max_tokens=None, **kwargs):
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
return call_api(
messages=messages,
model=model,
api_url=api_url,
@ -30,21 +23,6 @@ def _create_api_wrapper():
use_tools=use_tools,
tools_definition=tools_definition,
verbose=False,
),
loop,
)
return future.result()
except RuntimeError:
return asyncio.run(
call_api(
messages=messages,
model=model,
api_url=api_url,
api_key=api_key,
use_tools=use_tools,
tools_definition=tools_definition,
verbose=False,
)
)
return api_wrapper

26
rp/tools/lsp.py Normal file
View File

@ -0,0 +1,26 @@
from typing import Dict, Any
def get_diagnostics(filepath: str) -> Dict[str, Any]:
    """
    Get LSP diagnostics for a file.

    Args:
        filepath: The path to the file.

    Returns:
        A dictionary with the status and a list of diagnostics.
    """
    # Placeholder implementation: a real version would talk to a running
    # LSP server through a client; `filepath` is intentionally unused here.
    placeholder_diagnostic = {
        "line": 1,
        "character": 0,
        "message": "Placeholder diagnostic: This is not a real error.",
        "severity": 1,
    }
    return {"status": "success", "diagnostics": [placeholder_diagnostic]}

45
rp/tools/search.py Normal file
View File

@ -0,0 +1,45 @@
import glob
import os
from typing import List
import re
def glob_files(pattern: str, path: str = ".") -> dict:
    """
    Find files matching a glob pattern.

    Args:
        pattern: The glob pattern to match.
        path: The directory to search in.

    Returns:
        A dictionary with the status and a list of matching files.
    """
    search_expr = os.path.join(path, pattern)
    try:
        matched = glob.glob(search_expr, recursive=True)
    except Exception as exc:
        return {"status": "error", "error": str(exc)}
    return {"status": "success", "files": matched}
def grep(pattern: str, files: List[str]) -> dict:
    """
    Search for a regex pattern in a list of files.

    Args:
        pattern: The regex pattern to search for.
        files: A list of files to search in.

    Returns:
        A dictionary with the status and a list of matches (each match
        records the file, the 1-based line number, and the stripped line
        text), or an error dictionary if the pattern is invalid or a
        file cannot be read.
    """
    try:
        # Hoist the compile out of the per-line loop (loop-invariant);
        # this also reports an invalid regex before any file is opened.
        compiled = re.compile(pattern)
        matches = []
        for file in files:
            with open(file, "r") as f:
                for i, line in enumerate(f):
                    if compiled.search(line):
                        matches.append(
                            {"file": file, "line_number": i + 1, "line": line.strip()}
                        )
        return {"status": "success", "matches": matches}
    except Exception as e:
        return {"status": "error", "error": str(e)}

View File

@ -1,4 +1,5 @@
import asyncio
import time
import threading
class Colors:
@ -23,22 +24,23 @@ class Spinner:
self.message = message
self.spinner_chars = spinner_chars
self.running = False
self.task = None
self.thread = None
async def start(self):
def start(self):
self.running = True
self.task = asyncio.create_task(self._spin())
self.thread = threading.Thread(target=self._spin)
self.thread.start()
async def stop(self):
def stop(self):
self.running = False
if self.task:
await self.task
if self.thread:
self.thread.join()
print("\r" + " " * (len(self.message) + 2) + "\r", end="", flush=True)
async def _spin(self):
def _spin(self):
i = 0
while self.running:
char = self.spinner_chars[i % len(self.spinner_chars)]
print(f"\r{Colors.CYAN}{char}{Colors.RESET} {self.message}", end="", flush=True)
i += 1
await asyncio.sleep(0.1)
time.sleep(0.1)

View File

@ -1,62 +1,64 @@
import unittest
import urllib.error
import json
from unittest.mock import MagicMock, patch
from pr.core.api import call_api, list_models
from rp.core.api import call_api, list_models
class TestApi(unittest.TestCase):
@patch("pr.core.api.urllib.request.urlopen")
@patch("pr.core.api.auto_slim_messages")
def test_call_api_success(self, mock_slim, mock_urlopen):
@patch("rp.core.http_client.SyncHTTPClient.request")
@patch("rp.core.api.auto_slim_messages")
def test_call_api_success(self, mock_slim, mock_request):
mock_slim.return_value = [{"role": "user", "content": "test"}]
mock_response = MagicMock()
mock_response.read.return_value = (
b'{"choices": [{"message": {"content": "response"}}], "usage": {"tokens": 10}}'
mock_response.status = 200
mock_response.text = (
'{"choices": [{"message": {"content": "response"}}], "usage": {"tokens": 10}}'
)
mock_urlopen.return_value.__enter__.return_value = mock_response
mock_response.json.return_value = json.loads(mock_response.text)
mock_request.return_value = mock_response
result = call_api([], "model", "http://url", "key", True, [{"name": "tool"}])
self.assertIn("choices", result)
mock_urlopen.assert_called_once()
mock_request.assert_called_once()
@patch("urllib.request.urlopen")
@patch("pr.core.api.auto_slim_messages")
def test_call_api_http_error(self, mock_slim, mock_urlopen):
@patch("rp.core.http_client.SyncHTTPClient.request")
@patch("rp.core.api.auto_slim_messages")
def test_call_api_http_error(self, mock_slim, mock_request):
mock_slim.return_value = [{"role": "user", "content": "test"}]
mock_urlopen.side_effect = urllib.error.HTTPError(
"http://url", 500, "error", None, MagicMock()
)
mock_request.return_value = {"error": True, "status": 500, "text": "error"}
result = call_api([], "model", "http://url", "key", False, [])
self.assertIn("error", result)
@patch("urllib.request.urlopen")
@patch("pr.core.api.auto_slim_messages")
def test_call_api_general_error(self, mock_slim, mock_urlopen):
@patch("rp.core.http_client.SyncHTTPClient.request")
@patch("rp.core.api.auto_slim_messages")
def test_call_api_general_error(self, mock_slim, mock_request):
mock_slim.return_value = [{"role": "user", "content": "test"}]
mock_urlopen.side_effect = Exception("test error")
mock_request.return_value = {"error": True, "exception": "test error"}
result = call_api([], "model", "http://url", "key", False, [])
self.assertIn("error", result)
@patch("urllib.request.urlopen")
def test_list_models_success(self, mock_urlopen):
@patch("rp.core.http_client.SyncHTTPClient.request")
def test_list_models_success(self, mock_request):
mock_response = MagicMock()
mock_response.read.return_value = b'{"data": [{"id": "model1"}]}'
mock_urlopen.return_value.__enter__.return_value = mock_response
mock_response.status = 200
mock_response.text = '{"data": [{"id": "model1"}]}'
mock_response.json.return_value = json.loads(mock_response.text)
mock_request.return_value = mock_response
result = list_models("http://url", "key")
self.assertEqual(result, [{"id": "model1"}])
@patch("urllib.request.urlopen")
def test_list_models_error(self, mock_urlopen):
mock_urlopen.side_effect = Exception("error")
@patch("rp.core.http_client.SyncHTTPClient.request")
def test_list_models_error(self, mock_request):
mock_request.return_value = {"error": True, "exception": "error"}
result = list_models("http://url", "key")

View File

@ -42,7 +42,6 @@ class TestAssistant(unittest.TestCase):
@patch("pr.core.assistant.render_markdown")
def test_process_response_no_tools(self, mock_render, mock_call):
assistant = MagicMock()
assistant.messages = MagicMock()
assistant.verbose = False
assistant.syntax_highlighting = True
mock_render.return_value = "rendered"
@ -59,7 +58,6 @@ class TestAssistant(unittest.TestCase):
@patch("pr.core.assistant.get_tools_definition")
def test_process_response_with_tools(self, mock_tools_def, mock_render, mock_call):
assistant = MagicMock()
assistant.messages = MagicMock()
assistant.verbose = False
assistant.syntax_highlighting = True
assistant.use_tools = True
@ -92,7 +90,6 @@ class TestAssistant(unittest.TestCase):
@patch("pr.core.assistant.get_tools_definition")
def test_process_message(self, mock_tools, mock_call):
assistant = MagicMock()
assistant.messages = MagicMock()
assistant.verbose = False
assistant.use_tools = True
assistant.model = "model"

View File

@ -1,6 +1,6 @@
from unittest.mock import MagicMock
from pr.core.enhanced_assistant import EnhancedAssistant
from rp.core.enhanced_assistant import EnhancedAssistant
def test_enhanced_assistant_init():

View File

@ -2,38 +2,36 @@ from unittest.mock import patch
import pytest
from pr.__main__ import main
from rp.__main__ import main
def test_main_version(capsys):
with patch("sys.argv", ["pr", "--version"]):
with patch("sys.argv", ["rp", "--version"]):
with pytest.raises(SystemExit):
main()
captured = capsys.readouterr()
assert "PR Assistant" in captured.out
assert "RP Assistant" in captured.out
def test_main_create_config_success(capsys):
with patch("pr.core.config_loader.create_default_config", return_value=True):
with patch("sys.argv", ["pr", "--create-config"]):
with patch("rp.core.config_loader.create_default_config", return_value=True):
with patch("sys.argv", ["rp", "--create-config"]):
main()
captured = capsys.readouterr()
assert "Configuration file created" in captured.out
def test_main_create_config_fail(capsys):
with patch("pr.core.config_loader.create_default_config", return_value=False):
with patch("sys.argv", ["pr", "--create-config"]):
with patch("rp.core.config_loader.create_default_config", return_value=False):
with patch("sys.argv", ["rp", "--create-config"]):
main()
captured = capsys.readouterr()
assert "Error creating configuration file" in captured.err
def test_main_list_sessions_no_sessions(capsys):
with patch("pr.core.session.SessionManager") as mock_sm:
with patch("rp.core.session.SessionManager") as mock_sm:
mock_instance = mock_sm.return_value
mock_instance.list_sessions.return_value = []
with patch("sys.argv", ["pr", "--list-sessions"]):
with patch("sys.argv", ["rp", "--list-sessions"]):
main()
captured = capsys.readouterr()
assert "No saved sessions found" in captured.out