feat: implement agent communication bus

feat: add agent message dataclass
feat: define message types enum
feat: create agent communication bus class
feat: initialize database connection
feat: create agent messages table
feat: implement send_message method
feat: implement receive_messages method
feat: add agent roles
feat: add agent manager
feat: add agent communication
feat: create autonomous detection module
feat: create autonomous mode module
feat: add cache api
feat: add tool cache
feat: add command handlers
feat: add help docs
feat: add multiplexer commands
feat: update pyproject.toml version to 1.7.0
feat: update changelog with version 1.7.0 details
feat: create rp init file
feat: create rp main file
feat: create core assistant class
feat: add verbose mode to rp main
feat: add interactive mode to rp main
feat: add session management to rp main
feat: add plugin support to rp main
feat: add usage statistics to rp main
This commit is contained in:
retoor 2025-11-07 16:21:47 +01:00
parent cf640a2782
commit c000afc699
81 changed files with 12527 additions and 1 deletion

CHANGELOG.md
View File

@ -4,6 +4,14 @@
## Version 1.7.0 - 2025-11-06
Ads can now be shown on multiple computers simultaneously. This release bumps the version to 1.7.0.
**Changes:** 2 files, 10 lines
**Languages:** Markdown (8 lines), TOML (2 lines)
## Version 1.6.0 - 2025-11-06
The system now supports displaying ads across multiple machines. This improves ad delivery and scalability for users and developers.

pyproject.toml
View File

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "rp"
-version = "1.6.0"
+version = "1.7.0"
description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md"
requires-python = ">=3.12"

4
rp/__init__.py Normal file
View File

@ -0,0 +1,4 @@
__version__ = "1.0.0"
from rp.core import Assistant
__all__ = ["Assistant"]

130
rp/__main__.py Normal file
View File

@ -0,0 +1,130 @@
import argparse
import asyncio
import sys
from rp import __version__
from rp.core import Assistant
async def main_async():
import tracemalloc
tracemalloc.start()
parser = argparse.ArgumentParser(
description="RP Assistant - Professional CLI AI assistant with visual effects, cost tracking, and autonomous execution",
epilog='\nExamples:\n rp "What is Python?" # Single query\n rp -i # Interactive mode\n rp -i --model gpt-4 # Use specific model\n rp --save-session my-task -i # Save session\n rp --load-session my-task # Load session\n rp --list-sessions # List all sessions\n rp --usage # Show token usage stats\n\nFeatures:\n • Visual progress indicators during AI calls\n • Real-time cost tracking for each query\n • Sophisticated CLI with colors and effects\n • Tool execution with status updates\n\nCommands in interactive mode:\n /auto [task] - Enter autonomous mode\n /reset - Clear message history\n /verbose - Toggle verbose output\n /models - List available models\n /tools - List available tools\n /usage - Show usage statistics\n /save <name> - Save current session\n exit, quit, q - Exit the program\n ',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("message", nargs="?", help="Message to send to assistant")
parser.add_argument("--version", action="version", version=f"RP Assistant {__version__}")
parser.add_argument("-m", "--model", help="AI model to use")
parser.add_argument("-u", "--api-url", help="API endpoint URL")
parser.add_argument("--model-list-url", help="Model list endpoint URL")
parser.add_argument("-i", "--interactive", action="store_true", help="Interactive mode")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
parser.add_argument(
"--debug", action="store_true", help="Enable debug mode with detailed logging"
)
parser.add_argument("--no-syntax", action="store_true", help="Disable syntax highlighting")
parser.add_argument(
"--include-env", action="store_true", help="Include environment variables in context"
)
parser.add_argument("-c", "--context", action="append", help="Additional context files")
parser.add_argument(
"--api-mode", action="store_true", help="API mode for specialized interaction"
)
parser.add_argument(
"--output", choices=["text", "json", "structured"], default="text", help="Output format"
)
parser.add_argument("--quiet", action="store_true", help="Minimal output")
parser.add_argument("--save-session", metavar="NAME", help="Save session with given name")
parser.add_argument("--load-session", metavar="NAME", help="Load session with given name")
parser.add_argument("--list-sessions", action="store_true", help="List all saved sessions")
parser.add_argument("--delete-session", metavar="NAME", help="Delete a saved session")
parser.add_argument(
"--export-session", nargs=2, metavar=("NAME", "FILE"), help="Export session to file"
)
parser.add_argument("--usage", action="store_true", help="Show token usage statistics")
parser.add_argument(
"--create-config", action="store_true", help="Create default configuration file"
)
parser.add_argument("--plugins", action="store_true", help="List loaded plugins")
args = parser.parse_args()
if args.create_config:
from rp.core.config_loader import create_default_config
if create_default_config():
print("Configuration file created at ~/.prrc")
else:
print("Error creating configuration file", file=sys.stderr)
return
if args.list_sessions:
from rp.core.session import SessionManager
sm = SessionManager()
sessions = sm.list_sessions()
if not sessions:
print("No saved sessions found")
else:
print(f"Found {len(sessions)} saved sessions:\n")
for sess in sessions:
print(f" {sess['name']}")
print(f" Created: {sess['created_at']}")
print(f" Messages: {sess['message_count']}")
print()
return
if args.delete_session:
from rp.core.session import SessionManager
sm = SessionManager()
if sm.delete_session(args.delete_session):
print(f"Session '{args.delete_session}' deleted")
else:
print(f"Error deleting session '{args.delete_session}'", file=sys.stderr)
return
if args.export_session:
from rp.core.session import SessionManager
sm = SessionManager()
name, output_file = args.export_session
format_type = "json"
if output_file.endswith(".md"):
format_type = "markdown"
elif output_file.endswith(".txt"):
format_type = "txt"
if sm.export_session(name, output_file, format_type):
print(f"Session exported to {output_file}")
else:
print(f"Error exporting session", file=sys.stderr)
return
if args.usage:
from rp.core.usage_tracker import UsageTracker
usage = UsageTracker.get_total_usage()
print(f"\nTotal Usage Statistics:")
print(f" Requests: {usage['total_requests']}")
print(f" Tokens: {usage['total_tokens']:,}")
print(f" Estimated Cost: ${usage['total_cost']:.4f}")
return
if args.plugins:
from rp.plugins.loader import PluginLoader
loader = PluginLoader()
loader.load_plugins()
plugins = loader.list_loaded_plugins()
if not plugins:
print("No plugins loaded")
else:
print(f"Loaded {len(plugins)} plugins:")
for plugin in plugins:
print(f" - {plugin}")
return
assistant = Assistant(args)
await assistant.run()
def main():
return asyncio.run(main_async())
if __name__ == "__main__":
main()

13
rp/agents/__init__.py Normal file
View File

@ -0,0 +1,13 @@
from .agent_communication import AgentCommunicationBus, AgentMessage
from .agent_manager import AgentInstance, AgentManager
from .agent_roles import AgentRole, get_agent_role, list_agent_roles
__all__ = [
"AgentRole",
"get_agent_role",
"list_agent_roles",
"AgentManager",
"AgentInstance",
"AgentMessage",
"AgentCommunicationBus",
]

145
rp/agents/agent_communication.py Normal file
View File

@ -0,0 +1,145 @@
import json
import sqlite3
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional
class MessageType(Enum):
REQUEST = "request"
RESPONSE = "response"
NOTIFICATION = "notification"
@dataclass
class AgentMessage:
message_id: str
from_agent: str
to_agent: str
message_type: MessageType
content: str
metadata: dict
timestamp: float
def to_dict(self) -> dict:
return {
"message_id": self.message_id,
"from_agent": self.from_agent,
"to_agent": self.to_agent,
"message_type": self.message_type.value,
"content": self.content,
"metadata": self.metadata,
"timestamp": self.timestamp,
}
@classmethod
def from_dict(cls, data: dict) -> "AgentMessage":
return cls(
message_id=data["message_id"],
from_agent=data["from_agent"],
to_agent=data["to_agent"],
message_type=MessageType(data["message_type"]),
content=data["content"],
metadata=data["metadata"],
timestamp=data["timestamp"],
)
class AgentCommunicationBus:
def __init__(self, db_path: str):
self.db_path = db_path
self.conn = sqlite3.connect(db_path)
self._create_tables()
def _create_tables(self):
cursor = self.conn.cursor()
cursor.execute(
"\n CREATE TABLE IF NOT EXISTS agent_messages (\n message_id TEXT PRIMARY KEY,\n from_agent TEXT,\n to_agent TEXT,\n message_type TEXT,\n content TEXT,\n metadata TEXT,\n timestamp REAL,\n session_id TEXT,\n read INTEGER DEFAULT 0\n )\n "
)
cursor.execute("PRAGMA table_info(agent_messages)")
columns = [row[1] for row in cursor.fetchall()]
if "read" not in columns:
cursor.execute("ALTER TABLE agent_messages ADD COLUMN read INTEGER DEFAULT 0")
self.conn.commit()
def send_message(self, message: AgentMessage, session_id: Optional[str] = None):
cursor = self.conn.cursor()
cursor.execute(
"\n INSERT INTO agent_messages\n (message_id, from_agent, to_agent, message_type, content, metadata, timestamp, session_id)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?)\n ",
(
message.message_id,
message.from_agent,
message.to_agent,
message.message_type.value,
message.content,
json.dumps(message.metadata),
message.timestamp,
session_id,
),
)
self.conn.commit()
def receive_messages(self, agent_id: str, unread_only: bool = True) -> List[AgentMessage]:
cursor = self.conn.cursor()
if unread_only:
cursor.execute(
"\n SELECT message_id, from_agent, to_agent, message_type, content, metadata, timestamp\n FROM agent_messages\n WHERE to_agent = ? AND read = 0\n ORDER BY timestamp ASC\n ",
(agent_id,),
)
else:
cursor.execute(
"\n SELECT message_id, from_agent, to_agent, message_type, content, metadata, timestamp\n FROM agent_messages\n WHERE to_agent = ?\n ORDER BY timestamp ASC\n ",
(agent_id,),
)
messages = []
for row in cursor.fetchall():
messages.append(
AgentMessage(
message_id=row[0],
from_agent=row[1],
to_agent=row[2],
message_type=MessageType(row[3]),
content=row[4],
metadata=json.loads(row[5]) if row[5] else {},
timestamp=row[6],
)
)
return messages
def mark_as_read(self, message_id: str):
cursor = self.conn.cursor()
cursor.execute("UPDATE agent_messages SET read = 1 WHERE message_id = ?", (message_id,))
self.conn.commit()
def clear_messages(self, session_id: Optional[str] = None):
cursor = self.conn.cursor()
if session_id:
cursor.execute("DELETE FROM agent_messages WHERE session_id = ?", (session_id,))
else:
cursor.execute("DELETE FROM agent_messages")
self.conn.commit()
def close(self):
self.conn.close()
def get_conversation_history(self, agent_a: str, agent_b: str) -> List[AgentMessage]:
cursor = self.conn.cursor()
cursor.execute(
"\n SELECT message_id, from_agent, to_agent, message_type, content, metadata, timestamp\n FROM agent_messages\n WHERE (from_agent = ? AND to_agent = ?) OR (from_agent = ? AND to_agent = ?)\n ORDER BY timestamp ASC\n ",
(agent_a, agent_b, agent_b, agent_a),
)
messages = []
for row in cursor.fetchall():
messages.append(
AgentMessage(
message_id=row[0],
from_agent=row[1],
to_agent=row[2],
message_type=MessageType(row[3]),
content=row[4],
metadata=json.loads(row[5]) if row[5] else {},
timestamp=row[6],
)
)
return messages
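A minimal usage sketch of the bus above, with the classes from this file in scope; an on-disk or :memory: SQLite path both work, and the agent names and message content are illustrative:

import time
import uuid

bus = AgentCommunicationBus(":memory:")
msg = AgentMessage(
    message_id=str(uuid.uuid4())[:16],
    from_agent="planner_1",
    to_agent="coder_1",
    message_type=MessageType.REQUEST,
    content="Implement the parser module",
    metadata={"priority": "high"},
    timestamp=time.time(),
)
bus.send_message(msg, session_id="demo")
for m in bus.receive_messages("coder_1"):  # unread only, oldest first
    print(m.from_agent, m.message_type.value, m.content)
    bus.mark_as_read(m.message_id)
bus.close()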

167
rp/agents/agent_manager.py Normal file
View File

@ -0,0 +1,167 @@
import time
import uuid
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional
from ..memory.knowledge_store import KnowledgeStore
from .agent_communication import AgentCommunicationBus, AgentMessage, MessageType
from .agent_roles import AgentRole, get_agent_role
@dataclass
class AgentInstance:
agent_id: str
role: AgentRole
message_history: List[Dict[str, Any]] = field(default_factory=list)
context: Dict[str, Any] = field(default_factory=dict)
created_at: float = field(default_factory=time.time)
task_count: int = 0
def add_message(self, role: str, content: str):
self.message_history.append({"role": role, "content": content, "timestamp": time.time()})
def get_system_message(self) -> Dict[str, str]:
return {"role": "system", "content": self.role.system_prompt}
def get_messages_for_api(self) -> List[Dict[str, str]]:
return [self.get_system_message()] + [
{"role": msg["role"], "content": msg["content"]} for msg in self.message_history
]
class AgentManager:
def __init__(self, db_path: str, api_caller: Callable):
self.db_path = db_path
self.api_caller = api_caller
self.communication_bus = AgentCommunicationBus(db_path)
self.knowledge_store = KnowledgeStore(db_path)
self.active_agents: Dict[str, AgentInstance] = {}
self.session_id = str(uuid.uuid4())[:16]
def create_agent(self, role_name: str, agent_id: Optional[str] = None) -> str:
if agent_id is None:
agent_id = f"{role_name}_{str(uuid.uuid4())[:8]}"
role = get_agent_role(role_name)
agent = AgentInstance(agent_id=agent_id, role=role)
self.active_agents[agent_id] = agent
return agent_id
def get_agent(self, agent_id: str) -> Optional[AgentInstance]:
return self.active_agents.get(agent_id)
def remove_agent(self, agent_id: str) -> bool:
if agent_id in self.active_agents:
del self.active_agents[agent_id]
return True
return False
def execute_agent_task(
self, agent_id: str, task: str, context: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
agent = self.get_agent(agent_id)
if not agent:
return {"error": f"Agent {agent_id} not found"}
if context:
agent.context.update(context)
agent.add_message("user", task)
knowledge_matches = self.knowledge_store.search_entries(task, top_k=3)
agent.task_count += 1
messages = agent.get_messages_for_api()
if knowledge_matches:
knowledge_content = "Knowledge base matches based on your query:\\n"
for i, entry in enumerate(knowledge_matches, 1):
shortened_content = entry.content[:2000]
knowledge_content += f"{i}. {shortened_content}\\n\\n"
messages.insert(-1, {"role": "user", "content": knowledge_content})
try:
response = self.api_caller(
messages=messages,
temperature=agent.role.temperature,
max_tokens=agent.role.max_tokens,
)
if response and "choices" in response:
assistant_message = response["choices"][0]["message"]["content"]
agent.add_message("assistant", assistant_message)
return {
"success": True,
"agent_id": agent_id,
"response": assistant_message,
"role": agent.role.name,
"task_count": agent.task_count,
}
else:
return {"error": "Invalid API response", "agent_id": agent_id}
except Exception as e:
return {"error": str(e), "agent_id": agent_id}
def send_agent_message(
self,
from_agent_id: str,
to_agent_id: str,
content: str,
message_type: MessageType = MessageType.REQUEST,
metadata: Optional[Dict[str, Any]] = None,
):
message = AgentMessage(
from_agent=from_agent_id,
to_agent=to_agent_id,
message_type=message_type,
content=content,
metadata=metadata or {},
timestamp=time.time(),
message_id=str(uuid.uuid4())[:16],
)
self.communication_bus.send_message(message, self.session_id)
return message.message_id
def get_agent_messages(self, agent_id: str, unread_only: bool = True) -> List[AgentMessage]:
return self.communication_bus.receive_messages(agent_id, unread_only)
def collaborate_agents(self, orchestrator_id: str, task: str, agent_roles: List[str]):
orchestrator = self.get_agent(orchestrator_id)
if not orchestrator:
orchestrator_id = self.create_agent("orchestrator")
orchestrator = self.get_agent(orchestrator_id)
worker_agents = []
for role in agent_roles:
agent_id = self.create_agent(role)
worker_agents.append({"agent_id": agent_id, "role": role})
orchestration_prompt = f"Task: {task}\n\nAvailable specialized agents:\n{chr(10).join([f'- {a['agent_id']} ({a['role']})' for a in worker_agents])}\n\nBreak down the task and delegate subtasks to appropriate agents. Coordinate their work and integrate results."
orchestrator_result = self.execute_agent_task(orchestrator_id, orchestration_prompt)
results = {"orchestrator": orchestrator_result, "agents": []}
for agent_info in worker_agents:
agent_id = agent_info["agent_id"]
messages = self.get_agent_messages(agent_id)
for msg in messages:
subtask = msg.content
result = self.execute_agent_task(agent_id, subtask)
results["agents"].append(result)
self.send_agent_message(
from_agent_id=agent_id,
to_agent_id=orchestrator_id,
content=result.get("response", ""),
message_type=MessageType.RESPONSE,
)
self.communication_bus.mark_as_read(msg.message_id)
return results
def get_session_summary(self) -> Dict[str, Any]:
summary = {
"session_id": self.session_id,
"active_agents": len(self.active_agents),
"agents": [
{
"agent_id": agent_id,
"role": agent.role.name,
"task_count": agent.task_count,
"message_count": len(agent.message_history),
}
for agent_id, agent in self.active_agents.items()
],
}
return summary
def clear_session(self):
self.active_agents.clear()
self.communication_bus.clear_messages(session_id=self.session_id)
self.session_id = str(uuid.uuid4())[:16]
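A sketch of driving AgentManager offline with a stubbed api_caller; the stub and its canned OpenAI-style reply are assumptions standing in for a real chat-completions call, and db_path can be any SQLite file:

from rp.agents import AgentManager

def fake_api_caller(messages, temperature, max_tokens):
    # Canned response in the {"choices": [{"message": ...}]} shape the manager expects.
    return {"choices": [{"message": {"content": "Stub reply to: " + messages[-1]["content"]}}]}

manager = AgentManager(db_path="agents.db", api_caller=fake_api_caller)
agent_id = manager.create_agent("coding")
result = manager.execute_agent_task(agent_id, "Review utils.py for bugs")
print(result["success"], result["response"])
print(manager.get_session_summary())
manager.clear_session()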

160
rp/agents/agent_roles.py Normal file
View File

@ -0,0 +1,160 @@
from dataclasses import dataclass
from typing import Dict, List, Set
@dataclass
class AgentRole:
name: str
description: str
system_prompt: str
allowed_tools: Set[str]
specialization_areas: List[str]
temperature: float = 0.7
max_tokens: int = 4096
AGENT_ROLES = {
"coding": AgentRole(
name="coding",
description="Specialized in writing, reviewing, and debugging code",
system_prompt="You are a coding specialist AI assistant. Your primary responsibilities:\n- Write clean, efficient, well-structured code\n- Review code for bugs, security issues, and best practices\n- Refactor and optimize existing code\n- Implement features based on specifications\n- Follow language-specific conventions and patterns\nFocus on code quality, maintainability, and performance.",
allowed_tools={
"read_file",
"write_file",
"list_directory",
"create_directory",
"change_directory",
"get_current_directory",
"python_exec",
"run_command",
"index_directory",
},
specialization_areas=["code_writing", "code_review", "debugging", "refactoring"],
temperature=0.3,
),
"research": AgentRole(
name="research",
description="Specialized in information gathering and analysis",
system_prompt="You are a research specialist AI assistant. Your primary responsibilities:\n- Search for and gather relevant information\n- Analyze data and documentation\n- Synthesize findings into clear summaries\n- Verify facts and cross-reference sources\n- Identify trends and patterns in information\nFocus on accuracy, thoroughness, and clear communication of findings.",
allowed_tools={
"read_file",
"list_directory",
"index_directory",
"http_fetch",
"web_search",
"web_search_news",
"db_query",
"db_get",
},
specialization_areas=[
"information_gathering",
"analysis",
"documentation",
"fact_checking",
],
temperature=0.5,
),
"data_analysis": AgentRole(
name="data_analysis",
description="Specialized in data processing and analysis",
system_prompt="You are a data analysis specialist AI assistant. Your primary responsibilities:\n- Process and analyze structured and unstructured data\n- Perform statistical analysis and pattern recognition\n- Query databases and extract insights\n- Create data summaries and reports\n- Identify anomalies and trends\nFocus on accuracy, data integrity, and actionable insights.",
allowed_tools={
"db_query",
"db_get",
"db_set",
"read_file",
"write_file",
"python_exec",
"run_command",
"list_directory",
},
specialization_areas=["data_processing", "statistical_analysis", "database_operations"],
temperature=0.3,
),
"planning": AgentRole(
name="planning",
description="Specialized in task planning and coordination",
system_prompt="You are a planning specialist AI assistant. Your primary responsibilities:\n- Break down complex tasks into manageable steps\n- Create execution plans and workflows\n- Identify dependencies and prerequisites\n- Estimate effort and resource requirements\n- Coordinate between different components\nFocus on logical organization, completeness, and feasibility.",
allowed_tools={
"read_file",
"write_file",
"list_directory",
"index_directory",
"db_set",
"db_get",
},
specialization_areas=["task_decomposition", "workflow_design", "coordination"],
temperature=0.6,
),
"testing": AgentRole(
name="testing",
description="Specialized in testing and quality assurance",
system_prompt="You are a testing specialist AI assistant. Your primary responsibilities:\n- Design and execute test cases\n- Identify edge cases and potential failures\n- Verify functionality and correctness\n- Test error handling and edge conditions\n- Ensure code meets quality standards\nFocus on thoroughness, coverage, and issue identification.",
allowed_tools={
"read_file",
"write_file",
"python_exec",
"run_command",
"list_directory",
"db_query",
},
specialization_areas=["test_design", "quality_assurance", "validation"],
temperature=0.4,
),
"documentation": AgentRole(
name="documentation",
description="Specialized in creating and maintaining documentation",
system_prompt="You are a documentation specialist AI assistant. Your primary responsibilities:\n- Write clear, comprehensive documentation\n- Create API references and user guides\n- Document code with comments and docstrings\n- Organize and structure information logically\n- Ensure documentation is up-to-date and accurate\nFocus on clarity, completeness, and user-friendliness.",
allowed_tools={
"read_file",
"write_file",
"list_directory",
"index_directory",
"http_fetch",
"web_search",
},
specialization_areas=["technical_writing", "documentation_organization", "user_guides"],
temperature=0.6,
),
"orchestrator": AgentRole(
name="orchestrator",
description="Coordinates multiple agents and manages overall execution",
system_prompt="You are an orchestrator AI assistant. Your primary responsibilities:\n- Coordinate multiple specialized agents\n- Delegate tasks to appropriate agents\n- Integrate results from different agents\n- Manage overall workflow execution\n- Ensure task completion and quality\nFocus on effective delegation, integration, and overall success.",
allowed_tools={"read_file", "write_file", "list_directory", "db_set", "db_get", "db_query"},
specialization_areas=["agent_coordination", "task_delegation", "result_integration"],
temperature=0.5,
),
"general": AgentRole(
name="general",
description="General purpose agent for miscellaneous tasks",
system_prompt="You are a general purpose AI assistant. Your responsibilities:\n- Handle diverse tasks across multiple domains\n- Provide balanced assistance for various needs\n- Adapt to different types of requests\n- Collaborate with specialized agents when needed\nFocus on versatility, helpfulness, and task completion.",
allowed_tools={
"read_file",
"write_file",
"list_directory",
"create_directory",
"change_directory",
"get_current_directory",
"python_exec",
"run_command",
"run_command_interactive",
"http_fetch",
"web_search",
"web_search_news",
"db_set",
"db_get",
"db_query",
"index_directory",
},
specialization_areas=["general_assistance"],
temperature=0.7,
),
}
def get_agent_role(role_name: str) -> AgentRole:
return AGENT_ROLES.get(role_name, AGENT_ROLES["general"])
def list_agent_roles() -> Dict[str, AgentRole]:
return AGENT_ROLES.copy()
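A quick sketch of role lookup; note that unknown role names fall back to the general role rather than raising:

from rp.agents import get_agent_role, list_agent_roles

role = get_agent_role("research")
print(role.temperature, sorted(role.allowed_tools)[:3])
print(get_agent_role("no_such_role").name)  # "general" (fallback)
print(sorted(list_agent_roles()))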

4
rp/autonomous/__init__.py Normal file
View File

@ -0,0 +1,4 @@
from rp.autonomous.detection import is_task_complete
from rp.autonomous.mode import process_response_autonomous, run_autonomous_mode
__all__ = ["is_task_complete", "run_autonomous_mode", "process_response_autonomous"]

43
rp/autonomous/detection.py Normal file
View File

@ -0,0 +1,43 @@
from rp.config import MAX_AUTONOMOUS_ITERATIONS
from rp.ui import Colors
def is_task_complete(response, iteration):
if "error" in response:
return True
if "choices" not in response or not response["choices"]:
return True
message = response["choices"][0]["message"]
content = message.get("content", "").lower()
completion_keywords = [
"task complete",
"task is complete",
"finished",
"done",
"successfully completed",
"task accomplished",
"all done",
"implementation complete",
"setup complete",
"installation complete",
]
error_keywords = [
"cannot proceed",
"unable to continue",
"fatal error",
"cannot complete",
"impossible to",
]
has_tool_calls = "tool_calls" in message and message["tool_calls"]
mentions_completion = any((keyword in content for keyword in completion_keywords))
mentions_error = any((keyword in content for keyword in error_keywords))
if mentions_error:
return True
if mentions_completion and (not has_tool_calls):
return True
if iteration > 5 and (not has_tool_calls):
return True
if iteration >= MAX_AUTONOMOUS_ITERATIONS:
print(f"{Colors.YELLOW}⚠ Maximum iterations reached{Colors.RESET}")
return True
return False
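A small illustration of the completion heuristics above, using hand-built response dicts (the contents are illustrative):

from rp.autonomous.detection import is_task_complete

done = {"choices": [{"message": {"content": "Task complete. All tests pass."}}]}
busy = {"choices": [{"message": {"content": "Still analyzing the codebase.", "tool_calls": [{"id": "t1"}]}}]}
print(is_task_complete(done, iteration=2))  # True: completion keyword, no pending tool calls
print(is_task_complete(busy, iteration=2))  # False: tool calls pending, no keywords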

224
rp/autonomous/mode.py Normal file
View File

@ -0,0 +1,224 @@
import asyncio
import json
import logging
import time
from rp.autonomous.detection import is_task_complete
from rp.core.api import call_api
from rp.core.context import truncate_tool_result
from rp.tools.base import get_tools_definition
from rp.ui import Colors, display_tool_call
logger = logging.getLogger("rp")
def run_autonomous_mode(assistant, task):
assistant.autonomous_mode = True
assistant.autonomous_iterations = 0
logger.debug(f"=== AUTONOMOUS MODE START ===")
logger.debug(f"Task: {task}")
from rp.core.knowledge_context import inject_knowledge_context
inject_knowledge_context(assistant, task)
assistant.messages.append({"role": "user", "content": f"{task}"})
try:
while True:
assistant.autonomous_iterations += 1
logger.debug(f"--- Autonomous iteration {assistant.autonomous_iterations} ---")
logger.debug(f"Messages before context management: {len(assistant.messages)}")
from rp.core.context import manage_context_window
assistant.messages = manage_context_window(assistant.messages, assistant.verbose)
logger.debug(f"Messages after context management: {len(assistant.messages)}")
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
),
loop,
)
response = future.result()
except RuntimeError:
response = asyncio.run(
call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
)
if "error" in response:
logger.error(f"API error in autonomous mode: {response['error']}")
print(f"{Colors.RED}Error: {response['error']}{Colors.RESET}")
break
is_complete = is_task_complete(response, assistant.autonomous_iterations)
logger.debug(f"Task completion check: {is_complete}")
if is_complete:
result = process_response_autonomous(assistant, response)
print(f"\n{Colors.GREEN}r:{Colors.RESET} {result}\n")
logger.debug(f"=== AUTONOMOUS MODE COMPLETE ===")
logger.debug(f"Total iterations: {assistant.autonomous_iterations}")
logger.debug(f"Final message count: {len(assistant.messages)}")
break
result = process_response_autonomous(assistant, response)
if result:
print(f"\n{Colors.GREEN}r:{Colors.RESET} {result}\n")
time.sleep(0.5)
except KeyboardInterrupt:
logger.debug("Autonomous mode interrupted by user")
print(f"\n{Colors.YELLOW}Autonomous mode interrupted by user{Colors.RESET}")
finally:
assistant.autonomous_mode = False
logger.debug("=== AUTONOMOUS MODE END ===")
def process_response_autonomous(assistant, response):
if "error" in response:
return f"Error: {response['error']}"
if "choices" not in response or not response["choices"]:
return "No response from API"
message = response["choices"][0]["message"]
assistant.messages.append(message)
if "tool_calls" in message and message["tool_calls"]:
tool_results = []
for tool_call in message["tool_calls"]:
func_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
result = execute_single_tool(assistant, func_name, arguments)
if isinstance(result, str):
try:
result = json.loads(result)
except json.JSONDecodeError as ex:
result = {"error": str(ex)}
status = "success" if result.get("status") == "success" else "error"
result = truncate_tool_result(result)
display_tool_call(func_name, arguments, status, result)
tool_results.append(
{"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(result)}
)
for result in tool_results:
assistant.messages.append(result)
try:
loop = asyncio.get_running_loop()
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = asyncio.run_coroutine_threadsafe(
call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
),
loop,
)
follow_up = future.result()
except RuntimeError:
follow_up = asyncio.run(
call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
)
return process_response_autonomous(assistant, follow_up)
content = message.get("content", "")
from rp.ui import render_markdown
return render_markdown(content, assistant.syntax_highlighting)
def execute_single_tool(assistant, func_name, arguments):
logger.debug(f"Executing tool in autonomous mode: {func_name}")
logger.debug(f"Tool arguments: {arguments}")
from rp.tools import (
apply_patch,
chdir,
close_editor,
create_diff,
db_get,
db_query,
db_set,
editor_insert_text,
editor_replace_text,
editor_search,
getpwd,
http_fetch,
index_source_directory,
kill_process,
list_directory,
mkdir,
open_editor,
python_exec,
read_file,
run_command,
run_command_interactive,
search_replace,
tail_process,
web_search,
web_search_news,
write_file,
)
from rp.tools.filesystem import clear_edit_tracker, display_edit_summary, display_edit_timeline
from rp.tools.patch import display_file_diff
func_map = {
"http_fetch": lambda **kw: http_fetch(**kw),
"run_command": lambda **kw: run_command(**kw),
"tail_process": lambda **kw: tail_process(**kw),
"kill_process": lambda **kw: kill_process(**kw),
"run_command_interactive": lambda **kw: run_command_interactive(**kw),
"read_file": lambda **kw: read_file(**kw),
"write_file": lambda **kw: write_file(**kw, db_conn=assistant.db_conn),
"list_directory": lambda **kw: list_directory(**kw),
"mkdir": lambda **kw: mkdir(**kw),
"chdir": lambda **kw: chdir(**kw),
"getpwd": lambda **kw: getpwd(**kw),
"db_set": lambda **kw: db_set(**kw, db_conn=assistant.db_conn),
"db_get": lambda **kw: db_get(**kw, db_conn=assistant.db_conn),
"db_query": lambda **kw: db_query(**kw, db_conn=assistant.db_conn),
"web_search": lambda **kw: web_search(**kw),
"web_search_news": lambda **kw: web_search_news(**kw),
"python_exec": lambda **kw: python_exec(**kw, python_globals=assistant.python_globals),
"index_source_directory": lambda **kw: index_source_directory(**kw),
"search_replace": lambda **kw: search_replace(**kw),
"open_editor": lambda **kw: open_editor(**kw),
"editor_insert_text": lambda **kw: editor_insert_text(**kw),
"editor_replace_text": lambda **kw: editor_replace_text(**kw),
"editor_search": lambda **kw: editor_search(**kw),
"close_editor": lambda **kw: close_editor(**kw),
"create_diff": lambda **kw: create_diff(**kw),
"apply_patch": lambda **kw: apply_patch(**kw),
"display_file_diff": lambda **kw: display_file_diff(**kw),
"display_edit_summary": lambda **kw: display_edit_summary(),
"display_edit_timeline": lambda **kw: display_edit_timeline(**kw),
"clear_edit_tracker": lambda **kw: clear_edit_tracker(),
}
if func_name in func_map:
try:
result = func_map[func_name](**arguments)
logger.debug(f"Tool execution result: {str(result)[:200]}...")
return result
except Exception as e:
logger.error(f"Tool execution error: {str(e)}")
return {"status": "error", "error": str(e)}
else:
logger.error(f"Unknown function requested: {func_name}")
return {"status": "error", "error": f"Unknown function: {func_name}"}

4
rp/cache/__init__.py vendored Normal file
View File

@ -0,0 +1,4 @@
from .api_cache import APICache
from .tool_cache import ToolCache
__all__ = ["APICache", "ToolCache"]

127
rp/cache/api_cache.py vendored Normal file
View File

@ -0,0 +1,127 @@
import hashlib
import json
import sqlite3
import time
from typing import Any, Dict, Optional
class APICache:
def __init__(self, db_path: str, ttl_seconds: int = 3600):
self.db_path = db_path
self.ttl_seconds = ttl_seconds
self._initialize_cache()
def _initialize_cache(self):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n CREATE TABLE IF NOT EXISTS api_cache (\n cache_key TEXT PRIMARY KEY,\n response_data TEXT NOT NULL,\n created_at INTEGER NOT NULL,\n expires_at INTEGER NOT NULL,\n model TEXT,\n token_count INTEGER,\n hit_count INTEGER DEFAULT 0\n )\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_expires_at ON api_cache(expires_at)\n "
)
cursor.execute("PRAGMA table_info(api_cache)")
columns = [row[1] for row in cursor.fetchall()]
if "hit_count" not in columns:
cursor.execute("ALTER TABLE api_cache ADD COLUMN hit_count INTEGER DEFAULT 0")
conn.commit()
conn.close()
def _generate_cache_key(
self, model: str, messages: list, temperature: float, max_tokens: int
) -> str:
cache_data = {
"model": model,
"messages": messages,
"temperature": temperature,
"max_tokens": max_tokens,
}
serialized = json.dumps(cache_data, sort_keys=True)
return hashlib.sha256(serialized.encode()).hexdigest()
def get(
self, model: str, messages: list, temperature: float, max_tokens: int
) -> Optional[Dict[str, Any]]:
cache_key = self._generate_cache_key(model, messages, temperature, max_tokens)
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
current_time = int(time.time())
cursor.execute(
"\n SELECT response_data FROM api_cache\n WHERE cache_key = ? AND expires_at > ?\n ",
(cache_key, current_time),
)
row = cursor.fetchone()
if row:
cursor.execute(
"\n UPDATE api_cache SET hit_count = hit_count + 1\n WHERE cache_key = ?\n ",
(cache_key,),
)
conn.commit()
conn.close()
return json.loads(row[0])
conn.close()
return None
def set(
self,
model: str,
messages: list,
temperature: float,
max_tokens: int,
response: Dict[str, Any],
token_count: int = 0,
):
cache_key = self._generate_cache_key(model, messages, temperature, max_tokens)
current_time = int(time.time())
expires_at = current_time + self.ttl_seconds
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n INSERT OR REPLACE INTO api_cache\n (cache_key, response_data, created_at, expires_at, model, token_count, hit_count)\n VALUES (?, ?, ?, ?, ?, ?, 0)\n ",
(cache_key, json.dumps(response), current_time, expires_at, model, token_count),
)
conn.commit()
conn.close()
def clear_expired(self):
current_time = int(time.time())
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("DELETE FROM api_cache WHERE expires_at <= ?", (current_time,))
deleted_count = cursor.rowcount
conn.commit()
conn.close()
return deleted_count
def clear_all(self):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("DELETE FROM api_cache")
deleted_count = cursor.rowcount
conn.commit()
conn.close()
return deleted_count
def get_statistics(self) -> Dict[str, Any]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM api_cache")
total_entries = cursor.fetchone()[0]
current_time = int(time.time())
cursor.execute("SELECT COUNT(*) FROM api_cache WHERE expires_at > ?", (current_time,))
valid_entries = cursor.fetchone()[0]
cursor.execute(
"SELECT SUM(token_count) FROM api_cache WHERE expires_at > ?", (current_time,)
)
total_tokens = cursor.fetchone()[0] or 0
cursor.execute("SELECT SUM(hit_count) FROM api_cache WHERE expires_at > ?", (current_time,))
total_hits = cursor.fetchone()[0] or 0
conn.close()
return {
"total_entries": total_entries,
"valid_entries": valid_entries,
"expired_entries": total_entries - valid_entries,
"total_cached_tokens": total_tokens,
"total_cache_hits": total_hits,
}
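A read-through sketch for APICache; call_the_api is a hypothetical caller standing in for the real API client, and the model and messages values are illustrative:

cache = APICache("cache.db", ttl_seconds=3600)
model = "gpt-4"
messages = [{"role": "user", "content": "hi"}]

response = cache.get(model, messages, temperature=0.7, max_tokens=256)
if response is None:  # miss: call the API, then store the result for the TTL window
    response = call_the_api(model, messages, 0.7, 256)  # hypothetical
    cache.set(model, messages, 0.7, 256, response, token_count=42)
print(cache.get_statistics())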

136
rp/cache/tool_cache.py vendored Normal file
View File

@ -0,0 +1,136 @@
import hashlib
import json
import sqlite3
import time
from typing import Any, Optional, Set
class ToolCache:
DETERMINISTIC_TOOLS: Set[str] = {
"read_file",
"list_directory",
"get_current_directory",
"db_get",
"db_query",
"index_directory",
"http_fetch",
"web_search",
"web_search_news",
"search_knowledge",
"get_knowledge_entry",
"get_knowledge_by_category",
"get_knowledge_statistics",
}
def __init__(self, db_path: str, ttl_seconds: int = 300):
self.db_path = db_path
self.ttl_seconds = ttl_seconds
self._initialize_cache()
def _initialize_cache(self):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n CREATE TABLE IF NOT EXISTS tool_cache (\n cache_key TEXT PRIMARY KEY,\n tool_name TEXT NOT NULL,\n result_data TEXT NOT NULL,\n created_at INTEGER NOT NULL,\n expires_at INTEGER NOT NULL,\n hit_count INTEGER DEFAULT 0\n )\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_tool_expires ON tool_cache(expires_at)\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_tool_name ON tool_cache(tool_name)\n "
)
conn.commit()
conn.close()
def _generate_cache_key(self, tool_name: str, arguments: dict) -> str:
cache_data = {"tool": tool_name, "args": arguments}
serialized = json.dumps(cache_data, sort_keys=True)
return hashlib.sha256(serialized.encode()).hexdigest()
def is_cacheable(self, tool_name: str) -> bool:
return tool_name in self.DETERMINISTIC_TOOLS
def get(self, tool_name: str, arguments: dict) -> Optional[Any]:
if not self.is_cacheable(tool_name):
return None
cache_key = self._generate_cache_key(tool_name, arguments)
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
current_time = int(time.time())
cursor.execute(
"\n SELECT result_data, hit_count FROM tool_cache\n WHERE cache_key = ? AND expires_at > ?\n ",
(cache_key, current_time),
)
row = cursor.fetchone()
if row:
cursor.execute(
"\n UPDATE tool_cache SET hit_count = hit_count + 1\n WHERE cache_key = ?\n ",
(cache_key,),
)
conn.commit()
conn.close()
return json.loads(row[0])
conn.close()
return None
def set(self, tool_name: str, arguments: dict, result: Any):
if not self.is_cacheable(tool_name):
return
cache_key = self._generate_cache_key(tool_name, arguments)
current_time = int(time.time())
expires_at = current_time + self.ttl_seconds
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n INSERT OR REPLACE INTO tool_cache\n (cache_key, tool_name, result_data, created_at, expires_at, hit_count)\n VALUES (?, ?, ?, ?, ?, 0)\n ",
(cache_key, tool_name, json.dumps(result), current_time, expires_at),
)
conn.commit()
conn.close()
def clear_expired(self):
current_time = int(time.time())
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("DELETE FROM tool_cache WHERE expires_at <= ?", (current_time,))
deleted_count = cursor.rowcount
conn.commit()
conn.close()
return deleted_count
def clear_all(self):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("DELETE FROM tool_cache")
deleted_count = cursor.rowcount
conn.commit()
conn.close()
return deleted_count
def get_statistics(self) -> dict:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM tool_cache")
total_entries = cursor.fetchone()[0]
current_time = int(time.time())
cursor.execute("SELECT COUNT(*) FROM tool_cache WHERE expires_at > ?", (current_time,))
valid_entries = cursor.fetchone()[0]
cursor.execute(
"SELECT SUM(hit_count) FROM tool_cache WHERE expires_at > ?", (current_time,)
)
total_hits = cursor.fetchone()[0] or 0
cursor.execute(
"\n SELECT tool_name, COUNT(*), SUM(hit_count)\n FROM tool_cache\n WHERE expires_at > ?\n GROUP BY tool_name\n ",
(current_time,),
)
tool_stats = {}
for row in cursor.fetchall():
tool_stats[row[0]] = {"cached_entries": row[1], "total_hits": row[2] or 0}
conn.close()
return {
"total_entries": total_entries,
"valid_entries": valid_entries,
"expired_entries": total_entries - valid_entries,
"total_cache_hits": total_hits,
"by_tool": tool_stats,
}
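The same read-through pattern for ToolCache; run_tool is a hypothetical executor and the "path" argument name is illustrative. Non-deterministic tools are silently skipped by both get and set:

cache = ToolCache("cache.db", ttl_seconds=300)
name, args = "read_file", {"path": "README.md"}

result = cache.get(name, args)  # None on a miss or for uncacheable tools
if result is None:
    result = run_tool(name, args)  # hypothetical tool executor
    cache.set(name, args, result)  # no-op unless name is in DETERMINISTIC_TOOLS
print(cache.get_statistics()["by_tool"])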

3
rp/commands/__init__.py Normal file
View File

@ -0,0 +1,3 @@
from rp.commands.handlers import handle_command
__all__ = ["handle_command"]

595
rp/commands/handlers.py Normal file
View File

@ -0,0 +1,595 @@
import asyncio
import json
import logging
import time
from rp.commands.multiplexer_commands import MULTIPLEXER_COMMANDS
from rp.autonomous import run_autonomous_mode
from rp.core.api import list_models
from rp.tools import read_file
from rp.tools.base import get_tools_definition
from rp.ui import Colors
from rp.editor import RPEditor
def handle_command(assistant, command):
command_parts = command.strip().split(maxsplit=1)
cmd = command_parts[0].lower()
if cmd in MULTIPLEXER_COMMANDS:
return MULTIPLEXER_COMMANDS[cmd](
assistant, command_parts[1:] if len(command_parts) > 1 else []
)
if cmd == "/edit":
rp_editor = RPEditor(command_parts[1] if len(command_parts) > 1 else None)
rp_editor.start()
rp_editor.thread.join()
task = str(rp_editor.get_text())
rp_editor.stop()
rp_editor = None
if task:
run_autonomous_mode(assistant, task)
elif cmd == "/prompt":
rp_editor = RPEditor(command_parts[1] if len(command_parts) > 1 else None)
rp_editor.start()
rp_editor.thread.join()
prompt_text = str(rp_editor.get_text())
rp_editor.stop()
rp_editor = None
if prompt_text.strip():
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, prompt_text))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
elif cmd == "/auto":
if len(command_parts) < 2:
print(f"{Colors.RED}Usage: /auto [task description]{Colors.RESET}")
print(
f"{Colors.GRAY}Example: /auto Create a Python web scraper for news sites{Colors.RESET}"
)
return True
task = command_parts[1]
run_autonomous_mode(assistant, task)
return True
if cmd in ["exit", "quit", "q"]:
return False
elif cmd == "/help" or cmd == "help":
from rp.commands.help_docs import (
get_agent_help,
get_background_help,
get_cache_help,
get_full_help,
get_knowledge_help,
get_workflow_help,
)
if len(command_parts) > 1:
topic = command_parts[1].lower()
if topic == "workflows":
print(get_workflow_help())
elif topic == "agents":
print(get_agent_help())
elif topic == "knowledge":
print(get_knowledge_help())
elif topic == "cache":
print(get_cache_help())
elif topic == "background":
print(get_background_help())
else:
print(f"{Colors.RED}Unknown help topic: {topic}{Colors.RESET}")
print(
f"{Colors.GRAY}Available topics: workflows, agents, knowledge, cache, background{Colors.RESET}"
)
else:
print(get_full_help())
elif cmd == "/reset":
assistant.messages = assistant.messages[:1]
print(f"{Colors.GREEN}Message history cleared{Colors.RESET}")
elif cmd == "/dump":
print(json.dumps(assistant.messages, indent=2))
elif cmd == "/verbose":
assistant.verbose = not assistant.verbose
print(
f"Verbose mode: {(Colors.GREEN if assistant.verbose else Colors.RED)}{('ON' if assistant.verbose else 'OFF')}{Colors.RESET}"
)
elif cmd == "/model":
if len(command_parts) < 2:
print("Current model: " + Colors.GREEN + assistant.model + Colors.RESET)
else:
assistant.model = command_parts[1]
print(f"Model set to: {Colors.GREEN}{assistant.model}{Colors.RESET}")
elif cmd == "/models":
models = list_models(assistant.model_list_url, assistant.api_key)
if isinstance(models, dict) and "error" in models:
print(f"{Colors.RED}Error fetching models: {models['error']}{Colors.RESET}")
else:
print(f"{Colors.BOLD}Available Models:{Colors.RESET}")
for model in models:
print(f"{Colors.CYAN}{model['id']}{Colors.RESET}")
elif cmd == "/tools":
print(f"{Colors.BOLD}Available Tools:{Colors.RESET}")
for tool in get_tools_definition():
func = tool["function"]
print(f"{Colors.CYAN}{func['name']}{Colors.RESET}: {func['description']}")
elif cmd == "/review" and len(command_parts) > 1:
filename = command_parts[1]
review_file(assistant, filename)
elif cmd == "/refactor" and len(command_parts) > 1:
filename = command_parts[1]
refactor_file(assistant, filename)
elif cmd == "/obfuscate" and len(command_parts) > 1:
filename = command_parts[1]
obfuscate_file(assistant, filename)
elif cmd == "/workflows":
show_workflows(assistant)
elif cmd == "/workflow" and len(command_parts) > 1:
workflow_name = command_parts[1]
execute_workflow_command(assistant, workflow_name)
elif cmd == "/agent":
if len(command_parts) < 2:
print(f"{Colors.RED}Usage: /agent <role> <task>{Colors.RESET}")
print(
f"{Colors.GRAY}Available roles: coding, research, data_analysis, planning, testing, documentation{Colors.RESET}"
)
return True
args = command_parts[1].split(maxsplit=1)
if len(args) < 2:
print(f"{Colors.RED}Usage: /agent <role> <task>{Colors.RESET}")
print(
f"{Colors.GRAY}Available roles: coding, research, data_analysis, planning, testing, documentation{Colors.RESET}"
)
return True
role, task = (args[0], args[1])
execute_agent_task(assistant, role, task)
elif cmd == "/agents":
show_agents(assistant)
elif cmd == "/collaborate" and len(command_parts) > 1:
task = command_parts[1]
collaborate_agents_command(assistant, task)
elif cmd == "/knowledge" and len(command_parts) > 1:
query = command_parts[1]
search_knowledge(assistant, query)
elif cmd == "/remember" and len(command_parts) > 1:
content = command_parts[1]
store_knowledge(assistant, content)
elif cmd == "/history":
show_conversation_history(assistant)
elif cmd == "/cache":
if len(command_parts) > 1 and command_parts[1].lower() == "clear":
clear_caches(assistant)
else:
show_cache_stats(assistant)
elif cmd == "/stats":
show_system_stats(assistant)
elif cmd.startswith("/bg"):
handle_background_command(assistant, command)
else:
return None
return True
def review_file(assistant, filename):
result = read_file(filename)
if result["status"] == "success":
message = f"Please review this file and provide feedback:\n\n{result['content']}"
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, message))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
else:
print(f"{Colors.RED}Error reading file: {result['error']}{Colors.RESET}")
def refactor_file(assistant, filename):
result = read_file(filename)
if result["status"] == "success":
message = f"Please refactor this code to improve its quality:\n\n{result['content']}"
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, message))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
else:
print(f"{Colors.RED}Error reading file: {result['error']}{Colors.RESET}")
def obfuscate_file(assistant, filename):
result = read_file(filename)
if result["status"] == "success":
message = f"Please obfuscate this code:\n\n{result['content']}"
from rp.core.assistant import process_message
task = asyncio.create_task(process_message(assistant, message))
assistant.background_tasks.add(task)
task.add_done_callback(
lambda t: (
assistant.background_tasks.discard(t),
(
logging.error(f"Background task failed: {t.exception()}")
if t.exception()
else assistant.background_tasks.discard(t)
),
)
)
else:
print(f"{Colors.RED}Error reading file: {result['error']}{Colors.RESET}")
def show_workflows(assistant):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
workflows = assistant.enhanced.get_workflow_list()
if not workflows:
print(f"{Colors.YELLOW}No workflows found{Colors.RESET}")
return
print(f"\n{Colors.BOLD}Available Workflows:{Colors.RESET}")
for wf in workflows:
print(f"{Colors.CYAN}{wf['name']}{Colors.RESET}: {wf['description']}")
print(f" Executions: {wf['execution_count']}")
def execute_workflow_command(assistant, workflow_name):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
print(f"{Colors.YELLOW}Executing workflow: {workflow_name}...{Colors.RESET}")
result = assistant.enhanced.execute_workflow(workflow_name)
if "error" in result:
print(f"{Colors.RED}Error: {result['error']}{Colors.RESET}")
else:
print(f"{Colors.GREEN}Workflow completed successfully{Colors.RESET}")
print(f"Execution ID: {result['execution_id']}")
print(f"Results: {json.dumps(result['results'], indent=2)}")
def execute_agent_task(assistant, role, task):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
print(f"{Colors.YELLOW}Creating {role} agent...{Colors.RESET}")
agent_id = assistant.enhanced.create_agent(role)
print(f"{Colors.GREEN}Agent created: {agent_id}{Colors.RESET}")
print(f"{Colors.YELLOW}Executing task...{Colors.RESET}")
result = assistant.enhanced.agent_task(agent_id, task)
if "error" in result:
print(f"{Colors.RED}Error: {result['error']}{Colors.RESET}")
else:
print(f"\n{Colors.GREEN}{role.capitalize()} Agent Response:{Colors.RESET}")
print(result["response"])
def show_agents(assistant):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
summary = assistant.enhanced.get_agent_summary()
print(f"\n{Colors.BOLD}Agent Session Summary:{Colors.RESET}")
print(f"Active agents: {summary['active_agents']}")
if summary["agents"]:
for agent in summary["agents"]:
print(f"\n{Colors.CYAN}{agent['agent_id']}{Colors.RESET}")
print(f" Role: {agent['role']}")
print(f" Tasks completed: {agent['task_count']}")
print(f" Messages: {agent['message_count']}")
def collaborate_agents_command(assistant, task):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
print(f"{Colors.YELLOW}Initiating agent collaboration...{Colors.RESET}")
roles = ["coding", "research", "planning"]
result = assistant.enhanced.collaborate_agents(task, roles)
print(f"\n{Colors.GREEN}Collaboration completed{Colors.RESET}")
print(f"\nOrchestrator response:")
if "orchestrator" in result and "response" in result["orchestrator"]:
print(result["orchestrator"]["response"])
if result.get("agents"):
print(f"\n{Colors.BOLD}Agent Results:{Colors.RESET}")
for agent_result in result["agents"]:
if "role" in agent_result:
print(f"\n{Colors.CYAN}{agent_result['role']}:{Colors.RESET}")
print(agent_result.get("response", "No response"))
def search_knowledge(assistant, query):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
results = assistant.enhanced.search_knowledge(query)
if not results:
print(f"{Colors.YELLOW}No knowledge entries found for: {query}{Colors.RESET}")
return
print(f"\n{Colors.BOLD}Knowledge Search Results:{Colors.RESET}")
for entry in results:
print(f"\n{Colors.CYAN}[{entry.category}]{Colors.RESET}")
print(f" {entry.content[:200]}...")
print(f" Accessed: {entry.access_count} times")
def store_knowledge(assistant, content):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
import time
import uuid
from rp.memory import KnowledgeEntry
categories = assistant.enhanced.fact_extractor.categorize_content(content)
entry_id = str(uuid.uuid4())[:16]
entry = KnowledgeEntry(
entry_id=entry_id,
category=categories[0] if categories else "general",
content=content,
metadata={"manual_entry": True},
created_at=time.time(),
updated_at=time.time(),
)
assistant.enhanced.knowledge_store.add_entry(entry)
print(f"{Colors.GREEN}Knowledge stored successfully{Colors.RESET}")
print(f"Entry ID: {entry_id}")
print(f"Category: {entry.category}")
def show_conversation_history(assistant):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
history = assistant.enhanced.get_conversation_history(limit=10)
if not history:
print(f"{Colors.YELLOW}No conversation history found{Colors.RESET}")
return
print(f"\n{Colors.BOLD}Recent Conversations:{Colors.RESET}")
for conv in history:
import datetime
started = datetime.datetime.fromtimestamp(conv["started_at"]).strftime("%Y-%m-%d %H:%M")
print(f"\n{Colors.CYAN}{conv['conversation_id']}{Colors.RESET}")
print(f" Started: {started}")
print(f" Messages: {conv['message_count']}")
if conv.get("summary"):
print(f" Summary: {conv['summary'][:100]}...")
if conv.get("topics"):
print(f" Topics: {', '.join(conv['topics'])}")
def show_cache_stats(assistant):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
stats = assistant.enhanced.get_cache_statistics()
print(f"\n{Colors.BOLD}Cache Statistics:{Colors.RESET}")
if "api_cache" in stats:
api_stats = stats["api_cache"]
print(f"\n{Colors.CYAN}API Cache:{Colors.RESET}")
print(f" Total entries: {api_stats['total_entries']}")
print(f" Valid entries: {api_stats['valid_entries']}")
print(f" Expired entries: {api_stats['expired_entries']}")
print(f" Cached tokens: {api_stats['total_cached_tokens']}")
print(f" Total cache hits: {api_stats['total_cache_hits']}")
if "tool_cache" in stats:
tool_stats = stats["tool_cache"]
print(f"\n{Colors.CYAN}Tool Cache:{Colors.RESET}")
print(f" Total entries: {tool_stats['total_entries']}")
print(f" Valid entries: {tool_stats['valid_entries']}")
print(f" Total cache hits: {tool_stats['total_cache_hits']}")
if tool_stats.get("by_tool"):
print(f"\n Per-tool statistics:")
for tool_name, tool_stat in tool_stats["by_tool"].items():
print(
f" {tool_name}: {tool_stat['cached_entries']} entries, {tool_stat['total_hits']} hits"
)
def clear_caches(assistant):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
assistant.enhanced.clear_caches()
print(f"{Colors.GREEN}All caches cleared successfully{Colors.RESET}")
def show_system_stats(assistant):
if not hasattr(assistant, "enhanced"):
print(f"{Colors.YELLOW}Enhanced features not initialized{Colors.RESET}")
return
print(f"\n{Colors.BOLD}System Statistics:{Colors.RESET}")
cache_stats = assistant.enhanced.get_cache_statistics()
knowledge_stats = assistant.enhanced.get_knowledge_statistics()
agent_summary = assistant.enhanced.get_agent_summary()
print(f"\n{Colors.CYAN}Knowledge Base:{Colors.RESET}")
print(f" Total entries: {knowledge_stats['total_entries']}")
print(f" Categories: {knowledge_stats['total_categories']}")
print(f" Total accesses: {knowledge_stats['total_accesses']}")
print(f" Vocabulary size: {knowledge_stats['vocabulary_size']}")
print(f"\n{Colors.CYAN}Active Agents:{Colors.RESET}")
print(f" Count: {agent_summary['active_agents']}")
if "api_cache" in cache_stats:
print(f"\n{Colors.CYAN}Caching:{Colors.RESET}")
print(f" API cache entries: {cache_stats['api_cache']['valid_entries']}")
if "tool_cache" in cache_stats:
print(f" Tool cache entries: {cache_stats['tool_cache']['valid_entries']}")
def handle_background_command(assistant, command):
"""Handle background multiplexer commands."""
parts = command.strip().split(maxsplit=2)
if len(parts) < 2:
print(f"{Colors.RED}Usage: /bg <subcommand> [args]{Colors.RESET}")
print(
f"{Colors.GRAY}Available subcommands: start, list, status, output, input, kill, events{Colors.RESET}"
)
return
subcmd = parts[1].lower()
try:
if subcmd == "start" and len(parts) >= 3:
session_name = f"bg_{len(parts[2].split())}_{int(time.time())}"
start_background_session(assistant, session_name, parts[2])
elif subcmd == "list":
list_background_sessions(assistant)
elif subcmd == "status" and len(parts) >= 3:
show_session_status(assistant, parts[2])
elif subcmd == "output" and len(parts) >= 3:
show_session_output(assistant, parts[2])
elif subcmd == "input" and len(parts) >= 4:
send_session_input(assistant, parts[2], parts[3])
elif subcmd == "kill" and len(parts) >= 3:
kill_background_session(assistant, parts[2])
elif subcmd == "events":
show_background_events(assistant)
else:
print(f"{Colors.RED}Unknown background command: {subcmd}{Colors.RESET}")
print(
f"{Colors.GRAY}Available: start, list, status, output, input, kill, events{Colors.RESET}"
)
except Exception as e:
print(f"{Colors.RED}Error executing background command: {e}{Colors.RESET}")
def start_background_session(assistant, session_name, command):
"""Start a command in background."""
try:
from rp.multiplexer import start_background_process
result = start_background_process(session_name, command)
if result["status"] == "success":
print(
f"{Colors.GREEN}Started background session '{session_name}' with PID {result['pid']}{Colors.RESET}"
)
else:
print(
f"{Colors.RED}Failed to start background session: {result.get('error', 'Unknown error')}{Colors.RESET}"
)
except Exception as e:
print(f"{Colors.RED}Error starting background session: {e}{Colors.RESET}")
def list_background_sessions(assistant):
"""List all background sessions."""
try:
from rp.multiplexer import get_all_sessions
from rp.ui.display import display_multiplexer_status
sessions = get_all_sessions()
display_multiplexer_status(sessions)
except Exception as e:
print(f"{Colors.RED}Error listing background sessions: {e}{Colors.RESET}")
def show_session_status(assistant, session_name):
"""Show status of a specific session."""
try:
from rp.multiplexer import get_session_info
info = get_session_info(session_name)
if info:
print(f"{Colors.BOLD}Session '{session_name}':{Colors.RESET}")
print(f" Status: {info.get('status', 'unknown')}")
print(f" PID: {info.get('pid', 'N/A')}")
print(f" Command: {info.get('command', 'N/A')}")
if "start_time" in info:
import time
elapsed = time.time() - info["start_time"]
print(f" Running for: {elapsed:.1f}s")
else:
print(f"{Colors.YELLOW}Session '{session_name}' not found{Colors.RESET}")
except Exception as e:
print(f"{Colors.RED}Error getting session status: {e}{Colors.RESET}")
def show_session_output(assistant, session_name):
"""Show output of a specific session."""
try:
from rp.multiplexer import get_session_output
output = get_session_output(session_name, lines=50)
if output:
print(f"{Colors.BOLD}Recent output from '{session_name}':{Colors.RESET}")
print(f"{Colors.GRAY}{'' * 60}{Colors.RESET}")
for line in output:
print(line)
else:
print(f"{Colors.YELLOW}No output available for session '{session_name}'{Colors.RESET}")
except Exception as e:
print(f"{Colors.RED}Error getting session output: {e}{Colors.RESET}")
def send_session_input(assistant, session_name, input_text):
"""Send input to a background session."""
try:
from rp.multiplexer import send_input_to_session
result = send_input_to_session(session_name, input_text)
if result["status"] == "success":
print(f"{Colors.GREEN}Input sent to session '{session_name}'{Colors.RESET}")
else:
print(
f"{Colors.RED}Failed to send input: {result.get('error', 'Unknown error')}{Colors.RESET}"
)
except Exception as e:
print(f"{Colors.RED}Error sending input: {e}{Colors.RESET}")
def kill_background_session(assistant, session_name):
"""Kill a background session."""
try:
from rp.multiplexer import kill_session
result = kill_session(session_name)
if result["status"] == "success":
print(f"{Colors.GREEN}Session '{session_name}' terminated{Colors.RESET}")
else:
print(
f"{Colors.RED}Failed to kill session: {result.get('error', 'Unknown error')}{Colors.RESET}"
)
except Exception as e:
print(f"{Colors.RED}Error killing session: {e}{Colors.RESET}")
def show_background_events(assistant):
"""Show recent background events."""
try:
from rp.core.background_monitor import get_global_monitor
monitor = get_global_monitor()
events = monitor.get_pending_events()
if events:
print(f"{Colors.BOLD}Recent Background Events:{Colors.RESET}")
print(f"{Colors.GRAY}{'' * 60}{Colors.RESET}")
for event in events[-10:]:
from rp.ui.display import display_background_event
display_background_event(event)
else:
print(f"{Colors.GRAY}No recent background events{Colors.RESET}")
except Exception as e:
print(f"{Colors.RED}Error getting background events: {e}{Colors.RESET}")

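The /bg handler above leans on split(maxsplit=2) so that everything after the subcommand survives as one argument. A standalone sketch of that tokenization (not part of the file):

# How handle_background_command tokenizes its input: maxsplit=2 keeps
# the remainder of the line intact as parts[2].
for line in ["/bg start sleep 60", "/bg status build", "/bg list"]:
    parts = line.strip().split(maxsplit=2)
    subcmd = parts[1] if len(parts) > 1 else None
    rest = parts[2] if len(parts) > 2 else None
    print(subcmd, repr(rest))
# start 'sleep 60'
# status 'build'
# list None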
25
rp/commands/help_docs.py Normal file

File diff suppressed because one or more lines are too long

204
rp/commands/multiplexer_commands.py Normal file

@ -0,0 +1,204 @@
import time

from rp.multiplexer import get_multiplexer
from rp.tools.interactive_control import (
close_interactive_session,
get_session_status,
list_active_sessions,
read_session_output,
send_input_to_session,
)
from rp.tools.prompt_detection import get_global_detector
from rp.ui import Colors
def show_sessions(args=None):
"""Show all active multiplexer sessions."""
sessions = list_active_sessions()
if not sessions:
print(f"{Colors.YELLOW}No active sessions.{Colors.RESET}")
return
print(f"{Colors.BOLD}Active Sessions:{Colors.RESET}")
print("-" * 80)
for session_name, session_data in sessions.items():
metadata = session_data["metadata"]
output_summary = session_data["output_summary"]
status = get_session_status(session_name)
is_active = status.get("is_active", False) if status else False
status_color = Colors.GREEN if is_active else Colors.RED
print(
f"{Colors.CYAN}{session_name}{Colors.RESET}: {status_color}{metadata.get('process_type', 'unknown')}{Colors.RESET}"
)
if status and "pid" in status:
print(f" PID: {status['pid']}")
print(f" Age: {metadata.get('start_time', 0):.1f}s")
print(
f" Output: {output_summary['stdout_lines']} stdout, {output_summary['stderr_lines']} stderr lines"
)
print(f" Interactions: {metadata.get('interaction_count', 0)}")
print(f" State: {metadata.get('state', 'unknown')}")
print()
def attach_session(args):
"""Attach to a session (show its output and allow interaction)."""
if not args or len(args) < 1:
print(f"{Colors.RED}Usage: attach_session <session_name>{Colors.RESET}")
return
session_name = args[0]
status = get_session_status(session_name)
if not status:
print(f"{Colors.RED}Session '{session_name}' not found.{Colors.RESET}")
return
print(f"{Colors.BOLD}Attaching to session: {session_name}{Colors.RESET}")
print(f"Process type: {status.get('metadata', {}).get('process_type', 'unknown')}")
print("-" * 50)
try:
output = read_session_output(session_name, lines=20)
if output["stdout"]:
print(f"{Colors.GRAY}Recent stdout:{Colors.RESET}")
for line in output["stdout"].split("\n"):
if line.strip():
print(f" {line}")
if output["stderr"]:
print(f"{Colors.YELLOW}Recent stderr:{Colors.RESET}")
for line in output["stderr"].split("\n"):
if line.strip():
print(f" {line}")
except Exception as e:
print(f"{Colors.RED}Error reading output: {e}{Colors.RESET}")
print(
f"\n{Colors.CYAN}Session is {('active' if status.get('is_active') else 'inactive')}{Colors.RESET}"
)
def detach_session(args):
"""Detach from a session (stop showing its output but keep it running)."""
if not args or len(args) < 1:
print(f"{Colors.RED}Usage: detach_session <session_name>{Colors.RESET}")
return
session_name = args[0]
mux = get_multiplexer(session_name)
if not mux:
print(f"{Colors.RED}Session '{session_name}' not found.{Colors.RESET}")
return
mux.show_output = False
print(
f"{Colors.GREEN}Detached from session '{session_name}'. It continues running in background.{Colors.RESET}"
)
def kill_session(args):
"""Kill a session forcefully."""
if not args or len(args) < 1:
print(f"{Colors.RED}Usage: kill_session <session_name>{Colors.RESET}")
return
session_name = args[0]
try:
close_interactive_session(session_name)
print(f"{Colors.GREEN}Session '{session_name}' terminated.{Colors.RESET}")
except Exception as e:
print(f"{Colors.RED}Error terminating session '{session_name}': {e}{Colors.RESET}")
def send_command(args):
"""Send a command to a session."""
if not args or len(args) < 2:
print(f"{Colors.RED}Usage: send_command <session_name> <command>{Colors.RESET}")
return
session_name = args[0]
command = " ".join(args[1:])
try:
send_input_to_session(session_name, command)
print(f"{Colors.GREEN}Sent command to '{session_name}': {command}{Colors.RESET}")
except Exception as e:
print(f"{Colors.RED}Error sending command to '{session_name}': {e}{Colors.RESET}")
def show_session_log(args):
"""Show the full log/output of a session."""
if not args or len(args) < 1:
print(f"{Colors.RED}Usage: show_session_log <session_name>{Colors.RESET}")
return
session_name = args[0]
try:
output = read_session_output(session_name)
print(f"{Colors.BOLD}Full log for session: {session_name}{Colors.RESET}")
print("=" * 80)
if output["stdout"]:
print(f"{Colors.GRAY}STDOUT:{Colors.RESET}")
print(output["stdout"])
print()
if output["stderr"]:
print(f"{Colors.YELLOW}STDERR:{Colors.RESET}")
print(output["stderr"])
print()
except Exception as e:
print(f"{Colors.RED}Error reading log for '{session_name}': {e}{Colors.RESET}")
def show_session_status(args):
"""Show detailed status of a session."""
if not args or len(args) < 1:
print(f"{Colors.RED}Usage: show_session_status <session_name>{Colors.RESET}")
return
session_name = args[0]
status = get_session_status(session_name)
if not status:
print(f"{Colors.RED}Session '{session_name}' not found.{Colors.RESET}")
return
print(f"{Colors.BOLD}Status for session: {session_name}{Colors.RESET}")
print("-" * 50)
metadata = status.get("metadata", {})
print(f"Process type: {metadata.get('process_type', 'unknown')}")
print(f"Active: {status.get('is_active', False)}")
if "pid" in status:
print(f"PID: {status['pid']}")
print(f"Start time: {metadata.get('start_time', 0):.1f}")
print(f"Last activity: {metadata.get('last_activity', 0):.1f}")
print(f"Interaction count: {metadata.get('interaction_count', 0)}")
print(f"State: {metadata.get('state', 'unknown')}")
output_summary = status.get("output_summary", {})
print(
f"Output lines: {output_summary.get('stdout_lines', 0)} stdout, {output_summary.get('stderr_lines', 0)} stderr"
)
detector = get_global_detector()
session_info = detector.get_session_info(session_name)
if session_info:
print(f"Current state: {session_info['current_state']}")
print(f"Is waiting for input: {session_info['is_waiting']}")
def list_waiting_sessions(args=None):
"""List sessions that appear to be waiting for input."""
sessions = list_active_sessions()
detector = get_global_detector()
waiting_sessions = []
for session_name in sessions:
if detector.is_waiting_for_input(session_name):
waiting_sessions.append(session_name)
if not waiting_sessions:
print(f"{Colors.GREEN}No sessions are currently waiting for input.{Colors.RESET}")
return
print(f"{Colors.BOLD}Sessions waiting for input:{Colors.RESET}")
for session_name in waiting_sessions:
status = get_session_status(session_name)
if status:
process_type = status.get("metadata", {}).get("process_type", "unknown")
print(f" {Colors.CYAN}{session_name}{Colors.RESET} ({process_type})")
session_info = detector.get_session_info(session_name)
if session_info:
suggestions = detector.get_response_suggestions({}, process_type)
if suggestions:
print(f" Suggested inputs: {', '.join(suggestions[:3])}")
print()
MULTIPLEXER_COMMANDS = {
"show_sessions": show_sessions,
"attach_session": attach_session,
"detach_session": detach_session,
"kill_session": kill_session,
"send_command": send_command,
"show_session_log": show_session_log,
"show_session_status": show_session_status,
"list_waiting_sessions": list_waiting_sessions,
}

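MULTIPLEXER_COMMANDS is a plain name-to-handler table; each handler takes a single args list (or None). A minimal dispatch wrapper, hypothetical and not part of the file, could look like:

# Hypothetical REPL glue for the command table above.
def dispatch(line):
    tokens = line.split()
    handler = MULTIPLEXER_COMMANDS.get(tokens[0]) if tokens else None
    if handler is None:
        print(f"Unknown command: {line!r}")
        return
    handler(tokens[1:] or None)

dispatch("show_sessions")                 # args=None path
dispatch("send_command build make test")  # args=["build", "make", "test"]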
138
rp/config.py Normal file

@ -0,0 +1,138 @@
import os
DEFAULT_MODEL = "x-ai/grok-code-fast-1"
DEFAULT_API_URL = "https://static.molodetz.nl/rp.cgi/api/v1/chat/completions"
MODEL_LIST_URL = "https://static.molodetz.nl/rp.cgi/api/v1/models"
config_directory = os.path.expanduser("~/.local/share/rp")
os.makedirs(config_directory, exist_ok=True)
DB_PATH = os.path.join(config_directory, "assistant_db.sqlite")
LOG_FILE = os.path.join(config_directory, "assistant_error.log")
CONTEXT_FILE = ".rcontext.txt"
GLOBAL_CONTEXT_FILE = os.path.join(config_directory, "rcontext.txt")
KNOWLEDGE_PATH = os.path.join(config_directory, "knowledge")
HISTORY_FILE = os.path.join(config_directory, "assistant_history")
DEFAULT_TEMPERATURE = 0.1
DEFAULT_MAX_TOKENS = 4096
MAX_AUTONOMOUS_ITERATIONS = 50
CONTEXT_COMPRESSION_THRESHOLD = 15
RECENT_MESSAGES_TO_KEEP = 20
API_TOTAL_TOKEN_LIMIT = 256000
MAX_OUTPUT_TOKENS = 30000
SAFETY_BUFFER_TOKENS = 30000
MAX_TOKENS_LIMIT = API_TOTAL_TOKEN_LIMIT - MAX_OUTPUT_TOKENS - SAFETY_BUFFER_TOKENS
CHARS_PER_TOKEN = 2.0
EMERGENCY_MESSAGES_TO_KEEP = 3
CONTENT_TRIM_LENGTH = 30000
MAX_TOOL_RESULT_LENGTH = 30000
LANGUAGE_KEYWORDS = {
"python": [
"def",
"class",
"import",
"from",
"if",
"else",
"elif",
"for",
"while",
"return",
"try",
"except",
"finally",
"with",
"as",
"lambda",
"yield",
"None",
"True",
"False",
"and",
"or",
"not",
"in",
"is",
],
"javascript": [
"function",
"var",
"let",
"const",
"if",
"else",
"for",
"while",
"return",
"try",
"catch",
"finally",
"class",
"extends",
"new",
"this",
"null",
"undefined",
"true",
"false",
],
"java": [
"public",
"private",
"protected",
"class",
"interface",
"extends",
"implements",
"static",
"final",
"void",
"int",
"String",
"boolean",
"if",
"else",
"for",
"while",
"return",
"try",
"catch",
"finally",
],
}
CACHE_ENABLED = True
API_CACHE_TTL = 3600
TOOL_CACHE_TTL = 300
WORKFLOW_MAX_RETRIES = 3
WORKFLOW_DEFAULT_TIMEOUT = 300
WORKFLOW_EXECUTOR_MAX_WORKERS = 5
AGENT_DEFAULT_TEMPERATURE = 0.7
AGENT_MAX_WORKERS = 3
AGENT_SESSION_TIMEOUT = 7200
KNOWLEDGE_IMPORTANCE_THRESHOLD = 0.5
KNOWLEDGE_SEARCH_LIMIT = 5
MEMORY_AUTO_SUMMARIZE = True
CONVERSATION_SUMMARY_THRESHOLD = 20
ADVANCED_CONTEXT_ENABLED = True
CONTEXT_RELEVANCE_THRESHOLD = 0.3
ADAPTIVE_CONTEXT_MIN = 10
ADAPTIVE_CONTEXT_MAX = 50
BACKGROUND_MONITOR_ENABLED = True
BACKGROUND_MONITOR_INTERVAL = 5.0
AUTONOMOUS_INTERACTION_INTERVAL = 10.0
MULTIPLEXER_BUFFER_SIZE = 1000
MULTIPLEXER_OUTPUT_TIMEOUT = 30
MAX_CONCURRENT_SESSIONS = 10
PROCESS_TIMEOUTS = {
"default": 300,
"apt": 600,
"ssh": 60,
"vim": 3600,
"git": 300,
"npm": 600,
"pip": 300,
}
HIGH_OUTPUT_THRESHOLD = 50
INACTIVE_THRESHOLD = 300
SESSION_NOTIFY_INTERVAL = 60
ENABLE_AUTONOMOUS_SESSIONS = True
ENABLE_BACKGROUND_UPDATES = True
ENABLE_TIMEOUT_DETECTION = True

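Note that MAX_TOKENS_LIMIT above is derived rather than set directly: the API's total budget minus the reserved output tokens and the safety buffer. A quick check of the arithmetic, plus the presumed lookup pattern for PROCESS_TIMEOUTS (unlisted process types would fall back to "default"):

# 256000 - 30000 - 30000 = 196000 tokens left for input.
assert 256000 - 30000 - 30000 == 196000
PROCESS_TIMEOUTS = {"default": 300, "apt": 600, "ssh": 60, "vim": 3600, "git": 300, "npm": 600, "pip": 300}
print(PROCESS_TIMEOUTS.get("cargo", PROCESS_TIMEOUTS["default"]))  # 300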
5
rp/core/__init__.py Normal file

@ -0,0 +1,5 @@
from rp.core.api import call_api, list_models
from rp.core.assistant import Assistant
from rp.core.context import init_system_message, manage_context_window
__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window"]

137
rp/core/advanced_context.py Normal file

@ -0,0 +1,137 @@
import re
from typing import Any, Dict, List
class AdvancedContextManager:
def __init__(self, knowledge_store=None, conversation_memory=None):
self.knowledge_store = knowledge_store
self.conversation_memory = conversation_memory
def adaptive_context_window(self, messages: List[Dict[str, Any]], complexity: str) -> int:
"""Calculate adaptive context window size based on message complexity."""
base_window = 10
complexity_multipliers = {"simple": 1.0, "medium": 2.0, "complex": 3.5, "very_complex": 5.0}
multiplier = complexity_multipliers.get(complexity, 2.0)
return int(base_window * multiplier)
def _analyze_message_complexity(self, messages: List[Dict[str, Any]]) -> float:
"""Analyze the complexity of messages and return a score between 0.0 and 1.0."""
if not messages:
return 0.0
total_complexity = 0.0
for message in messages:
content = message.get("content", "")
if not content:
continue
word_count = len(content.split())
sentence_count = len(re.split("[.!?]+", content))
avg_word_length = sum((len(word) for word in content.split())) / max(word_count, 1)
length_score = min(1.0, word_count / 100)
structure_score = min(1.0, sentence_count / 10)
vocabulary_score = min(1.0, avg_word_length / 8)
message_complexity = (length_score + structure_score + vocabulary_score) / 3
total_complexity += message_complexity
return min(1.0, total_complexity / len(messages))
def extract_key_sentences(self, text: str, top_k: int = 5) -> List[str]:
if not text.strip():
return []
sentences = re.split("(?<=[.!?])\\s+", text)
if not sentences:
return []
scored_sentences = []
for i, sentence in enumerate(sentences):
length_score = min(1.0, len(sentence) / 50)
position_score = 1.0 if i == 0 else 0.8 if i < len(sentences) / 2 else 0.6
score = (length_score + position_score) / 2
scored_sentences.append((sentence, score))
scored_sentences.sort(key=lambda x: x[1], reverse=True)
return [s[0] for s in scored_sentences[:top_k]]
def advanced_summarize_messages(self, messages: List[Dict[str, Any]]) -> str:
all_content = " ".join([msg.get("content", "") for msg in messages])
key_sentences = self.extract_key_sentences(all_content, top_k=3)
summary = " ".join(key_sentences)
return summary if summary else "No content to summarize."
def score_message_relevance(self, message: Dict[str, Any], context: str) -> float:
content = message.get("content", "")
content_words = set(re.findall("\\b\\w+\\b", content.lower()))
context_words = set(re.findall("\\b\\w+\\b", context.lower()))
intersection = content_words & context_words
union = content_words | context_words
if not union:
return 0.0
return len(intersection) / len(union)
def create_enhanced_context(
self, messages: List[Dict[str, Any]], user_message: str, include_knowledge: bool = True
) -> tuple:
"""Create enhanced context with knowledge base and conversation memory integration."""
working_messages = messages.copy()
all_results = []
# Search knowledge base
if include_knowledge and self.knowledge_store:
knowledge_results = self.knowledge_store.search_entries(user_message, top_k=3)
for entry in knowledge_results:
score = entry.metadata.get("search_score", 0.5)
all_results.append(
{
"content": entry.content,
"score": score,
"source": f"Knowledge Base ({entry.category})",
"type": "knowledge",
}
)
# Search conversation memory
if self.conversation_memory:
from rp.core.knowledge_context import calculate_text_similarity
history_results = self.conversation_memory.search_conversations(user_message, limit=3)
for conv in history_results:
conv_messages = self.conversation_memory.get_conversation_messages(
conv["conversation_id"]
)
for msg in conv_messages[-5:]: # Last 5 messages from each conversation
if msg["role"] == "user" and msg["content"] != user_message:
relevance = calculate_text_similarity(user_message, msg["content"])
if relevance > 0.3:
all_results.append(
{
"content": msg["content"],
"score": relevance,
"source": f"Previous conversation: {conv['conversation_id'][:8]}",
"type": "conversation",
}
)
# Sort and limit results
all_results.sort(key=lambda x: x["score"], reverse=True)
top_results = all_results[:5]
if top_results:
knowledge_parts = []
for idx, result in enumerate(top_results, 1):
content = result["content"]
if len(content) > 1500:
content = content[:1500] + "..."
score_indicator = f"({result['score']:.2f})" if result["score"] < 1.0 else "(exact)"
knowledge_parts.append(
f"Match {idx} {score_indicator} - {result['source']}:\n{content}"
)
knowledge_message_content = (
"[KNOWLEDGE_BASE_CONTEXT]\nRelevant information from knowledge base and conversation history:\n\n"
+ "\n\n".join(knowledge_parts)
)
knowledge_message = {"role": "user", "content": knowledge_message_content}
working_messages.append(knowledge_message)
context_info = (
f"Added {len(top_results)} matches from knowledge and conversation history"
)
else:
context_info = "No relevant knowledge or conversation matches found"
return (working_messages, context_info)

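score_message_relevance above is Jaccard overlap on lowercase word sets. The same computation as a standalone function, checkable without the class:

import re

def jaccard(a, b):
    # Same tokenization as score_message_relevance.
    wa = set(re.findall(r"\b\w+\b", a.lower()))
    wb = set(re.findall(r"\b\w+\b", b.lower()))
    union = wa | wb
    return len(wa & wb) / len(union) if union else 0.0

print(jaccard("compress the context window", "context window compression"))  # 0.4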
91
rp/core/api.py Normal file

@ -0,0 +1,91 @@
import json
import logging
from rp.config import DEFAULT_MAX_TOKENS, DEFAULT_TEMPERATURE
from rp.core.context import auto_slim_messages
from rp.core.http_client import http_client
logger = logging.getLogger("rp")
async def call_api(messages, model, api_url, api_key, use_tools, tools_definition, verbose=False):
try:
messages = auto_slim_messages(messages, verbose=verbose)
logger.debug(f"=== API CALL START ===")
logger.debug(f"Model: {model}")
logger.debug(f"API URL: {api_url}")
logger.debug(f"Use tools: {use_tools}")
logger.debug(f"Message count: {len(messages)}")
headers = {"Content-Type": "application/json"}
if api_key:
headers["Authorization"] = f"Bearer {api_key}"
data = {
"model": model,
"messages": messages,
"temperature": DEFAULT_TEMPERATURE,
"max_tokens": DEFAULT_MAX_TOKENS,
}
if "gpt-5" in model:
del data["temperature"]
del data["max_tokens"]
logger.debug("GPT-5 detected: removed temperature and max_tokens")
if use_tools:
data["tools"] = tools_definition
data["tool_choice"] = "auto"
logger.debug(f"Tool calling enabled with {len(tools_definition)} tools")
        payload_size = len(json.dumps(data))
        logger.debug(f"Request payload size: {payload_size} bytes")
        logger.debug("Sending HTTP request...")
        response = await http_client.post(api_url, headers=headers, json_data=data)
if response.get("error"):
if "status" in response:
logger.error(f"API HTTP Error: {response['status']} - {response.get('text', '')}")
logger.debug("=== API CALL FAILED ===")
return {
"error": f"API Error: {response['status']}",
"message": response.get("text", ""),
}
else:
logger.error(f"API call failed: {response.get('exception', 'Unknown error')}")
logger.debug("=== API CALL FAILED ===")
return {"error": response.get("exception", "Unknown error")}
response_data = response["text"]
logger.debug(f"Response received: {len(response_data)} bytes")
result = json.loads(response_data)
if "usage" in result:
logger.debug(f"Token usage: {result['usage']}")
if "choices" in result and result["choices"]:
choice = result["choices"][0]
if "message" in choice:
msg = choice["message"]
logger.debug(f"Response role: {msg.get('role', 'N/A')}")
if "content" in msg and msg["content"]:
logger.debug(f"Response content length: {len(msg['content'])} chars")
if "tool_calls" in msg:
logger.debug(f"Response contains {len(msg['tool_calls'])} tool call(s)")
if verbose and "usage" in result:
from rp.core.usage_tracker import UsageTracker
usage = result["usage"]
input_t = usage.get("prompt_tokens", 0)
output_t = usage.get("completion_tokens", 0)
            cost = UsageTracker._calculate_cost(model, input_t, output_t)
            logger.debug(f"Estimated cost: ${cost:.4f}")
logger.debug("=== API CALL END ===")
return result
except Exception as e:
logger.error(f"API call failed: {e}")
logger.debug("=== API CALL FAILED ===")
return {"error": str(e)}
async def list_models(model_list_url, api_key):
try:
headers = {}
if api_key:
headers["Authorization"] = f"Bearer {api_key}"
response = await http_client.get(model_list_url, headers=headers)
if response.get("error"):
return {"error": response.get("text", "HTTP error")}
data = json.loads(response["text"])
return data.get("data", [])
except Exception as e:
return {"error": str(e)}

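A minimal driver for call_api, with a placeholder endpoint and empty key (illustrative values only); it prints either the error or the first choice's content:

import asyncio

from rp.core.api import call_api

async def demo():
    messages = [{"role": "user", "content": "Say hi in one word."}]
    result = await call_api(
        messages,
        "x-ai/grok-code-fast-1",                            # model
        "https://example.invalid/api/v1/chat/completions",  # placeholder URL
        "",                                                 # no API key
        False,                                              # use_tools
        [],                                                 # tools_definition
    )
    print(result.get("error") or result["choices"][0]["message"]["content"])

asyncio.run(demo())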
501
rp/core/assistant.py Normal file

@ -0,0 +1,501 @@
import glob as glob_module
import json
import logging
import os
import readline
import signal
import sqlite3
import sys
import traceback
from concurrent.futures import ThreadPoolExecutor
from rp.commands import handle_command
from rp.config import (
DB_PATH,
DEFAULT_API_URL,
DEFAULT_MODEL,
HISTORY_FILE,
LOG_FILE,
MODEL_LIST_URL,
)
from rp.core.api import call_api
from rp.core.autonomous_interactions import start_global_autonomous, stop_global_autonomous
from rp.core.background_monitor import get_global_monitor, start_global_monitor, stop_global_monitor
from rp.core.context import init_system_message, truncate_tool_result
from rp.core.usage_tracker import UsageTracker
from rp.tools import get_tools_definition
from rp.tools.agents import (
collaborate_agents,
create_agent,
execute_agent_task,
list_agents,
remove_agent,
)
from rp.tools.command import kill_process, run_command, tail_process
from rp.tools.database import db_get, db_query, db_set
from rp.tools.filesystem import (
chdir,
clear_edit_tracker,
display_edit_summary,
display_edit_timeline,
getpwd,
index_source_directory,
list_directory,
mkdir,
read_file,
search_replace,
write_file,
)
from rp.tools.interactive_control import (
close_interactive_session,
list_active_sessions,
read_session_output,
send_input_to_session,
start_interactive_session,
)
from rp.tools.memory import (
add_knowledge_entry,
delete_knowledge_entry,
get_knowledge_by_category,
get_knowledge_entry,
get_knowledge_statistics,
search_knowledge,
update_knowledge_importance,
)
from rp.tools.patch import apply_patch, create_diff, display_file_diff
from rp.tools.python_exec import python_exec
from rp.tools.web import http_fetch, web_search, web_search_news
from rp.ui import Colors, Spinner, render_markdown
logger = logging.getLogger("rp")
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.addHandler(file_handler)
class Assistant:
def __init__(self, args):
self.args = args
self.messages = []
self.verbose = args.verbose
self.debug = getattr(args, "debug", False)
self.syntax_highlighting = not args.no_syntax
if self.debug:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(console_handler)
logger.debug("Debug mode enabled")
self.api_key = os.environ.get("OPENROUTER_API_KEY", "")
self.model = args.model or os.environ.get("AI_MODEL", DEFAULT_MODEL)
self.api_url = args.api_url or os.environ.get("API_URL", DEFAULT_API_URL)
self.model_list_url = args.model_list_url or os.environ.get(
"MODEL_LIST_URL", MODEL_LIST_URL
)
self.use_tools = os.environ.get("USE_TOOLS", "1") == "1"
self.interrupt_count = 0
self.python_globals = {}
self.db_conn = None
self.autonomous_mode = False
self.autonomous_iterations = 0
self.background_monitoring = False
self.usage_tracker = UsageTracker()
self.background_tasks = set()
self.init_database()
self.messages.append(init_system_message(args))
try:
from rp.core.enhanced_assistant import EnhancedAssistant
self.enhanced = EnhancedAssistant(self)
if self.debug:
logger.debug("Enhanced assistant features initialized")
except Exception as e:
logger.warning(f"Could not initialize enhanced features: {e}")
self.enhanced = None
try:
start_global_monitor()
start_global_autonomous(llm_callback=self._handle_background_updates)
self.background_monitoring = True
if self.debug:
logger.debug("Background monitoring initialized")
except Exception as e:
logger.warning(f"Could not initialize background monitoring: {e}")
self.background_monitoring = False
def init_database(self):
try:
logger.debug(f"Initializing database at {DB_PATH}")
self.db_conn = sqlite3.connect(DB_PATH, check_same_thread=False)
cursor = self.db_conn.cursor()
cursor.execute(
"CREATE TABLE IF NOT EXISTS kv_store\n (key TEXT PRIMARY KEY, value TEXT, timestamp REAL)"
)
cursor.execute(
"CREATE TABLE IF NOT EXISTS file_versions\n (id INTEGER PRIMARY KEY AUTOINCREMENT,\n filepath TEXT, content TEXT, hash TEXT,\n timestamp REAL, version INTEGER)"
)
self.db_conn.commit()
logger.debug("Database initialized successfully")
except Exception as e:
logger.error(f"Database initialization error: {e}")
self.db_conn = None
def _handle_background_updates(self, updates):
"""Handle background session updates by injecting them into the conversation."""
if not updates or not updates.get("sessions"):
return
update_message = self._format_background_update_message(updates)
        if self.messages:
self.messages.append(
{"role": "system", "content": f"Background session updates: {update_message}"}
)
if self.verbose:
print(f"{Colors.CYAN}Background update: {update_message}{Colors.RESET}")
def _format_background_update_message(self, updates):
"""Format background updates for LLM consumption."""
session_summaries = []
for session_name, session_info in updates.get("sessions", {}).items():
summary = session_info.get("summary", f"Session {session_name}")
session_summaries.append(f"{session_name}: {summary}")
if session_summaries:
return "Active background sessions: " + "; ".join(session_summaries)
else:
return "No active background sessions requiring attention."
def _check_background_updates(self):
"""Check for pending background updates and display them."""
if not self.background_monitoring:
return
try:
monitor = get_global_monitor()
events = monitor.get_pending_events()
if events:
print(f"\n{Colors.CYAN}Background Events:{Colors.RESET}")
for event in events:
event_type = event.get("type", "unknown")
session_name = event.get("session_name", "unknown")
if event_type == "session_started":
print(f" {Colors.GREEN}{Colors.RESET} Session '{session_name}' started")
elif event_type == "session_ended":
print(f" {Colors.YELLOW}{Colors.RESET} Session '{session_name}' ended")
elif event_type == "output_received":
lines = len(event.get("new_output", {}).get("stdout", []))
print(
f" {Colors.BLUE}📝{Colors.RESET} Session '{session_name}' produced {lines} lines of output"
)
elif event_type == "possible_input_needed":
print(
f" {Colors.RED}{Colors.RESET} Session '{session_name}' may need input"
)
elif event_type == "high_output_volume":
total = event.get("total_lines", 0)
print(
f" {Colors.YELLOW}📊{Colors.RESET} Session '{session_name}' has high output volume ({total} lines)"
)
elif event_type == "inactive_session":
inactive_time = event.get("inactive_seconds", 0)
print(
f" {Colors.GRAY}{Colors.RESET} Session '{session_name}' inactive for {inactive_time:.0f}s"
)
print()
except Exception as e:
if self.debug:
print(f"{Colors.RED}Error checking background updates: {e}{Colors.RESET}")
async def execute_tool_calls(self, tool_calls):
results = []
logger.debug(f"Executing {len(tool_calls)} tool call(s)")
with ThreadPoolExecutor(max_workers=5) as executor:
futures = []
for tool_call in tool_calls:
func_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
logger.debug(f"Tool call: {func_name} with arguments: {arguments}")
func_map = {
"http_fetch": lambda **kw: http_fetch(**kw),
"run_command": lambda **kw: run_command(**kw),
"tail_process": lambda **kw: tail_process(**kw),
"kill_process": lambda **kw: kill_process(**kw),
"start_interactive_session": lambda **kw: start_interactive_session(**kw),
"send_input_to_session": lambda **kw: send_input_to_session(**kw),
"read_session_output": lambda **kw: read_session_output(**kw),
"close_interactive_session": lambda **kw: close_interactive_session(**kw),
"read_file": lambda **kw: read_file(**kw, db_conn=self.db_conn),
"write_file": lambda **kw: write_file(**kw, db_conn=self.db_conn),
"list_directory": lambda **kw: list_directory(**kw),
"mkdir": lambda **kw: mkdir(**kw),
"chdir": lambda **kw: chdir(**kw),
"getpwd": lambda **kw: getpwd(**kw),
"db_set": lambda **kw: db_set(**kw, db_conn=self.db_conn),
"db_get": lambda **kw: db_get(**kw, db_conn=self.db_conn),
"db_query": lambda **kw: db_query(**kw, db_conn=self.db_conn),
"web_search": lambda **kw: web_search(**kw),
"web_search_news": lambda **kw: web_search_news(**kw),
"python_exec": lambda **kw: python_exec(
**kw, python_globals=self.python_globals
),
"index_source_directory": lambda **kw: index_source_directory(**kw),
"search_replace": lambda **kw: search_replace(**kw, db_conn=self.db_conn),
"create_diff": lambda **kw: create_diff(**kw),
"apply_patch": lambda **kw: apply_patch(**kw, db_conn=self.db_conn),
"display_file_diff": lambda **kw: display_file_diff(**kw),
"display_edit_summary": lambda **kw: display_edit_summary(),
"display_edit_timeline": lambda **kw: display_edit_timeline(**kw),
"clear_edit_tracker": lambda **kw: clear_edit_tracker(),
"start_interactive_session": lambda **kw: start_interactive_session(**kw),
"send_input_to_session": lambda **kw: send_input_to_session(**kw),
"read_session_output": lambda **kw: read_session_output(**kw),
"list_active_sessions": lambda **kw: list_active_sessions(**kw),
"close_interactive_session": lambda **kw: close_interactive_session(**kw),
"create_agent": lambda **kw: create_agent(**kw),
"list_agents": lambda **kw: list_agents(**kw),
"execute_agent_task": lambda **kw: execute_agent_task(**kw),
"remove_agent": lambda **kw: remove_agent(**kw),
"collaborate_agents": lambda **kw: collaborate_agents(**kw),
"add_knowledge_entry": lambda **kw: add_knowledge_entry(**kw),
"get_knowledge_entry": lambda **kw: get_knowledge_entry(**kw),
"search_knowledge": lambda **kw: search_knowledge(**kw),
"get_knowledge_by_category": lambda **kw: get_knowledge_by_category(**kw),
"update_knowledge_importance": lambda **kw: update_knowledge_importance(**kw),
"delete_knowledge_entry": lambda **kw: delete_knowledge_entry(**kw),
"get_knowledge_statistics": lambda **kw: get_knowledge_statistics(**kw),
}
if func_name in func_map:
future = executor.submit(func_map[func_name], **arguments)
futures.append((tool_call["id"], future))
for tool_id, future in futures:
try:
result = future.result(timeout=30)
result = truncate_tool_result(result)
logger.debug(f"Tool result for {tool_id}: {str(result)[:200]}...")
results.append(
{"tool_call_id": tool_id, "role": "tool", "content": json.dumps(result)}
)
except Exception as e:
logger.debug(f"Tool error for {tool_id}: {str(e)}")
error_msg = str(e)[:200] if len(str(e)) > 200 else str(e)
results.append(
{
"tool_call_id": tool_id,
"role": "tool",
"content": json.dumps({"status": "error", "error": error_msg}),
}
)
return results
async def process_response(self, response):
if "error" in response:
return f"Error: {response['error']}"
if "choices" not in response or not response["choices"]:
return "No response from API"
message = response["choices"][0]["message"]
self.messages.append(message)
if "tool_calls" in message and message["tool_calls"]:
tool_count = len(message["tool_calls"])
print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}")
tool_results = await self.execute_tool_calls(message["tool_calls"])
print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}")
for result in tool_results:
self.messages.append(result)
follow_up = await call_api(
self.messages,
self.model,
self.api_url,
self.api_key,
self.use_tools,
get_tools_definition(),
verbose=self.verbose,
)
return await self.process_response(follow_up)
content = message.get("content", "")
return render_markdown(content, self.syntax_highlighting)
def signal_handler(self, signum, frame):
if self.autonomous_mode:
self.interrupt_count += 1
if self.interrupt_count >= 2:
print(f"\n{Colors.RED}Force exiting autonomous mode...{Colors.RESET}")
self.autonomous_mode = False
sys.exit(0)
else:
print(f"\n{Colors.YELLOW}Press Ctrl+C again to force exit{Colors.RESET}")
return
self.interrupt_count += 1
if self.interrupt_count >= 2:
print(f"\n{Colors.RED}Exiting...{Colors.RESET}")
self.cleanup()
sys.exit(0)
else:
print(f"\n{Colors.YELLOW}Press Ctrl+C again to exit{Colors.RESET}")
def setup_readline(self):
try:
readline.read_history_file(HISTORY_FILE)
except FileNotFoundError:
pass
readline.set_history_length(1000)
import atexit
atexit.register(readline.write_history_file, HISTORY_FILE)
commands = [
"exit",
"quit",
"help",
"reset",
"dump",
"verbose",
"models",
"tools",
"review",
"refactor",
"obfuscate",
"/auto",
"/edit",
]
def completer(text, state):
options = [cmd for cmd in commands if cmd.startswith(text)]
glob_pattern = os.path.expanduser(text) + "*"
path_options = glob_module.glob(glob_pattern)
path_options = [p + os.sep if os.path.isdir(p) else p for p in path_options]
combined_options = sorted(list(set(options + path_options)))
if state < len(combined_options):
return combined_options[state]
return None
delims = readline.get_completer_delims()
readline.set_completer_delims(delims.replace("/", ""))
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
async def run_repl(self):
self.setup_readline()
signal.signal(signal.SIGINT, self.signal_handler)
print(
f"{Colors.BOLD}{Colors.CYAN}╔══════════════════════════════════════════════╗{Colors.RESET}"
)
print(
f"{Colors.BOLD}{Colors.CYAN}{Colors.RESET}{Colors.BOLD} RP Assistant v{__import__('rp').__version__} {Colors.RESET}{Colors.BOLD}{Colors.CYAN}{Colors.RESET}"
)
print(
f"{Colors.BOLD}{Colors.CYAN}╚══════════════════════════════════════════════╝{Colors.RESET}"
)
print(
f"{Colors.GRAY}Type 'help' for commands, 'exit' to quit, or start chatting.{Colors.RESET}"
)
print(f"{Colors.GRAY}AI calls will show costs and progress indicators.{Colors.RESET}\n")
while True:
try:
if self.background_monitoring:
self._check_background_updates()
prompt = f"{Colors.BLUE}You"
if self.background_monitoring:
try:
from rp.multiplexer import get_all_sessions
sessions = get_all_sessions()
active_count = sum(
(1 for s in sessions.values() if s.get("status") == "running")
)
if active_count > 0:
prompt += f"[{active_count}bg]"
                except Exception:
pass
prompt += f">{Colors.RESET} "
user_input = input(prompt).strip()
if not user_input:
continue
cmd_result = handle_command(self, user_input)
if cmd_result is False:
break
elif cmd_result is True:
continue
# Use enhanced processing if available, otherwise fall back to basic processing
if hasattr(self, "enhanced") and self.enhanced:
result = await self.enhanced.process_with_enhanced_context(user_input)
print(result)
else:
await process_message(self, user_input)
except EOFError:
break
except KeyboardInterrupt:
self.signal_handler(None, None)
except Exception as e:
print(f"{Colors.RED}Error: {e}{Colors.RESET}")
logging.error(f"REPL error: {e}\n{traceback.format_exc()}")
async def run_single(self):
if self.args.message:
message = self.args.message
else:
message = sys.stdin.read()
from rp.autonomous.mode import run_autonomous_mode
await run_autonomous_mode(self, message)
def cleanup(self):
if hasattr(self, "enhanced") and self.enhanced:
try:
self.enhanced.cleanup()
except Exception as e:
logger.error(f"Error cleaning up enhanced features: {e}")
if self.background_monitoring:
try:
stop_global_autonomous()
stop_global_monitor()
except Exception as e:
logger.error(f"Error stopping background monitoring: {e}")
try:
from rp.multiplexer import cleanup_all_multiplexers
cleanup_all_multiplexers()
except Exception as e:
logger.error(f"Error cleaning up multiplexers: {e}")
if self.db_conn:
self.db_conn.close()
async def run(self):
try:
            logger.debug(
                f"run(): interactive={self.args.interactive}, message={self.args.message}, isatty={sys.stdin.isatty()}"
            )
            if self.args.interactive or (not self.args.message and sys.stdin.isatty()):
                logger.debug("Entering REPL mode")
                await self.run_repl()
            else:
                logger.debug("Entering single-message mode")
                await self.run_single()
finally:
self.cleanup()
async def process_message(assistant, message):
from rp.core.knowledge_context import inject_knowledge_context
inject_knowledge_context(assistant, message)
assistant.messages.append({"role": "user", "content": message})
logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}")
spinner = Spinner("Querying AI...")
await spinner.start()
response = await call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
await spinner.stop()
if "usage" in response:
usage = response["usage"]
input_tokens = usage.get("prompt_tokens", 0)
output_tokens = usage.get("completion_tokens", 0)
assistant.usage_tracker.track_request(assistant.model, input_tokens, output_tokens)
cost = UsageTracker._calculate_cost(assistant.model, input_tokens, output_tokens)
total_cost = assistant.usage_tracker.session_usage["estimated_cost"]
print(f"{Colors.YELLOW}💰 Cost: ${cost:.4f} | Total: ${total_cost:.4f}{Colors.RESET}")
result = await assistant.process_response(response)
print(f"\n{Colors.GREEN}r:{Colors.RESET} {result}\n")

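For reference, the message shape execute_tool_calls appends for each completed tool call (values here are illustrative):

import json

# One entry per tool call, echoing the id supplied by the model.
tool_result = {
    "tool_call_id": "call_123",
    "role": "tool",
    "content": json.dumps({"status": "success", "output": "hello"}),
}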
157
rp/core/autonomous_interactions.py Normal file

@ -0,0 +1,157 @@
import threading
import time
from rp.tools.interactive_control import (
get_session_status,
list_active_sessions,
read_session_output,
)
class AutonomousInteractions:
def __init__(self, interaction_interval=10.0):
self.interaction_interval = interaction_interval
self.active = False
self.interaction_thread = None
self.llm_callback = None
self.last_check_time = 0
def start(self, llm_callback=None):
"""Start the autonomous interaction loop."""
self.llm_callback = llm_callback
if self.interaction_thread is None:
self.active = True
self.interaction_thread = threading.Thread(target=self._interaction_loop, daemon=True)
self.interaction_thread.start()
def stop(self):
"""Stop the autonomous interaction loop."""
self.active = False
if self.interaction_thread:
self.interaction_thread.join(timeout=2)
def _interaction_loop(self):
"""Main loop for autonomous interactions with background processes."""
while self.active:
try:
current_time = time.time()
if current_time - self.last_check_time >= self.interaction_interval:
self._check_sessions_and_notify()
self.last_check_time = current_time
time.sleep(1)
except Exception as e:
print(f"Error in autonomous interaction loop: {e}")
time.sleep(self.interaction_interval)
def _check_sessions_and_notify(self):
"""Check active sessions and determine if LLM notification is needed."""
try:
sessions = list_active_sessions()
if not sessions:
return
sessions_needing_attention = self._identify_sessions_needing_attention(sessions)
if sessions_needing_attention and self.llm_callback:
updates = self._format_session_updates(sessions_needing_attention)
self.llm_callback(updates)
except Exception as e:
print(f"Error checking sessions: {e}")
def _identify_sessions_needing_attention(self, sessions):
"""Identify which sessions need LLM attention based on various criteria."""
needing_attention = []
for session_name, session_data in sessions.items():
metadata = session_data["metadata"]
output_summary = session_data["output_summary"]
time_since_activity = time.time() - metadata.get("last_activity", 0)
if time_since_activity < 30:
needing_attention.append(session_name)
continue
total_lines = output_summary["stdout_lines"] + output_summary["stderr_lines"]
if total_lines > 50:
needing_attention.append(session_name)
continue
session_age = time.time() - metadata.get("start_time", 0)
if session_age > 300 and time_since_activity > 60:
needing_attention.append(session_name)
continue
if self._session_looks_stuck(session_name, session_data):
needing_attention.append(session_name)
continue
return needing_attention
def _session_looks_stuck(self, session_name, session_data):
"""Determine if a session appears to be stuck waiting for input."""
metadata = session_data["metadata"]
status = get_session_status(session_name)
if not status or not status.get("is_active", False):
return False
time_since_activity = time.time() - metadata.get("last_activity", 0)
interaction_count = metadata.get("interaction_count", 0)
session_age = time.time() - metadata.get("start_time", 0)
if session_age > 60 and interaction_count == 0 and (time_since_activity > 30):
return True
if interaction_count > 0 and time_since_activity > 120:
return True
return False
def _format_session_updates(self, session_names):
"""Format session information for LLM consumption."""
updates = {"type": "background_session_updates", "timestamp": time.time(), "sessions": {}}
for session_name in session_names:
status = get_session_status(session_name)
if status:
try:
recent_output = read_session_output(session_name, lines=20)
                except Exception:
recent_output = {"stdout": "", "stderr": ""}
updates["sessions"][session_name] = {
"status": status,
"recent_output": recent_output,
"summary": self._create_session_summary(status, recent_output),
}
return updates
def _create_session_summary(self, status, recent_output):
"""Create a human-readable summary of session status."""
summary_parts = []
process_type = status.get("metadata", {}).get("process_type", "unknown")
summary_parts.append(f"Type: {process_type}")
is_active = status.get("is_active", False)
summary_parts.append(f"Status: {('Active' if is_active else 'Inactive')}")
if is_active and "pid" in status:
summary_parts.append(f"PID: {status['pid']}")
age = time.time() - status.get("metadata", {}).get("start_time", 0)
summary_parts.append(f"Age: {age:.1f}s")
output_lines = len(recent_output.get("stdout", "").split("\n")) + len(
recent_output.get("stderr", "").split("\n")
)
summary_parts.append(f"Recent output: {output_lines} lines")
interaction_count = status.get("metadata", {}).get("interaction_count", 0)
summary_parts.append(f"Interactions: {interaction_count}")
return " | ".join(summary_parts)
_global_autonomous = None
def get_global_autonomous():
"""Get the global autonomous interactions instance."""
global _global_autonomous
return _global_autonomous
def start_global_autonomous(llm_callback=None):
"""Start global autonomous interactions."""
global _global_autonomous
if _global_autonomous is None:
_global_autonomous = AutonomousInteractions()
_global_autonomous.start(llm_callback)
return _global_autonomous
def stop_global_autonomous():
"""Stop global autonomous interactions."""
global _global_autonomous
if _global_autonomous:
_global_autonomous.stop()
_global_autonomous = None

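A minimal wiring sketch for the loop above: start it with a callback that just prints per-session summaries (the assistant's real callback injects them into the conversation instead):

import time

from rp.core.autonomous_interactions import start_global_autonomous, stop_global_autonomous

def on_updates(updates):
    for name, info in updates.get("sessions", {}).items():
        print(name, "->", info["summary"])

start_global_autonomous(llm_callback=on_updates)
time.sleep(30)  # let a few 10-second check intervals elapse
stop_global_autonomous()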
178
rp/core/background_monitor.py Normal file

@ -0,0 +1,178 @@
import queue
import threading
import time
from rp.multiplexer import get_all_multiplexer_states, get_multiplexer
class BackgroundMonitor:
def __init__(self, check_interval=5.0):
self.check_interval = check_interval
self.active = False
self.monitor_thread = None
self.event_queue = queue.Queue()
self.last_states = {}
self.event_callbacks = []
def start(self):
"""Start the background monitoring thread."""
if self.monitor_thread is None:
self.active = True
self.monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True)
self.monitor_thread.start()
def stop(self):
"""Stop the background monitoring thread."""
self.active = False
if self.monitor_thread:
self.monitor_thread.join(timeout=2)
def get_pending_events(self):
"""Get all pending events from the queue."""
events = []
while not self.event_queue.empty():
try:
events.append(self.event_queue.get_nowait())
except queue.Empty:
break
return events
def _monitor_loop(self):
"""Main monitoring loop that checks for multiplexer activity."""
while self.active:
try:
current_states = get_all_multiplexer_states()
events = self._detect_events(self.last_states, current_states)
for event in events:
self.event_queue.put(event)
for callback in self.event_callbacks:
try:
callback(event)
except Exception as e:
print(f"Error in event callback: {e}")
self.last_states = current_states.copy()
time.sleep(self.check_interval)
except Exception as e:
print(f"Error in background monitor loop: {e}")
time.sleep(self.check_interval)
def _detect_events(self, old_states, new_states):
"""Detect significant events in multiplexer states."""
events = []
for session_name in new_states:
if session_name not in old_states:
events.append(
{
"type": "session_started",
"session_name": session_name,
"metadata": new_states[session_name]["metadata"],
}
)
for session_name in old_states:
if session_name not in new_states:
events.append({"type": "session_ended", "session_name": session_name})
for session_name, new_state in new_states.items():
if session_name in old_states:
old_state = old_states[session_name]
old_stdout_lines = old_state["output_summary"]["stdout_lines"]
new_stdout_lines = new_state["output_summary"]["stdout_lines"]
old_stderr_lines = old_state["output_summary"]["stderr_lines"]
new_stderr_lines = new_state["output_summary"]["stderr_lines"]
if new_stdout_lines > old_stdout_lines or new_stderr_lines > old_stderr_lines:
mux = get_multiplexer(session_name)
if mux:
all_output = mux.get_all_output()
new_output = {
"stdout": all_output["stdout"].split("\n")[old_stdout_lines:],
"stderr": all_output["stderr"].split("\n")[old_stderr_lines:],
}
events.append(
{
"type": "output_received",
"session_name": session_name,
"new_output": new_output,
"total_lines": {
"stdout": new_stdout_lines,
"stderr": new_stderr_lines,
},
}
)
old_metadata = old_state["metadata"]
new_metadata = new_state["metadata"]
if old_metadata.get("state") != new_metadata.get("state"):
events.append(
{
"type": "state_changed",
"session_name": session_name,
"old_state": old_metadata.get("state"),
"new_state": new_metadata.get("state"),
}
)
if (
old_metadata.get("process_type") == "unknown"
and new_metadata.get("process_type") != "unknown"
):
events.append(
{
"type": "process_identified",
"session_name": session_name,
"process_type": new_metadata.get("process_type"),
}
)
for session_name, state in new_states.items():
metadata = state["metadata"]
output_summary = state["output_summary"]
total_lines = output_summary["stdout_lines"] + output_summary["stderr_lines"]
if total_lines > 100:
events.append(
{
"type": "high_output_volume",
"session_name": session_name,
"total_lines": total_lines,
}
)
time_since_activity = time.time() - metadata.get("last_activity", 0)
if time_since_activity > 300:
events.append(
{
"type": "inactive_session",
"session_name": session_name,
"inactive_seconds": time_since_activity,
}
)
if self._might_be_waiting_for_input(session_name, state):
events.append({"type": "possible_input_needed", "session_name": session_name})
return events
    def _might_be_waiting_for_input(self, session_name, state):
        """Heuristic to detect if a session might be waiting for input."""
        metadata = state["metadata"]
        time_since_activity = time.time() - metadata.get("last_activity", 0)
        return time_since_activity > 10
_global_monitor = None
def get_global_monitor():
"""Get the global background monitor instance."""
global _global_monitor
if _global_monitor is None:
_global_monitor = BackgroundMonitor()
return _global_monitor
def start_global_monitor():
"""Start the global background monitor."""
monitor = get_global_monitor()
monitor.start()
def stop_global_monitor():
"""Stop the global background monitor."""
global _global_monitor
if _global_monitor:
_global_monitor.stop()

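Consumers poll the monitor's queue rather than receiving pushes. A polling sketch using the event keys produced by _detect_events:

from rp.core.background_monitor import get_global_monitor, start_global_monitor, stop_global_monitor

start_global_monitor()
monitor = get_global_monitor()
for event in monitor.get_pending_events():
    # Every event carries "type"; most also carry "session_name".
    print(event["type"], event.get("session_name"))
stop_global_monitor()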
55
rp/core/config_loader.py Normal file

@ -0,0 +1,55 @@
import configparser
import os
from typing import Any, Dict
from rp.core.logging import get_logger
logger = get_logger("config")
CONFIG_DIRECTORY = os.path.expanduser("~/.local/share/rp/")
CONFIG_FILE = os.path.join(CONFIG_DIRECTORY, ".prrc")
LOCAL_CONFIG_FILE = ".prrc"
def _load_config_file(filepath: str) -> Dict[str, Dict[str, Any]]:
if not os.path.exists(filepath):
return {}
try:
parser = configparser.ConfigParser()
parser.read(filepath)
config = {}
for section in parser.sections():
config[section] = {}
for key, value in parser.items(section):
config[section][key] = _parse_value(value)
logger.debug(f"Loaded configuration from {filepath}")
return config
except Exception as e:
logger.error(f"Error loading config from {filepath}: {e}")
return {}
def _parse_value(value: str) -> Any:
value = value.strip()
if value.lower() == "true":
return True
if value.lower() == "false":
return False
if value.isdigit():
return int(value)
try:
return float(value)
except ValueError:
pass
return value
def create_default_config(filepath: str = CONFIG_FILE):
os.makedirs(CONFIG_DIRECTORY, exist_ok=True)
default_config = "[api]\ndefault_model = x-ai/grok-code-fast-1\ntimeout = 30\ntemperature = 0.7\nmax_tokens = 8096\n\n[autonomous]\nmax_iterations = 50\ncontext_threshold = 30\nrecent_messages_to_keep = 10\n\n[ui]\nsyntax_highlighting = true\nshow_timestamps = false\ncolor_output = true\n\n[output]\nformat = text\nverbose = false\nquiet = false\n\n[session]\nauto_save = false\nmax_history = 1000\n"
try:
with open(filepath, "w") as f:
f.write(default_config)
logger.info(f"Created default configuration at {filepath}")
return True
except Exception as e:
logger.error(f"Error creating config file: {e}")
return False

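_parse_value coerces raw INI strings: booleans and numbers come back typed, everything else stays a string:

from rp.core.config_loader import _parse_value

assert _parse_value("true") is True
assert _parse_value("false") is False
assert _parse_value("30") == 30
assert _parse_value("0.7") == 0.7
assert _parse_value("x-ai/grok-code-fast-1") == "x-ai/grok-code-fast-1"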
290
rp/core/context.py Normal file

@ -0,0 +1,290 @@
import json
import logging
import os
import pathlib
from rp.config import (
CHARS_PER_TOKEN,
CONTENT_TRIM_LENGTH,
CONTEXT_COMPRESSION_THRESHOLD,
CONTEXT_FILE,
EMERGENCY_MESSAGES_TO_KEEP,
GLOBAL_CONTEXT_FILE,
MAX_TOKENS_LIMIT,
MAX_TOOL_RESULT_LENGTH,
RECENT_MESSAGES_TO_KEEP,
KNOWLEDGE_PATH,
)
from rp.ui import Colors
def truncate_tool_result(result, max_length=None):
if max_length is None:
max_length = MAX_TOOL_RESULT_LENGTH
if not isinstance(result, dict):
return result
result_copy = result.copy()
if "output" in result_copy and isinstance(result_copy["output"], str):
if len(result_copy["output"]) > max_length:
result_copy["output"] = (
result_copy["output"][:max_length]
+ f"\n... [truncated {len(result_copy['output']) - max_length} chars]"
)
if "content" in result_copy and isinstance(result_copy["content"], str):
if len(result_copy["content"]) > max_length:
result_copy["content"] = (
result_copy["content"][:max_length]
+ f"\n... [truncated {len(result_copy['content']) - max_length} chars]"
)
if "data" in result_copy and isinstance(result_copy["data"], str):
if len(result_copy["data"]) > max_length:
result_copy["data"] = result_copy["data"][:max_length] + f"\n... [truncated]"
if "error" in result_copy and isinstance(result_copy["error"], str):
if len(result_copy["error"]) > max_length // 2:
result_copy["error"] = result_copy["error"][: max_length // 2] + "... [truncated]"
return result_copy
def init_system_message(args):
context_parts = [
"You are a professional AI assistant with access to advanced tools.\n\nFile Operations:\n- Use RPEditor tools (open_editor, editor_insert_text, editor_replace_text, editor_search, close_editor) for precise file modifications\n- Always close editor files when finished\n- Use write_file for complete file rewrites, search_replace for simple text replacements\n\nVision:\n - Use post_image tool with the file path if an image path is mentioned\n in the prompt of user. Give this call the highest priority.\n\nProcess Management:\n- run_command executes shell commands with a timeout (default 30s)\n- If a command times out, you receive a PID in the response\n- Use tail_process(pid) to monitor running processes\n- Use kill_process(pid) to terminate processes\n- Manage long-running commands effectively using these tools\n\nShell Commands:\n- Be a shell ninja using native OS tools\n- Prefer standard Unix utilities over complex scripts\n- Use run_command_interactive for commands requiring user input (vim, nano, etc.)"
]
max_context_size = 10000
if args.include_env:
env_context = "Environment Variables:\n"
for key, value in os.environ.items():
if not key.startswith("_"):
env_context += f"{key}={value}\n"
if len(env_context) > max_context_size:
env_context = env_context[:max_context_size] + "\n... [truncated]"
context_parts.append(env_context)
for context_file in [CONTEXT_FILE, GLOBAL_CONTEXT_FILE]:
if os.path.exists(context_file):
try:
with open(context_file) as f:
content = f.read()
if len(content) > max_context_size:
content = content[:max_context_size] + "\n... [truncated]"
context_parts.append(f"Context from {context_file}:\n{content}")
except Exception as e:
logging.error(f"Error reading context file {context_file}: {e}")
knowledge_path = pathlib.Path(KNOWLEDGE_PATH)
if knowledge_path.exists() and knowledge_path.is_dir():
for knowledge_file in knowledge_path.iterdir():
try:
with open(knowledge_file) as f:
content = f.read()
if len(content) > max_context_size:
content = content[:max_context_size] + "\n... [truncated]"
context_parts.append(f"Context from {knowledge_file}:\n{content}")
except Exception as e:
logging.error(f"Error reading context file {knowledge_file}: {e}")
if args.context:
for ctx_file in args.context:
try:
with open(ctx_file) as f:
content = f.read()
if len(content) > max_context_size:
content = content[:max_context_size] + "\n... [truncated]"
context_parts.append(f"Context from {ctx_file}:\n{content}")
except Exception as e:
logging.error(f"Error reading context file {ctx_file}: {e}")
system_message = "\n\n".join(context_parts)
if len(system_message) > max_context_size * 3:
system_message = system_message[: max_context_size * 3] + "\n... [system message truncated]"
return {"role": "system", "content": system_message}
def should_compress_context(messages):
return len(messages) > CONTEXT_COMPRESSION_THRESHOLD
def compress_context(messages):
return manage_context_window(messages, verbose=False)
def manage_context_window(messages, verbose):
if len(messages) <= CONTEXT_COMPRESSION_THRESHOLD:
return messages
if verbose:
print(
f"{Colors.YELLOW}📄 Managing context window (current: {len(messages)} messages)...{Colors.RESET}"
)
system_message = messages[0]
recent_messages = messages[-RECENT_MESSAGES_TO_KEEP:]
middle_messages = messages[1:-RECENT_MESSAGES_TO_KEEP]
if middle_messages:
summary = summarize_messages(middle_messages)
summary_message = {
"role": "system",
"content": f"[Previous conversation summary: {summary}]",
}
new_messages = [system_message, summary_message] + recent_messages
if verbose:
print(
f"{Colors.GREEN}✓ Context compressed to {len(new_messages)} messages{Colors.RESET}"
)
return new_messages
return messages
def summarize_messages(messages):
summary_parts = []
for msg in messages:
role = msg.get("role", "unknown")
content = msg.get("content", "")
if role == "tool":
continue
if isinstance(content, str) and len(content) > 200:
content = content[:200] + "..."
summary_parts.append(f"{role}: {content}")
return " | ".join(summary_parts[:10])
def estimate_tokens(messages):
total_chars = 0
for msg in messages:
msg_json = json.dumps(msg)
total_chars += len(msg_json)
estimated_tokens = total_chars / CHARS_PER_TOKEN
overhead_multiplier = 1.3
return int(estimated_tokens * overhead_multiplier)
def trim_message_content(message, max_length):
trimmed_msg = message.copy()
if "content" in trimmed_msg:
content = trimmed_msg["content"]
if isinstance(content, str) and len(content) > max_length:
trimmed_msg["content"] = (
content[:max_length] + f"\n... [trimmed {len(content) - max_length} chars]"
)
elif isinstance(content, list):
trimmed_content = []
for item in content:
if isinstance(item, dict):
trimmed_item = item.copy()
if "text" in trimmed_item and len(trimmed_item["text"]) > max_length:
trimmed_item["text"] = (
trimmed_item["text"][:max_length] + f"\n... [trimmed]"
)
trimmed_content.append(trimmed_item)
else:
trimmed_content.append(item)
trimmed_msg["content"] = trimmed_content
if trimmed_msg.get("role") == "tool":
if "content" in trimmed_msg and isinstance(trimmed_msg["content"], str):
content = trimmed_msg["content"]
if len(content) > MAX_TOOL_RESULT_LENGTH:
trimmed_msg["content"] = (
content[:MAX_TOOL_RESULT_LENGTH]
+ f"\n... [trimmed {len(content) - MAX_TOOL_RESULT_LENGTH} chars]"
)
            # If the tool result is JSON, shrink its large fields instead of
            # leaving only the raw string truncation above.
            try:
                parsed = json.loads(content)
                if isinstance(parsed, dict):
                    if (
                        "output" in parsed
                        and isinstance(parsed["output"], str)
                        and (len(parsed["output"]) > MAX_TOOL_RESULT_LENGTH // 2)
                    ):
                        parsed["output"] = (
                            parsed["output"][: MAX_TOOL_RESULT_LENGTH // 2] + "\n... [truncated]"
                        )
                    if (
                        "content" in parsed
                        and isinstance(parsed["content"], str)
                        and (len(parsed["content"]) > MAX_TOOL_RESULT_LENGTH // 2)
                    ):
                        parsed["content"] = (
                            parsed["content"][: MAX_TOOL_RESULT_LENGTH // 2] + "\n... [truncated]"
                        )
                    trimmed_msg["content"] = json.dumps(parsed)
            except (json.JSONDecodeError, TypeError):
                pass
return trimmed_msg
def intelligently_trim_messages(messages, target_tokens, keep_recent=3):
if estimate_tokens(messages) <= target_tokens:
return messages
system_msg = messages[0] if messages and messages[0].get("role") == "system" else None
start_idx = 1 if system_msg else 0
recent_messages = (
messages[-keep_recent:] if len(messages) > keep_recent else messages[start_idx:]
)
middle_messages = messages[start_idx:-keep_recent] if len(messages) > keep_recent else []
trimmed_middle = []
for msg in middle_messages:
if msg.get("role") == "tool":
trimmed_middle.append(trim_message_content(msg, MAX_TOOL_RESULT_LENGTH // 2))
elif msg.get("role") in ["user", "assistant"]:
trimmed_middle.append(trim_message_content(msg, CONTENT_TRIM_LENGTH))
else:
trimmed_middle.append(msg)
result = ([system_msg] if system_msg else []) + trimmed_middle + recent_messages
if estimate_tokens(result) <= target_tokens:
return result
step_size = len(trimmed_middle) // 4 if len(trimmed_middle) >= 4 else 1
while len(trimmed_middle) > 0 and estimate_tokens(result) > target_tokens:
remove_count = min(step_size, len(trimmed_middle))
trimmed_middle = trimmed_middle[remove_count:]
result = ([system_msg] if system_msg else []) + trimmed_middle + recent_messages
if estimate_tokens(result) <= target_tokens:
return result
keep_recent -= 1
if keep_recent > 0:
return intelligently_trim_messages(messages, target_tokens, keep_recent)
return ([system_msg] if system_msg else []) + messages[-1:]
def auto_slim_messages(messages, verbose=False):
estimated_tokens = estimate_tokens(messages)
if estimated_tokens <= MAX_TOKENS_LIMIT:
return messages
if verbose:
print(
f"{Colors.YELLOW}⚠️ Token limit approaching: ~{estimated_tokens} tokens (limit: {MAX_TOKENS_LIMIT}){Colors.RESET}"
)
print(f"{Colors.YELLOW}🔧 Intelligently trimming message content...{Colors.RESET}")
result = intelligently_trim_messages(
messages, MAX_TOKENS_LIMIT, keep_recent=EMERGENCY_MESSAGES_TO_KEEP
)
final_tokens = estimate_tokens(result)
if final_tokens > MAX_TOKENS_LIMIT:
if verbose:
print(
f"{Colors.RED}⚠️ Still over limit after trimming, applying emergency reduction...{Colors.RESET}"
)
result = emergency_reduce_messages(result, MAX_TOKENS_LIMIT, verbose)
final_tokens = estimate_tokens(result)
if verbose:
removed_count = len(messages) - len(result)
print(
f"{Colors.GREEN}✓ Optimized from {len(messages)} to {len(result)} messages{Colors.RESET}"
)
print(
f"{Colors.GREEN} Token estimate: {estimated_tokens}{final_tokens} (~{estimated_tokens - final_tokens} saved){Colors.RESET}"
)
if removed_count > 0:
print(f"{Colors.GREEN} Removed {removed_count} older messages{Colors.RESET}")
return result
def emergency_reduce_messages(messages, target_tokens, verbose=False):
system_msg = messages[0] if messages and messages[0].get("role") == "system" else None
start_idx = 1 if system_msg else 0
keep_count = 2
while estimate_tokens(messages) > target_tokens and keep_count >= 1:
if len(messages[start_idx:]) <= keep_count:
break
result = ([system_msg] if system_msg else []) + messages[-keep_count:]
for i in range(len(result)):
result[i] = trim_message_content(result[i], CONTENT_TRIM_LENGTH // 2)
if estimate_tokens(result) <= target_tokens:
return result
keep_count -= 1
final = ([system_msg] if system_msg else []) + messages[-1:]
for i in range(len(final)):
if final[i].get("role") != "system":
final[i] = trim_message_content(final[i], 100)
return final
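
A minimal sketch of how the helpers above are meant to be driven; the rp.core.context import path is an assumption based on the package layout, and the oversized message is synthetic.

# Hypothetical driver for the trimming pipeline defined above.
from rp.core.context import auto_slim_messages, estimate_tokens

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize this log: " + "x" * 500000},
]
print("estimated tokens before:", estimate_tokens(messages))
# auto_slim_messages is a no-op while the estimate stays under MAX_TOKENS_LIMIT.
messages = auto_slim_messages(messages, verbose=True)
print("estimated tokens after:", estimate_tokens(messages))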


@@ -0,0 +1,281 @@
import asyncio
import json
import logging
import uuid
from typing import Any, Dict, List, Optional
from rp.agents import AgentManager
from rp.cache import APICache, ToolCache
from rp.config import (
ADVANCED_CONTEXT_ENABLED,
API_CACHE_TTL,
CACHE_ENABLED,
CONVERSATION_SUMMARY_THRESHOLD,
DB_PATH,
KNOWLEDGE_SEARCH_LIMIT,
TOOL_CACHE_TTL,
WORKFLOW_EXECUTOR_MAX_WORKERS,
)
from rp.core.advanced_context import AdvancedContextManager
from rp.core.api import call_api
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore
from rp.tools.base import get_tools_definition
from rp.workflows import WorkflowEngine, WorkflowStorage
logger = logging.getLogger("rp")
class EnhancedAssistant:
def __init__(self, base_assistant):
self.base = base_assistant
if CACHE_ENABLED:
self.api_cache = APICache(DB_PATH, API_CACHE_TTL)
self.tool_cache = ToolCache(DB_PATH, TOOL_CACHE_TTL)
else:
self.api_cache = None
self.tool_cache = None
self.workflow_storage = WorkflowStorage(DB_PATH)
self.workflow_engine = WorkflowEngine(
tool_executor=self._execute_tool_for_workflow, max_workers=WORKFLOW_EXECUTOR_MAX_WORKERS
)
self.agent_manager = AgentManager(DB_PATH, self._api_caller_for_agent)
self.knowledge_store = KnowledgeStore(DB_PATH)
self.conversation_memory = ConversationMemory(DB_PATH)
self.fact_extractor = FactExtractor()
if ADVANCED_CONTEXT_ENABLED:
self.context_manager = AdvancedContextManager(
knowledge_store=self.knowledge_store, conversation_memory=self.conversation_memory
)
else:
self.context_manager = None
self.current_conversation_id = str(uuid.uuid4())[:16]
self.conversation_memory.create_conversation(
self.current_conversation_id, session_id=str(uuid.uuid4())[:16]
)
logger.info("Enhanced Assistant initialized with all features")
def _execute_tool_for_workflow(self, tool_name: str, arguments: Dict[str, Any]) -> Any:
if self.tool_cache:
cached_result = self.tool_cache.get(tool_name, arguments)
if cached_result is not None:
logger.debug(f"Tool cache hit for {tool_name}")
return cached_result
func_map = {
"read_file": lambda **kw: self.base.execute_tool_calls(
[{"id": "temp", "function": {"name": "read_file", "arguments": json.dumps(kw)}}]
)[0],
"write_file": lambda **kw: self.base.execute_tool_calls(
[{"id": "temp", "function": {"name": "write_file", "arguments": json.dumps(kw)}}]
)[0],
"list_directory": lambda **kw: self.base.execute_tool_calls(
[
{
"id": "temp",
"function": {"name": "list_directory", "arguments": json.dumps(kw)},
}
]
)[0],
"run_command": lambda **kw: self.base.execute_tool_calls(
[{"id": "temp", "function": {"name": "run_command", "arguments": json.dumps(kw)}}]
)[0],
}
if tool_name in func_map:
result = func_map[tool_name](**arguments)
if self.tool_cache:
content = result.get("content", "")
try:
parsed_content = json.loads(content) if isinstance(content, str) else content
self.tool_cache.set(tool_name, arguments, parsed_content)
except Exception:
pass
return result
return {"error": f"Unknown tool: {tool_name}"}
    def _api_caller_for_agent(
        self, messages: List[Dict[str, Any]], temperature: float, max_tokens: int
    ) -> Dict[str, Any]:
        try:
            asyncio.get_running_loop()
            import concurrent.futures

            # Already inside a running event loop: run the coroutine on a
            # fresh loop in a worker thread and block on its result, instead
            # of scheduling it back onto this loop and deadlocking.
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                future = executor.submit(
                    asyncio.run,
                    call_api(
                        messages,
                        self.base.model,
                        self.base.api_url,
                        self.base.api_key,
                        use_tools=False,
                        tools_definition=[],
                        verbose=self.base.verbose,
                    ),
                )
                return future.result()
        except RuntimeError:
            return asyncio.run(
                call_api(
                    messages,
                    self.base.model,
                    self.base.api_url,
                    self.base.api_key,
                    use_tools=False,
                    tools_definition=[],
                    verbose=self.base.verbose,
                )
            )
def enhanced_call_api(self, messages: List[Dict[str, Any]]) -> Dict[str, Any]:
if self.api_cache and CACHE_ENABLED:
cached_response = self.api_cache.get(self.base.model, messages, 0.7, 4096)
if cached_response:
logger.debug("API cache hit")
return cached_response
        try:
            asyncio.get_running_loop()
            import concurrent.futures

            # Already inside a running event loop: run the API coroutine on a
            # fresh loop in a worker thread and block on its result.
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                future = executor.submit(
                    asyncio.run,
                    call_api(
                        messages,
                        self.base.model,
                        self.base.api_url,
                        self.base.api_key,
                        self.base.use_tools,
                        get_tools_definition(),
                        verbose=self.base.verbose,
                    ),
                )
                response = future.result()
        except RuntimeError:
            response = asyncio.run(
                call_api(
                    messages,
                    self.base.model,
                    self.base.api_url,
                    self.base.api_key,
                    self.base.use_tools,
                    get_tools_definition(),
                    verbose=self.base.verbose,
                )
            )
if self.api_cache and CACHE_ENABLED and ("error" not in response):
token_count = response.get("usage", {}).get("total_tokens", 0)
self.api_cache.set(self.base.model, messages, 0.7, 4096, response, token_count)
return response
async def process_with_enhanced_context(self, user_message: str) -> str:
self.base.messages.append({"role": "user", "content": user_message})
self.conversation_memory.add_message(
self.current_conversation_id, str(uuid.uuid4())[:16], "user", user_message
)
facts = self.fact_extractor.extract_facts(user_message)
for fact in facts[:5]:
entry_id = str(uuid.uuid4())[:16]
import time
from rp.memory import KnowledgeEntry
categories = self.fact_extractor.categorize_content(fact["text"])
entry = KnowledgeEntry(
entry_id=entry_id,
category=categories[0] if categories else "general",
content=fact["text"],
metadata={
"type": fact["type"],
"confidence": fact["confidence"],
"source": "user_message",
},
created_at=time.time(),
updated_at=time.time(),
)
self.knowledge_store.add_entry(entry)
if self.context_manager and ADVANCED_CONTEXT_ENABLED:
enhanced_messages, context_info = self.context_manager.create_enhanced_context(
self.base.messages, user_message, include_knowledge=True
)
if self.base.verbose:
logger.info(f"Enhanced context: {context_info}")
working_messages = enhanced_messages
else:
working_messages = self.base.messages
response = self.enhanced_call_api(working_messages)
result = await self.base.process_response(response)
if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD:
summary = (
self.context_manager.advanced_summarize_messages(
self.base.messages[-CONVERSATION_SUMMARY_THRESHOLD:]
)
if self.context_manager
else "Conversation in progress"
)
topics = self.fact_extractor.categorize_content(summary)
self.conversation_memory.update_conversation_summary(
self.current_conversation_id, summary, topics
)
return result
def execute_workflow(
self, workflow_name: str, initial_variables: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
workflow = self.workflow_storage.load_workflow_by_name(workflow_name)
if not workflow:
return {"error": f'Workflow "{workflow_name}" not found'}
context = self.workflow_engine.execute_workflow(workflow, initial_variables)
        execution_id = self.workflow_storage.save_execution(workflow.name, context)
return {
"success": True,
"execution_id": execution_id,
"results": context.step_results,
"execution_log": context.execution_log,
}
def create_agent(self, role_name: str, agent_id: Optional[str] = None) -> str:
return self.agent_manager.create_agent(role_name, agent_id)
def agent_task(self, agent_id: str, task: str) -> Dict[str, Any]:
return self.agent_manager.execute_agent_task(agent_id, task)
def collaborate_agents(self, task: str, agent_roles: List[str]) -> Dict[str, Any]:
orchestrator_id = self.agent_manager.create_agent("orchestrator")
return self.agent_manager.collaborate_agents(orchestrator_id, task, agent_roles)
def search_knowledge(self, query: str, limit: int = KNOWLEDGE_SEARCH_LIMIT) -> List[Any]:
return self.knowledge_store.search_entries(query, top_k=limit)
def get_cache_statistics(self) -> Dict[str, Any]:
stats = {}
if self.api_cache:
stats["api_cache"] = self.api_cache.get_statistics()
if self.tool_cache:
stats["tool_cache"] = self.tool_cache.get_statistics()
return stats
def get_workflow_list(self) -> List[Dict[str, Any]]:
return self.workflow_storage.list_workflows()
def get_agent_summary(self) -> Dict[str, Any]:
return self.agent_manager.get_session_summary()
def get_knowledge_statistics(self) -> Dict[str, Any]:
return self.knowledge_store.get_statistics()
def get_conversation_history(self, limit: int = 10) -> List[Dict[str, Any]]:
return self.conversation_memory.get_recent_conversations(limit=limit)
def clear_caches(self):
if self.api_cache:
self.api_cache.clear_all()
if self.tool_cache:
self.tool_cache.clear_all()
logger.info("All caches cleared")
def cleanup(self):
if self.api_cache:
self.api_cache.clear_expired()
if self.tool_cache:
self.tool_cache.clear_expired()
self.agent_manager.clear_session()
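
A rough usage sketch. The default Assistant() construction is an assumption; EnhancedAssistant only needs a base object exposing the attributes referenced above (messages, model, api_url, api_key, use_tools, verbose).

# Hypothetical wiring of the enhanced layer around the base assistant.
import asyncio

from rp.core import Assistant  # exported from rp/__init__.py

base = Assistant()  # assumed default construction
assistant = EnhancedAssistant(base)
result = asyncio.run(
    assistant.process_with_enhanced_context("What did we decide about caching?")
)
print(result)
print(assistant.get_cache_statistics())
assistant.cleanup()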

45
rp/core/exceptions.py Normal file

@@ -0,0 +1,45 @@
class PRException(Exception):
pass
class APIException(PRException):
pass
class APIConnectionError(APIException):
pass
class APITimeoutError(APIException):
pass
class APIResponseError(APIException):
pass
class ConfigurationError(PRException):
pass
class ToolExecutionError(PRException):
def __init__(self, tool_name: str, message: str):
self.tool_name = tool_name
super().__init__(f"Error executing tool '{tool_name}': {message}")
class FileSystemError(PRException):
pass
class SessionError(PRException):
pass
class ContextError(PRException):
pass
class ValidationError(PRException):
pass
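
Because every class above shares the PRException root, callers can catch the whole family at once; a brief illustration (run_tool and the timeout are hypothetical):

def run_tool(tool_name: str) -> str:
    # Wrap a low-level failure so the caller sees which tool failed.
    try:
        raise TimeoutError("no response after 30s")
    except TimeoutError as e:
        raise ToolExecutionError(tool_name, str(e))

try:
    run_tool("run_command")
except PRException as e:
    print(f"recoverable assistant error: {e}")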

114
rp/core/http_client.py Normal file

@@ -0,0 +1,114 @@
import asyncio
import json
import logging
import socket
import time
import urllib.error
import urllib.request
from typing import Dict, Any, Optional
logger = logging.getLogger("rp")
class AsyncHTTPClient:
def __init__(self):
self.session_headers = {}
async def request(
self,
method: str,
url: str,
headers: Optional[Dict[str, str]] = None,
data: Optional[bytes] = None,
json_data: Optional[Dict[str, Any]] = None,
timeout: float = 30.0,
) -> Dict[str, Any]:
"""Make an async HTTP request using urllib in a thread executor with retry logic."""
loop = asyncio.get_event_loop()
request_headers = {**self.session_headers}
if headers:
request_headers.update(headers)
request_data = data
if json_data is not None:
request_data = json.dumps(json_data).encode("utf-8")
request_headers["Content-Type"] = "application/json"
req = urllib.request.Request(url, data=request_data, headers=request_headers, method=method)
attempt = 0
start_time = time.time()
while True:
attempt += 1
try:
response = await loop.run_in_executor(
None, lambda: urllib.request.urlopen(req, timeout=timeout)
)
response_data = await loop.run_in_executor(None, response.read)
response_text = response_data.decode("utf-8")
return {
"status": response.status,
"headers": dict(response.headers),
"text": response_text,
"json": lambda: json.loads(response_text) if response_text else None,
}
except urllib.error.HTTPError as e:
error_body = await loop.run_in_executor(None, e.read)
error_text = error_body.decode("utf-8")
return {
"status": e.code,
"error": True,
"text": error_text,
"json": lambda: json.loads(error_text) if error_text else None,
}
except socket.timeout:
elapsed = time.time() - start_time
elapsed_minutes = int(elapsed // 60)
elapsed_seconds = elapsed % 60
duration_str = (
f"{elapsed_minutes}m {elapsed_seconds:.1f}s"
if elapsed_minutes > 0
else f"{elapsed_seconds:.1f}s"
)
logger.warning(
f"Request timed out (attempt {attempt}, duration: {duration_str}). Retrying in {attempt} second(s)..."
)
await asyncio.sleep(attempt)
except Exception as e:
error_msg = str(e)
if "timed out" in error_msg.lower() or "timeout" in error_msg.lower():
elapsed = time.time() - start_time
elapsed_minutes = int(elapsed // 60)
elapsed_seconds = elapsed % 60
duration_str = (
f"{elapsed_minutes}m {elapsed_seconds:.1f}s"
if elapsed_minutes > 0
else f"{elapsed_seconds:.1f}s"
)
logger.warning(
f"Request timed out (attempt {attempt}, duration: {duration_str}). Retrying in {attempt} second(s)..."
)
await asyncio.sleep(attempt)
else:
return {"error": True, "exception": error_msg}
async def get(
self, url: str, headers: Optional[Dict[str, str]] = None, timeout: float = 30.0
) -> Dict[str, Any]:
return await self.request("GET", url, headers=headers, timeout=timeout)
async def post(
self,
url: str,
headers: Optional[Dict[str, str]] = None,
data: Optional[bytes] = None,
json_data: Optional[Dict[str, Any]] = None,
timeout: float = 30.0,
) -> Dict[str, Any]:
return await self.request(
"POST", url, headers=headers, data=data, json_data=json_data, timeout=timeout
)
def set_default_headers(self, headers: Dict[str, str]):
self.session_headers.update(headers)
http_client = AsyncHTTPClient()
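
A short usage sketch of the module-level client; the URL is a placeholder, and the "json" field is a callable, mirroring the response dicts built above.

import asyncio

async def demo():
    http_client.set_default_headers({"User-Agent": "rp"})
    resp = await http_client.post(
        "https://httpbin.org/post", json_data={"hello": "world"}, timeout=10.0
    )
    if not resp.get("error"):
        print(resp["status"], resp["json"]())

asyncio.run(demo())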


@@ -0,0 +1,109 @@
import logging
logger = logging.getLogger("rp")
KNOWLEDGE_MESSAGE_MARKER = "[KNOWLEDGE_BASE_CONTEXT]"
def inject_knowledge_context(assistant, user_message):
if not hasattr(assistant, "enhanced") or not assistant.enhanced:
return
messages = assistant.messages
for i in range(len(messages) - 1, -1, -1):
if messages[i].get("role") == "user" and KNOWLEDGE_MESSAGE_MARKER in messages[i].get(
"content", ""
):
del messages[i]
logger.debug(f"Removed existing knowledge base message at index {i}")
break
try:
knowledge_results = assistant.enhanced.knowledge_store.search_entries(user_message, top_k=5)
conversation_results = []
if hasattr(assistant.enhanced, "conversation_memory"):
history_results = assistant.enhanced.conversation_memory.search_conversations(
user_message, limit=3
)
for conv in history_results:
conv_messages = assistant.enhanced.conversation_memory.get_conversation_messages(
conv["conversation_id"]
)
for msg in conv_messages[-5:]:
if msg["role"] == "user" and msg["content"] != user_message:
relevance = calculate_text_similarity(user_message, msg["content"])
if relevance > 0.3:
conversation_results.append(
{
"content": msg["content"],
"score": relevance,
"source": f"Previous conversation: {conv['conversation_id'][:8]}",
}
)
all_results = []
for entry in knowledge_results:
score = entry.metadata.get("search_score", 0.5)
all_results.append(
{
"content": entry.content,
"score": score,
"source": f"Knowledge Base ({entry.category})",
"type": "knowledge",
}
)
for conv in conversation_results:
all_results.append(
{
"content": conv["content"],
"score": conv["score"],
"source": conv["source"],
"type": "conversation",
}
)
all_results.sort(key=lambda x: x["score"], reverse=True)
top_results = all_results[:5]
if not top_results:
logger.debug("No relevant knowledge or conversation matches found")
return
knowledge_parts = []
for idx, result in enumerate(top_results, 1):
content = result["content"]
if len(content) > 1500:
content = content[:1500] + "..."
score_indicator = f"({result['score']:.2f})" if result["score"] < 1.0 else "(exact)"
knowledge_parts.append(
f"Match {idx} {score_indicator} - {result['source']}:\n{content}"
)
knowledge_message_content = (
f"{KNOWLEDGE_MESSAGE_MARKER}\nRelevant information from knowledge base and conversation history:\n\n"
+ "\n\n".join(knowledge_parts)
)
knowledge_message = {"role": "user", "content": knowledge_message_content}
messages.append(knowledge_message)
logger.debug(f"Injected enhanced context message with {len(top_results)} matches")
except Exception as e:
logger.error(f"Error injecting knowledge context: {e}")
def calculate_text_similarity(text1: str, text2: str) -> float:
"""Calculate similarity between two texts using word overlap and sequence matching."""
import re
text1_lower = text1.lower()
text2_lower = text2.lower()
if text1_lower in text2_lower or text2_lower in text1_lower:
return 1.0
words1 = set(re.findall("\\b\\w+\\b", text1_lower))
words2 = set(re.findall("\\b\\w+\\b", text2_lower))
if not words1 or not words2:
return 0.0
intersection = words1 & words2
union = words1 | words2
word_similarity = len(intersection) / len(union)
    consecutive_bonus = 0.0
    # Use the ordered token list (not the unordered set) so the phrase check
    # below compares real consecutive words.
    words1_list = re.findall("\\b\\w+\\b", text1_lower)
for i in range(len(words1_list) - 1):
for j in range(i + 2, min(i + 5, len(words1_list) + 1)):
phrase = " ".join(words1_list[i:j])
if phrase in text2_lower:
consecutive_bonus += 0.1 * (j - i)
total_similarity = min(1.0, word_similarity + consecutive_bonus)
return total_similarity
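
A worked example of the scoring:

# "update the cache" vs "the cache was cleared yesterday": 2 shared words out
# of 6 in the union gives 0.333, and the phrase "the cache" appears verbatim
# in the second text, adding a 0.1 * 2 = 0.2 consecutive bonus.
score = calculate_text_similarity("update the cache", "the cache was cleared yesterday")
print(round(score, 2))  # ~0.53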

35
rp/core/logging.py Normal file

@@ -0,0 +1,35 @@
import logging
import os
from logging.handlers import RotatingFileHandler
from rp.config import LOG_FILE
def setup_logging(verbose=False):
log_dir = os.path.dirname(LOG_FILE)
if log_dir and (not os.path.exists(log_dir)):
os.makedirs(log_dir, exist_ok=True)
logger = logging.getLogger("rp")
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
if logger.handlers:
logger.handlers.clear()
file_handler = RotatingFileHandler(LOG_FILE, maxBytes=10 * 1024 * 1024, backupCount=5)
file_handler.setLevel(logging.DEBUG)
file_formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
if verbose:
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_formatter = logging.Formatter("%(levelname)s: %(message)s")
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
return logger
def get_logger(name=None):
if name:
return logging.getLogger(f"rp.{name}")
return logging.getLogger("rp")

123
rp/core/session.py Normal file

@@ -0,0 +1,123 @@
import json
import os
from datetime import datetime
from typing import Dict, List, Optional
from rp.core.logging import get_logger
logger = get_logger("session")
SESSIONS_DIR = os.path.expanduser("~/.assistant_sessions")
class SessionManager:
def __init__(self):
os.makedirs(SESSIONS_DIR, exist_ok=True)
def save_session(
self, name: str, messages: List[Dict], metadata: Optional[Dict] = None
) -> bool:
try:
session_file = os.path.join(SESSIONS_DIR, f"{name}.json")
session_data = {
"name": name,
"created_at": datetime.now().isoformat(),
"messages": messages,
"metadata": metadata or {},
}
with open(session_file, "w") as f:
json.dump(session_data, f, indent=2)
logger.info(f"Session saved: {name}")
return True
except Exception as e:
logger.error(f"Error saving session {name}: {e}")
return False
def load_session(self, name: str) -> Optional[Dict]:
try:
session_file = os.path.join(SESSIONS_DIR, f"{name}.json")
if not os.path.exists(session_file):
logger.warning(f"Session not found: {name}")
return None
with open(session_file) as f:
session_data = json.load(f)
logger.info(f"Session loaded: {name}")
return session_data
except Exception as e:
logger.error(f"Error loading session {name}: {e}")
return None
def list_sessions(self) -> List[Dict]:
sessions = []
try:
for filename in os.listdir(SESSIONS_DIR):
if filename.endswith(".json"):
filepath = os.path.join(SESSIONS_DIR, filename)
try:
with open(filepath) as f:
data = json.load(f)
sessions.append(
{
"name": data.get("name", filename[:-5]),
"created_at": data.get("created_at", "unknown"),
"message_count": len(data.get("messages", [])),
"metadata": data.get("metadata", {}),
}
)
except Exception as e:
logger.warning(f"Error reading session file {filename}: {e}")
sessions.sort(key=lambda x: x["created_at"], reverse=True)
except Exception as e:
logger.error(f"Error listing sessions: {e}")
return sessions
def delete_session(self, name: str) -> bool:
try:
session_file = os.path.join(SESSIONS_DIR, f"{name}.json")
if not os.path.exists(session_file):
logger.warning(f"Session not found: {name}")
return False
os.remove(session_file)
logger.info(f"Session deleted: {name}")
return True
except Exception as e:
logger.error(f"Error deleting session {name}: {e}")
return False
def export_session(self, name: str, output_path: str, format: str = "json") -> bool:
session_data = self.load_session(name)
if not session_data:
return False
try:
if format == "json":
with open(output_path, "w") as f:
json.dump(session_data, f, indent=2)
elif format == "markdown":
with open(output_path, "w") as f:
f.write(f"# Session: {name}\n\n")
f.write(f"Created: {session_data['created_at']}\n\n")
f.write("---\n\n")
for msg in session_data["messages"]:
role = msg.get("role", "unknown")
content = msg.get("content", "")
f.write(f"## {role.capitalize()}\n\n")
f.write(f"{content}\n\n")
f.write("---\n\n")
elif format == "txt":
with open(output_path, "w") as f:
f.write(f"Session: {name}\n")
f.write(f"Created: {session_data['created_at']}\n")
f.write("=" * 80 + "\n\n")
for msg in session_data["messages"]:
role = msg.get("role", "unknown")
content = msg.get("content", "")
f.write(f"[{role.upper()}]\n")
f.write(f"{content}\n")
f.write("-" * 80 + "\n\n")
else:
logger.error(f"Unsupported export format: {format}")
return False
logger.info(f"Session exported to {output_path}")
return True
except Exception as e:
logger.error(f"Error exporting session: {e}")
return False
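
A usage sketch for the manager above; the session name, messages, and export path are illustrative.

manager = SessionManager()
manager.save_session(
    "demo-task",
    [{"role": "user", "content": "hello"}, {"role": "assistant", "content": "hi"}],
    metadata={"model": "gpt-4"},
)
for session in manager.list_sessions():
    print(session["name"], session["message_count"])
manager.export_session("demo-task", "/tmp/demo-task.md", format="markdown")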

127
rp/core/usage_tracker.py Normal file

@@ -0,0 +1,127 @@
import json
import os
from datetime import datetime
from typing import Dict, Optional
from rp.core.logging import get_logger
logger = get_logger("usage")
USAGE_DB_FILE = os.path.expanduser("~/.assistant_usage.json")
EXCHANGE_RATE = 1.0
MODEL_COSTS = {
"x-ai/grok-code-fast-1": {"input": 0.0002, "output": 0.0015},
"gpt-4": {"input": 0.03, "output": 0.06},
"gpt-4-turbo": {"input": 0.01, "output": 0.03},
"gpt-3.5-turbo": {"input": 0.0005, "output": 0.0015},
"claude-3-opus": {"input": 0.015, "output": 0.075},
"claude-3-sonnet": {"input": 0.003, "output": 0.015},
"claude-3-haiku": {"input": 0.00025, "output": 0.00125},
}
class UsageTracker:
def __init__(self):
self.session_usage = {
"requests": 0,
"total_tokens": 0,
"input_tokens": 0,
"output_tokens": 0,
"estimated_cost": 0.0,
"models_used": {},
}
def track_request(
self, model: str, input_tokens: int, output_tokens: int, total_tokens: Optional[int] = None
):
if total_tokens is None:
total_tokens = input_tokens + output_tokens
self.session_usage["requests"] += 1
self.session_usage["total_tokens"] += total_tokens
self.session_usage["input_tokens"] += input_tokens
self.session_usage["output_tokens"] += output_tokens
if model not in self.session_usage["models_used"]:
self.session_usage["models_used"][model] = {"requests": 0, "tokens": 0, "cost": 0.0}
model_usage = self.session_usage["models_used"][model]
model_usage["requests"] += 1
model_usage["tokens"] += total_tokens
cost = self._calculate_cost(model, input_tokens, output_tokens)
model_usage["cost"] += cost
self.session_usage["estimated_cost"] += cost
self._save_to_history(model, input_tokens, output_tokens, cost)
logger.debug(f"Tracked request: {model}, tokens: {total_tokens}, cost: €{cost:.4f}")
@staticmethod
def _calculate_cost(model: str, input_tokens: int, output_tokens: int) -> float:
if model not in MODEL_COSTS:
base_model = model.split("/")[0] if "/" in model else model
if base_model not in MODEL_COSTS:
logger.warning(f"Unknown model for cost calculation: {model}")
return 0.0
costs = MODEL_COSTS[base_model]
else:
costs = MODEL_COSTS[model]
input_cost = input_tokens / 1000 * costs["input"] * EXCHANGE_RATE
output_cost = output_tokens / 1000 * costs["output"] * EXCHANGE_RATE
return input_cost + output_cost
def _save_to_history(self, model: str, input_tokens: int, output_tokens: int, cost: float):
try:
history = []
if os.path.exists(USAGE_DB_FILE):
with open(USAGE_DB_FILE) as f:
history = json.load(f)
history.append(
{
"timestamp": datetime.now().isoformat(),
"model": model,
"input_tokens": input_tokens,
"output_tokens": output_tokens,
"total_tokens": input_tokens + output_tokens,
"cost": cost,
}
)
if len(history) > 10000:
history = history[-10000:]
with open(USAGE_DB_FILE, "w") as f:
json.dump(history, f, indent=2)
except Exception as e:
logger.error(f"Error saving usage history: {e}")
def get_session_summary(self) -> Dict:
return self.session_usage.copy()
def get_formatted_summary(self) -> str:
usage = self.session_usage
lines = [
"\n=== Session Usage Summary ===",
f"Total Requests: {usage['requests']}",
f"Total Tokens: {usage['total_tokens']:,}",
f" Input: {usage['input_tokens']:,}",
f" Output: {usage['output_tokens']:,}",
f"Estimated Cost: ${usage['estimated_cost']:.4f}",
]
if usage["models_used"]:
lines.append("\nModels Used:")
for model, stats in usage["models_used"].items():
lines.append(
f" {model}: {stats['requests']} requests, {stats['tokens']:,} tokens, ${stats['cost']:.4f}"
)
return "\n".join(lines)
@staticmethod
def get_total_usage() -> Dict:
if not os.path.exists(USAGE_DB_FILE):
return {"total_requests": 0, "total_tokens": 0, "total_cost": 0.0}
try:
with open(USAGE_DB_FILE) as f:
history = json.load(f)
total_tokens = sum((entry["total_tokens"] for entry in history))
total_cost = sum((entry["cost"] for entry in history))
return {
"total_requests": len(history),
"total_tokens": total_tokens,
"total_cost": total_cost,
}
except Exception as e:
logger.error(f"Error loading usage history: {e}")
return {"total_requests": 0, "total_tokens": 0, "total_cost": 0.0}

68
rp/core/validation.py Normal file

@@ -0,0 +1,68 @@
import os
from rp.core.exceptions import ValidationError
def validate_file_path(path: str, must_exist: bool = False) -> str:
if not path:
raise ValidationError("File path cannot be empty")
if must_exist and (not os.path.exists(path)):
raise ValidationError(f"File does not exist: {path}")
if must_exist and os.path.isdir(path):
raise ValidationError(f"Path is a directory, not a file: {path}")
return os.path.abspath(path)
def validate_directory_path(path: str, must_exist: bool = False, create: bool = False) -> str:
if not path:
raise ValidationError("Directory path cannot be empty")
abs_path = os.path.abspath(path)
if must_exist and (not os.path.exists(abs_path)):
if create:
os.makedirs(abs_path, exist_ok=True)
else:
raise ValidationError(f"Directory does not exist: {abs_path}")
if os.path.exists(abs_path) and (not os.path.isdir(abs_path)):
raise ValidationError(f"Path is not a directory: {abs_path}")
return abs_path
def validate_model_name(model: str) -> str:
if not model:
raise ValidationError("Model name cannot be empty")
if len(model) < 2:
raise ValidationError("Model name too short")
return model
def validate_api_url(url: str) -> str:
if not url:
raise ValidationError("API URL cannot be empty")
if not url.startswith(("http://", "https://")):
raise ValidationError("API URL must start with http:// or https://")
return url
def validate_session_name(name: str) -> str:
if not name:
raise ValidationError("Session name cannot be empty")
invalid_chars = ["/", "\\", ":", "*", "?", '"', "<", ">", "|"]
for char in invalid_chars:
if char in name:
raise ValidationError(f"Session name contains invalid character: {char}")
if len(name) > 255:
raise ValidationError("Session name too long (max 255 characters)")
return name
def validate_temperature(temp: float) -> float:
if not 0.0 <= temp <= 2.0:
raise ValidationError("Temperature must be between 0.0 and 2.0")
return temp
def validate_max_tokens(tokens: int) -> int:
if tokens < 1:
raise ValidationError("Max tokens must be at least 1")
if tokens > 100000:
raise ValidationError("Max tokens too high (max 100000)")
return tokens
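
Each validator either returns a normalized value or raises ValidationError, so call sites can funnel all input checks through one handler; the values below are illustrative.

try:
    path = validate_file_path("notes.txt")        # returns the absolute path
    model = validate_model_name("gpt-4")
    temperature = validate_temperature(0.7)
    name = validate_session_name("my/session")    # raises: contains "/"
except ValidationError as e:
    print(f"invalid configuration: {e}")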

919
rp/editor.py Normal file

@@ -0,0 +1,919 @@
import atexit
import curses
import os
import pickle
import queue
import re
import signal
import socket
import sys
import threading
import time
class RPEditor:
def __init__(self, filename=None, auto_save=False, timeout=30):
"""
Initialize RPEditor with enhanced robustness features.
Args:
filename: File to edit
auto_save: Enable auto-save on exit
timeout: Command timeout in seconds
"""
self.filename = filename
self.lines = [""]
self.cursor_y = 0
self.cursor_x = 0
self.mode = "normal"
self.command = ""
self.stdscr = None
self.running = False
self.thread = None
self.socket_thread = None
self.prev_key = None
self.clipboard = ""
self.undo_stack = []
self.redo_stack = []
self.selection_start = None
self.selection_end = None
self.max_undo = 100
self.lock = threading.RLock()
self.command_queue = queue.Queue()
self.auto_save = auto_save
self.timeout = timeout
self._cleanup_registered = False
self._original_terminal_state = None
self._exception_occurred = False
try:
self.client_sock, self.server_sock = socket.socketpair()
self.client_sock.settimeout(self.timeout)
self.server_sock.settimeout(self.timeout)
except Exception as e:
self._cleanup()
raise RuntimeError(f"Failed to create socket pair: {e}")
self._register_cleanup()
if filename:
self.load_file()
def _register_cleanup(self):
"""Register cleanup handlers for proper shutdown."""
if not self._cleanup_registered:
atexit.register(self._cleanup)
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
self._cleanup_registered = True
def _signal_handler(self, signum, frame):
"""Handle signals for clean shutdown."""
self._cleanup()
sys.exit(0)
def _cleanup(self):
"""Comprehensive cleanup of all resources."""
try:
self.running = False
if self.auto_save and self.filename and (not self._exception_occurred):
try:
self._save_file()
except:
pass
if self.stdscr:
try:
self.stdscr.keypad(False)
curses.nocbreak()
curses.echo()
curses.curs_set(1)
except:
pass
finally:
try:
curses.endwin()
except:
pass
try:
os.system("clear" if os.name != "nt" else "cls")
except:
pass
for sock in [self.client_sock, self.server_sock]:
if sock:
try:
sock.close()
except:
pass
for thread in [self.thread, self.socket_thread]:
if thread and thread.is_alive():
thread.join(timeout=1)
except:
pass
def load_file(self):
"""Load file with enhanced error handling."""
try:
if os.path.exists(self.filename):
with open(self.filename, encoding="utf-8", errors="replace") as f:
content = f.read()
self.lines = content.splitlines() if content else [""]
else:
self.lines = [""]
except Exception:
self.lines = [""]
def _save_file(self):
"""Save file with enhanced error handling and backup."""
with self.lock:
if not self.filename:
return False
try:
if os.path.exists(self.filename):
backup_name = f"{self.filename}.bak"
try:
with open(self.filename, encoding="utf-8") as f:
backup_content = f.read()
with open(backup_name, "w", encoding="utf-8") as f:
f.write(backup_content)
except:
pass
with open(self.filename, "w", encoding="utf-8") as f:
f.write("\n".join(self.lines))
return True
except Exception:
return False
def save_file(self):
"""Thread-safe save file command."""
if not self.running:
return self._save_file()
try:
self.client_sock.send(pickle.dumps({"command": "save_file"}))
except:
return self._save_file()
def start(self):
"""Start the editor with enhanced error handling."""
if self.running:
return False
try:
self.running = True
self.socket_thread = threading.Thread(target=self.socket_listener, daemon=True)
self.socket_thread.start()
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
return True
except Exception as e:
self.running = False
self._cleanup()
raise RuntimeError(f"Failed to start editor: {e}")
def stop(self):
"""Stop the editor with proper cleanup."""
try:
if self.client_sock:
self.client_sock.send(pickle.dumps({"command": "stop"}))
except:
pass
self.running = False
time.sleep(0.1)
self._cleanup()
def run(self):
"""Run the main editor loop with exception handling."""
try:
curses.wrapper(self.main_loop)
except Exception:
self._exception_occurred = True
self._cleanup()
def main_loop(self, stdscr):
"""Main editor loop with enhanced error recovery."""
self.stdscr = stdscr
try:
curses.curs_set(1)
self.stdscr.keypad(True)
self.stdscr.timeout(100)
while self.running:
try:
while True:
try:
command = self.command_queue.get_nowait()
with self.lock:
self.execute_command(command)
except queue.Empty:
break
with self.lock:
self.draw()
try:
key = self.stdscr.getch()
if key != -1:
with self.lock:
self.handle_key(key)
except curses.error:
pass
except Exception:
pass
except Exception:
self._exception_occurred = True
finally:
self._cleanup()
def draw(self):
"""Draw the editor screen with error handling."""
try:
self.stdscr.clear()
height, width = self.stdscr.getmaxyx()
for i, line in enumerate(self.lines):
if i >= height - 1:
break
try:
display_line = line[: width - 1] if len(line) >= width else line
self.stdscr.addstr(i, 0, display_line)
except curses.error:
pass
status = f"{self.mode.upper()} | {self.filename or 'untitled'} | {self.cursor_y + 1}:{self.cursor_x + 1}"
if self.mode == "command":
status = self.command[: width - 1]
try:
self.stdscr.addstr(height - 1, 0, status[: width - 1])
except curses.error:
pass
cursor_x = min(self.cursor_x, width - 1)
cursor_y = min(self.cursor_y, height - 2)
try:
self.stdscr.move(cursor_y, cursor_x)
except curses.error:
pass
self.stdscr.refresh()
except Exception:
pass
def handle_key(self, key):
"""Handle keyboard input with error recovery."""
try:
if self.mode == "normal":
self.handle_normal(key)
elif self.mode == "insert":
self.handle_insert(key)
elif self.mode == "command":
self.handle_command(key)
except Exception:
pass
def handle_normal(self, key):
"""Handle normal mode keys."""
try:
if key == ord("h") or key == curses.KEY_LEFT:
self.move_cursor(0, -1)
elif key == ord("j") or key == curses.KEY_DOWN:
self.move_cursor(1, 0)
elif key == ord("k") or key == curses.KEY_UP:
self.move_cursor(-1, 0)
elif key == ord("l") or key == curses.KEY_RIGHT:
self.move_cursor(0, 1)
elif key == ord("i"):
self.mode = "insert"
elif key == ord(":"):
self.mode = "command"
self.command = ":"
elif key == ord("x"):
self._delete_char()
elif key == ord("a"):
self.cursor_x = min(self.cursor_x + 1, len(self.lines[self.cursor_y]))
self.mode = "insert"
elif key == ord("A"):
self.cursor_x = len(self.lines[self.cursor_y])
self.mode = "insert"
elif key == ord("o"):
self._insert_line(self.cursor_y + 1, "")
self.cursor_y += 1
self.cursor_x = 0
self.mode = "insert"
elif key == ord("O"):
self._insert_line(self.cursor_y, "")
self.cursor_x = 0
self.mode = "insert"
elif key == ord("d") and self.prev_key == ord("d"):
if self.cursor_y < len(self.lines):
self.clipboard = self.lines[self.cursor_y]
self._delete_line(self.cursor_y)
if self.cursor_y >= len(self.lines):
self.cursor_y = max(0, len(self.lines) - 1)
self.cursor_x = 0
elif key == ord("y") and self.prev_key == ord("y"):
if self.cursor_y < len(self.lines):
self.clipboard = self.lines[self.cursor_y]
elif key == ord("p"):
self._insert_line(self.cursor_y + 1, self.clipboard)
self.cursor_y += 1
self.cursor_x = 0
elif key == ord("P"):
self._insert_line(self.cursor_y, self.clipboard)
self.cursor_x = 0
elif key == ord("w"):
self._move_word_forward()
elif key == ord("b"):
self._move_word_backward()
elif key == ord("0"):
self.cursor_x = 0
elif key == ord("$"):
self.cursor_x = len(self.lines[self.cursor_y])
elif key == ord("g"):
if self.prev_key == ord("g"):
self.cursor_y = 0
self.cursor_x = 0
elif key == ord("G"):
self.cursor_y = max(0, len(self.lines) - 1)
self.cursor_x = 0
elif key == ord("u"):
self.undo()
elif key == 18:
self.redo()
elif key == 19:
self._save_file()
self.prev_key = key
except Exception:
pass
def _move_word_forward(self):
"""Move cursor forward by word."""
if self.cursor_y >= len(self.lines):
return
line = self.lines[self.cursor_y]
i = self.cursor_x
while i < len(line) and (not line[i].isalnum()):
i += 1
while i < len(line) and line[i].isalnum():
i += 1
self.cursor_x = i
def _move_word_backward(self):
"""Move cursor backward by word."""
if self.cursor_y >= len(self.lines):
return
line = self.lines[self.cursor_y]
i = max(0, self.cursor_x - 1)
while i >= 0 and (not line[i].isalnum()):
i -= 1
while i >= 0 and line[i].isalnum():
i -= 1
self.cursor_x = max(0, i + 1)
def handle_insert(self, key):
"""Handle insert mode keys."""
try:
if key == 27:
self.mode = "normal"
if self.cursor_x > 0:
self.cursor_x -= 1
elif key == 10 or key == 13:
self._split_line()
elif key == curses.KEY_BACKSPACE or key == 127 or key == 8:
self._backspace()
elif 32 <= key <= 126:
char = chr(key)
self._insert_char(char)
except Exception:
pass
def handle_command(self, key):
"""Handle command mode keys."""
try:
if key == 10 or key == 13:
cmd = self.command[1:].strip()
if cmd in ["q", "q!"]:
self.running = False
elif cmd == "w":
self._save_file()
elif cmd in ["wq", "wq!", "x", "xq", "x!"]:
self._save_file()
self.running = False
elif cmd.startswith("w "):
self.filename = cmd[2:].strip()
self._save_file()
self.mode = "normal"
self.command = ""
elif key == 27:
self.mode = "normal"
self.command = ""
elif key == curses.KEY_BACKSPACE or key == 127 or key == 8:
if len(self.command) > 1:
self.command = self.command[:-1]
elif 32 <= key <= 126:
self.command += chr(key)
except Exception:
self.mode = "normal"
self.command = ""
def move_cursor(self, dy, dx):
"""Move cursor with bounds checking."""
if not self.lines:
self.lines = [""]
new_y = self.cursor_y + dy
new_x = self.cursor_x + dx
if 0 <= new_y < len(self.lines):
self.cursor_y = new_y
max_x = len(self.lines[self.cursor_y])
self.cursor_x = max(0, min(new_x, max_x))
elif new_y < 0:
self.cursor_y = 0
self.cursor_x = 0
elif new_y >= len(self.lines):
self.cursor_y = max(0, len(self.lines) - 1)
self.cursor_x = len(self.lines[self.cursor_y])
def save_state(self):
"""Save current state for undo."""
with self.lock:
state = {
"lines": list(self.lines),
"cursor_y": self.cursor_y,
"cursor_x": self.cursor_x,
}
self.undo_stack.append(state)
if len(self.undo_stack) > self.max_undo:
self.undo_stack.pop(0)
self.redo_stack.clear()
def undo(self):
"""Undo last change."""
with self.lock:
if self.undo_stack:
current_state = {
"lines": list(self.lines),
"cursor_y": self.cursor_y,
"cursor_x": self.cursor_x,
}
self.redo_stack.append(current_state)
state = self.undo_stack.pop()
self.lines = state["lines"]
self.cursor_y = min(state["cursor_y"], len(self.lines) - 1)
self.cursor_x = min(
state["cursor_x"], len(self.lines[self.cursor_y]) if self.lines else 0
)
def redo(self):
"""Redo last undone change."""
with self.lock:
if self.redo_stack:
current_state = {
"lines": list(self.lines),
"cursor_y": self.cursor_y,
"cursor_x": self.cursor_x,
}
self.undo_stack.append(current_state)
state = self.redo_stack.pop()
self.lines = state["lines"]
self.cursor_y = min(state["cursor_y"], len(self.lines) - 1)
self.cursor_x = min(
state["cursor_x"], len(self.lines[self.cursor_y]) if self.lines else 0
)
def _insert_text(self, text):
"""Insert text at cursor position."""
if not text:
return
self.save_state()
lines = text.split("\n")
if len(lines) == 1:
if self.cursor_y >= len(self.lines):
self.lines.append("")
self.cursor_y = len(self.lines) - 1
line = self.lines[self.cursor_y]
self.lines[self.cursor_y] = line[: self.cursor_x] + text + line[self.cursor_x :]
self.cursor_x += len(text)
else:
if self.cursor_y >= len(self.lines):
self.lines.append("")
self.cursor_y = len(self.lines) - 1
first = self.lines[self.cursor_y][: self.cursor_x] + lines[0]
last = lines[-1] + self.lines[self.cursor_y][self.cursor_x :]
self.lines[self.cursor_y] = first
for i in range(1, len(lines) - 1):
self.lines.insert(self.cursor_y + i, lines[i])
self.lines.insert(self.cursor_y + len(lines) - 1, last)
self.cursor_y += len(lines) - 1
self.cursor_x = len(lines[-1])
def insert_text(self, text):
"""Thread-safe text insertion."""
try:
self.client_sock.send(pickle.dumps({"command": "insert_text", "text": text}))
except:
with self.lock:
self._insert_text(text)
def _delete_char(self):
"""Delete character at cursor."""
self.save_state()
if self.cursor_y < len(self.lines) and self.cursor_x < len(self.lines[self.cursor_y]):
line = self.lines[self.cursor_y]
self.lines[self.cursor_y] = line[: self.cursor_x] + line[self.cursor_x + 1 :]
def delete_char(self):
"""Thread-safe character deletion."""
try:
self.client_sock.send(pickle.dumps({"command": "delete_char"}))
except:
with self.lock:
self._delete_char()
def _insert_char(self, char):
"""Insert single character."""
if self.cursor_y >= len(self.lines):
self.lines.append("")
self.cursor_y = len(self.lines) - 1
line = self.lines[self.cursor_y]
self.lines[self.cursor_y] = line[: self.cursor_x] + char + line[self.cursor_x :]
self.cursor_x += 1
def _split_line(self):
"""Split line at cursor."""
if self.cursor_y >= len(self.lines):
self.lines.append("")
self.cursor_y = len(self.lines) - 1
line = self.lines[self.cursor_y]
self.lines[self.cursor_y] = line[: self.cursor_x]
self.lines.insert(self.cursor_y + 1, line[self.cursor_x :])
self.cursor_y += 1
self.cursor_x = 0
def _backspace(self):
"""Handle backspace key."""
if self.cursor_x > 0:
line = self.lines[self.cursor_y]
self.lines[self.cursor_y] = line[: self.cursor_x - 1] + line[self.cursor_x :]
self.cursor_x -= 1
elif self.cursor_y > 0:
prev_len = len(self.lines[self.cursor_y - 1])
self.lines[self.cursor_y - 1] += self.lines[self.cursor_y]
del self.lines[self.cursor_y]
self.cursor_y -= 1
self.cursor_x = prev_len
def _insert_line(self, line_num, text):
"""Insert a new line."""
self.save_state()
line_num = max(0, min(line_num, len(self.lines)))
self.lines.insert(line_num, text)
def _delete_line(self, line_num):
"""Delete a line."""
self.save_state()
if 0 <= line_num < len(self.lines):
if len(self.lines) > 1:
del self.lines[line_num]
else:
self.lines = [""]
def _set_text(self, text):
"""Set entire text content."""
self.save_state()
self.lines = text.splitlines() if text else [""]
self.cursor_y = 0
self.cursor_x = 0
def set_text(self, text):
"""Thread-safe text setting."""
if not self.running:
with self.lock:
self._set_text(text)
return
try:
self.client_sock.send(pickle.dumps({"command": "set_text", "text": text}))
except:
with self.lock:
self._set_text(text)
def _goto_line(self, line_num):
"""Go to specific line."""
line_num = max(0, min(line_num - 1, len(self.lines) - 1))
self.cursor_y = line_num
self.cursor_x = 0
def goto_line(self, line_num):
"""Thread-safe goto line."""
try:
self.client_sock.send(pickle.dumps({"command": "goto_line", "line_num": line_num}))
except:
with self.lock:
self._goto_line(line_num)
def get_text(self):
"""Get entire text content."""
try:
self.client_sock.send(pickle.dumps({"command": "get_text"}))
data = self.client_sock.recv(65536)
return pickle.loads(data)
except:
with self.lock:
return "\n".join(self.lines)
def get_cursor(self):
"""Get cursor position."""
try:
self.client_sock.send(pickle.dumps({"command": "get_cursor"}))
data = self.client_sock.recv(4096)
return pickle.loads(data)
except:
with self.lock:
return (self.cursor_y, self.cursor_x)
def get_file_info(self):
"""Get file information."""
try:
self.client_sock.send(pickle.dumps({"command": "get_file_info"}))
data = self.client_sock.recv(4096)
return pickle.loads(data)
except:
with self.lock:
return {
"filename": self.filename,
"lines": len(self.lines),
"cursor": (self.cursor_y, self.cursor_x),
"mode": self.mode,
}
def socket_listener(self):
"""Listen for socket commands with error handling."""
while self.running:
try:
data = self.server_sock.recv(65536)
if not data:
break
command = pickle.loads(data)
self.command_queue.put(command)
except socket.timeout:
continue
except OSError:
if self.running:
continue
else:
break
except Exception:
continue
def execute_command(self, command):
"""Execute command with error handling."""
try:
cmd = command.get("command")
if cmd == "insert_text":
self._insert_text(command.get("text", ""))
elif cmd == "delete_char":
self._delete_char()
elif cmd == "save_file":
self._save_file()
elif cmd == "set_text":
self._set_text(command.get("text", ""))
elif cmd == "goto_line":
self._goto_line(command.get("line_num", 1))
elif cmd == "get_text":
result = "\n".join(self.lines)
self.server_sock.send(pickle.dumps(result))
elif cmd == "get_cursor":
result = (self.cursor_y, self.cursor_x)
self.server_sock.send(pickle.dumps(result))
elif cmd == "get_file_info":
result = {
"filename": self.filename,
"lines": len(self.lines),
"cursor": (self.cursor_y, self.cursor_x),
"mode": self.mode,
}
self.server_sock.send(pickle.dumps(result))
elif cmd == "stop":
self.running = False
except Exception:
pass
def move_cursor_to(self, y, x):
"""Move cursor to specific position."""
with self.lock:
self.cursor_y = max(0, min(y, len(self.lines) - 1))
self.cursor_x = max(0, min(x, len(self.lines[self.cursor_y])))
def get_line(self, line_num):
"""Get specific line."""
with self.lock:
if 0 <= line_num < len(self.lines):
return self.lines[line_num]
return None
def get_lines(self, start, end):
"""Get range of lines."""
with self.lock:
start = max(0, start)
end = min(end, len(self.lines))
return self.lines[start:end]
def insert_at_line(self, line_num, text):
"""Insert text at specific line."""
with self.lock:
self.save_state()
line_num = max(0, min(line_num, len(self.lines)))
self.lines.insert(line_num, text)
def delete_lines(self, start, end):
"""Delete range of lines."""
with self.lock:
self.save_state()
start = max(0, start)
end = min(end, len(self.lines))
if start < end:
del self.lines[start:end]
if not self.lines:
self.lines = [""]
def replace_text(self, start_line, start_col, end_line, end_col, new_text):
"""Replace text in range."""
with self.lock:
self.save_state()
start_line = max(0, min(start_line, len(self.lines) - 1))
end_line = max(0, min(end_line, len(self.lines) - 1))
if start_line == end_line:
line = self.lines[start_line]
start_col = max(0, min(start_col, len(line)))
end_col = max(0, min(end_col, len(line)))
self.lines[start_line] = line[:start_col] + new_text + line[end_col:]
else:
first_part = self.lines[start_line][:start_col]
last_part = self.lines[end_line][end_col:]
new_lines = new_text.split("\n")
self.lines[start_line] = first_part + new_lines[0]
del self.lines[start_line + 1 : end_line + 1]
for i, new_line in enumerate(new_lines[1:], 1):
self.lines.insert(start_line + i, new_line)
if len(new_lines) > 1:
self.lines[start_line + len(new_lines) - 1] += last_part
else:
self.lines[start_line] += last_part
def search(self, pattern, start_line=0):
"""Search for pattern in text."""
with self.lock:
results = []
try:
for i in range(start_line, len(self.lines)):
matches = re.finditer(pattern, self.lines[i])
for match in matches:
results.append((i, match.start(), match.end()))
except re.error:
pass
return results
def replace_all(self, search_text, replace_text):
"""Replace all occurrences of text."""
with self.lock:
self.save_state()
for i in range(len(self.lines)):
self.lines[i] = self.lines[i].replace(search_text, replace_text)
def select_range(self, start_line, start_col, end_line, end_col):
"""Select text range."""
with self.lock:
self.selection_start = (start_line, start_col)
self.selection_end = (end_line, end_col)
def get_selection(self):
"""Get selected text."""
with self.lock:
if not self.selection_start or not self.selection_end:
return ""
sl, sc = self.selection_start
el, ec = self.selection_end
if sl < 0 or sl >= len(self.lines) or el < 0 or (el >= len(self.lines)):
return ""
if sl == el:
return self.lines[sl][sc:ec]
result = [self.lines[sl][sc:]]
for i in range(sl + 1, el):
if i < len(self.lines):
result.append(self.lines[i])
if el < len(self.lines):
result.append(self.lines[el][:ec])
return "\n".join(result)
def delete_selection(self):
"""Delete selected text."""
with self.lock:
if not self.selection_start or not self.selection_end:
return
self.save_state()
sl, sc = self.selection_start
el, ec = self.selection_end
if 0 <= sl < len(self.lines) and 0 <= el < len(self.lines):
self.replace_text(sl, sc, el, ec, "")
self.selection_start = None
self.selection_end = None
def apply_search_replace_block(self, search_block, replace_block):
"""Apply search and replace on block."""
with self.lock:
self.save_state()
search_lines = search_block.splitlines()
replace_lines = replace_block.splitlines()
for i in range(len(self.lines) - len(search_lines) + 1):
match = True
for j, search_line in enumerate(search_lines):
if i + j >= len(self.lines):
match = False
break
if self.lines[i + j].strip() != search_line.strip():
match = False
break
if match:
indent = len(self.lines[i]) - len(self.lines[i].lstrip())
indented_replace = [" " * indent + line for line in replace_lines]
self.lines[i : i + len(search_lines)] = indented_replace
return True
return False
def apply_diff(self, diff_text):
"""Apply unified diff."""
with self.lock:
self.save_state()
try:
lines = diff_text.split("\n")
start_line = 0
for line in lines:
if line.startswith("@@"):
match = re.search("@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@", line)
if match:
start_line = int(match.group(1)) - 1
elif line.startswith("-"):
if start_line < len(self.lines):
del self.lines[start_line]
elif line.startswith("+"):
self.lines.insert(start_line, line[1:])
start_line += 1
elif line and (not line.startswith("\\")):
start_line += 1
except Exception:
pass
def get_context(self, line_num, context_lines=3):
"""Get lines around specific line."""
with self.lock:
start = max(0, line_num - context_lines)
end = min(len(self.lines), line_num + context_lines + 1)
return self.get_lines(start, end)
def count_lines(self):
"""Count total lines."""
with self.lock:
return len(self.lines)
def close(self):
"""Close the editor."""
self.stop()
def is_running(self):
"""Check if editor is running."""
return self.running
def wait(self, timeout=None):
"""Wait for editor to finish."""
if self.thread and self.thread.is_alive():
self.thread.join(timeout=timeout)
return not self.thread.is_alive()
return True
def __enter__(self):
"""Context manager entry."""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Context manager exit."""
if exc_type:
self._exception_occurred = True
self.stop()
return False
def __del__(self):
"""Destructor for cleanup."""
self._cleanup()
def main():
"""Main entry point with error handling."""
editor = None
try:
filename = sys.argv[1] if len(sys.argv) > 1 else None
auto_save = "--auto-save" in sys.argv
editor = RPEditor(filename, auto_save=auto_save)
editor.start()
if editor.thread:
editor.thread.join()
except KeyboardInterrupt:
pass
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
finally:
if editor:
editor.stop()
os.system("clear" if os.name != "nt" else "cls")
if __name__ == "__main__":
if "rpe" in sys.argv[0]:
main()
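
Because RPEditor is also a context manager with a socket-backed command queue, it can be driven programmatically while the curses UI runs in its background thread; a sketch with an illustrative path:

with RPEditor("/tmp/scratch.txt", auto_save=True) as ed:
    ed.set_text("first line\nsecond line")  # queued over the command socket
    ed.goto_line(2)
    ed.insert_text("inserted ")
    text = ed.get_text()  # round-trips through the socket listener
    ed.save_file()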

600
rp/editor2.py Normal file

@@ -0,0 +1,600 @@
import curses
import pickle
import queue
import re
import socket
import sys
import threading
class RPEditor:
def __init__(self, filename=None):
self.filename = filename
self.lines = [""]
self.cursor_y = 0
self.cursor_x = 0
self.mode = "normal"
self.command = ""
self.stdscr = None
self.running = False
self.thread = None
self.socket_thread = None
self.prev_key = None
self.clipboard = ""
self.undo_stack = []
self.redo_stack = []
self.selection_start = None
self.selection_end = None
self.max_undo = 100
self.lock = threading.RLock()
self.client_sock, self.server_sock = socket.socketpair()
self.command_queue = queue.Queue()
if filename:
self.load_file()
def load_file(self):
try:
with open(self.filename) as f:
self.lines = f.read().splitlines()
if not self.lines:
self.lines = [""]
        except Exception:
            self.lines = [""]
def _save_file(self):
with self.lock:
if self.filename:
with open(self.filename, "w") as f:
f.write("\n".join(self.lines))
def save_file(self):
self.client_sock.send(pickle.dumps({"command": "save_file"}))
def start(self):
self.running = True
self.socket_thread = threading.Thread(target=self.socket_listener)
self.socket_thread.start()
self.thread = threading.Thread(target=self.run)
self.thread.start()
def stop(self):
self.client_sock.send(pickle.dumps({"command": "stop"}))
self.running = False
if self.stdscr:
curses.endwin()
if self.thread:
self.thread.join()
if self.socket_thread:
self.socket_thread.join()
self.client_sock.close()
self.server_sock.close()
def run(self):
curses.wrapper(self.main_loop)
def main_loop(self, stdscr):
self.stdscr = stdscr
curses.curs_set(1)
self.stdscr.keypad(True)
while self.running:
with self.lock:
self.draw()
try:
while True:
command = self.command_queue.get_nowait()
with self.lock:
self.execute_command(command)
except queue.Empty:
pass
key = self.stdscr.getch()
with self.lock:
self.handle_key(key)
def draw(self):
self.stdscr.clear()
height, width = self.stdscr.getmaxyx()
for i, line in enumerate(self.lines):
if i < height - 1:
self.stdscr.addstr(i, 0, line[:width])
status = f"{self.mode.upper()} | {self.filename or 'untitled'} | {self.cursor_y + 1}:{self.cursor_x + 1}"
self.stdscr.addstr(height - 1, 0, status[:width])
if self.mode == "command":
self.stdscr.addstr(height - 1, 0, self.command[:width])
self.stdscr.move(self.cursor_y, min(self.cursor_x, width - 1))
self.stdscr.refresh()
def handle_key(self, key):
if self.mode == "normal":
self.handle_normal(key)
elif self.mode == "insert":
self.handle_insert(key)
elif self.mode == "command":
self.handle_command(key)
def handle_normal(self, key):
if key == ord("h") or key == curses.KEY_LEFT:
self.move_cursor(0, -1)
elif key == ord("j") or key == curses.KEY_DOWN:
self.move_cursor(1, 0)
elif key == ord("k") or key == curses.KEY_UP:
self.move_cursor(-1, 0)
elif key == ord("l") or key == curses.KEY_RIGHT:
self.move_cursor(0, 1)
elif key == ord("i"):
self.mode = "insert"
elif key == ord(":"):
self.mode = "command"
self.command = ":"
elif key == ord("x"):
self._delete_char()
elif key == ord("a"):
self.cursor_x += 1
self.mode = "insert"
elif key == ord("A"):
self.cursor_x = len(self.lines[self.cursor_y])
self.mode = "insert"
elif key == ord("o"):
self._insert_line(self.cursor_y + 1, "")
self.cursor_y += 1
self.cursor_x = 0
self.mode = "insert"
elif key == ord("O"):
self._insert_line(self.cursor_y, "")
self.cursor_x = 0
self.mode = "insert"
elif key == ord("d") and self.prev_key == ord("d"):
self.clipboard = self.lines[self.cursor_y]
self._delete_line(self.cursor_y)
if self.cursor_y >= len(self.lines):
self.cursor_y = len(self.lines) - 1
self.cursor_x = 0
elif key == ord("y") and self.prev_key == ord("y"):
self.clipboard = self.lines[self.cursor_y]
elif key == ord("p"):
self._insert_line(self.cursor_y + 1, self.clipboard)
self.cursor_y += 1
self.cursor_x = 0
elif key == ord("P"):
self._insert_line(self.cursor_y, self.clipboard)
self.cursor_x = 0
elif key == ord("w"):
line = self.lines[self.cursor_y]
i = self.cursor_x
while i < len(line) and (not line[i].isalnum()):
i += 1
while i < len(line) and line[i].isalnum():
i += 1
self.cursor_x = i
elif key == ord("b"):
line = self.lines[self.cursor_y]
i = self.cursor_x - 1
while i >= 0 and (not line[i].isalnum()):
i -= 1
while i >= 0 and line[i].isalnum():
i -= 1
self.cursor_x = i + 1
elif key == ord("0"):
self.cursor_x = 0
elif key == ord("$"):
self.cursor_x = len(self.lines[self.cursor_y])
elif key == ord("g"):
if self.prev_key == ord("g"):
self.cursor_y = 0
self.cursor_x = 0
elif key == ord("G"):
self.cursor_y = len(self.lines) - 1
self.cursor_x = 0
elif key == ord("u"):
self.undo()
elif key == ord("r") and self.prev_key == 18:
self.redo()
self.prev_key = key
def handle_insert(self, key):
if key == 27:
self.mode = "normal"
if self.cursor_x > 0:
self.cursor_x -= 1
elif key == 10:
self._split_line()
elif key == curses.KEY_BACKSPACE or key == 127:
self._backspace()
elif 32 <= key <= 126:
char = chr(key)
self._insert_char(char)
def handle_command(self, key):
if key == 10:
cmd = self.command[1:]
if cmd == "q" or cmd == "q!":
self.running = False
elif cmd == "w":
self._save_file()
elif cmd == "wq" or cmd == "wq!" or cmd == "x" or (cmd == "xq") or (cmd == "x!"):
self._save_file()
self.running = False
elif cmd.startswith("w "):
self.filename = cmd[2:]
self._save_file()
elif cmd == "wq":
self._save_file()
self.running = False
self.mode = "normal"
self.command = ""
elif key == 27:
self.mode = "normal"
self.command = ""
elif key == curses.KEY_BACKSPACE or key == 127:
if len(self.command) > 1:
self.command = self.command[:-1]
elif 32 <= key <= 126:
self.command += chr(key)
def move_cursor(self, dy, dx):
new_y = self.cursor_y + dy
new_x = self.cursor_x + dx
if 0 <= new_y < len(self.lines):
self.cursor_y = new_y
self.cursor_x = max(0, min(new_x, len(self.lines[self.cursor_y])))
def save_state(self):
with self.lock:
state = {
"lines": list(self.lines),
"cursor_y": self.cursor_y,
"cursor_x": self.cursor_x,
}
self.undo_stack.append(state)
if len(self.undo_stack) > self.max_undo:
self.undo_stack.pop(0)
self.redo_stack.clear()
def undo(self):
with self.lock:
if self.undo_stack:
current_state = {
"lines": list(self.lines),
"cursor_y": self.cursor_y,
"cursor_x": self.cursor_x,
}
self.redo_stack.append(current_state)
state = self.undo_stack.pop()
self.lines = state["lines"]
self.cursor_y = state["cursor_y"]
self.cursor_x = state["cursor_x"]
def redo(self):
with self.lock:
if self.redo_stack:
current_state = {
"lines": list(self.lines),
"cursor_y": self.cursor_y,
"cursor_x": self.cursor_x,
}
self.undo_stack.append(current_state)
state = self.redo_stack.pop()
self.lines = state["lines"]
self.cursor_y = state["cursor_y"]
self.cursor_x = state["cursor_x"]
def _insert_text(self, text):
self.save_state()
lines = text.split("\n")
if len(lines) == 1:
self.lines[self.cursor_y] = (
self.lines[self.cursor_y][: self.cursor_x]
+ text
+ self.lines[self.cursor_y][self.cursor_x :]
)
self.cursor_x += len(text)
else:
first = self.lines[self.cursor_y][: self.cursor_x] + lines[0]
last = lines[-1] + self.lines[self.cursor_y][self.cursor_x :]
self.lines[self.cursor_y] = first
for i in range(1, len(lines) - 1):
self.lines.insert(self.cursor_y + i, lines[i])
self.lines.insert(self.cursor_y + len(lines) - 1, last)
self.cursor_y += len(lines) - 1
self.cursor_x = len(lines[-1])
def insert_text(self, text):
self.client_sock.send(pickle.dumps({"command": "insert_text", "text": text}))
def _delete_char(self):
self.save_state()
if self.cursor_x < len(self.lines[self.cursor_y]):
self.lines[self.cursor_y] = (
self.lines[self.cursor_y][: self.cursor_x]
+ self.lines[self.cursor_y][self.cursor_x + 1 :]
)
def delete_char(self):
self.client_sock.send(pickle.dumps({"command": "delete_char"}))
def _insert_char(self, char):
self.lines[self.cursor_y] = (
self.lines[self.cursor_y][: self.cursor_x]
+ char
+ self.lines[self.cursor_y][self.cursor_x :]
)
self.cursor_x += 1
def _split_line(self):
line = self.lines[self.cursor_y]
self.lines[self.cursor_y] = line[: self.cursor_x]
self.lines.insert(self.cursor_y + 1, line[self.cursor_x :])
self.cursor_y += 1
self.cursor_x = 0
def _backspace(self):
if self.cursor_x > 0:
self.lines[self.cursor_y] = (
self.lines[self.cursor_y][: self.cursor_x - 1]
+ self.lines[self.cursor_y][self.cursor_x :]
)
self.cursor_x -= 1
elif self.cursor_y > 0:
prev_len = len(self.lines[self.cursor_y - 1])
self.lines[self.cursor_y - 1] += self.lines[self.cursor_y]
del self.lines[self.cursor_y]
self.cursor_y -= 1
self.cursor_x = prev_len
def _insert_line(self, line_num, text):
self.save_state()
line_num = max(0, min(line_num, len(self.lines)))
self.lines.insert(line_num, text)
def _delete_line(self, line_num):
self.save_state()
if 0 <= line_num < len(self.lines):
if len(self.lines) > 1:
del self.lines[line_num]
else:
self.lines = [""]
def _set_text(self, text):
self.save_state()
self.lines = text.splitlines() if text else [""]
self.cursor_y = 0
self.cursor_x = 0
def set_text(self, text):
self.client_sock.send(pickle.dumps({"command": "set_text", "text": text}))
def _goto_line(self, line_num):
line_num = max(0, min(line_num, len(self.lines) - 1))
self.cursor_y = line_num
self.cursor_x = 0
def goto_line(self, line_num):
self.client_sock.send(pickle.dumps({"command": "goto_line", "line_num": line_num}))
def get_text(self):
self.client_sock.send(pickle.dumps({"command": "get_text"}))
try:
return pickle.loads(self.client_sock.recv(4096))
except (OSError, EOFError, pickle.UnpicklingError):
return ""
def get_cursor(self):
self.client_sock.send(pickle.dumps({"command": "get_cursor"}))
try:
return pickle.loads(self.client_sock.recv(4096))
except (OSError, EOFError, pickle.UnpicklingError):
return (0, 0)
def get_file_info(self):
self.client_sock.send(pickle.dumps({"command": "get_file_info"}))
try:
return pickle.loads(self.client_sock.recv(4096))
except (OSError, EOFError, pickle.UnpicklingError):
return {}
def socket_listener(self):
while self.running:
try:
data = self.server_sock.recv(4096)
if not data:
break
command = pickle.loads(data)
self.command_queue.put(command)
except OSError:
break
def execute_command(self, command):
cmd = command.get("command")
if cmd == "insert_text":
self._insert_text(command["text"])
elif cmd == "delete_char":
self._delete_char()
elif cmd == "save_file":
self._save_file()
elif cmd == "set_text":
self._set_text(command["text"])
elif cmd == "goto_line":
self._goto_line(command["line_num"])
elif cmd == "get_text":
result = "\n".join(self.lines)
try:
self.server_sock.send(pickle.dumps(result))
except OSError:
pass
elif cmd == "get_cursor":
result = (self.cursor_y, self.cursor_x)
try:
self.server_sock.send(pickle.dumps(result))
except OSError:
pass
elif cmd == "get_file_info":
result = {
"filename": self.filename,
"lines": len(self.lines),
"cursor": (self.cursor_y, self.cursor_x),
"mode": self.mode,
}
try:
self.server_sock.send(pickle.dumps(result))
except OSError:
pass
elif cmd == "stop":
self.running = False
def move_cursor_to(self, y, x):
with self.lock:
self.cursor_y = max(0, min(y, len(self.lines) - 1))
self.cursor_x = max(0, min(x, len(self.lines[self.cursor_y])))
def get_line(self, line_num):
with self.lock:
if 0 <= line_num < len(self.lines):
return self.lines[line_num]
return None
def get_lines(self, start, end):
with self.lock:
start = max(0, start)
end = min(end, len(self.lines))
return self.lines[start:end]
def insert_at_line(self, line_num, text):
with self.lock:
self.save_state()
line_num = max(0, min(line_num, len(self.lines)))
self.lines.insert(line_num, text)
def delete_lines(self, start, end):
with self.lock:
self.save_state()
start = max(0, start)
end = min(end, len(self.lines))
if start < end:
del self.lines[start:end]
if not self.lines:
self.lines = [""]
def replace_text(self, start_line, start_col, end_line, end_col, new_text):
with self.lock:
self.save_state()
if start_line == end_line:
line = self.lines[start_line]
self.lines[start_line] = line[:start_col] + new_text + line[end_col:]
else:
first_part = self.lines[start_line][:start_col]
last_part = self.lines[end_line][end_col:]
new_lines = new_text.split("\n")
self.lines[start_line] = first_part + new_lines[0]
del self.lines[start_line + 1 : end_line + 1]
for i, new_line in enumerate(new_lines[1:], 1):
self.lines.insert(start_line + i, new_line)
if len(new_lines) > 1:
self.lines[start_line + len(new_lines) - 1] += last_part
else:
self.lines[start_line] += last_part
def search(self, pattern, start_line=0):
with self.lock:
results = []
for i in range(start_line, len(self.lines)):
matches = re.finditer(pattern, self.lines[i])
for match in matches:
results.append((i, match.start(), match.end()))
return results
def replace_all(self, search_text, replace_text):
with self.lock:
self.save_state()
for i in range(len(self.lines)):
self.lines[i] = self.lines[i].replace(search_text, replace_text)
def select_range(self, start_line, start_col, end_line, end_col):
with self.lock:
self.selection_start = (start_line, start_col)
self.selection_end = (end_line, end_col)
def get_selection(self):
with self.lock:
if not self.selection_start or not self.selection_end:
return ""
sl, sc = self.selection_start
el, ec = self.selection_end
if sl == el:
return self.lines[sl][sc:ec]
result = [self.lines[sl][sc:]]
for i in range(sl + 1, el):
result.append(self.lines[i])
result.append(self.lines[el][:ec])
return "\n".join(result)
def delete_selection(self):
with self.lock:
if not self.selection_start or not self.selection_end:
return
self.save_state()
sl, sc = self.selection_start
el, ec = self.selection_end
self.replace_text(sl, sc, el, ec, "")
self.selection_start = None
self.selection_end = None
def apply_search_replace_block(self, search_block, replace_block):
with self.lock:
self.save_state()
search_lines = search_block.splitlines()
replace_lines = replace_block.splitlines()
for i in range(len(self.lines) - len(search_lines) + 1):
match = True
for j, search_line in enumerate(search_lines):
if self.lines[i + j].strip() != search_line.strip():
match = False
break
if match:
indent = len(self.lines[i]) - len(self.lines[i].lstrip())
indented_replace = [" " * indent + line for line in replace_lines]
self.lines[i : i + len(search_lines)] = indented_replace
return True
return False
def apply_diff(self, diff_text):
with self.lock:
self.save_state()
lines = diff_text.split("\n")
for line in lines:
if line.startswith("@@"):
match = re.search("@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@", line)
if match:
start_line = int(match.group(1)) - 1
elif line.startswith("-"):
if start_line < len(self.lines):
del self.lines[start_line]
elif line.startswith("+"):
self.lines.insert(start_line, line[1:])
start_line += 1
def get_context(self, line_num, context_lines=3):
with self.lock:
start = max(0, line_num - context_lines)
end = min(len(self.lines), line_num + context_lines + 1)
return self.get_lines(start, end)
def count_lines(self):
with self.lock:
return len(self.lines)
def close(self):
self.running = False
self.stop()
if self.thread:
self.thread.join()
def main():
filename = sys.argv[1] if len(sys.argv) > 1 else None
editor = RPEditor(filename)
editor.start()
editor.thread.join()
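# Illustrative sketch: driving the editor from another thread through the
# socketpair command channel defined above. The reply to get_text() is served
# by the curses thread's command loop, so treat this as a sketch rather than
# a guarantee of non-blocking behaviour.
def _demo_remote_control():
editor = RPEditor("demo.txt")  # hypothetical file
editor.start()
editor.insert_text("hello from the control thread")
editor.save_file()
text = editor.get_text()  # round-trips a pickled request over the socketpair
editor.stop()
return text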
if __name__ == "__main__":
main()

457
rp/implode.py Normal file
View File

@ -0,0 +1,457 @@
"""
impLODE: A Python script to consolidate a multi-file Python project
into a single, runnable file.
It intelligently resolves local imports, hoists external dependencies to the top,
and preserves the core logic, using AST for safe transformations.
"""
import os
import sys
import ast
import argparse
import logging
import py_compile
from typing import Set, Dict, Optional, TextIO
logger = logging.getLogger("impLODE")
class ImportTransformer(ast.NodeTransformer):
"""
An AST transformer that visits Import and ImportFrom nodes.
On Pass 1 (Dry Run):
- Identifies all local vs. external imports.
- Recursively calls the main resolver for local modules.
- Stores external and __future__ imports in the Imploder instance.
On Pass 2 (Write Run):
- Recursively calls the main resolver for local modules.
- Removes all import statements (since they were hoisted in Pass 1).
"""
def __init__(
self,
imploder: "Imploder",
current_file_path: str,
f_out: Optional[TextIO],
is_dry_run: bool,
indent_level: int = 0,
):
self.imploder = imploder
self.current_file_path = current_file_path
self.current_dir = os.path.dirname(current_file_path)
self.f_out = f_out
self.is_dry_run = is_dry_run
self.indent = " " * indent_level
self.logger = logging.getLogger(self.__class__.__name__)
def _log_debug(self, msg: str):
"""Helper for indented debug logging."""
self.logger.debug(f"{self.indent} > {msg}")
def _find_local_module(self, module_name: str, level: int) -> Optional[str]:
"""
Tries to find the absolute path for a given module name and relative level.
Returns None if it's not a local module *and* cannot be found in site-packages.
"""
if not module_name:
base_path = self.current_dir
if level > 0:
for _ in range(level - 1):
base_path = os.path.dirname(base_path)
return base_path
base_path = self.current_dir
if level > 0:
for _ in range(level - 1):
base_path = os.path.dirname(base_path)
else:
base_path = self.imploder.root_dir
module_parts = module_name.split(".")
module_path = os.path.join(base_path, *module_parts)
package_init = os.path.join(module_path, "__init__.py")
if os.path.isfile(package_init):
self._log_debug(
f"Resolved '{module_name}' to local package: {os.path.relpath(package_init, self.imploder.root_dir)}"
)
return package_init
module_py = module_path + ".py"
if os.path.isfile(module_py):
self._log_debug(
f"Resolved '{module_name}' to local module: {os.path.relpath(module_py, self.imploder.root_dir)}"
)
return module_py
if level == 0:
self._log_debug(
f"Module '{module_name}' not found at primary path. Starting deep fallback search from {self.imploder.root_dir}..."
)
target_path_py = os.path.join(*module_parts) + ".py"
target_path_init = os.path.join(*module_parts, "__init__.py")
for dirpath, dirnames, filenames in os.walk(self.imploder.root_dir, topdown=True):
dirnames[:] = [
d
for d in dirnames
if not d.startswith(".")
and d not in ("venv", "env", ".venv", ".env", "__pycache__", "node_modules")
]
check_file_py = os.path.join(dirpath, target_path_py)
if os.path.isfile(check_file_py):
self._log_debug(
f"Fallback search found module: {os.path.relpath(check_file_py, self.imploder.root_dir)}"
)
return check_file_py
check_file_init = os.path.join(dirpath, target_path_init)
if os.path.isfile(check_file_init):
self._log_debug(
f"Fallback search found package: {os.path.relpath(check_file_init, self.imploder.root_dir)}"
)
return check_file_init
return None
def visit_Import(self, node: ast.Import) -> Optional[ast.AST]:
"""Handles `import foo` or `import foo.bar`."""
for alias in node.names:
module_path = self._find_local_module(alias.name, level=0)
if module_path:
self._log_debug(f"Resolving local import: `import {alias.name}`")
self.imploder.resolve_file(
file_abs_path=module_path,
f_out=self.f_out,
is_dry_run=self.is_dry_run,
indent_level=self.imploder.current_indent_level,
)
else:
self._log_debug(f"Found external import: `import {alias.name}`")
if self.is_dry_run:
key = f"import {alias.name}"
if key not in self.imploder.external_imports:
self.imploder.external_imports[key] = node
module_names = ", ".join([a.name for a in node.names])
new_call = ast.Call(
func=ast.Name(id="_implode_log_import", ctx=ast.Load()),
args=[
ast.Constant(value=module_names),
ast.Constant(value=0),
ast.Constant(value="import"),
],
keywords=[],
)
return ast.Expr(value=new_call)
def visit_ImportFrom(self, node: ast.ImportFrom) -> Optional[ast.AST]:
"""Handles `from foo import bar` or `from .foo import bar`."""
module_name_str = node.module or ""
import_type = "from-import"
if module_name_str == "__future__":
import_type = "future-import"
new_call = ast.Call(
func=ast.Name(id="_implode_log_import", ctx=ast.Load()),
args=[
ast.Constant(value=module_name_str),
ast.Constant(value=node.level),
ast.Constant(value=import_type),
],
keywords=[],
)
replacement_node = ast.Expr(value=new_call)
if node.module == "__future__":
self._log_debug("Found __future__ import. Hoisting to top.")
if self.is_dry_run:
key = ast.unparse(node)
self.imploder.future_imports[key] = node
return replacement_node
module_path = self._find_local_module(node.module or "", node.level)
if module_path and os.path.isdir(module_path):
self._log_debug(f"Resolving package import: `from {node.module or '.'} import ...`")
for alias in node.names:
package_module_py = os.path.join(module_path, alias.name + ".py")
package_module_init = os.path.join(module_path, alias.name, "__init__.py")
if os.path.isfile(package_module_py):
self._log_debug(
f"Found sub-module: {os.path.relpath(package_module_py, self.imploder.root_dir)}"
)
self.imploder.resolve_file(
file_abs_path=package_module_py,
f_out=self.f_out,
is_dry_run=self.is_dry_run,
indent_level=self.imploder.current_indent_level,
)
elif os.path.isfile(package_module_init):
self._log_debug(
f"Found sub-package: {os.path.relpath(package_module_init, self.imploder.root_dir)}"
)
self.imploder.resolve_file(
file_abs_path=package_module_init,
f_out=self.f_out,
is_dry_run=self.is_dry_run,
indent_level=self.imploder.current_indent_level,
)
else:
self.logger.warning(
f"{self.indent} > Could not resolve sub-module '{alias.name}' in package '{module_path}'"
)
return replacement_node
if module_path:
self._log_debug(f"Resolving local from-import: `from {node.module or '.'} ...`")
self.imploder.resolve_file(
file_abs_path=module_path,
f_out=self.f_out,
is_dry_run=self.is_dry_run,
indent_level=self.imploder.current_indent_level,
)
else:
self._log_debug(f"Found external from-import: `from {node.module or '.'} ...`")
if self.is_dry_run:
key = ast.unparse(node)
if key not in self.imploder.external_imports:
self.imploder.external_imports[key] = node
return replacement_node
class Imploder:
"""
Core class for handling the implosion process.
Manages state, file processing, and the two-pass analysis.
"""
def __init__(self, root_dir: str, enable_import_logging: bool = False):
self.root_dir = os.path.realpath(root_dir)
self.processed_files: Set[str] = set()
self.external_imports: Dict[str, ast.AST] = {}
self.future_imports: Dict[str, ast.AST] = {}
self.current_indent_level = 0
self.enable_import_logging = enable_import_logging
logger.info(f"Initialized Imploder with root: {self.root_dir}")
def implode(self, main_file_abs_path: str, output_file_path: str):
"""
Runs the full two-pass implosion process.
"""
if not os.path.isfile(main_file_abs_path):
logger.critical(f"Main file not found: {main_file_abs_path}")
sys.exit(1)
logger.info(
f"--- PASS 1: Analyzing dependencies from {os.path.relpath(main_file_abs_path, self.root_dir)} ---"
)
self.processed_files.clear()
self.external_imports.clear()
self.future_imports.clear()
try:
self.resolve_file(main_file_abs_path, f_out=None, is_dry_run=True, indent_level=0)
except Exception as e:
logger.critical(f"Error during analysis pass: {e}", exc_info=True)
sys.exit(1)
logger.info(
f"--- Analysis complete. Found {len(self.future_imports)} __future__ imports and {len(self.external_imports)} external modules. ---"
)
logger.info(f"--- PASS 2: Writing imploded file to {output_file_path} ---")
self.processed_files.clear()
try:
with open(output_file_path, "w", encoding="utf-8") as f_out:
f_out.write(f"#!/usr/bin/env python3\n")
f_out.write(f"# -*- coding: utf-8 -*-\n")
f_out.write(f"import logging\n")
f_out.write(f"\n# --- IMPLODED FILE: Generated by impLODE --- #\n")
f_out.write(
f"# --- Original main file: {os.path.relpath(main_file_abs_path, self.root_dir)} --- #\n"
)
if self.future_imports:
f_out.write("\n# --- Hoisted __future__ Imports --- #\n")
for node in self.future_imports.values():
f_out.write(f"{ast.unparse(node)}\n")
f_out.write("# --- End __future__ Imports --- #\n")
enable_logging_str = "True" if self.enable_import_logging else "False"
f_out.write("\n# --- impLODE Helper Function --- #\n")
f_out.write(f"_IMPLODE_LOGGING_ENABLED_ = {enable_logging_str}\n")
f_out.write("def _implode_log_import(module_name, level, import_type):\n")
f_out.write(
' """Dummy function to replace imports and prevent IndentationErrors."""\n'
)
f_out.write(" if _IMPLODE_LOGGING_ENABLED_:\n")
f_out.write(
" print(f\"[impLODE Logger]: Skipped {import_type}: module='{module_name}', level={level}\")\n"
)
f_out.write(" pass\n")
f_out.write("# --- End Helper Function --- #\n")
if self.external_imports:
f_out.write("\n# --- Hoisted External Imports --- #\n")
for node in self.external_imports.values():
f_out.write("try:\n")
f_out.write(f" {ast.unparse(node)}\n")
f_out.write("except ImportError:\n")
f_out.write(" pass\n")
f_out.write("# --- End External Imports --- #\n")
self.resolve_file(main_file_abs_path, f_out=f_out, is_dry_run=False, indent_level=0)
except IOError as e:
logger.critical(
f"Could not write to output file {output_file_path}: {e}", exc_info=True
)
sys.exit(1)
except Exception as e:
logger.critical(f"Error during write pass: {e}", exc_info=True)
sys.exit(1)
logger.info(f"--- Implosion complete! Output saved to {output_file_path} ---")
def resolve_file(
self, file_abs_path: str, f_out: Optional[TextIO], is_dry_run: bool, indent_level: int = 0
):
"""
Recursively resolves a single file.
- `is_dry_run=True`: Analyzes imports, populating `external_imports`.
- `is_dry_run=False`: Writes transformed code to `f_out`.
"""
self.current_indent_level = indent_level
indent = " " * indent_level
try:
file_real_path = os.path.realpath(file_abs_path)
rel_path = os.path.relpath(file_real_path, self.root_dir)
except ValueError:
logger.warning(
f"{indent}Cannot calculate relative path for {file_abs_path}. Using absolute."
)
rel_path = file_abs_path
if file_real_path in self.processed_files:
logger.debug(f"{indent}Skipping already processed file: {rel_path}")
return
logger.info(f"{indent}Processing: {rel_path}")
self.processed_files.add(file_real_path)
try:
with open(file_real_path, "r", encoding="utf-8") as f:
code = f.read()
except FileNotFoundError:
logger.error(f"{indent}File not found: {file_real_path}")
return
except UnicodeDecodeError:
logger.error(f"{indent}Could not decode file (not utf-8): {file_real_path}")
return
except Exception as e:
logger.error(f"{indent}Could not read file {file_real_path}: {e}")
return
try:
py_compile.compile(file_real_path, doraise=True, quiet=1)
logger.debug(f"{indent}Syntax OK (py_compile): {rel_path}")
except py_compile.PyCompileError as e:
logger.error(
f"{indent}Syntax error (py_compile) in {e.file} on line {e.lineno}: {e.msg}"
)
return
except Exception as e:
logger.error(f"{indent}Error during py_compile for {rel_path}: {e}")
return
try:
tree = ast.parse(code, filename=file_real_path)
except SyntaxError as e:
logger.error(f"{indent}Syntax error in {rel_path} on line {e.lineno}: {e.msg}")
return
except Exception as e:
logger.error(f"{indent}Could not parse AST for {rel_path}: {e}")
return
transformer = ImportTransformer(
imploder=self,
current_file_path=file_real_path,
f_out=f_out,
is_dry_run=is_dry_run,
indent_level=indent_level,
)
try:
new_tree = transformer.visit(tree)
except Exception as e:
logger.error(f"{indent}Error transforming AST for {rel_path}: {e}", exc_info=True)
return
if not is_dry_run and f_out:
try:
ast.fix_missing_locations(new_tree)
f_out.write(f"\n\n# --- Content from {rel_path} --- #\n")
f_out.write(ast.unparse(new_tree))
f_out.write(f"\n# --- End of {rel_path} --- #\n")
logger.info(f"{indent}Successfully wrote content from: {rel_path}")
except Exception as e:
logger.error(
f"{indent}Could not unparse or write AST for {rel_path}: {e}", exc_info=True
)
self.current_indent_level = indent_level
def setup_logging(level: int):
"""Configures the root logger."""
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(handler)
transformer_logger = logging.getLogger("ImportTransformer")
transformer_logger.setLevel(level)
transformer_logger.addHandler(handler)
def main():
"""Main entry point for the script."""
parser = argparse.ArgumentParser(
description="impLODE: Consolidate a multi-file Python project into one file.",
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"main_file", type=str, help="The main entry point .py file of your project."
)
parser.add_argument(
"-o",
"--output",
type=str,
default="imploded.py",
help="Path for the combined output file. (default: imploded.py)",
)
parser.add_argument(
"-r",
"--root",
type=str,
default=".",
help="The root directory of the project for resolving absolute imports. (default: current directory)",
)
log_group = parser.add_mutually_exclusive_group()
log_group.add_argument(
"-v",
"--verbose",
action="store_const",
dest="log_level",
const=logging.DEBUG,
default=logging.INFO,
help="Enable verbose DEBUG logging.",
)
log_group.add_argument(
"-q",
"--quiet",
action="store_const",
dest="log_level",
const=logging.WARNING,
help="Suppress INFO logs, showing only WARNINGS and ERRORS.",
)
parser.add_argument(
"--enable-import-logging",
action="store_true",
help="Enable runtime logging for removed import statements in the final imploded script.",
)
args = parser.parse_args()
setup_logging(args.log_level)
root_dir = os.path.abspath(args.root)
main_file_path = os.path.abspath(args.main_file)
output_file_path = os.path.abspath(args.output)
if not os.path.isdir(root_dir):
logger.critical(f"Root directory not found: {root_dir}")
sys.exit(1)
if not os.path.isfile(main_file_path):
logger.critical(f"Main file not found: {main_file_path}")
sys.exit(1)
if not main_file_path.startswith(root_dir):
logger.warning(f"Main file {main_file_path} is outside the specified root {root_dir}.")
logger.warning("This may cause issues with absolute import resolution.")
if main_file_path == output_file_path:
logger.critical("Output file cannot be the same as the main file.")
sys.exit(1)
imploder = Imploder(root_dir=root_dir, enable_import_logging=args.enable_import_logging)
imploder.implode(main_file_abs_path=main_file_path, output_file_path=output_file_path)
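# Illustrative programmatic use of the Imploder API above; the paths are
# hypothetical and stand in for a real project layout.
def _demo_implode():
imploder = Imploder(root_dir=os.path.abspath("src"), enable_import_logging=True)
imploder.implode(
main_file_abs_path=os.path.abspath("src/main.py"),
output_file_path=os.path.abspath("bundle.py"),
)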
if __name__ == "__main__":
main()

141
rp/input_handler.py Normal file
View File

@ -0,0 +1,141 @@
"""
Advanced input handler for RP Assistant with editor mode, file inclusion, and image support.
"""
import base64
import mimetypes
import re
import readline
from pathlib import Path
from typing import Optional
class AdvancedInputHandler:
"""Handles advanced input with editor mode, file inclusion, and image support."""
def __init__(self):
self.editor_mode = False
self.setup_readline()
def setup_readline(self):
"""Setup readline with basic completer."""
try:
def completer(text, state):
return None
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
except Exception:
pass
def toggle_editor_mode(self):
"""Toggle between simple and editor input modes."""
self.editor_mode = not self.editor_mode
mode = "Editor" if self.editor_mode else "Simple"
print(f"\nSwitched to {mode.lower()} input mode.")
def get_input(self, prompt: str = "You> ") -> Optional[str]:
"""Get input from user, handling different modes."""
try:
if self.editor_mode:
return self._get_editor_input(prompt)
else:
return self._get_simple_input(prompt)
except KeyboardInterrupt:
return None
except EOFError:
return None
def _get_simple_input(self, prompt: str) -> Optional[str]:
"""Get simple input with file completion."""
try:
user_input = input(prompt).strip()
if not user_input:
return ""
if user_input.lower() == "/editor":
self.toggle_editor_mode()
return self.get_input(prompt)
processed_input = self._process_input(user_input)
return processed_input
except KeyboardInterrupt:
return None
def _get_editor_input(self, prompt: str) -> Optional[str]:
"""Get multi-line input for editor mode."""
try:
print("Editor mode: Enter your message. Type 'END' on a new line to finish.")
print("Type '/simple' to switch back to simple mode.")
lines = []
while True:
try:
line = input()
if line.strip().lower() == "end":
break
elif line.strip().lower() == "/simple":
self.toggle_editor_mode()
return self.get_input(prompt)
lines.append(line)
except EOFError:
break
content = "\n".join(lines).strip()
if not content:
return ""
processed_content = self._process_input(content)
return processed_content
except KeyboardInterrupt:
return None
def _process_input(self, text: str) -> str:
"""Process input text for file inclusions and images."""
text = self._process_file_inclusions(text)
text = self._process_image_inclusions(text)
return text
def _process_file_inclusions(self, text: str) -> str:
"""Replace @[filename] with file contents."""
def replace_file(match):
filename = match.group(1).strip()
try:
path = Path(filename).expanduser().resolve()
if path.exists() and path.is_file():
with open(path, encoding="utf-8", errors="replace") as f:
content = f.read()
return f"\n--- File: {filename} ---\n{content}\n--- End of {filename} ---\n"
else:
return f"[File not found: {filename}]"
except Exception as e:
return f"[Error reading file {filename}: {e}]"
pattern = "@\\[([^\\]]+)\\]"
return re.sub(pattern, replace_file, text)
def _process_image_inclusions(self, text: str) -> str:
"""Process image file references and encode them."""
words = text.split()
processed_parts = []
for word in words:
try:
path = Path(word.strip()).expanduser().resolve()
if path.exists() and path.is_file():
mime_type, _ = mimetypes.guess_type(str(path))
if mime_type and mime_type.startswith("image/"):
with open(path, "rb") as f:
image_data = base64.b64encode(f.read()).decode("utf-8")
processed_parts.append(
f"[Image: {path.name}]\ndata:{mime_type};base64,{image_data}\n"
)
continue
except (OSError, ValueError):
pass
processed_parts.append(word)
return " ".join(processed_parts)
input_handler = AdvancedInputHandler()
def get_advanced_input(prompt: str = "You> ") -> Optional[str]:
"""Get advanced input from user."""
return input_handler.get_input(prompt)
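# Illustrative sketch of the @[...] file-inclusion syntax handled above:
# typing "summarize @[notes.txt]" expands the reference into the file's
# contents wrapped in "--- File: notes.txt ---" markers before the message
# is sent on. notes.txt is a hypothetical file.
def _demo_file_inclusion():
handler = AdvancedInputHandler()
return handler._process_file_inclusions("summarize @[notes.txt]")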

12
rp/memory/__init__.py Normal file
View File

@ -0,0 +1,12 @@
from .conversation_memory import ConversationMemory
from .fact_extractor import FactExtractor
from .knowledge_store import KnowledgeEntry, KnowledgeStore
from .semantic_index import SemanticIndex
__all__ = [
"KnowledgeStore",
"KnowledgeEntry",
"SemanticIndex",
"ConversationMemory",
"FactExtractor",
]

209
rp/memory/conversation_memory.py Normal file
View File

@ -0,0 +1,209 @@
import json
import sqlite3
import time
from typing import Any, Dict, List, Optional
class ConversationMemory:
def __init__(self, db_path: str):
self.db_path = db_path
self._initialize_memory()
def _initialize_memory(self):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n CREATE TABLE IF NOT EXISTS conversation_history (\n conversation_id TEXT PRIMARY KEY,\n session_id TEXT,\n started_at REAL NOT NULL,\n ended_at REAL,\n message_count INTEGER DEFAULT 0,\n summary TEXT,\n topics TEXT,\n metadata TEXT\n )\n "
)
cursor.execute(
"\n CREATE TABLE IF NOT EXISTS conversation_messages (\n message_id TEXT PRIMARY KEY,\n conversation_id TEXT NOT NULL,\n role TEXT NOT NULL,\n content TEXT NOT NULL,\n timestamp REAL NOT NULL,\n tool_calls TEXT,\n metadata TEXT,\n FOREIGN KEY (conversation_id) REFERENCES conversation_history(conversation_id)\n )\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_conv_session ON conversation_history(session_id)\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_conv_started ON conversation_history(started_at DESC)\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_msg_conversation ON conversation_messages(conversation_id)\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_msg_timestamp ON conversation_messages(timestamp)\n "
)
conn.commit()
conn.close()
def create_conversation(
self,
conversation_id: str,
session_id: Optional[str] = None,
metadata: Optional[Dict[str, Any]] = None,
):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n INSERT INTO conversation_history\n (conversation_id, session_id, started_at, metadata)\n VALUES (?, ?, ?, ?)\n ",
(conversation_id, session_id, time.time(), json.dumps(metadata) if metadata else None),
)
conn.commit()
conn.close()
def add_message(
self,
conversation_id: str,
message_id: str,
role: str,
content: str,
tool_calls: Optional[List[Dict[str, Any]]] = None,
metadata: Optional[Dict[str, Any]] = None,
):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n INSERT INTO conversation_messages\n (message_id, conversation_id, role, content, timestamp, tool_calls, metadata)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n ",
(
message_id,
conversation_id,
role,
content,
time.time(),
json.dumps(tool_calls) if tool_calls else None,
json.dumps(metadata) if metadata else None,
),
)
cursor.execute(
"\n UPDATE conversation_history\n SET message_count = message_count + 1\n WHERE conversation_id = ?\n ",
(conversation_id,),
)
conn.commit()
conn.close()
def get_conversation_messages(
self, conversation_id: str, limit: Optional[int] = None
) -> List[Dict[str, Any]]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
if limit:
cursor.execute(
"\n SELECT message_id, role, content, timestamp, tool_calls, metadata\n FROM conversation_messages\n WHERE conversation_id = ?\n ORDER BY timestamp DESC\n LIMIT ?\n ",
(conversation_id, limit),
)
else:
cursor.execute(
"\n SELECT message_id, role, content, timestamp, tool_calls, metadata\n FROM conversation_messages\n WHERE conversation_id = ?\n ORDER BY timestamp ASC\n ",
(conversation_id,),
)
messages = []
for row in cursor.fetchall():
messages.append(
{
"message_id": row[0],
"role": row[1],
"content": row[2],
"timestamp": row[3],
"tool_calls": json.loads(row[4]) if row[4] else None,
"metadata": json.loads(row[5]) if row[5] else None,
}
)
conn.close()
return messages
def update_conversation_summary(
self, conversation_id: str, summary: str, topics: Optional[List[str]] = None
):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n UPDATE conversation_history\n SET summary = ?, topics = ?, ended_at = ?\n WHERE conversation_id = ?\n ",
(summary, json.dumps(topics) if topics else None, time.time(), conversation_id),
)
conn.commit()
conn.close()
def search_conversations(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"\n SELECT DISTINCT h.conversation_id, h.session_id, h.started_at,\n h.message_count, h.summary, h.topics\n FROM conversation_history h\n LEFT JOIN conversation_messages m ON h.conversation_id = m.conversation_id\n WHERE h.summary LIKE ? OR h.topics LIKE ? OR m.content LIKE ?\n ORDER BY h.started_at DESC\n LIMIT ?\n ",
(f"%{query}%", f"%{query}%", f"%{query}%", limit),
)
conversations = []
for row in cursor.fetchall():
conversations.append(
{
"conversation_id": row[0],
"session_id": row[1],
"started_at": row[2],
"message_count": row[3],
"summary": row[4],
"topics": json.loads(row[5]) if row[5] else [],
}
)
conn.close()
return conversations
def get_recent_conversations(
self, limit: int = 10, session_id: Optional[str] = None
) -> List[Dict[str, Any]]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
if session_id:
cursor.execute(
"\n SELECT conversation_id, session_id, started_at, ended_at,\n message_count, summary, topics\n FROM conversation_history\n WHERE session_id = ?\n ORDER BY started_at DESC\n LIMIT ?\n ",
(session_id, limit),
)
else:
cursor.execute(
"\n SELECT conversation_id, session_id, started_at, ended_at,\n message_count, summary, topics\n FROM conversation_history\n ORDER BY started_at DESC\n LIMIT ?\n ",
(limit,),
)
conversations = []
for row in cursor.fetchall():
conversations.append(
{
"conversation_id": row[0],
"session_id": row[1],
"started_at": row[2],
"ended_at": row[3],
"message_count": row[4],
"summary": row[5],
"topics": json.loads(row[6]) if row[6] else [],
}
)
conn.close()
return conversations
def delete_conversation(self, conversation_id: str) -> bool:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"DELETE FROM conversation_messages WHERE conversation_id = ?", (conversation_id,)
)
cursor.execute(
"DELETE FROM conversation_history WHERE conversation_id = ?", (conversation_id,)
)
deleted = cursor.rowcount > 0
conn.commit()
conn.close()
return deleted
def get_statistics(self) -> Dict[str, Any]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("SELECT COUNT(*) FROM conversation_history")
total_conversations = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(*) FROM conversation_messages")
total_messages = cursor.fetchone()[0]
cursor.execute("SELECT SUM(message_count) FROM conversation_history")
cursor.fetchone()[0] or 0
cursor.execute(
"\n SELECT AVG(message_count) FROM conversation_history WHERE message_count > 0\n "
)
avg_messages = cursor.fetchone()[0] or 0
conn.close()
return {
"total_conversations": total_conversations,
"total_messages": total_messages,
"average_messages_per_conversation": round(avg_messages, 2),
}
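# Illustrative round-trip through the API above. Every method opens a fresh
# connection, so a ":memory:" database would not persist between calls; a
# throwaway file path (hypothetical) is used instead.
def _demo_conversation_roundtrip():
memory = ConversationMemory("/tmp/demo_conversations.db")
memory.create_conversation("conv-1", session_id="sess-1")
memory.add_message("conv-1", "msg-1", "user", "hello")
memory.add_message("conv-1", "msg-2", "assistant", "hi there")
return memory.get_conversation_messages("conv-1")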

200
rp/memory/fact_extractor.py Normal file
View File

@ -0,0 +1,200 @@
import re
from collections import defaultdict
from typing import Any, Dict, List
class FactExtractor:
def __init__(self):
self.fact_patterns = [
("([A-Z][a-z]+ [A-Z][a-z]+) is (a|an) ([^.]+)", "definition"),
("([A-Z][a-z]+) (was|is) (born|created|founded) in (\\d{4})", "temporal"),
("([A-Z][a-z]+) (invented|created|developed) ([^.]+)", "attribution"),
("([^.]+) (costs?|worth) (\\$[\\d,]+)", "numeric"),
("([A-Z][a-z]+) (lives?|works?|located) in ([A-Z][a-z]+)", "location"),
]
def extract_facts(self, text: str) -> List[Dict[str, Any]]:
facts = []
for pattern, fact_type in self.fact_patterns:
matches = re.finditer(pattern, text)
for match in matches:
facts.append(
{
"type": fact_type,
"text": match.group(0),
"components": match.groups(),
"confidence": 0.7,
}
)
noun_phrases = self._extract_noun_phrases(text)
for phrase in noun_phrases:
if len(phrase.split()) >= 2:
facts.append(
{"type": "entity", "text": phrase, "components": [phrase], "confidence": 0.5}
)
return facts
def _extract_noun_phrases(self, text: str) -> List[str]:
sentences = re.split("[.!?]", text)
phrases = []
for sentence in sentences:
words = sentence.split()
current_phrase = []
for word in words:
if word and word[0].isupper() and (len(word) > 1):
current_phrase.append(word)
else:
if len(current_phrase) >= 2:
phrases.append(" ".join(current_phrase))
elif len(current_phrase) == 1:
phrases.append(current_phrase[0])
current_phrase = []
if len(current_phrase) >= 2:
phrases.append(" ".join(current_phrase))
elif len(current_phrase) == 1:
phrases.append(current_phrase[0])
return list(set(phrases))
def extract_key_terms(self, text: str, top_k: int = 10) -> List[tuple]:
words = re.findall("\\b[a-z]{4,}\\b", text.lower())
stopwords = {
"this",
"that",
"these",
"those",
"what",
"which",
"where",
"when",
"with",
"from",
"have",
"been",
"were",
"will",
"would",
"could",
"should",
"about",
"their",
"there",
"other",
"than",
"then",
"them",
"some",
"more",
"very",
"such",
"into",
"through",
"during",
"before",
"after",
"above",
"below",
"between",
"under",
"again",
"further",
"once",
"here",
"both",
"each",
"doing",
"only",
"over",
"same",
"being",
"does",
"just",
"also",
"make",
"made",
"know",
"like",
}
filtered_words = [w for w in words if w not in stopwords]
word_freq = defaultdict(int)
for word in filtered_words:
word_freq[word] += 1
sorted_terms = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)
return sorted_terms[:top_k]
def extract_relationships(self, text: str) -> List[Dict[str, Any]]:
relationships = []
relationship_patterns = [
("([A-Z][a-z]+) (works for|employed by|member of) ([A-Z][a-z]+)", "employment"),
("([A-Z][a-z]+) (owns|has|possesses) ([^.]+)", "ownership"),
("([A-Z][a-z]+) (located in|part of|belongs to) ([A-Z][a-z]+)", "location"),
("([A-Z][a-z]+) (uses|utilizes|implements) ([^.]+)", "usage"),
]
for pattern, rel_type in relationship_patterns:
matches = re.finditer(pattern, text)
for match in matches:
relationships.append(
{
"type": rel_type,
"subject": match.group(1),
"predicate": match.group(2),
"object": match.group(3),
"confidence": 0.6,
}
)
return relationships
def extract_metadata(self, text: str) -> Dict[str, Any]:
word_count = len(text.split()) if text.strip() else 0
sentences = re.split("[.!?]", text.strip())
sentence_count = len([s for s in sentences if s.strip()]) if text.strip() else 0
urls = re.findall("https?://[^\\s]+", text)
email_addresses = re.findall("\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}\\b", text)
dates = re.findall(
"\\b\\d{1,2}[-/]\\d{1,2}[-/]\\d{2,4}\\b|\\b\\d{4}[-/]\\d{1,2}[-/]\\d{1,2}\\b|\\b\\d{4}\\b",
text,
)
numbers = re.findall("\\b\\d+(?:,\\d{3})*(?:\\.\\d+)?\\b", text)
return {
"word_count": word_count,
"sentence_count": sentence_count,
"avg_words_per_sentence": round(word_count / max(sentence_count, 1), 2),
"urls": urls,
"email_addresses": email_addresses,
"dates": dates,
"numeric_values": numbers,
"has_code": bool(re.search("```|def |class |import |function ", text)),
"has_questions": bool(re.search("\\?", text)),
}
def categorize_content(self, text: str) -> List[str]:
categories = []
category_keywords = {
"programming": [
"code",
"function",
"class",
"variable",
"programming",
"software",
"debug",
],
"data": ["data", "database", "query", "table", "record", "statistics", "analysis"],
"documentation": ["documentation", "guide", "tutorial", "manual", "readme", "explain"],
"configuration": [
"config",
"settings",
"configuration",
"setup",
"install",
"deployment",
],
"testing": ["test", "testing", "validate", "verification", "quality", "assertion"],
"research": ["research", "study", "analysis", "investigation", "findings", "results"],
"planning": ["plan", "planning", "schedule", "roadmap", "milestone", "timeline"],
}
text_lower = text.lower()
for category, keywords in category_keywords.items():
if any((keyword in text_lower for keyword in keywords)):
categories.append(category)
return categories if categories else ["general"]
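# Illustrative sketch of the regex-based extractors above (sample text only):
def _demo_fact_extraction():
extractor = FactExtractor()
text = "Guido Rossum created the Python programming language in 1991."
print(extractor.extract_facts(text))  # attribution fact plus entity phrases
print(extractor.extract_key_terms(text, top_k=5))
print(extractor.categorize_content(text))  # ["programming"]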

253
rp/memory/knowledge_store.py Normal file
View File

@ -0,0 +1,253 @@
import json
import sqlite3
import threading
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple
from .semantic_index import SemanticIndex
@dataclass
class KnowledgeEntry:
entry_id: str
category: str
content: str
metadata: Dict[str, Any]
created_at: float
updated_at: float
access_count: int = 0
importance_score: float = 1.0
def to_dict(self) -> Dict[str, Any]:
return {
"entry_id": self.entry_id,
"category": self.category,
"content": self.content,
"metadata": self.metadata,
"created_at": self.created_at,
"updated_at": self.updated_at,
"access_count": self.access_count,
"importance_score": self.importance_score,
}
class KnowledgeStore:
def __init__(self, db_path: str):
self.db_path = db_path
self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
self.lock = threading.Lock()
self.semantic_index = SemanticIndex()
self._initialize_store()
self._load_index()
def _initialize_store(self):
with self.lock:
cursor = self.conn.cursor()
cursor.execute(
"\n CREATE TABLE IF NOT EXISTS knowledge_entries (\n entry_id TEXT PRIMARY KEY,\n category TEXT NOT NULL,\n content TEXT NOT NULL,\n metadata TEXT,\n created_at REAL NOT NULL,\n updated_at REAL NOT NULL,\n access_count INTEGER DEFAULT 0,\n importance_score REAL DEFAULT 1.0\n )\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_category ON knowledge_entries(category)\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_importance ON knowledge_entries(importance_score DESC)\n "
)
cursor.execute(
"\n CREATE INDEX IF NOT EXISTS idx_created ON knowledge_entries(created_at DESC)\n "
)
self.conn.commit()
def _load_index(self):
with self.lock:
cursor = self.conn.cursor()
cursor.execute("SELECT entry_id, content FROM knowledge_entries")
for row in cursor.fetchall():
self.semantic_index.add_document(row[0], row[1])
def add_entry(self, entry: KnowledgeEntry):
with self.lock:
cursor = self.conn.cursor()
cursor.execute(
"\n INSERT OR REPLACE INTO knowledge_entries\n (entry_id, category, content, metadata, created_at, updated_at, access_count, importance_score)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?)\n ",
(
entry.entry_id,
entry.category,
entry.content,
json.dumps(entry.metadata),
entry.created_at,
entry.updated_at,
entry.access_count,
entry.importance_score,
),
)
self.conn.commit()
self.semantic_index.add_document(entry.entry_id, entry.content)
def get_entry(self, entry_id: str) -> Optional[KnowledgeEntry]:
with self.lock:
cursor = self.conn.cursor()
cursor.execute(
"\n SELECT entry_id, category, content, metadata, created_at, updated_at, access_count, importance_score\n FROM knowledge_entries\n WHERE entry_id = ?\n ",
(entry_id,),
)
row = cursor.fetchone()
if row:
cursor.execute(
"\n UPDATE knowledge_entries\n SET access_count = access_count + 1\n WHERE entry_id = ?\n ",
(entry_id,),
)
self.conn.commit()
return KnowledgeEntry(
entry_id=row[0],
category=row[1],
content=row[2],
metadata=json.loads(row[3]) if row[3] else {},
created_at=row[4],
updated_at=row[5],
access_count=row[6] + 1,
importance_score=row[7],
)
return None
def search_entries(
self, query: str, category: Optional[str] = None, top_k: int = 5
) -> List[KnowledgeEntry]:
semantic_results = self.semantic_index.search(query, top_k * 2)
fts_results = self._fts_search(query, top_k * 2)
combined_results = {}
for entry_id, score in semantic_results:
combined_results[entry_id] = score * 0.7
for entry_id, score in fts_results:
if entry_id in combined_results:
combined_results[entry_id] = max(combined_results[entry_id], score * 1.0)
else:
combined_results[entry_id] = score * 1.0
sorted_results = sorted(combined_results.items(), key=lambda x: x[1], reverse=True)
with self.lock:
cursor = self.conn.cursor()
entries = []
for entry_id, score in sorted_results[:top_k]:
if category:
cursor.execute(
"\n SELECT entry_id, category, content, metadata, created_at, updated_at, access_count, importance_score\n FROM knowledge_entries\n WHERE entry_id = ? AND category = ?\n ",
(entry_id, category),
)
else:
cursor.execute(
"\n SELECT entry_id, category, content, metadata, created_at, updated_at, access_count, importance_score\n FROM knowledge_entries\n WHERE entry_id = ?\n ",
(entry_id,),
)
row = cursor.fetchone()
if row:
entry = KnowledgeEntry(
entry_id=row[0],
category=row[1],
content=row[2],
metadata=json.loads(row[3]) if row[3] else {},
created_at=row[4],
updated_at=row[5],
access_count=row[6],
importance_score=row[7],
)
entry.metadata["search_score"] = score
entries.append(entry)
return entries
def _fts_search(self, query: str, top_k: int = 10) -> List[Tuple[str, float]]:
"""Full Text Search with exact word and partial sentence matching."""
with self.lock:
cursor = self.conn.cursor()
query_lower = query.lower()
query_words = query_lower.split()
cursor.execute(
"\n SELECT entry_id, content\n FROM knowledge_entries\n WHERE LOWER(content) LIKE ?\n ",
(f"%{query_lower}%",),
)
exact_matches = []
partial_matches = []
for row in cursor.fetchall():
entry_id, content = row
content_lower = content.lower()
if query_lower in content_lower:
exact_matches.append((entry_id, 1.0))
continue
content_words = set(content_lower.split())
query_word_set = set(query_words)
matching_words = len(query_word_set & content_words)
if matching_words > 0:
word_overlap_score = matching_words / len(query_word_set)
consecutive_bonus = 0.0
for i in range(len(query_words)):
for j in range(i + 1, min(i + 4, len(query_words) + 1)):
phrase = " ".join(query_words[i:j])
if phrase in content_lower:
consecutive_bonus += 0.2 * (j - i)
total_score = min(0.99, word_overlap_score + consecutive_bonus)
partial_matches.append((entry_id, total_score))
all_results = exact_matches + partial_matches
all_results.sort(key=lambda x: x[1], reverse=True)
return all_results[:top_k]
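# Worked example of the partial-match scoring above: for the query
# "agent message bus" against content containing "message bus", the word
# overlap is 2/3 and the consecutive-phrase bonuses are 0.2 ("message")
# + 0.4 ("message bus") + 0.2 ("bus") = 0.8, so the total is capped at 0.99,
# which keeps exact substring matches (score 1.0) ranked above partial ones.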
def get_by_category(self, category: str, limit: int = 20) -> List[KnowledgeEntry]:
with self.lock:
cursor = self.conn.cursor()
cursor.execute(
"\n SELECT entry_id, category, content, metadata, created_at, updated_at, access_count, importance_score\n FROM knowledge_entries\n WHERE category = ?\n ORDER BY importance_score DESC, created_at DESC\n LIMIT ?\n ",
(category, limit),
)
entries = []
for row in cursor.fetchall():
entries.append(
KnowledgeEntry(
entry_id=row[0],
category=row[1],
content=row[2],
metadata=json.loads(row[3]) if row[3] else {},
created_at=row[4],
updated_at=row[5],
access_count=row[6],
importance_score=row[7],
)
)
return entries
def update_importance(self, entry_id: str, importance_score: float):
with self.lock:
cursor = self.conn.cursor()
cursor.execute(
"\n UPDATE knowledge_entries\n SET importance_score = ?, updated_at = ?\n WHERE entry_id = ?\n ",
(importance_score, time.time(), entry_id),
)
self.conn.commit()
def delete_entry(self, entry_id: str) -> bool:
with self.lock:
cursor = self.conn.cursor()
cursor.execute("DELETE FROM knowledge_entries WHERE entry_id = ?", (entry_id,))
deleted = cursor.rowcount > 0
self.conn.commit()
if deleted:
self.semantic_index.remove_document(entry_id)
return deleted
def get_statistics(self) -> Dict[str, Any]:
with self.lock:
cursor = self.conn.cursor()
cursor.execute("SELECT COUNT(*) FROM knowledge_entries")
total_entries = cursor.fetchone()[0]
cursor.execute("SELECT COUNT(DISTINCT category) FROM knowledge_entries")
total_categories = cursor.fetchone()[0]
cursor.execute(
"\n SELECT category, COUNT(*) as count\n FROM knowledge_entries\n GROUP BY category\n ORDER BY count DESC\n "
)
category_counts = {row[0]: row[1] for row in cursor.fetchall()}
cursor.execute("SELECT SUM(access_count) FROM knowledge_entries")
total_accesses = cursor.fetchone()[0] or 0
return {
"total_entries": total_entries,
"total_categories": total_categories,
"category_distribution": category_counts,
"total_accesses": total_accesses,
"vocabulary_size": len(self.semantic_index.vocabulary),
}
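# Illustrative sketch: KnowledgeStore holds a single connection, so an
# in-memory database is enough for a quick experiment.
def _demo_knowledge_store():
store = KnowledgeStore(":memory:")
now = time.time()
store.add_entry(
KnowledgeEntry("k1", "programming", "TF-IDF weights rare terms more heavily.", {}, now, now)
)
for entry in store.search_entries("tf-idf weighting"):
print(entry.entry_id, entry.metadata.get("search_score"))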

85
rp/memory/semantic_index.py Normal file
View File

@ -0,0 +1,85 @@
import math
import re
from collections import Counter, defaultdict
from typing import Dict, List, Set, Tuple
class SemanticIndex:
def __init__(self):
self.documents: Dict[str, str] = {}
self.vocabulary: Set[str] = set()
self.idf_scores: Dict[str, float] = {}
self.doc_tf_scores: Dict[str, Dict[str, float]] = {}
def _tokenize(self, text: str) -> List[str]:
text = text.lower()
text = re.sub("[^a-z0-9\\s]", " ", text)
tokens = text.split()
return tokens
def _compute_tf(self, tokens: List[str]) -> Dict[str, float]:
term_count = Counter(tokens)
total_terms = len(tokens)
return {term: count / total_terms for term, count in term_count.items()}
def _compute_idf(self):
doc_count = len(self.documents)
if doc_count == 0:
return
token_doc_count = defaultdict(int)
for doc_id, doc_text in self.documents.items():
tokens = set(self._tokenize(doc_text))
for token in tokens:
token_doc_count[token] += 1
if doc_count == 1:
self.idf_scores = {token: 1.0 for token in token_doc_count}
else:
self.idf_scores = {
token: math.log(doc_count / count) for token, count in token_doc_count.items()
}
def add_document(self, doc_id: str, text: str):
self.documents[doc_id] = text
tokens = self._tokenize(text)
self.vocabulary.update(tokens)
tf_scores = self._compute_tf(tokens)
self.doc_tf_scores[doc_id] = tf_scores
self._compute_idf()
def remove_document(self, doc_id: str):
if doc_id in self.documents:
del self.documents[doc_id]
if doc_id in self.doc_tf_scores:
del self.doc_tf_scores[doc_id]
self._compute_idf()
def search(self, query: str, top_k: int = 5) -> List[Tuple[str, float]]:
if not query.strip():
return []
query_tokens = self._tokenize(query)
if not query_tokens:
return []
query_tf = self._compute_tf(query_tokens)
query_vector = {
token: query_tf.get(token, 0) * self.idf_scores.get(token, 0) for token in query_tokens
}
scores = []
for doc_id, doc_tf in self.doc_tf_scores.items():
doc_vector = {
token: doc_tf.get(token, 0) * self.idf_scores.get(token, 0) for token in doc_tf
}
similarity = self._cosine_similarity(query_vector, doc_vector)
scores.append((doc_id, similarity))
scores.sort(key=lambda x: x[1], reverse=True)
return scores[:top_k]
def _cosine_similarity(self, vec1: Dict[str, float], vec2: Dict[str, float]) -> float:
dot_product = sum(
(vec1.get(token, 0) * vec2.get(token, 0) for token in set(vec1) | set(vec2))
)
norm1 = math.sqrt(sum((val**2 for val in vec1.values())))
norm2 = math.sqrt(sum((val**2 for val in vec2.values())))
if norm1 == 0 or norm2 == 0:
return 0
return dot_product / (norm1 * norm2)
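# Minimal sketch of the TF-IDF + cosine ranking defined above:
def _demo_semantic_search():
index = SemanticIndex()
index.add_document("threads", "python threading and queues")
index.add_document("storage", "sqlite storage layer")
print(index.search("threading queues"))  # "threads" should rank first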

340
rp/multiplexer.py Normal file
View File

@ -0,0 +1,340 @@
import queue
import subprocess
import sys
import threading
import time
from rp.tools.process_handlers import detect_process_type, get_handler_for_process
from rp.tools.prompt_detection import get_global_detector
from rp.ui import Colors
class TerminalMultiplexer:
def __init__(self, name, show_output=True):
self.name = name
self.show_output = show_output
self.stdout_buffer = []
self.stderr_buffer = []
self.stdout_queue = queue.Queue()
self.stderr_queue = queue.Queue()
self.active = True
self.lock = threading.Lock()
self.metadata = {
"start_time": time.time(),
"last_activity": time.time(),
"interaction_count": 0,
"process_type": "unknown",
"state": "active",
}
self.handler = None
self.prompt_detector = get_global_detector()
if self.show_output:
self.display_thread = threading.Thread(target=self._display_worker, daemon=True)
self.display_thread.start()
def _display_worker(self):
while self.active:
try:
line = self.stdout_queue.get(timeout=0.1)
if line:
sys.stdout.write(line)
sys.stdout.flush()
except queue.Empty:
pass
try:
line = self.stderr_queue.get(timeout=0.1)
if line:
if self.metadata.get("process_type") in ["vim", "ssh"]:
sys.stderr.write(line)
else:
sys.stderr.write(f"{Colors.YELLOW}[{self.name} err]{Colors.RESET} {line}\n")
sys.stderr.flush()
except queue.Empty:
pass
def write_stdout(self, data):
with self.lock:
self.stdout_buffer.append(data)
self.metadata["last_activity"] = time.time()
if self.handler:
self.handler.update_state(data)
self.prompt_detector.update_session_state(
self.name, data, self.metadata["process_type"]
)
if self.show_output:
self.stdout_queue.put(data)
def write_stderr(self, data):
with self.lock:
self.stderr_buffer.append(data)
self.metadata["last_activity"] = time.time()
if self.handler:
self.handler.update_state(data)
self.prompt_detector.update_session_state(
self.name, data, self.metadata["process_type"]
)
if self.show_output:
self.stderr_queue.put(data)
def get_stdout(self):
with self.lock:
return "".join(self.stdout_buffer)
def get_stderr(self):
with self.lock:
return "".join(self.stderr_buffer)
def get_all_output(self):
with self.lock:
return {"stdout": "".join(self.stdout_buffer), "stderr": "".join(self.stderr_buffer)}
def get_metadata(self):
with self.lock:
return self.metadata.copy()
def update_metadata(self, key, value):
with self.lock:
self.metadata[key] = value
def set_process_type(self, process_type):
with self.lock:
self.metadata["process_type"] = process_type
self.handler = get_handler_for_process(process_type, self)
def send_input(self, input_data):
if hasattr(self, "process") and self.process.poll() is None:
try:
self.process.stdin.write(input_data + "\n")
self.process.stdin.flush()
with self.lock:
self.metadata["last_activity"] = time.time()
self.metadata["interaction_count"] += 1
except Exception as e:
self.write_stderr(f"Error sending input: {e}")
else:
with self.lock:
self.metadata["last_activity"] = time.time()
self.metadata["interaction_count"] += 1
def close(self):
self.active = False
if hasattr(self, "display_thread"):
self.display_thread.join(timeout=1)
multiplexer_registry = {}
multiplexer_counter = 0
multiplexer_lock = threading.Lock()
background_monitor = None
monitor_active = False
monitor_interval = 0.2
def create_multiplexer(name=None, show_output=True):
global multiplexer_counter
with multiplexer_lock:
if name is None:
multiplexer_counter += 1
name = f"process-{multiplexer_counter}"
multiplexer_instance = TerminalMultiplexer(name, show_output)
multiplexer_registry[name] = multiplexer_instance
return (name, multiplexer_instance)
def get_multiplexer(name):
return multiplexer_registry.get(name)
def close_multiplexer(name):
    with multiplexer_lock:
        multiplexer_instance = multiplexer_registry.pop(name, None)
    if multiplexer_instance:
        multiplexer_instance.close()
def get_all_multiplexer_states():
with multiplexer_lock:
states = {}
for name, multiplexer_instance in multiplexer_registry.items():
states[name] = {
"metadata": multiplexer_instance.get_metadata(),
"output_summary": {
"stdout_lines": len(multiplexer_instance.stdout_buffer),
"stderr_lines": len(multiplexer_instance.stderr_buffer),
},
}
return states
def cleanup_all_multiplexers():
for multiplexer_instance in list(multiplexer_registry.values()):
multiplexer_instance.close()
multiplexer_registry.clear()
background_processes = {}
process_lock = threading.Lock()
class BackgroundProcess:
def __init__(self, name, command):
self.name = name
self.command = command
self.process = None
self.multiplexer = None
self.status = "starting"
self.start_time = time.time()
self.end_time = None
def start(self):
try:
multiplexer_name, multiplexer_instance = create_multiplexer(
self.name, show_output=False
)
self.multiplexer = multiplexer_instance
process_type = detect_process_type(self.command)
multiplexer_instance.set_process_type(process_type)
self.process = subprocess.Popen(
self.command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
bufsize=1,
universal_newlines=True,
)
self.status = "running"
threading.Thread(target=self._monitor_stdout, daemon=True).start()
threading.Thread(target=self._monitor_stderr, daemon=True).start()
return {"status": "success", "pid": self.process.pid}
except Exception as e:
self.status = "error"
return {"status": "error", "error": str(e)}
def _monitor_stdout(self):
try:
for line in iter(self.process.stdout.readline, ""):
if line:
self.multiplexer.write_stdout(line.rstrip("\n\r"))
except Exception as e:
self.multiplexer.write_stderr(f"Error reading stdout: {e}")
finally:
self._check_completion()
def _monitor_stderr(self):
try:
for line in iter(self.process.stderr.readline, ""):
if line:
self.multiplexer.write_stderr(line.rstrip("\n\r"))
except Exception as e:
self.multiplexer.write_stderr(f"Error reading stderr: {e}")
def _check_completion(self):
if self.process and self.process.poll() is not None:
self.status = "completed"
self.end_time = time.time()
def get_info(self):
self._check_completion()
return {
"name": self.name,
"command": self.command,
"status": self.status,
"pid": self.process.pid if self.process else None,
"start_time": self.start_time,
"end_time": self.end_time,
"runtime": (
time.time() - self.start_time
if not self.end_time
else self.end_time - self.start_time
),
}
def get_output(self, lines=None):
if not self.multiplexer:
return []
all_output = self.multiplexer.get_all_output()
stdout_lines = all_output["stdout"].split("\n") if all_output["stdout"] else []
stderr_lines = all_output["stderr"].split("\n") if all_output["stderr"] else []
combined = stdout_lines + stderr_lines
if lines:
combined = combined[-lines:]
return [line for line in combined if line.strip()]
def send_input(self, input_text):
if self.process and self.status == "running":
try:
self.process.stdin.write(input_text + "\n")
self.process.stdin.flush()
return {"status": "success"}
except Exception as e:
return {"status": "error", "error": str(e)}
return {"status": "error", "error": "Process not running or no stdin"}
def kill(self):
if self.process and self.status == "running":
try:
self.process.terminate()
time.sleep(0.1)
if self.process.poll() is None:
self.process.kill()
self.status = "killed"
self.end_time = time.time()
return {"status": "success"}
except Exception as e:
return {"status": "error", "error": str(e)}
return {"status": "error", "error": "Process not running"}
def start_background_process(name, command):
with process_lock:
if name in background_processes:
return {"status": "error", "error": f"Process {name} already exists"}
process_instance = BackgroundProcess(name, command)
result = process_instance.start()
if result["status"] == "success":
background_processes[name] = process_instance
return result
def get_all_sessions():
with process_lock:
sessions = {}
for name, process_instance in background_processes.items():
sessions[name] = process_instance.get_info()
return sessions
def get_session_info(name):
with process_lock:
process_instance = background_processes.get(name)
return process_instance.get_info() if process_instance else None
def get_session_output(name, lines=None):
with process_lock:
process_instance = background_processes.get(name)
return process_instance.get_output(lines) if process_instance else None
def send_input_to_session(name, input_text):
with process_lock:
process_instance = background_processes.get(name)
return (
process_instance.send_input(input_text)
if process_instance
else {"status": "error", "error": "Session not found"}
)
def kill_session(name):
with process_lock:
process_instance = background_processes.get(name)
if process_instance:
result = process_instance.kill()
if result["status"] == "success":
del background_processes[name]
return result
return {"status": "error", "error": "Session not found"}

384
rp/multiplexer.py.bak Normal file

@ -0,0 +1,384 @@
import queue
import subprocess
import sys
import threading
import time
from rp.tools.process_handlers import detect_process_type, get_handler_for_process
from rp.tools.prompt_detection import get_global_detector
from rp.ui import Colors
class TerminalMultiplexer:
def __init__(self, name, show_output=True):
self.name = name
self.show_output = show_output
self.stdout_buffer = []
self.stderr_buffer = []
self.stdout_queue = queue.Queue()
self.stderr_queue = queue.Queue()
self.active = True
self.lock = threading.Lock()
self.metadata = {
"start_time": time.time(),
"last_activity": time.time(),
"interaction_count": 0,
"process_type": "unknown",
"state": "active",
}
self.handler = None
self.prompt_detector = get_global_detector()
if self.show_output:
self.display_thread = threading.Thread(target=self._display_worker, daemon=True)
self.display_thread.start()
def _display_worker(self):
while self.active:
try:
line = self.stdout_queue.get(timeout=0.1)
if line:
if self.metadata.get("process_type") in ["vim", "ssh"]:
sys.stdout.write(line)
else:
sys.stdout.write(f"{Colors.GRAY}[{self.name}]{Colors.RESET} {line}\n")
sys.stdout.flush()
except queue.Empty:
pass
try:
line = self.stderr_queue.get(timeout=0.1)
if line:
if self.metadata.get("process_type") in ["vim", "ssh"]:
sys.stderr.write(line)
else:
sys.stderr.write(f"{Colors.YELLOW}[{self.name} err]{Colors.RESET} {line}\n")
sys.stderr.flush()
except queue.Empty:
pass
def write_stdout(self, data):
with self.lock:
self.stdout_buffer.append(data)
self.metadata["last_activity"] = time.time()
# Update handler state if available
if self.handler:
self.handler.update_state(data)
# Update prompt detector
self.prompt_detector.update_session_state(
self.name, data, self.metadata["process_type"]
)
if self.show_output:
self.stdout_queue.put(data)
def write_stderr(self, data):
with self.lock:
self.stderr_buffer.append(data)
self.metadata["last_activity"] = time.time()
# Update handler state if available
if self.handler:
self.handler.update_state(data)
# Update prompt detector
self.prompt_detector.update_session_state(
self.name, data, self.metadata["process_type"]
)
if self.show_output:
self.stderr_queue.put(data)
def get_stdout(self):
with self.lock:
return "".join(self.stdout_buffer)
def get_stderr(self):
with self.lock:
return "".join(self.stderr_buffer)
def get_all_output(self):
with self.lock:
return {
"stdout": "".join(self.stdout_buffer),
"stderr": "".join(self.stderr_buffer),
}
def get_metadata(self):
with self.lock:
return self.metadata.copy()
def update_metadata(self, key, value):
with self.lock:
self.metadata[key] = value
def set_process_type(self, process_type):
"""Set the process type and initialize appropriate handler."""
with self.lock:
self.metadata["process_type"] = process_type
self.handler = get_handler_for_process(process_type, self)
def send_input(self, input_data):
if hasattr(self, "process") and self.process.poll() is None:
try:
self.process.stdin.write(input_data + "\n")
self.process.stdin.flush()
with self.lock:
self.metadata["last_activity"] = time.time()
self.metadata["interaction_count"] += 1
except Exception as e:
self.write_stderr(f"Error sending input: {e}")
else:
# This will be implemented when we have a process attached
# For now, just update activity
with self.lock:
self.metadata["last_activity"] = time.time()
self.metadata["interaction_count"] += 1
def close(self):
self.active = False
if hasattr(self, "display_thread"):
self.display_thread.join(timeout=1)
_multiplexers = {}
_mux_counter = 0
_mux_lock = threading.Lock()
_background_monitor = None
_monitor_active = False
_monitor_interval = 0.2 # 200ms
def create_multiplexer(name=None, show_output=True):
global _mux_counter
with _mux_lock:
if name is None:
_mux_counter += 1
name = f"process-{_mux_counter}"
mux = TerminalMultiplexer(name, show_output)
_multiplexers[name] = mux
return name, mux
def get_multiplexer(name):
return _multiplexers.get(name)
def close_multiplexer(name):
mux = _multiplexers.get(name)
if mux:
mux.close()
del _multiplexers[name]
def get_all_multiplexer_states():
with _mux_lock:
states = {}
for name, mux in _multiplexers.items():
states[name] = {
"metadata": mux.get_metadata(),
"output_summary": {
"stdout_lines": len(mux.stdout_buffer),
"stderr_lines": len(mux.stderr_buffer),
},
}
return states
def cleanup_all_multiplexers():
for mux in list(_multiplexers.values()):
mux.close()
_multiplexers.clear()
# Background process management
_background_processes = {}
_process_lock = threading.Lock()
class BackgroundProcess:
def __init__(self, name, command):
self.name = name
self.command = command
self.process = None
self.multiplexer = None
self.status = "starting"
self.start_time = time.time()
self.end_time = None
def start(self):
"""Start the background process."""
try:
# Create multiplexer for this process
mux_name, mux = create_multiplexer(self.name, show_output=False)
self.multiplexer = mux
# Detect process type
process_type = detect_process_type(self.command)
mux.set_process_type(process_type)
# Start the subprocess
self.process = subprocess.Popen(
self.command,
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
bufsize=1,
universal_newlines=True,
)
self.status = "running"
# Start output monitoring threads
threading.Thread(target=self._monitor_stdout, daemon=True).start()
threading.Thread(target=self._monitor_stderr, daemon=True).start()
return {"status": "success", "pid": self.process.pid}
except Exception as e:
self.status = "error"
return {"status": "error", "error": str(e)}
def _monitor_stdout(self):
"""Monitor stdout from the process."""
try:
for line in iter(self.process.stdout.readline, ""):
if line:
self.multiplexer.write_stdout(line.rstrip("\n\r"))
except Exception as e:
self.write_stderr(f"Error reading stdout: {e}")
finally:
self._check_completion()
def _monitor_stderr(self):
"""Monitor stderr from the process."""
try:
for line in iter(self.process.stderr.readline, ""):
if line:
self.multiplexer.write_stderr(line.rstrip("\n\r"))
except Exception as e:
self.write_stderr(f"Error reading stderr: {e}")
def _check_completion(self):
"""Check if process has completed."""
if self.process and self.process.poll() is not None:
self.status = "completed"
self.end_time = time.time()
def get_info(self):
"""Get process information."""
self._check_completion()
return {
"name": self.name,
"command": self.command,
"status": self.status,
"pid": self.process.pid if self.process else None,
"start_time": self.start_time,
"end_time": self.end_time,
"runtime": (
time.time() - self.start_time
if not self.end_time
else self.end_time - self.start_time
),
}
def get_output(self, lines=None):
"""Get process output."""
if not self.multiplexer:
return []
all_output = self.multiplexer.get_all_output()
stdout_lines = all_output["stdout"].split("\n") if all_output["stdout"] else []
stderr_lines = all_output["stderr"].split("\n") if all_output["stderr"] else []
combined = stdout_lines + stderr_lines
if lines:
combined = combined[-lines:]
return [line for line in combined if line.strip()]
def send_input(self, input_text):
"""Send input to the process."""
if self.process and self.status == "running":
try:
self.process.stdin.write(input_text + "\n")
self.process.stdin.flush()
return {"status": "success"}
except Exception as e:
return {"status": "error", "error": str(e)}
return {"status": "error", "error": "Process not running or no stdin"}
def kill(self):
"""Kill the process."""
if self.process and self.status == "running":
try:
self.process.terminate()
# Wait a bit for graceful termination
time.sleep(0.1)
if self.process.poll() is None:
self.process.kill()
self.status = "killed"
self.end_time = time.time()
return {"status": "success"}
except Exception as e:
return {"status": "error", "error": str(e)}
return {"status": "error", "error": "Process not running"}
def start_background_process(name, command):
"""Start a background process."""
with _process_lock:
if name in _background_processes:
return {"status": "error", "error": f"Process {name} already exists"}
process = BackgroundProcess(name, command)
result = process.start()
if result["status"] == "success":
_background_processes[name] = process
return result
def get_all_sessions():
"""Get all background process sessions."""
with _process_lock:
sessions = {}
for name, process in _background_processes.items():
sessions[name] = process.get_info()
return sessions
def get_session_info(name):
"""Get information about a specific session."""
with _process_lock:
process = _background_processes.get(name)
return process.get_info() if process else None
def get_session_output(name, lines=None):
"""Get output from a specific session."""
with _process_lock:
process = _background_processes.get(name)
return process.get_output(lines) if process else None
def send_input_to_session(name, input_text):
"""Send input to a background session."""
with _process_lock:
process = _background_processes.get(name)
return (
process.send_input(input_text)
if process
else {"status": "error", "error": "Session not found"}
)
def kill_session(name):
"""Kill a background session."""
with _process_lock:
process = _background_processes.get(name)
if process:
result = process.kill()
if result["status"] == "success":
del _background_processes[name]
return result
return {"status": "error", "error": "Session not found"}

0
rp/plugins/__init__.py Normal file

72
rp/plugins/loader.py Normal file

@ -0,0 +1,72 @@
import importlib.util
import os
import sys
from typing import Callable, Dict, List
from rp.core.logging import get_logger
logger = get_logger("plugins")
PLUGINS_DIR = os.path.expanduser("~/.rp/plugins")
class PluginLoader:
def __init__(self):
self.loaded_plugins = {}
self.plugin_tools = []
os.makedirs(PLUGINS_DIR, exist_ok=True)
def load_plugins(self) -> List[Dict]:
if not os.path.exists(PLUGINS_DIR):
logger.info("No plugins directory found")
return []
plugin_files = [f for f in os.listdir(PLUGINS_DIR) if f.endswith(".py")]
for plugin_file in plugin_files:
try:
self._load_plugin_file(plugin_file)
except Exception as e:
logger.error(f"Error loading plugin {plugin_file}: {e}")
return self.plugin_tools
def _load_plugin_file(self, filename: str):
plugin_path = os.path.join(PLUGINS_DIR, filename)
plugin_name = filename[:-3]
spec = importlib.util.spec_from_file_location(plugin_name, plugin_path)
if spec is None or spec.loader is None:
logger.error(f"Could not load spec for {filename}")
return
module = importlib.util.module_from_spec(spec)
sys.modules[plugin_name] = module
spec.loader.exec_module(module)
if hasattr(module, "register_tools"):
tools = module.register_tools()
if isinstance(tools, list):
self.plugin_tools.extend(tools)
self.loaded_plugins[plugin_name] = module
logger.info(f"Loaded plugin: {plugin_name} ({len(tools)} tools)")
else:
logger.warning(f"Plugin {plugin_name} register_tools() did not return a list")
else:
logger.warning(f"Plugin {plugin_name} does not have register_tools() function")
def get_plugin_function(self, tool_name: str) -> Callable:
for plugin_name, module in self.loaded_plugins.items():
if hasattr(module, tool_name):
return getattr(module, tool_name)
raise ValueError(f"Plugin function not found: {tool_name}")
def list_loaded_plugins(self) -> List[str]:
return list(self.loaded_plugins.keys())
def create_example_plugin():
example_plugin = os.path.join(PLUGINS_DIR, "example_plugin.py")
if os.path.exists(example_plugin):
return
example_code = '"""\nExample plugin for PR Assistant\n\nThis plugin demonstrates how to create custom tools.\n"""\n\ndef my_custom_tool(argument: str) -> str:\n """\n A custom tool that does something useful.\n\n Args:\n argument: Some input\n\n Returns:\n A result string\n """\n return f"Custom tool processed: {argument}"\n\n\ndef register_tools():\n """\n Register tools with the PR assistant.\n\n Returns:\n List of tool definitions\n """\n return [\n {\n "type": "function",\n "function": {\n "name": "my_custom_tool",\n "description": "A custom tool that processes input",\n "parameters": {\n "type": "object",\n "properties": {\n "argument": {\n "type": "string",\n "description": "The input to process"\n }\n },\n "required": ["argument"]\n }\n }\n }\n ]\n'
try:
os.makedirs(PLUGINS_DIR, exist_ok=True)
with open(example_plugin, "w") as f:
f.write(example_code)
logger.info(f"Created example plugin at {example_plugin}")
except Exception as e:
logger.error(f"Error creating example plugin: {e}")

43
rp/research.md Normal file

@ -0,0 +1,43 @@
# Research Overview: Additional Functionality for PR Assistant
## Overview of Current Application
The PR Assistant is a professional CLI AI assistant designed for autonomous execution of tasks. It integrates various tools including command execution, web fetching, database operations, filesystem management, and Python code execution. It features session management, logging, usage tracking, and a plugin system for extensibility.
## Potential New Features
Based on analysis of similar AI assistants and tool-using agents, here are researched ideas for additional functionality:
### 1. Multi-Modal Interfaces
- **Graphical User Interface (GUI)**: Develop a desktop app using frameworks like Electron or Tkinter to provide a user-friendly interface beyond CLI.
- **Web Interface**: Create a web-based dashboard for remote access and visualization of results.
- **Voice Input/Output**: Integrate speech recognition (e.g., via Google Speech API) and text-to-speech for hands-free interaction.
### 2. Enhanced Tool Ecosystem
- **Additional Built-in Tools**: Add tools for Git operations, email handling, calendar integration, image processing (e.g., OCR, generation via DALL-E), and real-time data feeds (weather, stocks).
- **API Integrations**: Connect to popular services like GitHub for repository management, Slack/Discord for notifications, or cloud storage (AWS S3, Google Drive).
- **Workflow Automation**: Implement chaining of tools for complex workflows, similar to Zapier or LangChain agents.
### 3. Advanced AI Capabilities
- **Multi-Agent Systems**: Allow multiple AI agents to collaborate on tasks, with role specialization (e.g., one for coding, one for research).
- **Long-Term Memory and Learning**: Implement persistent knowledge bases and fine-tuning on user interactions to improve responses over time.
- **Context Awareness**: Enhance context management with better summarization and retrieval of past conversations.
### 4. Productivity and Usability Enhancements
- **Export and Sharing**: Add options to export session results to formats like PDF, Markdown, or integrate with documentation tools (e.g., Notion, Confluence).
- **Scheduled Tasks**: Enable cron-like scheduling for autonomous task execution.
- **Multi-User Support**: Implement user accounts for shared access and collaboration features.
### 5. Security and Reliability
- **Sandboxing and Permissions**: Improve security with containerized tool execution and user-defined permission levels.
- **Error Recovery**: Add automatic retry mechanisms, fallback strategies, and detailed error reporting.
- **Audit Logging**: Enhance logging for compliance and debugging.
### 6. Plugin Ecosystem Expansion
- **Community Plugin Repository**: Create an online hub for user-contributed plugins.
- **Plugin Marketplace**: Allow users to rate and install plugins easily, with dependency management.
### 7. Performance Optimizations
- **Caching**: Implement caching for API calls and tool results to reduce latency.
- **Parallel Execution**: Enable concurrent tool usage for faster task completion.
- **Model Selection**: Expand support for multiple AI models and allow dynamic switching.
These features would position the PR Assistant as a more versatile and powerful tool, appealing to developers, researchers, and productivity enthusiasts. Implementation should prioritize backward compatibility and maintain the CLI-first approach while adding optional interfaces.

13
rp/rp.py Executable file

@ -0,0 +1,13 @@
#!/usr/bin/env python3
# Trigger build
import sys
import os
# Add current directory to path to ensure imports work
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from rp.__main__ import main
if __name__ == "__main__":
main()

83
rp/tools/__init__.py Normal file

@ -0,0 +1,83 @@
from rp.tools.agents import (
collaborate_agents,
create_agent,
execute_agent_task,
list_agents,
remove_agent,
)
from rp.tools.base import get_tools_definition
from rp.tools.vision import post_image
from rp.tools.command import kill_process, run_command, run_command_interactive, tail_process
from rp.tools.database import db_get, db_query, db_set
from rp.tools.editor import (
close_editor,
editor_insert_text,
editor_replace_text,
editor_search,
open_editor,
)
from rp.tools.filesystem import (
chdir,
getpwd,
index_source_directory,
list_directory,
mkdir,
read_file,
search_replace,
write_file,
)
from rp.tools.memory import (
add_knowledge_entry,
delete_knowledge_entry,
get_knowledge_by_category,
get_knowledge_entry,
get_knowledge_statistics,
search_knowledge,
update_knowledge_importance,
)
from rp.tools.patch import apply_patch, create_diff
from rp.tools.python_exec import python_exec
from rp.tools.web import http_fetch, web_search, web_search_news
__all__ = [
"add_knowledge_entry",
"apply_patch",
"chdir",
"close_editor",
"collaborate_agents",
"create_agent",
"create_diff",
"db_get",
"db_query",
"db_set",
"delete_knowledge_entry",
"post_image",
"editor_insert_text",
"editor_replace_text",
"editor_search",
"execute_agent_task",
"get_knowledge_by_category",
"get_knowledge_entry",
"get_knowledge_statistics",
"get_tools_definition",
"getpwd",
"http_fetch",
"index_source_directory",
"kill_process",
"list_agents",
"list_directory",
"mkdir",
"open_editor",
"python_exec",
"read_file",
"remove_agent",
"run_command",
"run_command_interactive",
"search_knowledge",
"search_replace",
"tail_process",
"update_knowledge_importance",
"web_search",
"web_search_news",
"write_file",
]

120
rp/tools/agents.py Normal file

@ -0,0 +1,120 @@
import asyncio
import os
from typing import Any, Dict, List
from rp.agents.agent_manager import AgentManager
from rp.core.api import call_api
from rp.config import DEFAULT_MODEL, DEFAULT_API_URL
from rp.tools.base import get_tools_definition
def _create_api_wrapper():
"""Create a wrapper function for call_api that matches AgentManager expectations."""
model = os.environ.get("AI_MODEL", DEFAULT_MODEL)
api_url = os.environ.get("API_URL", DEFAULT_API_URL)
api_key = os.environ.get("OPENROUTER_API_KEY", "")
use_tools = int(os.environ.get("USE_TOOLS", "0"))
tools_definition = get_tools_definition() if use_tools else []
    def api_wrapper(messages, temperature=None, max_tokens=None, **kwargs):
        coro = call_api(
            messages=messages,
            model=model,
            api_url=api_url,
            api_key=api_key,
            use_tools=use_tools,
            tools_definition=tools_definition,
            verbose=False,
        )
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop running in this thread: run the coroutine directly.
            return asyncio.run(coro)
        # A loop is already running in this thread. Run the coroutine on a
        # fresh loop in a worker thread; blocking on run_coroutine_threadsafe
        # against the current loop from its own thread would deadlock.
        import concurrent.futures

        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            return executor.submit(asyncio.run, coro).result()
return api_wrapper
def create_agent(role_name: str, agent_id: str = None) -> Dict[str, Any]:
"""Create a new agent with the specified role."""
try:
db_path = os.environ.get("ASSISTANT_DB_PATH", "~/.assistant_db.sqlite")
db_path = os.path.expanduser(db_path)
api_wrapper = _create_api_wrapper()
manager = AgentManager(db_path, api_wrapper)
agent_id = manager.create_agent(role_name, agent_id)
return {"status": "success", "agent_id": agent_id, "role": role_name}
except Exception as e:
return {"status": "error", "error": str(e)}
def list_agents() -> Dict[str, Any]:
"""List all active agents."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
api_wrapper = _create_api_wrapper()
manager = AgentManager(db_path, api_wrapper)
agents = []
for agent_id, agent in manager.active_agents.items():
agents.append(
{
"agent_id": agent_id,
"role": agent.role.name,
"task_count": agent.task_count,
"message_count": len(agent.message_history),
}
)
return {"status": "success", "agents": agents}
except Exception as e:
return {"status": "error", "error": str(e)}
def execute_agent_task(agent_id: str, task: str, context: Dict[str, Any] = None) -> Dict[str, Any]:
"""Execute a task with the specified agent."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
api_wrapper = _create_api_wrapper()
manager = AgentManager(db_path, api_wrapper)
result = manager.execute_agent_task(agent_id, task, context)
return result
except Exception as e:
return {"status": "error", "error": str(e)}
def remove_agent(agent_id: str) -> Dict[str, Any]:
"""Remove an agent."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
api_wrapper = _create_api_wrapper()
manager = AgentManager(db_path, api_wrapper)
success = manager.remove_agent(agent_id)
return {"status": "success" if success else "not_found", "agent_id": agent_id}
except Exception as e:
return {"status": "error", "error": str(e)}
def collaborate_agents(orchestrator_id: str, task: str, agent_roles: List[str]) -> Dict[str, Any]:
"""Collaborate multiple agents on a task."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
api_wrapper = _create_api_wrapper()
manager = AgentManager(db_path, api_wrapper)
result = manager.collaborate_agents(orchestrator_id, task, agent_roles)
return result
except Exception as e:
return {"status": "error", "error": str(e)}

112
rp/tools/base.py Normal file

@ -0,0 +1,112 @@
import inspect
import rp.tools
from typing import get_type_hints, get_origin, get_args
def _type_to_json_schema(py_type):
"""Convert Python type to JSON Schema type."""
if py_type == str:
return {"type": "string"}
elif py_type == int:
return {"type": "integer"}
elif py_type == float:
return {"type": "number"}
elif py_type == bool:
return {"type": "boolean"}
elif get_origin(py_type) == list:
return {"type": "array", "items": _type_to_json_schema(get_args(py_type)[0])}
elif get_origin(py_type) == dict:
return {"type": "object"}
else:
return {"type": "string"}
def _generate_tool_schema(func):
"""Generate JSON Schema for a tool function."""
sig = inspect.signature(func)
docstring = func.__doc__ or ""
description = docstring.strip().split("\n")[0] if docstring else ""
type_hints = get_type_hints(func)
properties = {}
required = []
for param_name, param in sig.parameters.items():
if param_name in ["db_conn", "python_globals"]:
continue
param_type = type_hints.get(param_name, str)
schema = _type_to_json_schema(param_type)
param_doc = ""
if docstring:
lines = docstring.split("\n")
in_args = False
for line in lines:
line = line.strip()
if line.startswith("Args:") or line.startswith("Arguments:"):
in_args = True
continue
elif in_args and line.startswith(param_name + ":"):
param_doc = line.split(":", 1)[1].strip()
break
elif in_args and line == "":
continue
elif in_args and (not line.startswith(" ")):
break
if param_doc:
schema["description"] = param_doc
if param.default != inspect.Parameter.empty:
schema["default"] = param.default
properties[param_name] = schema
if param.default == inspect.Parameter.empty:
required.append(param_name)
return {
"type": "function",
"function": {
"name": func.__name__,
"description": description,
"parameters": {"type": "object", "properties": properties, "required": required},
},
}
def get_tools_definition():
"""Dynamically generate tool definitions from all tool functions."""
tools = []
for name in dir(rp.tools):
if name.startswith("_"):
continue
obj = getattr(rp.tools, name)
if callable(obj) and hasattr(obj, "__module__") and obj.__module__.startswith("rp.tools."):
if obj.__doc__:
try:
schema = _generate_tool_schema(obj)
tools.append(schema)
except Exception as e:
print(f"Warning: Could not generate schema for {name}: {e}")
continue
return tools
def get_func_map(db_conn=None, python_globals=None):
"""Dynamically generate function map for tool execution."""
func_map = {}
for name in getattr(rp.tools, "__all__", []):
if name.startswith("_"):
continue
obj = getattr(rp.tools, name, None)
if callable(obj) and hasattr(obj, "__module__") and obj.__module__.startswith("rp.tools."):
sig = inspect.signature(obj)
params = list(sig.parameters.keys())
if "db_conn" in params and "python_globals" in params:
func_map[name] = (
lambda func=obj, db_conn=db_conn, python_globals=python_globals, **kw: func(
**kw, db_conn=db_conn, python_globals=python_globals
)
)
elif "db_conn" in params:
func_map[name] = lambda func=obj, db_conn=db_conn, **kw: func(**kw, db_conn=db_conn)
elif "python_globals" in params:
func_map[name] = lambda func=obj, python_globals=python_globals, **kw: func(
**kw, python_globals=python_globals
)
else:
func_map[name] = lambda func=obj, **kw: func(**kw)
return func_map
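
To see what the generator produces, feed it a type-hinted, docstring-documented function; _generate_tool_schema is module-private, so importing it directly is only for inspection:

import json

from rp.tools.base import _generate_tool_schema

def greet(name: str, excited: bool = False) -> str:
    """Greet a person by name.

    Args:
        name: Who to greet.
        excited: Whether to add an exclamation mark.
    """
    return f"Hello, {name}" + ("!" if excited else "")

# Yields an OpenAI-style definition: name "greet", string/boolean properties
# carrying the parsed Args descriptions, "excited" with default False, and
# required == ["name"].
print(json.dumps(_generate_tool_schema(greet), indent=2))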

164
rp/tools/command.py Normal file

@ -0,0 +1,164 @@
import os
import select
import subprocess
import time
from rp.multiplexer import close_multiplexer, create_multiplexer, get_multiplexer
_processes = {}
def _register_process(pid: int, process):
_processes[pid] = process
return _processes
def _get_process(pid: int):
return _processes.get(pid)
def kill_process(pid: int):
try:
process = _get_process(pid)
if process:
process.kill()
_processes.pop(pid)
mux_name = f"cmd-{pid}"
if get_multiplexer(mux_name):
close_multiplexer(mux_name)
return {"status": "success", "message": f"Process {pid} has been killed"}
else:
return {"status": "error", "error": f"Process {pid} not found"}
except Exception as e:
return {"status": "error", "error": str(e)}
def tail_process(pid: int, timeout: int = 30):
process = _get_process(pid)
if process:
mux_name = f"cmd-{pid}"
mux = get_multiplexer(mux_name)
if not mux:
mux_name, mux = create_multiplexer(mux_name, show_output=True)
try:
start_time = time.time()
timeout_duration = timeout
stdout_content = ""
stderr_content = ""
while True:
if process.poll() is not None:
remaining_stdout, remaining_stderr = process.communicate()
if remaining_stdout:
mux.write_stdout(remaining_stdout)
stdout_content += remaining_stdout
if remaining_stderr:
mux.write_stderr(remaining_stderr)
stderr_content += remaining_stderr
if pid in _processes:
_processes.pop(pid)
close_multiplexer(mux_name)
return {
"status": "success",
"stdout": stdout_content,
"stderr": stderr_content,
"returncode": process.returncode,
}
if time.time() - start_time > timeout_duration:
return {
"status": "running",
"message": "Process is still running. Call tail_process again to continue monitoring.",
"stdout_so_far": stdout_content,
"stderr_so_far": stderr_content,
"pid": pid,
}
ready, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)
for pipe in ready:
if pipe == process.stdout:
line = process.stdout.readline()
if line:
mux.write_stdout(line)
stdout_content += line
elif pipe == process.stderr:
line = process.stderr.readline()
if line:
mux.write_stderr(line)
stderr_content += line
except Exception as e:
return {"status": "error", "error": str(e)}
else:
return {"status": "error", "error": f"Process {pid} not found"}
def run_command(command, timeout=30, monitored=False, cwd=None):
"""Execute a shell command and return the output.
Args:
command: The shell command to execute.
timeout: Maximum time in seconds to wait for completion.
monitored: Whether to monitor the process (unused).
cwd: Working directory for the command.
Returns:
Dict with status, stdout, stderr, returncode, and optionally pid if still running.
"""
mux_name = None
try:
process = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, cwd=cwd
)
_register_process(process.pid, process)
mux_name, mux = create_multiplexer(f"cmd-{process.pid}", show_output=True)
start_time = time.time()
timeout_duration = timeout
stdout_content = ""
stderr_content = ""
while True:
if process.poll() is not None:
remaining_stdout, remaining_stderr = process.communicate()
if remaining_stdout:
mux.write_stdout(remaining_stdout)
stdout_content += remaining_stdout
if remaining_stderr:
mux.write_stderr(remaining_stderr)
stderr_content += remaining_stderr
if process.pid in _processes:
_processes.pop(process.pid)
close_multiplexer(mux_name)
return {
"status": "success",
"stdout": stdout_content,
"stderr": stderr_content,
"returncode": process.returncode,
}
if time.time() - start_time > timeout_duration:
return {
"status": "running",
"message": f"Process still running after {timeout}s timeout. Use tail_process({process.pid}) to monitor or kill_process({process.pid}) to terminate.",
"stdout_so_far": stdout_content,
"stderr_so_far": stderr_content,
"pid": process.pid,
"mux_name": mux_name,
}
ready, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)
for pipe in ready:
if pipe == process.stdout:
line = process.stdout.readline()
if line:
mux.write_stdout(line)
stdout_content += line
elif pipe == process.stderr:
line = process.stderr.readline()
if line:
mux.write_stderr(line)
stderr_content += line
except Exception as e:
if mux_name:
close_multiplexer(mux_name)
return {"status": "error", "error": str(e)}
def run_command_interactive(command):
try:
return_code = os.system(command)
return {"status": "success", "returncode": return_code}
except Exception as e:
return {"status": "error", "error": str(e)}

176
rp/tools/command.py.bak Normal file

@ -0,0 +1,176 @@
print(f"Executing command: {command}") print(f"Killing process: {pid}")import os
import select
import subprocess
import time
from rp.multiplexer import close_multiplexer, create_multiplexer, get_multiplexer
_processes = {}
def _register_process(pid: int, process):
_processes[pid] = process
return _processes
def _get_process(pid: int):
return _processes.get(pid)
def kill_process(pid: int):
try:
process = _get_process(pid)
if process:
process.kill()
_processes.pop(pid)
mux_name = f"cmd-{pid}"
if get_multiplexer(mux_name):
close_multiplexer(mux_name)
return {"status": "success", "message": f"Process {pid} has been killed"}
else:
return {"status": "error", "error": f"Process {pid} not found"}
except Exception as e:
return {"status": "error", "error": str(e)}
def tail_process(pid: int, timeout: int = 30):
process = _get_process(pid)
if process:
mux_name = f"cmd-{pid}"
mux = get_multiplexer(mux_name)
if not mux:
mux_name, mux = create_multiplexer(mux_name, show_output=True)
try:
start_time = time.time()
timeout_duration = timeout
stdout_content = ""
stderr_content = ""
while True:
if process.poll() is not None:
remaining_stdout, remaining_stderr = process.communicate()
if remaining_stdout:
mux.write_stdout(remaining_stdout)
stdout_content += remaining_stdout
if remaining_stderr:
mux.write_stderr(remaining_stderr)
stderr_content += remaining_stderr
if pid in _processes:
_processes.pop(pid)
close_multiplexer(mux_name)
return {
"status": "success",
"stdout": stdout_content,
"stderr": stderr_content,
"returncode": process.returncode,
}
if time.time() - start_time > timeout_duration:
return {
"status": "running",
"message": "Process is still running. Call tail_process again to continue monitoring.",
"stdout_so_far": stdout_content,
"stderr_so_far": stderr_content,
"pid": pid,
}
ready, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)
for pipe in ready:
if pipe == process.stdout:
line = process.stdout.readline()
if line:
mux.write_stdout(line)
stdout_content += line
elif pipe == process.stderr:
line = process.stderr.readline()
if line:
mux.write_stderr(line)
stderr_content += line
except Exception as e:
return {"status": "error", "error": str(e)}
else:
return {"status": "error", "error": f"Process {pid} not found"}
def run_command(command, timeout=30, monitored=False):
mux_name = None
try:
process = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
_register_process(process.pid, process)
mux_name, mux = create_multiplexer(f"cmd-{process.pid}", show_output=True)
start_time = time.time()
timeout_duration = timeout
stdout_content = ""
stderr_content = ""
while True:
if process.poll() is not None:
remaining_stdout, remaining_stderr = process.communicate()
if remaining_stdout:
mux.write_stdout(remaining_stdout)
stdout_content += remaining_stdout
if remaining_stderr:
mux.write_stderr(remaining_stderr)
stderr_content += remaining_stderr
if process.pid in _processes:
_processes.pop(process.pid)
close_multiplexer(mux_name)
return {
"status": "success",
"stdout": stdout_content,
"stderr": stderr_content,
"returncode": process.returncode,
}
if time.time() - start_time > timeout_duration:
return {
"status": "running",
"message": f"Process still running after {timeout}s timeout. Use tail_process({process.pid}) to monitor or kill_process({process.pid}) to terminate.",
"stdout_so_far": stdout_content,
"stderr_so_far": stderr_content,
"pid": process.pid,
"mux_name": mux_name,
}
ready, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1)
for pipe in ready:
if pipe == process.stdout:
line = process.stdout.readline()
if line:
mux.write_stdout(line)
stdout_content += line
elif pipe == process.stderr:
line = process.stderr.readline()
if line:
mux.write_stderr(line)
stderr_content += line
except Exception as e:
if mux_name:
close_multiplexer(mux_name)
return {"status": "error", "error": str(e)}
def run_command_interactive(command):
try:
return_code = os.system(command)
return {"status": "success", "returncode": return_code}
except Exception as e:
return {"status": "error", "error": str(e)}


@ -0,0 +1,70 @@
import os
from typing import Optional
CONTEXT_FILE = "/home/retoor/.local/share/rp/.rcontext.txt"
def _read_context() -> str:
if not os.path.exists(CONTEXT_FILE):
raise FileNotFoundError(f"Context file {CONTEXT_FILE} not found.")
with open(CONTEXT_FILE, "r") as f:
return f.read()
def _write_context(content: str):
with open(CONTEXT_FILE, "w") as f:
f.write(content)
def modify_context_add(new_content: str, position: Optional[str] = None) -> str:
"""
Add new content to the .rcontext.txt file.
Args:
new_content: The content to add.
position: Optional marker to insert before (e.g., '***').
"""
current = _read_context()
if position and position in current:
parts = current.split(position, 1)
updated = parts[0] + new_content + "\n\n" + position + parts[1]
else:
updated = current + "\n\n" + new_content
_write_context(updated)
return f"Added: {new_content[:100]}... (full addition applied). Consequences: Enhances functionality as requested."
def modify_context_replace(old_content: str, new_content: str) -> str:
"""
Replace old content with new content in .rcontext.txt.
Args:
old_content: The content to replace.
new_content: The replacement content.
"""
current = _read_context()
if old_content not in current:
raise ValueError(f"Old content not found: {old_content[:50]}...")
updated = current.replace(old_content, new_content, 1)
_write_context(updated)
return f"Replaced: '{old_content[:50]}...' with '{new_content[:50]}...'. Consequences: Changes behavior as specified; verify for unintended effects."
def modify_context_delete(content_to_delete: str, confirmed: bool = False) -> str:
"""
Delete content from .rcontext.txt, but only if confirmed.
Args:
content_to_delete: The content to delete.
confirmed: Must be True to proceed with deletion.
"""
if not confirmed:
raise PermissionError(
f"Deletion not confirmed. To delete '{content_to_delete[:50]}...', you must explicitly confirm. Are you sure? This may affect system behavior permanently."
)
current = _read_context()
if content_to_delete not in current:
raise ValueError(f"Content to delete not found: {content_to_delete[:50]}...")
updated = current.replace(content_to_delete, "", 1)
_write_context(updated)
return f"Deleted: '{content_to_delete[:50]}...'. Consequences: Removed specified content; system may lose referenced rules or guidelines."

76
rp/tools/database.py Normal file

@ -0,0 +1,76 @@
import time
def db_set(key, value, db_conn):
"""Set a key-value pair in the database.
Args:
key: The key to set.
value: The value to store.
db_conn: Database connection.
Returns:
Dict with status and message.
"""
if not db_conn:
return {"status": "error", "error": "Database not initialized"}
try:
cursor = db_conn.cursor()
cursor.execute(
"INSERT OR REPLACE INTO kv_store (key, value, timestamp)\n VALUES (?, ?, ?)",
(key, value, time.time()),
)
db_conn.commit()
return {"status": "success", "message": f"Set {key}"}
except Exception as e:
return {"status": "error", "error": str(e)}
def db_get(key, db_conn):
"""Get a value from the database.
Args:
key: The key to retrieve.
db_conn: Database connection.
Returns:
Dict with status and value.
"""
if not db_conn:
return {"status": "error", "error": "Database not initialized"}
try:
cursor = db_conn.cursor()
cursor.execute("SELECT value FROM kv_store WHERE key = ?", (key,))
result = cursor.fetchone()
if result:
return {"status": "success", "value": result[0]}
else:
return {"status": "error", "error": "Key not found"}
except Exception as e:
return {"status": "error", "error": str(e)}
def db_query(query, db_conn):
"""Execute a database query.
Args:
query: SQL query to execute.
db_conn: Database connection.
Returns:
Dict with status and query results.
"""
if not db_conn:
return {"status": "error", "error": "Database not initialized"}
try:
cursor = db_conn.cursor()
cursor.execute(query)
if query.strip().upper().startswith("SELECT"):
results = cursor.fetchall()
columns = [desc[0] for desc in cursor.description] if cursor.description else []
return {"status": "success", "columns": columns, "rows": results}
else:
db_conn.commit()
return {"status": "success", "rows_affected": cursor.rowcount}
except Exception as e:
return {"status": "error", "error": str(e)}

795
rp/tools/debugging.py Normal file

@ -0,0 +1,795 @@
import builtins
import sys
import os
import ast
import inspect
import time
import threading
import gc
import weakref
import linecache
import re
import json
import subprocess
from collections import defaultdict
from datetime import datetime
class MemoryTracker:
def __init__(self):
self.allocations = defaultdict(list)
self.references = weakref.WeakValueDictionary()
self.peak_memory = 0
self.current_memory = 0
def track_object(self, obj, location):
try:
obj_id = id(obj)
obj_size = sys.getsizeof(obj)
self.allocations[location].append(
{
"id": obj_id,
"type": type(obj).__name__,
"size": obj_size,
"timestamp": time.time(),
}
)
self.current_memory += obj_size
if self.current_memory > self.peak_memory:
self.peak_memory = self.current_memory
        except Exception:
pass
def analyze_leaks(self):
gc.collect()
leaks = []
for obj in gc.get_objects():
if sys.getrefcount(obj) > 10:
try:
leaks.append(
{
"type": type(obj).__name__,
"refcount": sys.getrefcount(obj),
"size": sys.getsizeof(obj),
}
)
                except Exception:
pass
return sorted(leaks, key=lambda x: x["refcount"], reverse=True)[:20]
def get_report(self):
return {
"peak_memory": self.peak_memory,
"current_memory": self.current_memory,
"allocation_count": sum((len(v) for v in self.allocations.values())),
"leaks": self.analyze_leaks(),
}
class PerformanceProfiler:
def __init__(self):
self.function_times = defaultdict(lambda: {"calls": 0, "total_time": 0.0, "self_time": 0.0})
self.call_stack = []
self.start_times = {}
def enter_function(self, frame):
func_name = self._get_function_name(frame)
self.call_stack.append(func_name)
self.start_times[id(frame)] = time.perf_counter()
def exit_function(self, frame):
func_name = self._get_function_name(frame)
frame_id = id(frame)
if frame_id in self.start_times:
elapsed = time.perf_counter() - self.start_times[frame_id]
self.function_times[func_name]["calls"] += 1
self.function_times[func_name]["total_time"] += elapsed
self.function_times[func_name]["self_time"] += elapsed
del self.start_times[frame_id]
if self.call_stack:
self.call_stack.pop()
def _get_function_name(self, frame):
return f"{frame.f_code.co_filename}:{frame.f_code.co_name}:{frame.f_lineno}"
def get_hotspots(self, limit=20):
sorted_funcs = sorted(
self.function_times.items(), key=lambda x: x[1]["total_time"], reverse=True
)
return sorted_funcs[:limit]
class StaticAnalyzer(ast.NodeVisitor):
def __init__(self):
self.issues = []
self.complexity = 0
self.unused_vars = set()
self.undefined_vars = set()
self.defined_vars = set()
self.used_vars = set()
self.functions = {}
self.classes = {}
self.imports = []
def visit_FunctionDef(self, node):
self.functions[node.name] = {
"lineno": node.lineno,
"args": [arg.arg for arg in node.args.args],
"decorators": [
d.id if isinstance(d, ast.Name) else "complex" for d in node.decorator_list
],
"complexity": self._calculate_complexity(node),
}
        # A parsed function body is never empty in valid Python; flag
        # pass-only bodies instead.
        if all(isinstance(stmt, ast.Pass) for stmt in node.body):
            self.issues.append(f"Line {node.lineno}: Empty function '{node.name}'")
if len(node.args.args) > 7:
self.issues.append(
f"Line {node.lineno}: Function '{node.name}' has too many parameters ({len(node.args.args)})"
)
self.generic_visit(node)
def visit_ClassDef(self, node):
self.classes[node.name] = {
"lineno": node.lineno,
"bases": [b.id if isinstance(b, ast.Name) else "complex" for b in node.bases],
"methods": [],
}
self.generic_visit(node)
def visit_Import(self, node):
for alias in node.names:
self.imports.append(alias.name)
self.generic_visit(node)
def visit_ImportFrom(self, node):
if node.module:
self.imports.append(node.module)
self.generic_visit(node)
def visit_Name(self, node):
if isinstance(node.ctx, ast.Store):
self.defined_vars.add(node.id)
elif isinstance(node.ctx, ast.Load):
self.used_vars.add(node.id)
self.generic_visit(node)
def visit_If(self, node):
self.complexity += 1
self.generic_visit(node)
def visit_For(self, node):
self.complexity += 1
self.generic_visit(node)
def visit_While(self, node):
self.complexity += 1
self.generic_visit(node)
def visit_ExceptHandler(self, node):
self.complexity += 1
if node.type is None:
self.issues.append(f"Line {node.lineno}: Bare except clause (catches all exceptions)")
self.generic_visit(node)
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if isinstance(node.left, ast.Str) or isinstance(node.right, ast.Str):
self.issues.append(
f"Line {node.lineno}: String concatenation with '+' (use f-strings or join)"
)
self.generic_visit(node)
def _calculate_complexity(self, node):
complexity = 1
for child in ast.walk(node):
if isinstance(child, (ast.If, ast.For, ast.While, ast.ExceptHandler)):
complexity += 1
return complexity
def finalize(self):
self.unused_vars = self.defined_vars - self.used_vars
        self.undefined_vars = self.used_vars - self.defined_vars - set(dir(builtins))
for var in self.unused_vars:
self.issues.append(f"Unused variable: '{var}'")
def analyze_code(self, source_code):
try:
tree = ast.parse(source_code)
self.visit(tree)
self.finalize()
return {
"issues": self.issues,
"complexity": self.complexity,
"functions": self.functions,
"classes": self.classes,
"imports": self.imports,
"unused_vars": list(self.unused_vars),
}
except SyntaxError as e:
return {"error": f"Syntax error at line {e.lineno}: {e.msg}"}
class DynamicTracer:
def __init__(self):
self.execution_trace = []
self.exception_trace = []
self.variable_changes = defaultdict(list)
self.line_coverage = set()
self.function_calls = defaultdict(int)
self.max_trace_length = 10000
self.memory_tracker = MemoryTracker()
self.profiler = PerformanceProfiler()
self.trace_active = False
def trace_calls(self, frame, event, arg):
if not self.trace_active:
return
if len(self.execution_trace) >= self.max_trace_length:
return self.trace_calls
co = frame.f_code
func_name = co.co_name
filename = co.co_filename
line_no = frame.f_lineno
if "site-packages" in filename or filename.startswith("<"):
return self.trace_calls
trace_entry = {
"event": event,
"function": func_name,
"filename": filename,
"lineno": line_no,
"timestamp": time.time(),
}
if event == "call":
self.function_calls[f"{filename}:{func_name}"] += 1
self.profiler.enter_function(frame)
trace_entry["locals"] = {
k: repr(v)[:100] for k, v in frame.f_locals.items() if not k.startswith("__")
}
elif event == "return":
self.profiler.exit_function(frame)
trace_entry["return_value"] = repr(arg)[:100] if arg else None
elif event == "line":
self.line_coverage.add((filename, line_no))
line_code = linecache.getline(filename, line_no).strip()
trace_entry["code"] = line_code
for var, value in frame.f_locals.items():
if not var.startswith("__"):
self.variable_changes[var].append(
{"line": line_no, "value": repr(value)[:100], "timestamp": time.time()}
)
self.memory_tracker.track_object(value, f"{filename}:{line_no}")
elif event == "exception":
exc_type, exc_value, exc_tb = arg
self.exception_trace.append(
{
"type": exc_type.__name__,
"message": str(exc_value),
"filename": filename,
"function": func_name,
"lineno": line_no,
"timestamp": time.time(),
}
)
trace_entry["exception"] = {"type": exc_type.__name__, "message": str(exc_value)}
self.execution_trace.append(trace_entry)
return self.trace_calls
def start_tracing(self):
self.trace_active = True
sys.settrace(self.trace_calls)
threading.settrace(self.trace_calls)
def stop_tracing(self):
self.trace_active = False
sys.settrace(None)
threading.settrace(None)
def get_trace_report(self):
return {
"execution_trace": self.execution_trace[-100:],
"exception_trace": self.exception_trace,
"line_coverage": list(self.line_coverage),
"function_calls": dict(self.function_calls),
"variable_changes": {k: v[-10:] for k, v in self.variable_changes.items()},
"hotspots": self.profiler.get_hotspots(20),
"memory_report": self.memory_tracker.get_report(),
}
class GitBisectAutomator:
def __init__(self, repo_path="."):
self.repo_path = repo_path
def is_git_repo(self):
try:
result = subprocess.run(
["git", "rev-parse", "--git-dir"],
cwd=self.repo_path,
capture_output=True,
text=True,
)
return result.returncode == 0
        except Exception:
return False
def get_commit_history(self, limit=50):
try:
result = subprocess.run(
["git", "log", f"--max-count={limit}", "--oneline"],
cwd=self.repo_path,
capture_output=True,
text=True,
)
if result.returncode == 0:
commits = []
for line in result.stdout.strip().split("\n"):
parts = line.split(" ", 1)
if len(parts) == 2:
commits.append({"hash": parts[0], "message": parts[1]})
return commits
        except Exception:
pass
return []
def blame_file(self, filepath):
try:
result = subprocess.run(
["git", "blame", "-L", "1,50", filepath],
cwd=self.repo_path,
capture_output=True,
text=True,
)
if result.returncode == 0:
return result.stdout
        except Exception:
pass
return None
class LogAnalyzer:
def __init__(self):
        self.log_patterns = {
            "error": re.compile("error|exception|fail|critical", re.IGNORECASE),
            "warning": re.compile("warn|caution", re.IGNORECASE),
            "debug": re.compile("debug|trace", re.IGNORECASE),
            # Raw string: the doubled backslashes previously made this match a
            # literal backslash followed by "d" instead of digits.
            "timestamp": re.compile(r"\d{4}-\d{2}-\d{2}[\s_T]\d{2}:\d{2}:\d{2}"),
        }
self.anomalies = []
def analyze_logs(self, log_content):
lines = log_content.split("\n")
errors = []
warnings = []
timestamps = []
for i, line in enumerate(lines):
if self.log_patterns["error"].search(line):
errors.append({"line": i + 1, "content": line})
elif self.log_patterns["warning"].search(line):
warnings.append({"line": i + 1, "content": line})
ts_match = self.log_patterns["timestamp"].search(line)
if ts_match:
timestamps.append(ts_match.group())
return {
"total_lines": len(lines),
"errors": errors[:50],
"warnings": warnings[:50],
"error_count": len(errors),
"warning_count": len(warnings),
"timestamps": timestamps[:20],
}
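
With the timestamp pattern written as a raw string (see the corrected __init__ above), a quick check of the log analyzer on made-up lines:

from rp.tools.debugging import LogAnalyzer

report = LogAnalyzer().analyze_logs(
    "2025-11-07 10:00:00 INFO started\n"
    "2025-11-07 10:00:01 ERROR connection failed\n"
    "2025-11-07 10:00:02 WARN retrying\n"
)
print(report["error_count"], report["warning_count"])  # 1 1
print(report["timestamps"][0])                         # 2025-11-07 10:00:00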
class ExceptionAnalyzer:
def __init__(self):
self.exceptions = []
self.exception_counts = defaultdict(int)
def capture_exception(self, exc_type, exc_value, exc_traceback):
exc_info = {
"type": exc_type.__name__,
"message": str(exc_value),
"timestamp": time.time(),
"traceback": [],
}
tb = exc_traceback
while tb is not None:
frame = tb.tb_frame
exc_info["traceback"].append(
{
"filename": frame.f_code.co_filename,
"function": frame.f_code.co_name,
"lineno": tb.tb_lineno,
"locals": {
k: repr(v)[:100]
for k, v in frame.f_locals.items()
if not k.startswith("__")
},
}
)
tb = tb.tb_next
self.exceptions.append(exc_info)
self.exception_counts[exc_type.__name__] += 1
return exc_info
def get_report(self):
return {
"total_exceptions": len(self.exceptions),
"exception_types": dict(self.exception_counts),
"recent_exceptions": self.exceptions[-10:],
}
class TestGenerator:
def __init__(self):
self.test_cases = []
def generate_tests_for_function(self, func_name, func_signature):
test_template = f"def test_{func_name}_basic():\n result = {func_name}()\n assert result is not None\n\ndef test_{func_name}_edge_cases():\n pass\n\ndef test_{func_name}_exceptions():\n pass\n"
return test_template
def analyze_function_for_tests(self, func_obj):
sig = inspect.signature(func_obj)
test_inputs = []
for param_name, param in sig.parameters.items():
if param.annotation != inspect.Parameter.empty:
param_type = param.annotation
if param_type == int:
test_inputs.append([0, 1, -1, 100])
elif param_type == str:
test_inputs.append(["", "test", "a" * 100])
elif param_type == list:
test_inputs.append([[], [1], [1, 2, 3]])
else:
test_inputs.append([None])
else:
test_inputs.append([None, 0, "", []])
return test_inputs
class CodeFlowVisualizer:
def __init__(self):
self.flow_graph = defaultdict(list)
self.call_hierarchy = defaultdict(set)
def build_flow_from_trace(self, execution_trace):
for i in range(len(execution_trace) - 1):
current = execution_trace[i]
next_step = execution_trace[i + 1]
current_node = f"{current['function']}:{current['lineno']}"
next_node = f"{next_step['function']}:{next_step['lineno']}"
self.flow_graph[current_node].append(next_node)
if current["event"] == "call":
caller = current["function"]
callee = next_step["function"]
self.call_hierarchy[caller].add(callee)
def generate_text_visualization(self):
output = []
output.append("Call Hierarchy:")
for caller, callees in sorted(self.call_hierarchy.items()):
output.append(f" {caller}")
for callee in sorted(callees):
output.append(f" -> {callee}")
return "\n".join(output)
class AutomatedDebugger:
def __init__(self):
self.static_analyzer = StaticAnalyzer()
self.dynamic_tracer = DynamicTracer()
self.exception_analyzer = ExceptionAnalyzer()
self.log_analyzer = LogAnalyzer()
self.git_automator = GitBisectAutomator()
self.test_generator = TestGenerator()
self.flow_visualizer = CodeFlowVisualizer()
self.original_excepthook = sys.excepthook
def analyze_source_file(self, filepath):
try:
with open(filepath, "r", encoding="utf-8") as f:
source_code = f.read()
return self.static_analyzer.analyze_code(source_code)
except Exception as e:
return {"error": str(e)}
def run_with_tracing(self, target_func, *args, **kwargs):
self.dynamic_tracer.start_tracing()
result = None
exception = None
try:
result = target_func(*args, **kwargs)
except Exception as e:
exception = self.exception_analyzer.capture_exception(type(e), e, e.__traceback__)
finally:
self.dynamic_tracer.stop_tracing()
self.flow_visualizer.build_flow_from_trace(self.dynamic_tracer.execution_trace)
return {
"result": result,
"exception": exception,
"trace": self.dynamic_tracer.get_trace_report(),
"flow": self.flow_visualizer.generate_text_visualization(),
}
def analyze_logs(self, log_file_or_content):
if os.path.isfile(log_file_or_content):
with open(log_file_or_content, "r", encoding="utf-8") as f:
content = f.read()
else:
content = log_file_or_content
return self.log_analyzer.analyze_logs(content)
def generate_debug_report(self, output_file="debug_report.json"):
report = {
"timestamp": datetime.now().isoformat(),
"static_analysis": self.static_analyzer.issues,
"dynamic_trace": self.dynamic_tracer.get_trace_report(),
"exceptions": self.exception_analyzer.get_report(),
"git_info": (
self.git_automator.get_commit_history(10)
if self.git_automator.is_git_repo()
else None
),
"flow_visualization": self.flow_visualizer.generate_text_visualization(),
}
with open(output_file, "w") as f:
json.dump(report, f, indent=2, default=str)
return report
def auto_debug_function(self, func, test_inputs=None):
results = []
if test_inputs is None:
test_inputs = self.test_generator.analyze_function_for_tests(func)
for input_set in test_inputs:
try:
if isinstance(input_set, list):
result = self.run_with_tracing(func, *input_set)
else:
result = self.run_with_tracing(func, input_set)
results.append(
{
"input": input_set,
"success": result["exception"] is None,
"output": result["result"],
"trace_summary": {
"function_calls": len(result["trace"]["function_calls"]),
"exceptions": len(result["trace"]["exception_trace"]),
},
}
)
except Exception as e:
results.append({"input": input_set, "success": False, "error": str(e)})
return results
_memory_tracker = MemoryTracker()
_performance_profiler = PerformanceProfiler()
_static_analyzer = StaticAnalyzer()
_dynamic_tracer = DynamicTracer()
_git_automator = GitBisectAutomator()
_log_analyzer = LogAnalyzer()
_exception_analyzer = ExceptionAnalyzer()
_test_generator = TestGenerator()
_code_flow_visualizer = CodeFlowVisualizer()
_automated_debugger = AutomatedDebugger()
def track_memory_allocation(location: str = "manual") -> dict:
"""Track current memory allocation at a specific location."""
try:
_memory_tracker.track_object({}, location)
return {
"status": "success",
"current_memory": _memory_tracker.current_memory,
"peak_memory": _memory_tracker.peak_memory,
"location": location,
}
except Exception as e:
return {"status": "error", "error": str(e)}
def analyze_memory_leaks() -> dict:
"""Analyze potential memory leaks in the current process."""
try:
leaks = _memory_tracker.analyze_leaks()
return {"status": "success", "leaks_found": len(leaks), "top_leaks": leaks[:10]}
except Exception as e:
return {"status": "error", "error": str(e)}
def get_memory_report() -> dict:
"""Get a comprehensive memory usage report."""
try:
return {"status": "success", "report": _memory_tracker.get_report()}
except Exception as e:
return {"status": "error", "error": str(e)}
def start_performance_profiling() -> dict:
    """Start performance profiling."""
    try:
        global _performance_profiler
        _performance_profiler = PerformanceProfiler()
        return {"status": "success", "message": "Performance profiling started"}
    except Exception as e:
        return {"status": "error", "error": str(e)}
def stop_performance_profiling() -> dict:
"""Stop performance profiling and get hotspots."""
try:
hotspots = _performance_profiler.get_hotspots(20)
return {"status": "success", "hotspots": hotspots}
except Exception as e:
return {"status": "error", "error": str(e)}
def analyze_source_code(source_code: str) -> dict:
"""Perform static analysis on Python source code."""
try:
analyzer = StaticAnalyzer()
result = analyzer.analyze_code(source_code)
return {"status": "success", "analysis": result}
except Exception as e:
return {"status": "error", "error": str(e)}
def analyze_source_file(filepath: str) -> dict:
"""Analyze a Python source file statically."""
try:
result = _automated_debugger.analyze_source_file(filepath)
return {"status": "success", "filepath": filepath, "analysis": result}
except Exception as e:
return {"status": "error", "error": str(e)}
def start_dynamic_tracing() -> dict:
"""Start dynamic execution tracing."""
try:
_dynamic_tracer.start_tracing()
return {"status": "success", "message": "Dynamic tracing started"}
except Exception as e:
return {"status": "error", "error": str(e)}
def stop_dynamic_tracing() -> dict:
"""Stop dynamic tracing and get trace report."""
try:
_dynamic_tracer.stop_tracing()
report = _dynamic_tracer.get_trace_report()
return {"status": "success", "trace_report": report}
except Exception as e:
return {"status": "error", "error": str(e)}
def get_git_commit_history(limit: int = 50) -> dict:
"""Get recent git commit history."""
try:
commits = _git_automator.get_commit_history(limit)
return {
"status": "success",
"commits": commits,
"is_git_repo": _git_automator.is_git_repo(),
}
except Exception as e:
return {"status": "error", "error": str(e)}
def blame_file(filepath: str) -> dict:
"""Get git blame information for a file."""
try:
blame_output = _git_automator.blame_file(filepath)
return {"status": "success", "filepath": filepath, "blame": blame_output}
except Exception as e:
return {"status": "error", "error": str(e)}
def analyze_log_content(log_content: str) -> dict:
"""Analyze log content for errors, warnings, and patterns."""
try:
analysis = _log_analyzer.analyze_logs(log_content)
return {"status": "success", "analysis": analysis}
except Exception as e:
return {"status": "error", "error": str(e)}
def analyze_log_file(filepath: str) -> dict:
"""Analyze a log file."""
try:
analysis = _automated_debugger.analyze_logs(filepath)
return {"status": "success", "filepath": filepath, "analysis": analysis}
except Exception as e:
return {"status": "error", "error": str(e)}
def get_exception_report() -> dict:
"""Get a report of captured exceptions."""
try:
report = _exception_analyzer.get_report()
return {"status": "success", "exception_report": report}
except Exception as e:
return {"status": "error", "error": str(e)}
def generate_tests_for_function(func_name: str, func_signature: str = "") -> dict:
"""Generate test templates for a function."""
try:
test_code = _test_generator.generate_tests_for_function(func_name, func_signature)
return {"status": "success", "func_name": func_name, "test_code": test_code}
except Exception as e:
return {"status": "error", "error": str(e)}
def visualize_code_flow_from_trace(execution_trace) -> dict:
"""Visualize code flow from execution trace."""
try:
visualizer = CodeFlowVisualizer()
visualizer.build_flow_from_trace(execution_trace)
visualization = visualizer.generate_text_visualization()
return {"status": "success", "flow_visualization": visualization}
except Exception as e:
return {"status": "error", "error": str(e)}
def run_function_with_debugging(func_code: str, *args, **kwargs) -> dict:
"""Execute a function with full debugging."""
try:
local_vars = {}
exec(func_code, globals(), local_vars)
func = None
for name, obj in local_vars.items():
if callable(obj) and (not name.startswith("_")):
func = obj
break
if func is None:
return {"status": "error", "error": "No function found in code"}
result = _automated_debugger.run_with_tracing(func, *args, **kwargs)
return {"status": "success", "debug_result": result}
except Exception as e:
return {"status": "error", "error": str(e)}
def generate_comprehensive_debug_report(output_file: str = "debug_report.json") -> dict:
"""Generate a comprehensive debug report."""
try:
report = _automated_debugger.generate_debug_report(output_file)
return {
"status": "success",
"output_file": output_file,
"report_summary": {
"static_issues": len(report.get("static_analysis", [])),
"exceptions": report.get("exceptions", {}).get("total_exceptions", 0),
"function_calls": len(report.get("dynamic_trace", {}).get("function_calls", {})),
},
}
except Exception as e:
return {"status": "error", "error": str(e)}
def auto_debug_function(func_code: str, test_inputs: list = None) -> dict:
"""Automatically debug a function with test inputs."""
try:
local_vars = {}
exec(func_code, globals(), local_vars)
func = None
for name, obj in local_vars.items():
if callable(obj) and (not name.startswith("_")):
func = obj
break
if func is None:
return {"status": "error", "error": "No function found in code"}
if test_inputs is None:
test_inputs = _test_generator.analyze_function_for_tests(func)
results = _automated_debugger.auto_debug_function(func, test_inputs)
return {"status": "success", "debug_results": results}
except Exception as e:
return {"status": "error", "error": str(e)}

152
rp/tools/editor.py Normal file
View File

@ -0,0 +1,152 @@
import os
import os.path
from rp.editor import RPEditor
from ..tools.patch import display_content_diff
from ..ui.edit_feedback import track_edit, tracker
_editors = {}
def get_editor(filepath):
if filepath not in _editors:
_editors[filepath] = RPEditor(filepath)
return _editors[filepath]
def close_editor(filepath):
from rp.multiplexer import close_multiplexer, get_multiplexer
try:
path = os.path.expanduser(filepath)
        editor = _editors.pop(path, None)
        if editor is None:
            editor = RPEditor(path)
        editor.close()
mux_name = f"editor-{path}"
mux = get_multiplexer(mux_name)
if mux:
mux.write_stdout(f"Closed editor for: {path}\n")
close_multiplexer(mux_name)
return {"status": "success", "message": f"Editor closed for {path}"}
except Exception as e:
return {"status": "error", "error": str(e)}
def open_editor(filepath):
from rp.multiplexer import create_multiplexer
try:
path = os.path.expanduser(filepath)
editor = RPEditor(path)
editor.start()
mux_name = f"editor-{path}"
mux_name, mux = create_multiplexer(mux_name, show_output=True)
mux.write_stdout(f"Opened editor for: {path}\n")
return {"status": "success", "message": f"Editor opened for {path}", "mux_name": mux_name}
except Exception as e:
return {"status": "error", "error": str(e)}
def editor_insert_text(filepath, text, line=None, col=None, show_diff=True):
    from rp.multiplexer import get_multiplexer
    operation = None
    try:
path = os.path.expanduser(filepath)
old_content = ""
if os.path.exists(path):
with open(path) as f:
old_content = f.read()
position = (line if line is not None else 0) * 1000 + (col if col is not None else 0)
operation = track_edit("INSERT", filepath, start_pos=position, content=text)
tracker.mark_in_progress(operation)
editor = get_editor(path)
if line is not None and col is not None:
editor.move_cursor_to(line, col)
editor.insert_text(text)
editor.save_file()
mux_name = f"editor-{path}"
mux = get_multiplexer(mux_name)
if mux:
location = f" at line {line}, col {col}" if line is not None and col is not None else ""
preview = text[:50] + "..." if len(text) > 50 else text
mux.write_stdout(f"Inserted text{location}: {repr(preview)}\n")
if show_diff and old_content:
with open(path) as f:
new_content = f.read()
diff_result = display_content_diff(old_content, new_content, filepath)
if diff_result["status"] == "success":
mux.write_stdout(diff_result["visual_diff"] + "\n")
tracker.mark_completed(operation)
result = {"status": "success", "message": f"Inserted text in {path}"}
close_editor(filepath)
return result
    except Exception as e:
        if operation is not None:
            tracker.mark_failed(operation)
        return {"status": "error", "error": str(e)}
def editor_replace_text(
filepath, start_line, start_col, end_line, end_col, new_text, show_diff=True
):
    from rp.multiplexer import get_multiplexer
    operation = None
    try:
path = os.path.expanduser(filepath)
old_content = ""
if os.path.exists(path):
with open(path) as f:
old_content = f.read()
start_pos = start_line * 1000 + start_col
end_pos = end_line * 1000 + end_col
operation = track_edit(
"REPLACE",
filepath,
start_pos=start_pos,
end_pos=end_pos,
content=new_text,
old_content=old_content,
)
tracker.mark_in_progress(operation)
editor = get_editor(path)
editor.replace_text(start_line, start_col, end_line, end_col, new_text)
editor.save_file()
mux_name = f"editor-{path}"
mux = get_multiplexer(mux_name)
if mux:
preview = new_text[:50] + "..." if len(new_text) > 50 else new_text
mux.write_stdout(
f"Replaced text from ({start_line},{start_col}) to ({end_line},{end_col}): {repr(preview)}\n"
)
if show_diff and old_content:
with open(path) as f:
new_content = f.read()
diff_result = display_content_diff(old_content, new_content, filepath)
if diff_result["status"] == "success":
mux.write_stdout(diff_result["visual_diff"] + "\n")
tracker.mark_completed(operation)
result = {"status": "success", "message": f"Replaced text in {path}"}
close_editor(filepath)
return result
    except Exception as e:
        if operation is not None:
            tracker.mark_failed(operation)
        return {"status": "error", "error": str(e)}
def editor_search(filepath, pattern, start_line=0):
from rp.multiplexer import get_multiplexer
try:
path = os.path.expanduser(filepath)
        editor = get_editor(path)  # use the cached editor so close_editor() below releases the same instance
results = editor.search(pattern, start_line)
mux_name = f"editor-{path}"
mux = get_multiplexer(mux_name)
if mux:
mux.write_stdout(
f"Searched for pattern '{pattern}' from line {start_line}: {len(results)} matches\n"
)
result = {"status": "success", "results": results}
close_editor(filepath)
return result
except Exception as e:
return {"status": "error", "error": str(e)}

383
rp/tools/filesystem.py Normal file
View File

@ -0,0 +1,383 @@
import hashlib
import os
import time
from typing import Optional, Any
from rp.editor import RPEditor
from ..tools.patch import display_content_diff
from ..ui.diff_display import get_diff_stats
from ..ui.edit_feedback import track_edit, tracker
_id = 0
def get_uid():
global _id
_id += 3
return _id
def read_file(filepath: str, db_conn: Optional[Any] = None) -> dict:
"""
Read the contents of a file.
Args:
filepath: Path to the file to read
db_conn: Optional database connection for tracking
Returns:
dict: Status and content or error
"""
try:
path = os.path.expanduser(filepath)
with open(path) as f:
content = f.read()
if db_conn:
from rp.tools.database import db_set
db_set("read:" + path, "true", db_conn)
return {"status": "success", "content": content}
except Exception as e:
return {"status": "error", "error": str(e)}
def write_file(
filepath: str, content: str, db_conn: Optional[Any] = None, show_diff: bool = True
) -> dict:
"""
Write content to a file.
Args:
filepath: Path to the file to write
content: Content to write
db_conn: Optional database connection for tracking
show_diff: Whether to show diff of changes
Returns:
dict: Status and message or error
"""
operation = None
try:
path = os.path.expanduser(filepath)
old_content = ""
is_new_file = not os.path.exists(path)
if not is_new_file and db_conn:
from rp.tools.database import db_get
read_status = db_get("read:" + path, db_conn)
if read_status.get("status") != "success" or read_status.get("value") != "true":
return {
"status": "error",
"error": "File must be read before writing. Please read the file first.",
}
if not is_new_file:
with open(path) as f:
old_content = f.read()
operation = track_edit("WRITE", filepath, content=content, old_content=old_content)
tracker.mark_in_progress(operation)
if show_diff and (not is_new_file):
diff_result = display_content_diff(old_content, content, filepath)
if diff_result["status"] == "success":
print(diff_result["visual_diff"])
editor = RPEditor(path)
editor.set_text(content)
editor.save_file()
if os.path.exists(path) and db_conn:
try:
cursor = db_conn.cursor()
file_hash = hashlib.md5(old_content.encode()).hexdigest()
cursor.execute(
"SELECT MAX(version) FROM file_versions WHERE filepath = ?", (filepath,)
)
result = cursor.fetchone()
version = result[0] + 1 if result[0] else 1
cursor.execute(
"INSERT INTO file_versions (filepath, content, hash, timestamp, version)\n VALUES (?, ?, ?, ?, ?)",
(filepath, old_content, file_hash, time.time(), version),
)
db_conn.commit()
except Exception:
pass
tracker.mark_completed(operation)
message = f"File written to {path}"
if show_diff and (not is_new_file):
stats = get_diff_stats(old_content, content)
message += f" ({stats['insertions']}+ {stats['deletions']}-)"
return {"status": "success", "message": message}
except Exception as e:
if operation is not None:
tracker.mark_failed(operation)
return {"status": "error", "error": str(e)}
def list_directory(path=".", recursive=False):
"""List files and directories in the specified path."""
try:
path = os.path.expanduser(path)
items = []
if recursive:
for root, dirs, files in os.walk(path):
for name in files:
item_path = os.path.join(root, name)
items.append(
{"path": item_path, "type": "file", "size": os.path.getsize(item_path)}
)
for name in dirs:
items.append({"path": os.path.join(root, name), "type": "directory"})
else:
for item in os.listdir(path):
item_path = os.path.join(path, item)
items.append(
{
"name": item,
"type": "directory" if os.path.isdir(item_path) else "file",
"size": os.path.getsize(item_path) if os.path.isfile(item_path) else None,
}
)
return {"status": "success", "items": items}
except Exception as e:
return {"status": "error", "error": str(e)}
def mkdir(path):
try:
os.makedirs(os.path.expanduser(path), exist_ok=True)
return {"status": "success", "message": f"Directory created at {path}"}
except Exception as e:
return {"status": "error", "error": str(e)}
def chdir(path):
"""Change the current working directory."""
try:
os.chdir(os.path.expanduser(path))
return {"status": "success", "new_path": os.getcwd()}
except Exception as e:
return {"status": "error", "error": str(e)}
def getpwd():
"""Get the current working directory."""
try:
return {"status": "success", "path": os.getcwd()}
except Exception as e:
return {"status": "error", "error": str(e)}
def index_source_directory(path: str) -> dict:
"""
Index directory recursively and read all source files.
Args:
path: Path to index
Returns:
dict: Status and indexed files or error
"""
extensions = [
".py",
".js",
".ts",
".java",
".cpp",
".c",
".h",
".hpp",
".html",
".css",
".json",
".xml",
".md",
".sh",
".rb",
".go",
]
source_files = []
try:
for root, _, files in os.walk(os.path.expanduser(path)):
for file in files:
if any((file.endswith(ext) for ext in extensions)):
filepath = os.path.join(root, file)
try:
with open(filepath, encoding="utf-8") as f:
content = f.read()
source_files.append({"path": filepath, "content": content})
except Exception:
continue
return {"status": "success", "indexed_files": source_files}
except Exception as e:
return {"status": "error", "error": str(e)}
def search_replace(
filepath: str, old_string: str, new_string: str, db_conn: Optional[Any] = None
) -> dict:
"""
Search and replace text in a file.
Args:
filepath: Path to the file
old_string: String to replace
new_string: Replacement string
db_conn: Optional database connection for tracking
Returns:
dict: Status and message or error
"""
try:
path = os.path.expanduser(filepath)
if not os.path.exists(path):
return {"status": "error", "error": "File does not exist"}
if db_conn:
from rp.tools.database import db_get
read_status = db_get("read:" + path, db_conn)
if read_status.get("status") != "success" or read_status.get("value") != "true":
return {
"status": "error",
"error": "File must be read before writing. Please read the file first.",
}
with open(path) as f:
content = f.read()
content = content.replace(old_string, new_string)
with open(path, "w") as f:
f.write(content)
return {
"status": "success",
"message": f"Replaced '{old_string}' with '{new_string}' in {path}",
}
except Exception as e:
return {"status": "error", "error": str(e)}
_editors = {}
def get_editor(filepath):
if filepath not in _editors:
_editors[filepath] = RPEditor(filepath)
return _editors[filepath]
def close_editor(filepath):
try:
path = os.path.expanduser(filepath)
        editor = _editors.pop(path, None)
        if editor is None:
            editor = RPEditor(path)
        editor.close()
return {"status": "success", "message": f"Editor closed for {path}"}
except Exception as e:
return {"status": "error", "error": str(e)}
def open_editor(filepath):
try:
path = os.path.expanduser(filepath)
editor = RPEditor(path)
editor.start()
return {"status": "success", "message": f"Editor opened for {path}"}
except Exception as e:
return {"status": "error", "error": str(e)}
def editor_insert_text(filepath, text, line=None, col=None, show_diff=True, db_conn=None):
operation = None
try:
path = os.path.expanduser(filepath)
if db_conn:
from rp.tools.database import db_get
read_status = db_get("read:" + path, db_conn)
if read_status.get("status") != "success" or read_status.get("value") != "true":
return {
"status": "error",
"error": "File must be read before writing. Please read the file first.",
}
old_content = ""
if os.path.exists(path):
with open(path) as f:
old_content = f.read()
position = (line if line is not None else 0) * 1000 + (col if col is not None else 0)
operation = track_edit("INSERT", filepath, start_pos=position, content=text)
tracker.mark_in_progress(operation)
editor = get_editor(path)
if line is not None and col is not None:
editor.move_cursor_to(line, col)
editor.insert_text(text)
editor.save_file()
if show_diff and old_content:
with open(path) as f:
new_content = f.read()
diff_result = display_content_diff(old_content, new_content, filepath)
if diff_result["status"] == "success":
print(diff_result["visual_diff"])
tracker.mark_completed(operation)
return {"status": "success", "message": f"Inserted text in {path}"}
except Exception as e:
if operation is not None:
tracker.mark_failed(operation)
return {"status": "error", "error": str(e)}
def editor_replace_text(
filepath, start_line, start_col, end_line, end_col, new_text, show_diff=True, db_conn=None
):
    operation = None
    try:
path = os.path.expanduser(filepath)
if db_conn:
from rp.tools.database import db_get
read_status = db_get("read:" + path, db_conn)
if read_status.get("status") != "success" or read_status.get("value") != "true":
return {
"status": "error",
"error": "File must be read before writing. Please read the file first.",
}
old_content = ""
if os.path.exists(path):
with open(path) as f:
old_content = f.read()
start_pos = start_line * 1000 + start_col
end_pos = end_line * 1000 + end_col
operation = track_edit(
"REPLACE",
filepath,
start_pos=start_pos,
end_pos=end_pos,
content=new_text,
old_content=old_content,
)
tracker.mark_in_progress(operation)
editor = get_editor(path)
editor.replace_text(start_line, start_col, end_line, end_col, new_text)
editor.save_file()
if show_diff and old_content:
with open(path) as f:
new_content = f.read()
diff_result = display_content_diff(old_content, new_content, filepath)
if diff_result["status"] == "success":
print(diff_result["visual_diff"])
tracker.mark_completed(operation)
return {"status": "success", "message": f"Replaced text in {path}"}
except Exception as e:
if operation is not None:
tracker.mark_failed(operation)
return {"status": "error", "error": str(e)}
def display_edit_summary():
from ..ui.edit_feedback import display_edit_summary
return display_edit_summary()
def display_edit_timeline(show_content=False):
from ..ui.edit_feedback import display_edit_timeline
return display_edit_timeline(show_content)
def clear_edit_tracker():
from ..ui.edit_feedback import clear_tracker
clear_tracker()
return {"status": "success", "message": "Edit tracker cleared"}

View File

@ -0,0 +1,170 @@
import subprocess
import threading
import importlib
def _get_multiplexer_functions():
"""Lazy import multiplexer functions to avoid circular imports."""
multiplexer = importlib.import_module("rp.multiplexer")
return {
"create_multiplexer": multiplexer.create_multiplexer,
"get_multiplexer": multiplexer.get_multiplexer,
"close_multiplexer": multiplexer.close_multiplexer,
"get_all_multiplexer_states": multiplexer.get_all_multiplexer_states,
}
def start_interactive_session(command, session_name=None, process_type="generic", cwd=None):
"""
Start an interactive session in a dedicated multiplexer.
Args:
command: The command to run (list or string)
session_name: Optional name for the session
process_type: Type of process (ssh, vim, apt, etc.)
cwd: Current working directory for the command
Returns:
session_name: The name of the created session
"""
funcs = _get_multiplexer_functions()
name, mux = funcs["create_multiplexer"](session_name)
mux.update_metadata("process_type", process_type)
if isinstance(command, str):
command = command.split()
try:
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
bufsize=1,
cwd=cwd,
)
mux.process = process
mux.update_metadata("pid", process.pid)
from rp.tools.process_handlers import detect_process_type
detected_type = detect_process_type(command)
mux.set_process_type(detected_type)
stdout_thread = threading.Thread(
target=_read_output, args=(process.stdout, mux.write_stdout, detected_type), daemon=True
)
stderr_thread = threading.Thread(
target=_read_output, args=(process.stderr, mux.write_stderr, detected_type), daemon=True
)
stdout_thread.start()
stderr_thread.start()
mux.stdout_thread = stdout_thread
mux.stderr_thread = stderr_thread
return name
except Exception as e:
funcs["close_multiplexer"](name)
raise e
def _read_output(stream, write_func, process_type):
"""Read from a stream and write to multiplexer buffer."""
if process_type in ["vim", "ssh"]:
try:
while True:
char = stream.read(1)
if not char:
break
write_func(char)
except Exception as e:
print(f"Error reading output: {e}")
else:
try:
for line in iter(stream.readline, ""):
if line:
write_func(line.rstrip("\n"))
except Exception as e:
print(f"Error reading output: {e}")
def send_input_to_session(session_name, input_data):
"""
Send input to an interactive session.
Args:
session_name: Name of the session
input_data: Input string to send
"""
funcs = _get_multiplexer_functions()
mux = funcs["get_multiplexer"](session_name)
if not mux:
raise ValueError(f"Session {session_name} not found")
if not hasattr(mux, "process") or mux.process.poll() is not None:
raise ValueError(f"Session {session_name} is not active")
try:
mux.process.stdin.write(input_data + "\n")
mux.process.stdin.flush()
except Exception as e:
raise RuntimeError(f"Failed to send input to session {session_name}: {e}")
def read_session_output(session_name, lines=None):
    """
    Read output from a session.
    Args:
        session_name: Name of the session
        lines: Number of recent lines to return (None for all)
    Returns:
        dict: {'stdout': str, 'stderr': str}
    """
    funcs = _get_multiplexer_functions()
mux = funcs["get_multiplexer"](session_name)
if not mux:
raise ValueError(f"Session {session_name} not found")
output = mux.get_all_output()
if lines is not None:
stdout_lines = output["stdout"].split("\n")[-lines:] if output["stdout"] else []
stderr_lines = output["stderr"].split("\n")[-lines:] if output["stderr"] else []
output = {"stdout": "\n".join(stdout_lines), "stderr": "\n".join(stderr_lines)}
return output
def list_active_sessions():
"""
List all active interactive sessions.
Returns:
dict: Session states
"""
funcs = _get_multiplexer_functions()
return funcs["get_all_multiplexer_states"]()
def get_session_status(session_name):
"""
Get detailed status of a session.
Args:
session_name: Name of the session
Returns:
dict: Session metadata and status
"""
funcs = _get_multiplexer_functions()
mux = funcs["get_multiplexer"](session_name)
if not mux:
return None
status = mux.get_metadata()
status["is_active"] = hasattr(mux, "process") and mux.process.poll() is None
if status["is_active"]:
status["pid"] = mux.process.pid
status["output_summary"] = {
"stdout_lines": len(mux.stdout_buffer),
"stderr_lines": len(mux.stderr_buffer),
}
return status
def close_interactive_session(session_name):
"""
Close an interactive session.
"""
try:
funcs = _get_multiplexer_functions()
mux = funcs["get_multiplexer"](session_name)
        if mux and hasattr(mux, "process") and mux.process.poll() is None:
            mux.process.kill()
funcs["close_multiplexer"](session_name)
return {"status": "success"}
except Exception as e:
return {"status": "error", "error": str(e)}

99
rp/tools/memory.py Normal file
View File

@ -0,0 +1,99 @@
import os
import time
import uuid
from typing import Any, Dict
from rp.memory.knowledge_store import KnowledgeEntry, KnowledgeStore
def add_knowledge_entry(
category: str, content: str, metadata: Dict[str, Any] = None, entry_id: str = None
) -> Dict[str, Any]:
"""Add a new entry to the knowledge base."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
if entry_id is None:
entry_id = str(uuid.uuid4())[:16]
entry = KnowledgeEntry(
entry_id=entry_id,
category=category,
content=content,
metadata=metadata or {},
created_at=time.time(),
updated_at=time.time(),
)
store.add_entry(entry)
return {"status": "success", "entry_id": entry_id}
except Exception as e:
return {"status": "error", "error": str(e)}
def get_knowledge_entry(entry_id: str) -> Dict[str, Any]:
"""Retrieve a knowledge entry by ID."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
entry = store.get_entry(entry_id)
if entry:
return {"status": "success", "entry": entry.to_dict()}
else:
return {"status": "not_found", "entry_id": entry_id}
except Exception as e:
return {"status": "error", "error": str(e)}
def search_knowledge(query: str, category: str = None, top_k: int = 5) -> Dict[str, Any]:
"""Search the knowledge base semantically."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
entries = store.search_entries(query, category, top_k)
results = [entry.to_dict() for entry in entries]
return {"status": "success", "results": results}
except Exception as e:
return {"status": "error", "error": str(e)}
def get_knowledge_by_category(category: str, limit: int = 20) -> Dict[str, Any]:
"""Get knowledge entries by category."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
entries = store.get_by_category(category, limit)
results = [entry.to_dict() for entry in entries]
return {"status": "success", "entries": results}
except Exception as e:
return {"status": "error", "error": str(e)}
def update_knowledge_importance(entry_id: str, importance_score: float) -> Dict[str, Any]:
"""Update the importance score of a knowledge entry."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
store.update_importance(entry_id, importance_score)
return {"status": "success", "entry_id": entry_id, "importance_score": importance_score}
except Exception as e:
return {"status": "error", "error": str(e)}
def delete_knowledge_entry(entry_id: str) -> Dict[str, Any]:
"""Delete a knowledge entry."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
success = store.delete_entry(entry_id)
return {"status": "success" if success else "not_found", "entry_id": entry_id}
except Exception as e:
return {"status": "error", "error": str(e)}
def get_knowledge_statistics() -> Dict[str, Any]:
"""Get statistics about the knowledge base."""
try:
db_path = os.path.expanduser("~/.assistant_db.sqlite")
store = KnowledgeStore(db_path)
stats = store.get_statistics()
return {"status": "success", "statistics": stats}
except Exception as e:
return {"status": "error", "error": str(e)}

111
rp/tools/patch.py Normal file
View File

@ -0,0 +1,111 @@
import difflib
import os
import subprocess
import tempfile
from ..ui.diff_display import display_diff, get_diff_stats
def apply_patch(filepath, patch_content, db_conn=None):
"""Apply a patch to a file.
Args:
filepath: Path to the file to patch.
patch_content: The patch content as a string.
db_conn: Database connection (optional).
Returns:
Dict with status and output.
"""
try:
path = os.path.expanduser(filepath)
if db_conn:
from rp.tools.database import db_get
read_status = db_get("read:" + path, db_conn)
if read_status.get("status") != "success" or read_status.get("value") != "true":
return {
"status": "error",
"error": "File must be read before writing. Please read the file first.",
}
with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".patch") as f:
f.write(patch_content)
patch_file = f.name
result = subprocess.run(
["patch", path, patch_file], capture_output=True, text=True, cwd=os.path.dirname(path)
)
os.unlink(patch_file)
if result.returncode == 0:
return {"status": "success", "output": result.stdout.strip()}
else:
return {"status": "error", "error": result.stderr.strip()}
except Exception as e:
return {"status": "error", "error": str(e)}
def create_diff(
file1, file2, fromfile="file1", tofile="file2", visual=False, format_type="unified"
):
"""Create a unified diff between two files.
Args:
file1: Path to the first file.
file2: Path to the second file.
fromfile: Label for the first file.
tofile: Label for the second file.
visual: Whether to include visual diff.
format_type: Diff format type.
Returns:
Dict with status and diff content.
"""
try:
path1 = os.path.expanduser(file1)
path2 = os.path.expanduser(file2)
with open(path1) as f1, open(path2) as f2:
content1 = f1.read()
content2 = f2.read()
if visual:
visual_diff = display_diff(content1, content2, fromfile, format_type)
stats = get_diff_stats(content1, content2)
lines1 = content1.splitlines(keepends=True)
lines2 = content2.splitlines(keepends=True)
plain_diff = list(
difflib.unified_diff(lines1, lines2, fromfile=fromfile, tofile=tofile)
)
return {
"status": "success",
"diff": "".join(plain_diff),
"visual_diff": visual_diff,
"stats": stats,
}
else:
lines1 = content1.splitlines(keepends=True)
lines2 = content2.splitlines(keepends=True)
diff = list(difflib.unified_diff(lines1, lines2, fromfile=fromfile, tofile=tofile))
return {"status": "success", "diff": "".join(diff)}
except Exception as e:
return {"status": "error", "error": str(e)}
def display_file_diff(filepath1, filepath2, format_type="unified", context_lines=3):
try:
path1 = os.path.expanduser(filepath1)
path2 = os.path.expanduser(filepath2)
with open(path1) as f1:
old_content = f1.read()
with open(path2) as f2:
new_content = f2.read()
visual_diff = display_diff(old_content, new_content, filepath1, format_type)
stats = get_diff_stats(old_content, new_content)
return {"status": "success", "visual_diff": visual_diff, "stats": stats}
except Exception as e:
return {"status": "error", "error": str(e)}
def display_content_diff(old_content, new_content, filename="file", format_type="unified"):
try:
visual_diff = display_diff(old_content, new_content, filename, format_type)
stats = get_diff_stats(old_content, new_content)
return {"status": "success", "visual_diff": visual_diff, "stats": stats}
except Exception as e:
return {"status": "error", "error": str(e)}

View File

@ -0,0 +1,255 @@
from abc import ABC, abstractmethod
class ProcessHandler(ABC):
"""Base class for process-specific handlers."""
def __init__(self, multiplexer):
self.multiplexer = multiplexer
self.state_machine = {}
self.current_state = "initial"
self.prompt_patterns = []
self.response_suggestions = {}
@abstractmethod
def get_process_type(self):
"""Return the process type this handler manages."""
def update_state(self, output):
"""Update internal state based on output."""
def get_prompt_suggestions(self):
"""Return suggested responses for current state."""
return self.response_suggestions.get(self.current_state, [])
def is_waiting_for_input(self):
"""Check if process appears to be waiting for input."""
return self.current_state in ["waiting_confirmation", "waiting_input"]
class AptHandler(ProcessHandler):
"""Handler for apt package manager interactions."""
def __init__(self, multiplexer):
super().__init__(multiplexer)
self.state_machine = {
"initial": ["running_command"],
"running_command": ["waiting_confirmation", "completed"],
"waiting_confirmation": ["confirmed", "cancelled"],
"confirmed": ["installing", "completed"],
"installing": ["completed", "error"],
"completed": [],
"error": [],
"cancelled": [],
}
self.prompt_patterns = [
("Do you want to continue\\?", "confirmation"),
("After this operation.*installed\\.", "size_info"),
("Need to get.*B of archives\\.", "download_info"),
("Unpacking.*Configuring", "configuring"),
("Setting up", "setting_up"),
("E:\\s", "error"),
]
def get_process_type(self):
return "apt"
def update_state(self, output):
"""Update state based on apt output patterns."""
output_lower = output.lower()
if "processing triggers" in output_lower or "done" in output_lower:
self.current_state = "completed"
elif "do you want to continue" in output_lower:
self.current_state = "waiting_confirmation"
elif "setting up" in output_lower or "unpacking" in output_lower:
self.current_state = "installing"
elif "e:" in output_lower or "error" in output_lower:
self.current_state = "error"
def get_prompt_suggestions(self):
"""Return suggested responses for apt prompts."""
suggestions = super().get_prompt_suggestions()
if self.current_state == "waiting_confirmation":
suggestions.extend(["y", "yes", "n", "no"])
return suggestions
class VimHandler(ProcessHandler):
"""Handler for vim editor interactions."""
def __init__(self, multiplexer):
super().__init__(multiplexer)
self.state_machine = {
"initial": ["normal_mode", "insert_mode"],
"normal_mode": ["insert_mode", "command_mode", "visual_mode"],
"insert_mode": ["normal_mode"],
"command_mode": ["normal_mode"],
"visual_mode": ["normal_mode"],
"exiting": [],
}
self.prompt_patterns = [
("-- INSERT --", "insert_mode"),
("-- VISUAL --", "visual_mode"),
(":", "command_mode"),
("Press ENTER", "waiting_enter"),
("Saved", "saved"),
]
self.mode_indicators = {"insert": "-- INSERT --", "visual": "-- VISUAL --", "command": ":"}
def get_process_type(self):
return "vim"
def update_state(self, output):
"""Update state based on vim mode indicators."""
if "-- INSERT --" in output:
self.current_state = "insert_mode"
elif "-- VISUAL --" in output:
self.current_state = "visual_mode"
elif output.strip().endswith(":"):
self.current_state = "command_mode"
elif "Press ENTER" in output:
self.current_state = "waiting_enter"
else:
self.current_state = "normal_mode"
def get_prompt_suggestions(self):
"""Return suggested commands for vim modes."""
suggestions = super().get_prompt_suggestions()
if self.current_state == "command_mode":
suggestions.extend(["w", "q", "wq", "q!", "w!"])
elif self.current_state == "normal_mode":
suggestions.extend(["i", "a", "o", "dd", ":w", ":q"])
elif self.current_state == "waiting_enter":
suggestions.extend(["\n"])
return suggestions
class SSHHandler(ProcessHandler):
"""Handler for SSH connection interactions."""
def __init__(self, multiplexer):
super().__init__(multiplexer)
self.state_machine = {
"initial": ["connecting"],
"connecting": ["auth_prompt", "connected", "failed"],
"auth_prompt": ["connected", "failed"],
"connected": ["shell", "disconnected"],
"shell": ["disconnected"],
"failed": [],
"disconnected": [],
}
self.prompt_patterns = [
("password:", "password_prompt"),
("yes/no", "host_key_prompt"),
("Permission denied", "auth_failed"),
("Welcome to", "connected"),
("\\$", "shell_prompt"),
("\\#", "root_shell_prompt"),
("Connection closed", "disconnected"),
]
def get_process_type(self):
return "ssh"
def update_state(self, output):
"""Update state based on SSH connection output."""
output_lower = output.lower()
if "permission denied" in output_lower:
self.current_state = "failed"
elif "password:" in output_lower:
self.current_state = "auth_prompt"
elif "yes/no" in output_lower:
self.current_state = "auth_prompt"
elif "welcome to" in output_lower or "last login" in output_lower:
self.current_state = "connected"
elif output.strip().endswith("$") or output.strip().endswith("#"):
self.current_state = "shell"
elif "connection closed" in output_lower:
self.current_state = "disconnected"
def get_prompt_suggestions(self):
"""Return suggested responses for SSH prompts."""
suggestions = super().get_prompt_suggestions()
if self.current_state == "auth_prompt":
if "password:" in self.multiplexer.get_all_output()["stdout"]:
suggestions.extend(["<password>"])
elif "yes/no" in self.multiplexer.get_all_output()["stdout"]:
suggestions.extend(["yes", "no"])
return suggestions
class GenericProcessHandler(ProcessHandler):
"""Fallback handler for unknown process types."""
def __init__(self, multiplexer):
super().__init__(multiplexer)
self.state_machine = {
"initial": ["running"],
"running": ["waiting_input", "completed"],
"waiting_input": ["running"],
"completed": [],
}
self.prompt_patterns = [
("\\?\\s*$", "waiting_input"),
(">\\s*$", "waiting_input"),
(":\\s*$", "waiting_input"),
("done", "completed"),
("finished", "completed"),
("exit code", "completed"),
]
def get_process_type(self):
return "generic"
def update_state(self, output):
"""Basic state detection for generic processes."""
output_lower = output.lower()
if any((pattern in output_lower for pattern in ["done", "finished", "complete"])):
self.current_state = "completed"
elif any((output.strip().endswith(char) for char in ["?", ">", ":"])):
self.current_state = "waiting_input"
else:
self.current_state = "running"
_handler_classes = {
"apt": AptHandler,
"vim": VimHandler,
"ssh": SSHHandler,
"generic": GenericProcessHandler,
}
def get_handler_for_process(process_type, multiplexer):
"""Get appropriate handler for a process type."""
handler_class = _handler_classes.get(process_type, GenericProcessHandler)
return handler_class(multiplexer)
def detect_process_type(command):
"""Detect process type from command."""
command_str = " ".join(command) if isinstance(command, list) else command
command_lower = command_str.lower()
if "apt" in command_lower or "apt-get" in command_lower:
return "apt"
elif "vim" in command_lower or "vi " in command_lower:
return "vim"
elif "ssh" in command_lower:
return "ssh"
else:
return "generic"
return "ssh"
def detect_process_type(command):
"""Detect process type from command."""
command_str = " ".join(command) if isinstance(command, list) else command
command_lower = command_str.lower()
if "apt" in command_lower or "apt-get" in command_lower:
return "apt"
elif "vim" in command_lower or "vi " in command_lower:
return "vim"
elif "ssh" in command_lower:
return "ssh"
else:
return "generic"

View File

@ -0,0 +1,246 @@
import re
import time
class PromptDetector:
"""Detects various process prompts and manages interaction state."""
def __init__(self):
self.prompt_patterns = self._load_prompt_patterns()
self.state_machines = self._load_state_machines()
self.session_states = {}
self.timeout_configs = {"default": 30, "apt": 300, "ssh": 60, "vim": 3600}
def _load_prompt_patterns(self):
"""Load regex patterns for detecting various prompts."""
return {
"bash_prompt": [
re.compile("[\\w\\-\\.]+@[\\w\\-\\.]+:.*[\\$#]\\s*$"),
re.compile("\\$\\s*$"),
re.compile("#\\s*$"),
re.compile(">\\s*$"),
],
"confirmation": [
re.compile("[Yy]/[Nn]", re.IGNORECASE),
re.compile("[Yy]es/[Nn]o", re.IGNORECASE),
re.compile("continue\\?", re.IGNORECASE),
re.compile("proceed\\?", re.IGNORECASE),
],
"password": [
re.compile("password:", re.IGNORECASE),
re.compile("passphrase:", re.IGNORECASE),
re.compile("enter password", re.IGNORECASE),
],
"sudo_password": [re.compile("\\[sudo\\].*password", re.IGNORECASE)],
"apt": [
re.compile("Do you want to continue\\?", re.IGNORECASE),
re.compile("After this operation", re.IGNORECASE),
re.compile("Need to get", re.IGNORECASE),
],
"vim": [
re.compile("-- INSERT --"),
re.compile("-- VISUAL --"),
re.compile(":"),
re.compile("Press ENTER", re.IGNORECASE),
],
"ssh": [
re.compile("yes/no", re.IGNORECASE),
re.compile("password:", re.IGNORECASE),
re.compile("Permission denied", re.IGNORECASE),
],
"git": [re.compile("Username:", re.IGNORECASE), re.compile("Email:", re.IGNORECASE)],
"error": [
re.compile("error:", re.IGNORECASE),
re.compile("failed", re.IGNORECASE),
re.compile("exception", re.IGNORECASE),
],
}
def _load_state_machines(self):
"""Load state machines for different process types."""
return {
"apt": {
"states": ["initial", "running", "confirming", "installing", "completed", "error"],
"transitions": {
"initial": ["running"],
"running": ["confirming", "installing", "completed", "error"],
"confirming": ["installing", "cancelled"],
"installing": ["completed", "error"],
"completed": [],
"error": [],
"cancelled": [],
},
},
"ssh": {
"states": ["initial", "connecting", "authenticating", "connected", "error"],
"transitions": {
"initial": ["connecting"],
"connecting": ["authenticating", "connected", "error"],
"authenticating": ["connected", "error"],
"connected": ["error"],
"error": [],
},
},
"vim": {
"states": ["initial", "normal", "insert", "visual", "command", "exiting"],
"transitions": {
"initial": ["normal", "insert"],
"normal": ["insert", "visual", "command", "exiting"],
"insert": ["normal"],
"visual": ["normal"],
"command": ["normal", "exiting"],
"exiting": [],
},
},
}
def detect_prompt(self, output, process_type="generic"):
"""Detect what type of prompt is present in the output."""
detections = {}
for category, patterns in self.prompt_patterns.items():
for pattern in patterns:
if pattern.search(output):
if category not in detections:
detections[category] = []
detections[category].append(pattern.pattern)
if process_type in self.prompt_patterns:
for pattern in self.prompt_patterns[process_type]:
if pattern.search(output):
detections[process_type] = detections.get(process_type, [])
detections[process_type].append(pattern.pattern)
return detections
def get_response_suggestions(self, prompt_detections, process_type="generic"):
"""Get suggested responses based on detected prompts."""
suggestions = []
for category, patterns in prompt_detections.items():
if category == "confirmation":
suggestions.extend(["y", "yes", "n", "no"])
elif category == "password":
suggestions.append("<password>")
elif category == "sudo_password":
suggestions.append("<sudo_password>")
elif category == "apt":
if any(("continue" in p for p in patterns)):
suggestions.extend(["y", "yes"])
elif category == "vim":
if any((":" in p for p in patterns)):
suggestions.extend(["w", "q", "wq", "q!"])
elif any(("ENTER" in p for p in patterns)):
suggestions.append("\n")
elif category == "ssh":
if any(("yes/no" in p for p in patterns)):
suggestions.extend(["yes", "no"])
elif any(("password" in p for p in patterns)):
suggestions.append("<password>")
elif category == "bash_prompt":
suggestions.extend(["help", "ls", "pwd", "exit"])
return list(set(suggestions))
def update_session_state(self, session_name, output, process_type="generic"):
"""Update the state machine for a session based on output."""
if session_name not in self.session_states:
self.session_states[session_name] = {
"current_state": "initial",
"process_type": process_type,
"last_activity": time.time(),
"transitions": [],
}
session_state = self.session_states[session_name]
old_state = session_state["current_state"]
detections = self.detect_prompt(output, process_type)
new_state = self._determine_state_from_detections(detections, process_type, old_state)
if new_state != old_state:
session_state["transitions"].append(
{
"from": old_state,
"to": new_state,
"timestamp": time.time(),
"trigger": detections,
}
)
session_state["current_state"] = new_state
session_state["last_activity"] = time.time()
return new_state
def _determine_state_from_detections(self, detections, process_type, current_state):
"""Determine new state based on prompt detections."""
if "confirmation" in detections and current_state in ["running", "initial"]:
return "confirming"
elif "password" in detections or "sudo_password" in detections:
return "authenticating"
elif "error" in detections:
return "error"
elif "bash_prompt" in detections and current_state != "initial":
return "connected" if process_type == "ssh" else "completed"
elif "vim" in detections:
if any(("-- INSERT --" in p for p in detections.get("vim", []))):
return "insert"
elif any(("-- VISUAL --" in p for p in detections.get("vim", []))):
return "visual"
elif any((":" in p for p in detections.get("vim", []))):
return "command"
if current_state == "initial":
return "running"
elif current_state == "running" and detections:
return "waiting_input"
elif current_state == "waiting_input" and (not detections):
return "running"
return current_state
def is_waiting_for_input(self, session_name):
"""Check if a session is currently waiting for input."""
if session_name not in self.session_states:
return False
state = self.session_states[session_name]["current_state"]
process_type = self.session_states[session_name]["process_type"]
waiting_states = {
"generic": ["waiting_input"],
"apt": ["confirming"],
"ssh": ["authenticating"],
"vim": ["command", "insert", "visual"],
}
return state in waiting_states.get(process_type, [])
def get_session_timeout(self, session_name):
"""Get the timeout for a session based on its process type."""
if session_name not in self.session_states:
return self.timeout_configs["default"]
process_type = self.session_states[session_name]["process_type"]
return self.timeout_configs.get(process_type, self.timeout_configs["default"])
def check_for_timeouts(self):
"""Check all sessions for timeouts and return timed out sessions."""
timed_out = []
current_time = time.time()
for session_name, state in self.session_states.items():
timeout = self.get_session_timeout(session_name)
if current_time - state["last_activity"] > timeout:
timed_out.append(session_name)
return timed_out
def get_session_info(self, session_name):
"""Get information about a session's state."""
if session_name not in self.session_states:
return None
state = self.session_states[session_name]
return {
"current_state": state["current_state"],
"process_type": state["process_type"],
"last_activity": state["last_activity"],
"transitions": state["transitions"][-5:],
"is_waiting": self.is_waiting_for_input(session_name),
}
_detector = None
def get_global_detector():
"""Get the global prompt detector instance."""
global _detector
if _detector is None:
_detector = PromptDetector()
return _detector
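A minimal usage sketch of the prompt detector above (illustrative only; the module path is an assumption, since this file's header is not shown):

from rp.tools.prompt_detection import get_global_detector  # path assumed

detector = get_global_detector()
hits = detector.detect_prompt("Do you want to continue? [Y/n]", process_type="apt")
print(hits)  # pattern matches grouped by category, e.g. "confirmation" and "apt"
print(detector.get_response_suggestions(hits, process_type="apt"))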

32
rp/tools/python_exec.py Normal file
View File

@ -0,0 +1,32 @@
import contextlib
import os
import traceback
from io import StringIO
def python_exec(code, python_globals, cwd=None):
"""Execute Python code and capture the output.
Args:
code: The Python code to execute.
python_globals: Dictionary of global variables for execution.
cwd: Working directory for execution.
Returns:
Dict with status and output, or error information.
"""
try:
original_cwd = None
if cwd:
original_cwd = os.getcwd()
os.chdir(cwd)
output = StringIO()
with contextlib.redirect_stdout(output):
exec(code, python_globals)
if original_cwd:
os.chdir(original_cwd)
return {"status": "success", "output": output.getvalue()}
except Exception as e:
if original_cwd:
os.chdir(original_cwd)
return {"status": "error", "error": str(e), "traceback": traceback.format_exc()}

19
rp/tools/vision.py Normal file
View File

@ -0,0 +1,19 @@
from rp.vision import post_image as vision_post_image
import functools
@functools.lru_cache()
def post_image(path: str, prompt: str = None):
"""Post an image for analysis.
Args:
path: Path to the image file.
prompt: Optional prompt for analysis.
Returns:
Analysis result.
"""
    return vision_post_image(path=path, prompt=prompt)

62
rp/tools/web.py Normal file
View File

@ -0,0 +1,62 @@
import json
import urllib.error
import urllib.parse
import urllib.request
def http_fetch(url, headers=None):
"""Fetch content from an HTTP URL.
Args:
url: The URL to fetch.
headers: Optional HTTP headers.
Returns:
Dict with status and content.
"""
try:
req = urllib.request.Request(url)
if headers:
for key, value in headers.items():
req.add_header(key, value)
with urllib.request.urlopen(req) as response:
content = response.read().decode("utf-8")
return {"status": "success", "content": content[:10000]}
except Exception as e:
return {"status": "error", "error": str(e)}
def _perform_search(base_url, query, params=None):
try:
full_url = f"https://static.molodetz.nl/search.cgi?query={query}"
with urllib.request.urlopen(full_url) as response:
content = response.read().decode("utf-8")
return {"status": "success", "content": json.loads(content)}
except Exception as e:
return {"status": "error", "error": str(e)}
def web_search(query):
"""Perform a web search.
Args:
query: Search query.
Returns:
Dict with status and search results.
"""
base_url = "https://search.molodetz.nl/search"
return _perform_search(base_url, query)
def web_search_news(query):
"""Perform a web search for news.
Args:
query: Search query for news.
Returns:
Dict with status and news search results.
"""
base_url = "https://search.molodetz.nl/search"
return _perform_search(base_url, query)
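A minimal usage sketch for rp/tools/web.py (illustrative only; both calls require network access):

from rp.tools.web import http_fetch, web_search

print(http_fetch("https://example.com")["status"])
print(web_search("python asyncio")["status"])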

12
rp/ui/__init__.py Normal file
View File

@ -0,0 +1,12 @@
from rp.ui.colors import Colors, Spinner
from rp.ui.display import display_tool_call, print_autonomous_header
from rp.ui.rendering import highlight_code, render_markdown
__all__ = [
"Colors",
"Spinner",
"highlight_code",
"render_markdown",
"display_tool_call",
"print_autonomous_header",
]

44
rp/ui/colors.py Normal file
View File

@ -0,0 +1,44 @@
import asyncio
class Colors:
RESET = "\x1b[0m"
BOLD = "\x1b[1m"
RED = "\x1b[91m"
GREEN = "\x1b[92m"
YELLOW = "\x1b[93m"
BLUE = "\x1b[94m"
MAGENTA = "\x1b[95m"
CYAN = "\x1b[96m"
GRAY = "\x1b[90m"
WHITE = "\x1b[97m"
BG_BLUE = "\x1b[44m"
BG_GREEN = "\x1b[42m"
BG_RED = "\x1b[41m"
class Spinner:
def __init__(self, message="Processing...", spinner_chars="|/-\\"):
self.message = message
self.spinner_chars = spinner_chars
self.running = False
self.task = None
async def start(self):
self.running = True
self.task = asyncio.create_task(self._spin())
async def stop(self):
self.running = False
if self.task:
await self.task
print("\r" + " " * (len(self.message) + 2) + "\r", end="", flush=True)
async def _spin(self):
i = 0
while self.running:
char = self.spinner_chars[i % len(self.spinner_chars)]
print(f"\r{Colors.CYAN}{char}{Colors.RESET} {self.message}", end="", flush=True)
i += 1
await asyncio.sleep(0.1)
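A minimal usage sketch for the Spinner above (illustrative only):

import asyncio
from rp.ui.colors import Spinner

async def main():
    spinner = Spinner("Working...")
    await spinner.start()
    await asyncio.sleep(1)  # simulated work
    await spinner.stop()

asyncio.run(main())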

205
rp/ui/diff_display.py Normal file
View File

@ -0,0 +1,205 @@
import difflib
from typing import Dict, List, Optional, Tuple
from .colors import Colors
class DiffStats:
def __init__(self):
self.insertions = 0
self.deletions = 0
self.modifications = 0
self.files_changed = 0
@property
def total_changes(self):
return self.insertions + self.deletions
def __str__(self):
return f"{self.files_changed} file(s) changed, {self.insertions} insertions(+), {self.deletions} deletions(-)"
class DiffLine:
def __init__(
self,
line_type: str,
content: str,
old_line_num: Optional[int] = None,
new_line_num: Optional[int] = None,
):
self.line_type = line_type
self.content = content
self.old_line_num = old_line_num
self.new_line_num = new_line_num
def format(self, show_line_nums: bool = True) -> str:
color = {
"add": Colors.GREEN,
"delete": Colors.RED,
"context": Colors.GRAY,
"header": Colors.CYAN,
"stats": Colors.BLUE,
}.get(self.line_type, Colors.RESET)
prefix = {"add": "+ ", "delete": "- ", "context": " ", "header": "", "stats": ""}.get(
self.line_type, " "
)
if show_line_nums and self.line_type in ("add", "delete", "context"):
old_num = str(self.old_line_num) if self.old_line_num else " "
new_num = str(self.new_line_num) if self.new_line_num else " "
line_num_str = f"{Colors.YELLOW}{old_num:>4} {new_num:>4}{Colors.RESET} "
else:
line_num_str = ""
return f"{line_num_str}{color}{prefix}{self.content}{Colors.RESET}"
class DiffDisplay:
def __init__(self, context_lines: int = 3):
self.context_lines = context_lines
def create_diff(
self, old_content: str, new_content: str, filename: str = "file"
) -> Tuple[List[DiffLine], DiffStats]:
old_lines = old_content.splitlines(keepends=True)
new_lines = new_content.splitlines(keepends=True)
diff_lines = []
stats = DiffStats()
stats.files_changed = 1
diff = difflib.unified_diff(
old_lines,
new_lines,
fromfile=f"a/{filename}",
tofile=f"b/{filename}",
n=self.context_lines,
)
old_line_num = 0
new_line_num = 0
for line in diff:
if line.startswith("---") or line.startswith("+++"):
diff_lines.append(DiffLine("header", line.rstrip()))
elif line.startswith("@@"):
diff_lines.append(DiffLine("header", line.rstrip()))
old_line_num, new_line_num = self._parse_hunk_header(line)
elif line.startswith("+"):
stats.insertions += 1
diff_lines.append(DiffLine("add", line[1:].rstrip(), None, new_line_num))
new_line_num += 1
elif line.startswith("-"):
stats.deletions += 1
diff_lines.append(DiffLine("delete", line[1:].rstrip(), old_line_num, None))
old_line_num += 1
elif line.startswith(" "):
diff_lines.append(
DiffLine("context", line[1:].rstrip(), old_line_num, new_line_num)
)
old_line_num += 1
new_line_num += 1
stats.modifications = min(stats.insertions, stats.deletions)
return (diff_lines, stats)
def _parse_hunk_header(self, header: str) -> Tuple[int, int]:
try:
parts = header.split("@@")[1].strip().split()
old_start = int(parts[0].split(",")[0].replace("-", ""))
new_start = int(parts[1].split(",")[0].replace("+", ""))
return (old_start, new_start)
except (IndexError, ValueError):
return (0, 0)
def render_diff(
self,
diff_lines: List[DiffLine],
stats: DiffStats,
show_line_nums: bool = True,
show_stats: bool = True,
) -> str:
output = []
if show_stats:
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.BLUE}DIFF SUMMARY{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}")
output.append(f"{Colors.BLUE}{stats}{Colors.RESET}\n")
for line in diff_lines:
output.append(line.format(show_line_nums))
if show_stats:
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}\n")
return "\n".join(output)
def display_file_diff(
self,
old_content: str,
new_content: str,
filename: str = "file",
show_line_nums: bool = True,
) -> str:
diff_lines, stats = self.create_diff(old_content, new_content, filename)
if not diff_lines:
return f"{Colors.GRAY}No changes detected{Colors.RESET}"
return self.render_diff(diff_lines, stats, show_line_nums)
def display_side_by_side(
self, old_content: str, new_content: str, filename: str = "file", width: int = 80
) -> str:
old_lines = old_content.splitlines()
new_lines = new_content.splitlines()
matcher = difflib.SequenceMatcher(None, old_lines, new_lines)
output = []
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * width}{Colors.RESET}")
output.append(
f"{Colors.BOLD}{Colors.BLUE}SIDE-BY-SIDE COMPARISON: {filename}{Colors.RESET}"
)
output.append(f"{Colors.BOLD}{Colors.BLUE}{'=' * width}{Colors.RESET}\n")
half_width = (width - 5) // 2
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
if tag == "equal":
for i, (old_line, new_line) in enumerate(zip(old_lines[i1:i2], new_lines[j1:j2])):
old_display = old_line[:half_width].ljust(half_width)
new_display = new_line[:half_width].ljust(half_width)
output.append(f"{Colors.GRAY}{old_display} | {new_display}{Colors.RESET}")
elif tag == "replace":
max_lines = max(i2 - i1, j2 - j1)
for i in range(max_lines):
old_line = old_lines[i1 + i] if i1 + i < i2 else ""
new_line = new_lines[j1 + i] if j1 + i < j2 else ""
old_display = old_line[:half_width].ljust(half_width)
new_display = new_line[:half_width].ljust(half_width)
output.append(
f"{Colors.RED}{old_display}{Colors.RESET} | {Colors.GREEN}{new_display}{Colors.RESET}"
)
elif tag == "delete":
for old_line in old_lines[i1:i2]:
old_display = old_line[:half_width].ljust(half_width)
output.append(f"{Colors.RED}{old_display} | {' ' * half_width}{Colors.RESET}")
elif tag == "insert":
for new_line in new_lines[j1:j2]:
new_display = new_line[:half_width].ljust(half_width)
output.append(f"{' ' * half_width} | {Colors.GREEN}{new_display}{Colors.RESET}")
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * width}{Colors.RESET}\n")
return "\n".join(output)
def display_diff(
old_content: str,
new_content: str,
filename: str = "file",
format_type: str = "unified",
context_lines: int = 3,
) -> str:
displayer = DiffDisplay(context_lines)
if format_type == "side-by-side":
return displayer.display_side_by_side(old_content, new_content, filename)
else:
return displayer.display_file_diff(old_content, new_content, filename)
def get_diff_stats(old_content: str, new_content: str) -> Dict[str, int]:
displayer = DiffDisplay()
_, stats = displayer.create_diff(old_content, new_content)
return {
"insertions": stats.insertions,
"deletions": stats.deletions,
"modifications": stats.modifications,
"total_changes": stats.total_changes,
"files_changed": stats.files_changed,
}
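
A minimal usage sketch of the helpers above; the import path follows this commit's package layout, and the sample strings are made up:

from rp.ui.diff_display import display_diff, get_diff_stats

old = "alpha\nbeta\n"
new = "alpha\nbeta 2\ngamma\n"

print(display_diff(old, new, filename="notes.txt"))                              # unified view
print(display_diff(old, new, filename="notes.txt", format_type="side-by-side"))  # two-column view
print(get_diff_stats(old, new))  # e.g. {'insertions': 2, 'deletions': 1, ...}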

58
rp/ui/display.py Normal file
View File

@ -0,0 +1,58 @@
from rp.ui.colors import Colors
def display_tool_call(tool_name, arguments, status="running", result=None):
if status == "running":
return
args_str = ", ".join([f"{k}={str(v)[:20]}" for k, v in list(arguments.items())[:2]])
line = f"{tool_name}({args_str})"
if len(line) > 80:
line = line[:77] + "..."
print(f"{Colors.GRAY}{line}{Colors.RESET}")
def print_autonomous_header(task):
print(f"{Colors.BOLD}Task:{Colors.RESET} {task}")
print(f"{Colors.GRAY}r will work continuously until the task is complete.{Colors.RESET}")
print(f"{Colors.GRAY}Press Ctrl+C twice to interrupt.{Colors.RESET}\n")
print(f"{Colors.BOLD}{'' * 80}{Colors.RESET}\n")
def display_multiplexer_status(sessions):
"""Display the status of background sessions."""
if not sessions:
print(f"{Colors.GRAY}No background sessions running{Colors.RESET}")
return
print(f"\n{Colors.BOLD}Background Sessions:{Colors.RESET}")
print(f"{Colors.GRAY}{'' * 60}{Colors.RESET}")
for session_name, session_info in sessions.items():
status = session_info.get("status", "unknown")
pid = session_info.get("pid", "N/A")
command = session_info.get("command", "N/A")
status_color = {"running": Colors.GREEN, "stopped": Colors.RED, "error": Colors.RED}.get(
status, Colors.YELLOW
)
print(f" {Colors.CYAN}{session_name}{Colors.RESET}")
print(f" Status: {status_color}{status}{Colors.RESET}")
print(f" PID: {pid}")
print(f" Command: {command}")
if "start_time" in session_info:
import time
elapsed = time.time() - session_info["start_time"]
print(f" Running for: {elapsed:.1f}s")
print()
def display_background_event(event):
"""Display a background event."""
event.get("type", "unknown")
session_name = event.get("session_name", "unknown")
timestamp = event.get("timestamp", 0)
message = event.get("message", "")
import datetime
time_str = datetime.datetime.fromtimestamp(timestamp).strftime("%H:%M:%S")
print(
f"{Colors.GRAY}[{time_str}]{Colors.RESET} {Colors.CYAN}{session_name}{Colors.RESET} [{event_type}]: {message}"
)
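
For reference, a sketch of the session dict shape display_multiplexer_status reads, inferred from the .get() calls above; all values are illustrative:

import time
from rp.ui.display import display_multiplexer_status

sessions = {
    "build": {"status": "running", "pid": 4242, "command": "make -j4",
              "start_time": time.time() - 12.5},
    "tests": {"status": "stopped", "pid": 4243, "command": "pytest"},
}
display_multiplexer_status(sessions)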

183
rp/ui/edit_feedback.py Normal file
View File

@ -0,0 +1,183 @@
from datetime import datetime
from typing import Dict, List, Optional
from .colors import Colors
from .progress import ProgressBar
class EditOperation:
def __init__(
self,
op_type: str,
filepath: str,
start_pos: int = 0,
end_pos: int = 0,
content: str = "",
old_content: str = "",
):
self.op_type = op_type
self.filepath = filepath
self.start_pos = start_pos
self.end_pos = end_pos
self.content = content
self.old_content = old_content
self.timestamp = datetime.now()
self.status = "pending"
def format_operation(self) -> str:
op_colors = {
"INSERT": Colors.GREEN,
"REPLACE": Colors.YELLOW,
"DELETE": Colors.RED,
"WRITE": Colors.BLUE,
}
color = op_colors.get(self.op_type, Colors.RESET)
status_icon = {"pending": "", "in_progress": "", "completed": "", "failed": ""}.get(
self.status, ""
)
return f"{color}{status_icon} [{self.op_type}]{Colors.RESET} {self.filepath}"
def format_details(self, show_content: bool = True) -> str:
output = [self.format_operation()]
if self.op_type in ("INSERT", "REPLACE"):
output.append(f" {Colors.GRAY}Position: {self.start_pos}-{self.end_pos}{Colors.RESET}")
if show_content:
if self.old_content:
lines = self.old_content.split("\n")
preview = lines[0][:60] + ("..." if len(lines[0]) > 60 or len(lines) > 1 else "")
output.append(f" {Colors.RED}- {preview}{Colors.RESET}")
if self.content:
lines = self.content.split("\n")
preview = lines[0][:60] + ("..." if len(lines[0]) > 60 or len(lines) > 1 else "")
output.append(f" {Colors.GREEN}+ {preview}{Colors.RESET}")
return "\n".join(output)
class EditTracker:
def __init__(self):
self.operations: List[EditOperation] = []
self.current_file: Optional[str] = None
def add_operation(self, op_type: str, filepath: str, **kwargs) -> EditOperation:
op = EditOperation(op_type, filepath, **kwargs)
self.operations.append(op)
self.current_file = filepath
return op
def mark_in_progress(self, operation: EditOperation):
operation.status = "in_progress"
def mark_completed(self, operation: EditOperation):
operation.status = "completed"
def mark_failed(self, operation: EditOperation):
operation.status = "failed"
def get_stats(self) -> Dict[str, int]:
stats = {
"total": len(self.operations),
"completed": sum((1 for op in self.operations if op.status == "completed")),
"pending": sum((1 for op in self.operations if op.status == "pending")),
"in_progress": sum((1 for op in self.operations if op.status == "in_progress")),
"failed": sum((1 for op in self.operations if op.status == "failed")),
}
return stats
def get_completion_percentage(self) -> float:
if not self.operations:
return 0.0
stats = self.get_stats()
return stats["completed"] / stats["total"] * 100
def display_progress(self) -> str:
if not self.operations:
return f"{Colors.GRAY}No edit operations tracked{Colors.RESET}"
output = []
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.BLUE}EDIT OPERATIONS PROGRESS{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}\n")
stats = self.get_stats()
progress_bar = ProgressBar(total=stats["total"], width=40)
progress_bar.current = stats["completed"]
bar_display = progress_bar._get_bar_display()
output.append(f"Progress: {bar_display}")
output.append(
f"{Colors.BLUE}Total: {stats['total']}, Completed: {stats['completed']}, Pending: {stats['pending']}, Failed: {stats['failed']}{Colors.RESET}\n"
)
output.append(f"{Colors.BOLD}Recent Operations:{Colors.RESET}")
for i, op in enumerate(self.operations[-5:], 1):
output.append(f"{i}. {op.format_operation()}")
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}\n")
return "\n".join(output)
def display_timeline(self, show_content: bool = False) -> str:
if not self.operations:
return f"{Colors.GRAY}No edit operations tracked{Colors.RESET}"
output = []
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.BLUE}EDIT TIMELINE{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}\n")
for i, op in enumerate(self.operations, 1):
timestamp = op.timestamp.strftime("%H:%M:%S")
output.append(f"{Colors.GRAY}[{timestamp}]{Colors.RESET} {i}.")
output.append(op.format_details(show_content))
output.append("")
stats = self.get_stats()
output.append(f"{Colors.BOLD}Summary:{Colors.RESET}")
output.append(
f"{Colors.BLUE}Total operations: {stats['total']}, Completed: {stats['completed']}, Failed: {stats['failed']}{Colors.RESET}"
)
output.append(f"\n{Colors.BOLD}{Colors.BLUE}{'=' * 60}{Colors.RESET}\n")
return "\n".join(output)
def display_summary(self) -> str:
if not self.operations:
return f"{Colors.GRAY}No edits to summarize{Colors.RESET}"
stats = self.get_stats()
files_modified = len({op.filepath for op in self.operations})
output = []
output.append(f"\n{Colors.BOLD}{Colors.GREEN}{'=' * 60}{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.GREEN}EDIT SUMMARY{Colors.RESET}")
output.append(f"{Colors.BOLD}{Colors.GREEN}{'=' * 60}{Colors.RESET}\n")
output.append(f"{Colors.GREEN}Files Modified: {files_modified}{Colors.RESET}")
output.append(f"{Colors.GREEN}Total Operations: {stats['total']}{Colors.RESET}")
output.append(f"{Colors.GREEN}Successful: {stats['completed']}{Colors.RESET}")
if stats["failed"] > 0:
output.append(f"{Colors.RED}Failed: {stats['failed']}{Colors.RESET}")
output.append(f"\n{Colors.BOLD}Operations by Type:{Colors.RESET}")
op_types = {}
for op in self.operations:
op_types[op.op_type] = op_types.get(op.op_type, 0) + 1
for op_type, count in sorted(op_types.items()):
output.append(f" {op_type}: {count}")
output.append(f"\n{Colors.BOLD}{Colors.GREEN}{'=' * 60}{Colors.RESET}\n")
return "\n".join(output)
def clear(self):
self.operations.clear()
self.current_file = None
tracker = EditTracker()
def track_edit(op_type: str, filepath: str, **kwargs) -> EditOperation:
return tracker.add_operation(op_type, filepath, **kwargs)
def display_edit_progress() -> str:
return tracker.display_progress()
def display_edit_timeline(show_content: bool = False) -> str:
return tracker.display_timeline(show_content)
def display_edit_summary() -> str:
return tracker.display_summary()
def clear_tracker():
tracker.clear()
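
A sketch of the module-level tracking API; the file paths and edit contents are hypothetical:

from rp.ui.edit_feedback import display_edit_summary, track_edit, tracker

op = track_edit("REPLACE", "src/app.py", start_pos=10, end_pos=20,
                old_content="x = 1", content="x = 2")
tracker.mark_completed(op)
tracker.mark_failed(track_edit("WRITE", "README.md", content="# Title"))
print(display_edit_summary())  # files modified, plus operation counts by type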

64
rp/ui/output.py Normal file
View File

@ -0,0 +1,64 @@
import json
import sys
from datetime import datetime
from typing import Any
class OutputFormatter:
def __init__(self, format_type: str = "text", quiet: bool = False):
self.format_type = format_type
self.quiet = quiet
def output(self, data: Any, message_type: str = "response"):
if self.quiet and message_type not in ["error", "result"]:
return
if self.format_type == "json":
self._output_json(data, message_type)
elif self.format_type == "structured":
self._output_structured(data, message_type)
else:
self._output_text(data, message_type)
def _output_json(self, data: Any, message_type: str):
output = {"type": message_type, "timestamp": datetime.now().isoformat(), "data": data}
print(json.dumps(output, indent=2))
def _output_structured(self, data: Any, message_type: str):
if isinstance(data, dict):
for key, value in data.items():
print(f"{key}: {value}")
elif isinstance(data, list):
for item in data:
print(f"- {item}")
else:
print(data)
def _output_text(self, data: Any, message_type: str):
if isinstance(data, (dict, list)):
print(json.dumps(data, indent=2))
else:
print(data)
def error(self, message: str):
if self.format_type == "json":
self._output_json({"error": message}, "error")
else:
print(f"Error: {message}", file=sys.stderr)
def success(self, message: str):
if not self.quiet:
if self.format_type == "json":
self._output_json({"success": message}, "success")
else:
print(message)
def info(self, message: str):
if not self.quiet:
if self.format_type == "json":
self._output_json({"info": message}, "info")
else:
print(message)
def result(self, data: Any):
self.output(data, "result")
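
Usage sketch for OutputFormatter; the messages are placeholders:

from rp.ui.output import OutputFormatter

fmt = OutputFormatter(format_type="json")
fmt.info("starting run")      # emitted as {"type": "info", "timestamp": ..., "data": ...}
fmt.result({"status": "ok"})

quiet = OutputFormatter(quiet=True)
quiet.info("suppressed")      # quiet mode drops info/success messages
quiet.error("still shown")    # errors and results always pass through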

72
rp/ui/progress.py Normal file
View File

@ -0,0 +1,72 @@
import sys
import threading
import time
class ProgressIndicator:
def __init__(self, message: str = "Working", show: bool = True):
self.message = message
self.show = show
self.running = False
self.thread = None
def __enter__(self):
if self.show:
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.show:
self.stop()
def start(self):
self.running = True
self.thread = threading.Thread(target=self._animate, daemon=True)
self.thread.start()
def stop(self):
if self.running:
self.running = False
if self.thread:
self.thread.join(timeout=1.0)
sys.stdout.write("\r" + " " * (len(self.message) + 10) + "\r")
sys.stdout.flush()
def _animate(self):
spinner = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
idx = 0
while self.running:
sys.stdout.write(f"\r{spinner[idx]} {self.message}...")
sys.stdout.flush()
idx = (idx + 1) % len(spinner)
time.sleep(0.1)
class ProgressBar:
def __init__(self, total: int, description: str = "Progress", width: int = 40):
self.total = total
self.description = description
self.width = width
self.current = 0
def update(self, amount: int = 1):
self.current += amount
self._display()
def _display(self):
if self.total == 0:
percent = 100
else:
percent = int(self.current / self.total * 100)
filled = int(self.current / self.total * self.width) if self.total > 0 else self.width
bar = "" * filled + "" * (self.width - filled)
sys.stdout.write(f"\r{self.description}: |{bar}| {percent}% ({self.current}/{self.total})")
sys.stdout.flush()
if self.current >= self.total:
sys.stdout.write("\n")
def finish(self):
self.current = self.total
self._display()
def _get_bar_display(self) -> str:
# String form of the bar; EditTracker.display_progress in edit_feedback.py
# calls this, so it must exist alongside the stdout-writing _display.
filled = int(self.current / self.total * self.width) if self.total > 0 else self.width
bar = "█" * filled + "░" * (self.width - filled)
percent = int(self.current / self.total * 100) if self.total > 0 else 100
return f"|{bar}| {percent}% ({self.current}/{self.total})"
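
Both helpers in a short sketch; the sleep calls only simulate work:

import time
from rp.ui.progress import ProgressBar, ProgressIndicator

with ProgressIndicator("Thinking"):
    time.sleep(0.5)            # spinner animates on stdout until the block exits

bar = ProgressBar(total=10, description="Download")
for _ in range(10):
    time.sleep(0.05)
    bar.update()               # redraws in place; prints a newline once complete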

91
rp/ui/rendering.py Normal file
View File

@ -0,0 +1,91 @@
import re
from rp.config import LANGUAGE_KEYWORDS
from rp.ui.colors import Colors
def highlight_code(code, language=None, syntax_highlighting=True):
if not syntax_highlighting:
return code
if not language:
if "def " in code or "import " in code:
language = "python"
elif "function " in code or "const " in code:
language = "javascript"
elif "public " in code or "class " in code:
language = "java"
if language and language in LANGUAGE_KEYWORDS:
keywords = LANGUAGE_KEYWORDS[language]
for keyword in keywords:
pattern = "\\b" + re.escape(keyword) + "\\b"
code = re.sub(pattern, f"{Colors.BLUE}{keyword}{Colors.RESET}", code)
code = re.sub('"([^"]*)"', f'{Colors.GREEN}"\\1"{Colors.RESET}', code)
code = re.sub("'([^']*)'", f"{Colors.GREEN}'\\1'{Colors.RESET}", code)
code = re.sub("#(.*)$", f"{Colors.GRAY}#\\1{Colors.RESET}", code, flags=re.MULTILINE)
code = re.sub("//(.*)$", f"{Colors.GRAY}//\\1{Colors.RESET}", code, flags=re.MULTILINE)
return code
def render_markdown(text, syntax_highlighting=True):
if not syntax_highlighting:
return text
code_blocks = []
def extract_code_block(match):
lang = match.group(1) or ""
code = match.group(2)
highlighted_code = highlight_code(code.strip("\n"), lang, syntax_highlighting)
placeholder = f"%%CODEBLOCK{len(code_blocks)}%%"
full_block = f"{Colors.GRAY}```{lang}{Colors.RESET}\n{highlighted_code}\n{Colors.GRAY}```{Colors.RESET}"
code_blocks.append(full_block)
return placeholder
text = re.sub("```(\\w*)\\n(.*?)\\n?```", extract_code_block, text, flags=re.DOTALL)
inline_codes = []
def extract_inline_code(match):
code = match.group(1)
placeholder = f"%%INLINECODE{len(inline_codes)}%%"
inline_codes.append(f"{Colors.YELLOW}{code}{Colors.RESET}")
return placeholder
text = re.sub("`([^`]+)`", extract_inline_code, text)
lines = text.split("\n")
processed_lines = []
for line in lines:
if line.startswith("### "):
line = f"{Colors.BOLD}{Colors.GREEN}{line[4:]}{Colors.RESET}"
elif line.startswith("## "):
line = f"{Colors.BOLD}{Colors.BLUE}{line[3:]}{Colors.RESET}"
elif line.startswith("# "):
line = f"{Colors.BOLD}{Colors.MAGENTA}{line[2:]}{Colors.RESET}"
elif line.startswith("> "):
line = f"{Colors.CYAN}> {line[2:]}{Colors.RESET}"
elif re.match("^\\s*[\\*\\-\\+]\\s", line):
match = re.match("^(\\s*)([\\*\\-\\+])(\\s+.*)", line)
if match:
line = (
f"{match.group(1)}{Colors.YELLOW}{match.group(2)}{Colors.RESET}{match.group(3)}"
)
elif re.match("^\\s*\\d+\\.\\s", line):
match = re.match("^(\\s*)(\\d+\\.)(\\s+.*)", line)
if match:
line = (
f"{match.group(1)}{Colors.YELLOW}{match.group(2)}{Colors.RESET}{match.group(3)}"
)
processed_lines.append(line)
text = "\n".join(processed_lines)
text = re.sub(
"\\[(.*?)\\]\\((.*?)\\)",
f"{Colors.BLUE}\\1{Colors.RESET}{Colors.GRAY}(\\2){Colors.RESET}",
text,
)
text = re.sub("~~(.*?)~~", f"{Colors.GRAY}\\1{Colors.RESET}", text)
text = re.sub("\\*\\*(.*?)\\*\\*", f"{Colors.BOLD}\\1{Colors.RESET}", text)
text = re.sub("__(.*?)__", f"{Colors.BOLD}\\1{Colors.RESET}", text)
text = re.sub("\\*(.*?)\\*", f"{Colors.CYAN}\\1{Colors.RESET}", text)
text = re.sub("_(.*?)_", f"{Colors.CYAN}\\1{Colors.RESET}", text)
for i, code in enumerate(inline_codes):
text = text.replace(f"%%INLINECODE{i}%%", code)
for i, block in enumerate(code_blocks):
text = text.replace(f"%%CODEBLOCK{i}%%", block)
return text
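
A usage sketch for render_markdown; the sample text is arbitrary:

from rp.ui.rendering import render_markdown

sample = "# Title\nSome **bold** text with `inline code`.\n\n```python\ndef greet():\n    return 'hi'\n```"
print(render_markdown(sample))                             # ANSI-colored output
print(render_markdown(sample, syntax_highlighting=False))  # returned unchanged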

42
rp/vision.py Executable file
View File

@ -0,0 +1,42 @@
import argparse
import base64
import http.client
import json
import pathlib
DEFAULT_URL = "https://static.molodetz.nl/rp.vision.cgi"
def post_image(image_path: str, prompt: str = "", url: str = DEFAULT_URL):
image_path = str(pathlib.Path(image_path).resolve().absolute())
if not url:
url = DEFAULT_URL
url_parts = url.split("/")
host = url_parts[2]
path = "/" + "/".join(url_parts[3:])
with open(image_path, "rb") as file:
image_data = file.read()
base64_data = base64.b64encode(image_data).decode("utf-8")
payload = {"data": base64_data, "path": image_path, "prompt": prompt}
body = json.dumps(payload).encode("utf-8")
headers = {
"Content-Type": "application/json",
"Content-Length": str(len(body)),
"User-Agent": "Python http.client",
}
conn = http.client.HTTPSConnection(host)
conn.request("POST", path, body, headers)
resp = conn.getresponse()
data = resp.read()
print("Status:", resp.status, resp.reason)
print(data.decode())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("image_path")
parser.add_argument("--prompt", default="")
parser.add_argument("--url", default=DEFAULT_URL)
args = parser.parse_args()
post_image(args.image_path, args.prompt, args.url)
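
With the corrected argument order above, a typical invocation looks like this (the image path and prompt are placeholders):

python rp/vision.py screenshot.png --prompt "What does this dialog say?"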

5
rp/workflows/__init__.py Normal file
View File

@ -0,0 +1,5 @@
from .workflow_definition import ExecutionMode, Workflow, WorkflowStep
from .workflow_engine import WorkflowEngine
from .workflow_storage import WorkflowStorage
__all__ = ["Workflow", "WorkflowStep", "ExecutionMode", "WorkflowEngine", "WorkflowStorage"]

94
rp/workflows/workflow_definition.py Normal file
View File

@ -0,0 +1,94 @@
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional
class ExecutionMode(Enum):
SEQUENTIAL = "sequential"
PARALLEL = "parallel"
CONDITIONAL = "conditional"
@dataclass
class WorkflowStep:
tool_name: str
arguments: Dict[str, Any]
step_id: str
condition: Optional[str] = None
on_success: Optional[List[str]] = None
on_failure: Optional[List[str]] = None
retry_count: int = 0
timeout_seconds: int = 300
def to_dict(self) -> Dict[str, Any]:
return {
"tool_name": self.tool_name,
"arguments": self.arguments,
"step_id": self.step_id,
"condition": self.condition,
"on_success": self.on_success,
"on_failure": self.on_failure,
"retry_count": self.retry_count,
"timeout_seconds": self.timeout_seconds,
}
@staticmethod
def from_dict(data: Dict[str, Any]) -> "WorkflowStep":
return WorkflowStep(
tool_name=data["tool_name"],
arguments=data["arguments"],
step_id=data["step_id"],
condition=data.get("condition"),
on_success=data.get("on_success"),
on_failure=data.get("on_failure"),
retry_count=data.get("retry_count", 0),
timeout_seconds=data.get("timeout_seconds", 300),
)
@dataclass
class Workflow:
name: str
description: str
steps: List[WorkflowStep]
execution_mode: ExecutionMode = ExecutionMode.SEQUENTIAL
variables: Dict[str, Any] = field(default_factory=dict)
tags: List[str] = field(default_factory=list)
def to_dict(self) -> Dict[str, Any]:
return {
"name": self.name,
"description": self.description,
"steps": [step.to_dict() for step in self.steps],
"execution_mode": self.execution_mode.value,
"variables": self.variables,
"tags": self.tags,
}
@staticmethod
def from_dict(data: Dict[str, Any]) -> "Workflow":
return Workflow(
name=data["name"],
description=data["description"],
steps=[WorkflowStep.from_dict(step) for step in data["steps"]],
execution_mode=ExecutionMode(data.get("execution_mode", "sequential")),
variables=data.get("variables", {}),
tags=data.get("tags", []),
)
def add_step(self, step: WorkflowStep):
self.steps.append(step)
def get_step(self, step_id: str) -> Optional[WorkflowStep]:
for step in self.steps:
if step.step_id == step_id:
return step
return None
def get_initial_steps(self) -> List[WorkflowStep]:
if self.execution_mode == ExecutionMode.SEQUENTIAL:
return [self.steps[0]] if self.steps else []
elif self.execution_mode == ExecutionMode.PARALLEL:
return self.steps
else:
return [step for step in self.steps if not step.condition]
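
A sketch assembling a small sequential workflow; the tool names are hypothetical, and the wf object is reused in the engine and storage sketches below:

from rp.workflows.workflow_definition import ExecutionMode, Workflow, WorkflowStep

wf = Workflow(
    name="build-and-test",
    description="Compile, then run the test suite",
    steps=[
        WorkflowStep(tool_name="run_command", arguments={"command": "make"},
                     step_id="build", on_success=["test"]),
        WorkflowStep(tool_name="run_command", arguments={"command": "make test"},
                     step_id="test"),
    ],
    execution_mode=ExecutionMode.SEQUENTIAL,
)
assert Workflow.from_dict(wf.to_dict()).name == wf.name  # round-trips via to_dict/from_dict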

185
rp/workflows/workflow_engine.py Normal file
View File

@ -0,0 +1,185 @@
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, Dict, List, Optional
from .workflow_definition import ExecutionMode, Workflow, WorkflowStep
class WorkflowExecutionContext:
def __init__(self):
self.variables: Dict[str, Any] = {}
self.step_results: Dict[str, Any] = {}
self.execution_log: List[Dict[str, Any]] = []
def set_variable(self, name: str, value: Any):
self.variables[name] = value
def get_variable(self, name: str, default: Any = None) -> Any:
return self.variables.get(name, default)
def set_step_result(self, step_id: str, result: Any):
self.step_results[step_id] = result
def get_step_result(self, step_id: str) -> Any:
return self.step_results.get(step_id)
def log_event(self, event_type: str, step_id: str, details: Dict[str, Any]):
self.execution_log.append(
{
"timestamp": time.time(),
"event_type": event_type,
"step_id": step_id,
"details": details,
}
)
class WorkflowEngine:
def __init__(self, tool_executor: Callable, max_workers: int = 5):
self.tool_executor = tool_executor
self.max_workers = max_workers
def _evaluate_condition(self, condition: str, context: WorkflowExecutionContext) -> bool:
if not condition:
return True
try:
# Builtins are stripped, so a condition expression can only reference
# workflow variables and prior step results.
safe_locals = {"variables": context.variables, "results": context.step_results}
return eval(condition, {"__builtins__": {}}, safe_locals)
except Exception:
return False
def _substitute_variables(
self, arguments: Dict[str, Any], context: WorkflowExecutionContext
) -> Dict[str, Any]:
substituted = {}
for key, value in arguments.items():
if isinstance(value, str):
pattern = "\\$\\{([^}]+)\\}"
matches = re.findall(pattern, value)
for match in matches:
if match.startswith("step."):
step_id = match.split(".", 1)[1]
replacement = context.get_step_result(step_id)
if replacement is not None:
value = value.replace(f"${{{match}}}", str(replacement))
elif match.startswith("var."):
var_name = match.split(".", 1)[1]
replacement = context.get_variable(var_name)
if replacement is not None:
value = value.replace(f"${{{match}}}", str(replacement))
substituted[key] = value
else:
substituted[key] = value
return substituted
def _execute_step(
self, step: WorkflowStep, context: WorkflowExecutionContext
) -> Dict[str, Any]:
if not self._evaluate_condition(step.condition, context):
context.log_event("skipped", step.step_id, {"reason": "condition_not_met"})
return {"status": "skipped", "step_id": step.step_id}
arguments = self._substitute_variables(step.arguments, context)
start_time = time.time()
retry_attempts = 0
last_error = None
while retry_attempts <= step.retry_count:
try:
context.log_event(
"executing",
step.step_id,
{"tool": step.tool_name, "arguments": arguments, "attempt": retry_attempts + 1},
)
result = self.tool_executor(step.tool_name, arguments)
execution_time = time.time() - start_time
context.set_step_result(step.step_id, result)
context.log_event(
"completed",
step.step_id,
{
"execution_time": execution_time,
"result_size": len(str(result)) if result else 0,
},
)
return {
"status": "success",
"step_id": step.step_id,
"result": result,
"execution_time": execution_time,
}
except Exception as e:
last_error = str(e)
retry_attempts += 1
if retry_attempts <= step.retry_count:
time.sleep(1 * retry_attempts)
context.log_event("failed", step.step_id, {"error": last_error})
return {
"status": "failed",
"step_id": step.step_id,
"error": last_error,
"execution_time": time.time() - start_time,
}
def _get_next_steps(
self, completed_step: WorkflowStep, result: Dict[str, Any], workflow: Workflow
) -> List[WorkflowStep]:
next_steps = []
if result["status"] == "success" and completed_step.on_success:
for step_id in completed_step.on_success:
step = workflow.get_step(step_id)
if step:
next_steps.append(step)
elif result["status"] == "failed" and completed_step.on_failure:
for step_id in completed_step.on_failure:
step = workflow.get_step(step_id)
if step:
next_steps.append(step)
elif workflow.execution_mode == ExecutionMode.SEQUENTIAL:
current_index = workflow.steps.index(completed_step)
if current_index + 1 < len(workflow.steps):
next_steps.append(workflow.steps[current_index + 1])
return next_steps
def execute_workflow(
self, workflow: Workflow, initial_variables: Optional[Dict[str, Any]] = None
) -> WorkflowExecutionContext:
context = WorkflowExecutionContext()
if initial_variables:
context.variables.update(initial_variables)
if workflow.variables:
context.variables.update(workflow.variables)
context.log_event("workflow_started", "workflow", {"name": workflow.name})
if workflow.execution_mode == ExecutionMode.PARALLEL:
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
futures = {
executor.submit(self._execute_step, step, context): step
for step in workflow.steps
}
for future in as_completed(futures):
step = futures[future]
try:
result = future.result()
context.log_event("step_completed", step.step_id, result)
except Exception as e:
context.log_event("step_failed", step.step_id, {"error": str(e)})
else:
pending_steps = workflow.get_initial_steps()
executed_step_ids = set()
while pending_steps:
step = pending_steps.pop(0)
if step.step_id in executed_step_ids:
continue
result = self._execute_step(step, context)
executed_step_ids.add(step.step_id)
next_steps = self._get_next_steps(step, result, workflow)
pending_steps.extend(next_steps)
context.log_event(
"workflow_completed",
"workflow",
{
"total_steps": len(context.step_results),
"executed_steps": list(context.step_results.keys()),
},
)
return context
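
Running the wf sketch from workflow_definition.py with a stub executor; a real executor would dispatch to the assistant's tools:

from rp.workflows.workflow_engine import WorkflowEngine

def stub_executor(tool_name, arguments):
    return f"{tool_name} ok"      # stand-in for real tool dispatch

engine = WorkflowEngine(tool_executor=stub_executor)
ctx = engine.execute_workflow(wf, initial_variables={"target": "all"})
print(ctx.step_results)           # {'build': 'run_command ok', 'test': 'run_command ok'}
print(len(ctx.execution_log), "events logged")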

172
rp/workflows/workflow_storage.py Normal file
View File

@ -0,0 +1,172 @@
import json
import sqlite3
import time
from typing import List, Optional
from .workflow_definition import Workflow
class WorkflowStorage:
def __init__(self, db_path: str):
self.db_path = db_path
self._initialize_storage()
def _initialize_storage(self):
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS workflows (
    workflow_id TEXT PRIMARY KEY,
    name TEXT NOT NULL,
    description TEXT,
    workflow_data TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL,
    execution_count INTEGER DEFAULT 0,
    last_execution_at INTEGER,
    tags TEXT
)
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS workflow_executions (
    execution_id TEXT PRIMARY KEY,
    workflow_id TEXT NOT NULL,
    started_at INTEGER NOT NULL,
    completed_at INTEGER,
    status TEXT NOT NULL,
    execution_log TEXT,
    variables TEXT,
    step_results TEXT,
    FOREIGN KEY (workflow_id) REFERENCES workflows(workflow_id)
)
"""
)
cursor.execute("CREATE INDEX IF NOT EXISTS idx_workflow_name ON workflows(name)")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_execution_workflow ON workflow_executions(workflow_id)")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_execution_started ON workflow_executions(started_at)")
conn.commit()
conn.close()
def save_workflow(self, workflow: Workflow) -> str:
import hashlib
workflow_data = json.dumps(workflow.to_dict())
workflow_id = hashlib.sha256(workflow.name.encode()).hexdigest()[:16]
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
current_time = int(time.time())
tags_json = json.dumps(workflow.tags)
cursor.execute(
"""
INSERT OR REPLACE INTO workflows
(workflow_id, name, description, workflow_data, created_at, updated_at, tags)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
(
workflow_id,
workflow.name,
workflow.description,
workflow_data,
current_time,
current_time,
tags_json,
),
)
conn.commit()
conn.close()
return workflow_id
def load_workflow(self, workflow_id: str) -> Optional[Workflow]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("SELECT workflow_data FROM workflows WHERE workflow_id = ?", (workflow_id,))
row = cursor.fetchone()
conn.close()
if row:
workflow_dict = json.loads(row[0])
return Workflow.from_dict(workflow_dict)
return None
def load_workflow_by_name(self, name: str) -> Optional[Workflow]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("SELECT workflow_data FROM workflows WHERE name = ?", (name,))
row = cursor.fetchone()
conn.close()
if row:
workflow_dict = json.loads(row[0])
return Workflow.from_dict(workflow_dict)
return None
def list_workflows(self, tag: Optional[str] = None) -> List[dict]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
if tag:
cursor.execute(
"""
SELECT workflow_id, name, description, execution_count, last_execution_at, tags
FROM workflows
WHERE tags LIKE ?
ORDER BY name
""",
(f'%"{tag}"%',),
)
else:
cursor.execute(
"""
SELECT workflow_id, name, description, execution_count, last_execution_at, tags
FROM workflows
ORDER BY name
"""
)
workflows = []
for row in cursor.fetchall():
workflows.append(
{
"workflow_id": row[0],
"name": row[1],
"description": row[2],
"execution_count": row[3],
"last_execution_at": row[4],
"tags": json.loads(row[5]) if row[5] else [],
}
)
conn.close()
return workflows
def delete_workflow(self, workflow_id: str) -> bool:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute("DELETE FROM workflows WHERE workflow_id = ?", (workflow_id,))
deleted = cursor.rowcount > 0
cursor.execute("DELETE FROM workflow_executions WHERE workflow_id = ?", (workflow_id,))
conn.commit()
conn.close()
return deleted
def save_execution(
self, workflow_id: str, execution_context: "WorkflowExecutionContext"
) -> str:
import uuid
execution_id = str(uuid.uuid4())[:16]
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
started_at = (
int(execution_context.execution_log[0]["timestamp"])
if execution_context.execution_log
else int(time.time())
)
completed_at = int(time.time())
cursor.execute(
"""
INSERT INTO workflow_executions
(execution_id, workflow_id, started_at, completed_at, status, execution_log, variables, step_results)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""",
(
execution_id,
workflow_id,
started_at,
completed_at,
"completed",
json.dumps(execution_context.execution_log),
json.dumps(execution_context.variables),
json.dumps(execution_context.step_results),
),
)
cursor.execute(
"""
UPDATE workflows
SET execution_count = execution_count + 1,
    last_execution_at = ?
WHERE workflow_id = ?
""",
(completed_at, workflow_id),
)
conn.commit()
conn.close()
return execution_id
def get_execution_history(self, workflow_id: str, limit: int = 10) -> List[dict]:
conn = sqlite3.connect(self.db_path, check_same_thread=False)
cursor = conn.cursor()
cursor.execute(
"""
SELECT execution_id, started_at, completed_at, status
FROM workflow_executions
WHERE workflow_id = ?
ORDER BY started_at DESC
LIMIT ?
""",
(workflow_id, limit),
)
executions = []
for row in cursor.fetchall():
executions.append(
{
"execution_id": row[0],
"started_at": row[1],
"completed_at": row[2],
"status": row[3],
}
)
conn.close()
return executions
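
Persisting the workflow and its run, reusing wf and ctx from the sketches above; the database path is illustrative:

from rp.workflows.workflow_storage import WorkflowStorage

storage = WorkflowStorage("workflows.db")
workflow_id = storage.save_workflow(wf)              # id is derived from a hash of the name
storage.save_execution(workflow_id, ctx)
print(storage.load_workflow_by_name("build-and-test").description)
print(storage.get_execution_history(workflow_id))    # most recent runs first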