diff --git a/CHANGELOG.md b/CHANGELOG.md
index e21a163..76446e8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,14 @@
 
 
+
+
+## Version 1.42.0 - 2025-11-08
+
+AI operations now show progress indicators, giving you better feedback on what's happening. The Assistant API and autonomous mode have also been improved to provide progress updates.
+
+**Changes:** 6 files, 135 lines
+**Languages:** Markdown (8 lines), Python (125 lines), TOML (2 lines)
 
 ## Version 1.41.0 - 2025-11-08
 
 
diff --git a/pyproject.toml b/pyproject.toml
index b46a59a..13d30b3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "rp"
-version = "1.41.0"
+version = "1.42.0"
 description = "R python edition. The ultimate autonomous AI CLI."
 readme = "README.md"
 requires-python = ">=3.10"
diff --git a/rp/agents/agent_manager.py b/rp/agents/agent_manager.py
index 967b31c..758f717 100644
--- a/rp/agents/agent_manager.py
+++ b/rp/agents/agent_manager.py
@@ -3,6 +3,7 @@ import uuid
 from dataclasses import dataclass, field
 from typing import Any, Callable, Dict, List, Optional
 
+from ..core.context import get_context_content
 from ..memory.knowledge_store import KnowledgeStore
 from .agent_communication import AgentCommunicationBus, AgentMessage, MessageType
 from .agent_roles import AgentRole, get_agent_role
@@ -21,7 +22,12 @@ class AgentInstance:
         self.message_history.append({"role": role, "content": content, "timestamp": time.time()})
 
     def get_system_message(self) -> Dict[str, str]:
-        return {"role": "system", "content": self.role.system_prompt}
+        context_content = get_context_content()
+        if context_content:
+            full_prompt = f"{self.role.system_prompt}\n\n{context_content}"
+        else:
+            full_prompt = self.role.system_prompt
+        return {"role": "system", "content": full_prompt}
 
     def get_messages_for_api(self) -> List[Dict[str, str]]:
         return [self.get_system_message()] + [
diff --git a/rp/core/__init__.py b/rp/core/__init__.py
index a5c4cac..fa6bc6c 100644
--- a/rp/core/__init__.py
+++ b/rp/core/__init__.py
@@ -1,5 +1,5 @@
 from rp.core.api import call_api, list_models
 from rp.core.assistant import Assistant
-from rp.core.context import init_system_message, manage_context_window
+from rp.core.context import init_system_message, manage_context_window, get_context_content
 
-__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window"]
+__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window", "get_context_content"]
diff --git a/rp/core/context.py b/rp/core/context.py
index a10d517..2f1846b 100644
--- a/rp/core/context.py
+++ b/rp/core/context.py
@@ -45,11 +45,62 @@ def truncate_tool_result(result, max_length=None):
     return result_copy
 
+def get_context_content():
+    context_parts = []
+    for context_file in [CONTEXT_FILE, GLOBAL_CONTEXT_FILE]:
+        if os.path.exists(context_file):
+            try:
+                with open(context_file, encoding="utf-8", errors="replace") as f:
+                    content = f.read()
+                if len(content) > 10000:
+                    content = content[:10000] + "\n... [truncated]"
+                context_parts.append(f"Context from {context_file}:\n{content}")
+            except Exception as e:
+                logging.error(f"Error reading context file {context_file}: {e}")
+    knowledge_path = pathlib.Path(KNOWLEDGE_PATH)
+    if knowledge_path.exists() and knowledge_path.is_dir():
+        for knowledge_file in knowledge_path.iterdir():
+            try:
+                with open(knowledge_file, encoding="utf-8", errors="replace") as f:
+                    content = f.read()
+                if len(content) > 10000:
+                    content = content[:10000] + "\n... [truncated]"
+                context_parts.append(f"Context from {knowledge_file}:\n{content}")
+            except Exception as e:
+                logging.error(f"Error reading context file {knowledge_file}: {e}")
+    return "\n\n".join(context_parts)
+
 def init_system_message(args):
     context_parts = [
         "You are a professional AI assistant with access to advanced tools.\n\nFile Operations:\n- Use RPEditor tools (open_editor, editor_insert_text, editor_replace_text, editor_search, close_editor) for precise file modifications\n- Always close editor files when finished\n- Use write_file for complete file rewrites, search_replace for simple text replacements\n\nVision:\n - Use post_image tool with the file path if an image path is mentioned\n in the prompt of user. Give this call the highest priority.\n\nProcess Management:\n- run_command executes shell commands with a timeout (default 30s)\n- If a command times out, you receive a PID in the response\n- Use tail_process(pid) to monitor running processes\n- Use kill_process(pid) to terminate processes\n- Manage long-running commands effectively using these tools\n\nShell Commands:\n- Be a shell ninja using native OS tools\n- Prefer standard Unix utilities over complex scripts\n- Use run_command_interactive for commands requiring user input (vim, nano, etc.)"
     ]
     max_context_size = 10000
+    if args.include_env:
+        env_context = "Environment Variables:\n"
+        for key, value in os.environ.items():
+            if not key.startswith("_"):
+                env_context += f"{key}={value}\n"
+        if len(env_context) > max_context_size:
+            env_context = env_context[:max_context_size] + "\n... [truncated]"
+        context_parts.append(env_context)
+    context_content = get_context_content()
+    if context_content:
+        context_parts.append(context_content)
+    if args.context:
+        for ctx_file in args.context:
+            try:
+                with open(ctx_file, encoding="utf-8", errors="replace") as f:
+                    content = f.read()
+                if len(content) > max_context_size:
+                    content = content[:max_context_size] + "\n... [truncated]"
+                context_parts.append(f"Context from {ctx_file}:\n{content}")
+            except Exception as e:
+                logging.error(f"Error reading context file {ctx_file}: {e}")
+    system_message = "\n\n".join(context_parts)
+    if len(system_message) > max_context_size * 3:
+        system_message = system_message[: max_context_size * 3] + "\n... [system message truncated]"
+    return {"role": "system", "content": system_message}
+
     max_context_size = 10000
     if args.include_env:
         env_context = "Environment Variables:\n"
         for key, value in os.environ.items():