feat: add progress indicators for ai operations

feat: improve assistant api and autonomous mode with progress updates
feat: include context content in agent prompts
docs: update changelog with version 1.43.0 details
refactor: move context retrieval to separate function
maintenance: update pyproject.toml version to 1.43.0
retoor 2025-11-08 04:06:48 +01:00
parent 1a7d829499
commit fbe3847a0a
5 changed files with 69 additions and 4 deletions

View File

@@ -38,6 +38,14 @@
+## Version 1.42.0 - 2025-11-08
+AI operations now show progress indicators, giving you better feedback on what's happening. The Assistant API and autonomous mode have also been improved to provide progress updates.
+**Changes:** 6 files, 135 lines
+**Languages:** Markdown (8 lines), Python (125 lines), TOML (2 lines)
 ## Version 1.41.0 - 2025-11-08
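The progress-indicator code referenced above is not part of the hunks shown on this page (it lives in the assistant and autonomous-mode modules). Purely as an illustration of the idea, with hypothetical names, a minimal spinner wrapper around a blocking AI call might look like this:

```python
import itertools
import sys
import threading
import time


def with_spinner(label, func, *args, **kwargs):
    """Run func(*args, **kwargs) while a simple spinner animates on stderr.

    Sketch only; the actual rp progress indicators are not shown in this diff.
    """
    done = threading.Event()

    def spin():
        for ch in itertools.cycle("|/-\\"):
            if done.is_set():
                break
            sys.stderr.write(f"\r{label} {ch}")
            sys.stderr.flush()
            time.sleep(0.1)
        sys.stderr.write("\r" + " " * (len(label) + 2) + "\r")  # clear the spinner line

    t = threading.Thread(target=spin, daemon=True)
    t.start()
    try:
        return func(*args, **kwargs)
    finally:
        done.set()
        t.join()


# Example: result = with_spinner("Thinking", some_blocking_call)
```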

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 [project]
 name = "rp"
-version = "1.41.0"
+version = "1.42.0"
 description = "R python edition. The ultimate autonomous AI CLI."
 readme = "README.md"
 requires-python = ">=3.10"

View File

@@ -3,6 +3,7 @@ import uuid
 from dataclasses import dataclass, field
 from typing import Any, Callable, Dict, List, Optional
+from ..core.context import get_context_content
 from ..memory.knowledge_store import KnowledgeStore
 from .agent_communication import AgentCommunicationBus, AgentMessage, MessageType
 from .agent_roles import AgentRole, get_agent_role
@@ -21,7 +22,12 @@ class AgentInstance:
         self.message_history.append({"role": role, "content": content, "timestamp": time.time()})
 
     def get_system_message(self) -> Dict[str, str]:
-        return {"role": "system", "content": self.role.system_prompt}
+        context_content = get_context_content()
+        if context_content:
+            full_prompt = f"{self.role.system_prompt}\n\n{context_content}"
+        else:
+            full_prompt = self.role.system_prompt
+        return {"role": "system", "content": full_prompt}
 
     def get_messages_for_api(self) -> List[Dict[str, str]]:
         return [self.get_system_message()] + [
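In effect, an agent's system message is now its role prompt followed by whatever get_context_content() returns, falling back to the plain role prompt when no context is available. A minimal sketch of the resulting payload (the values are illustrative, not taken from the repository):

```python
# Illustrative only: the shape of the message built by the new get_system_message().
role_prompt = "You are the coding agent."                            # stands in for self.role.system_prompt
context_content = "Context from ./notes.md:\nTarget Python 3.10+."   # stands in for get_context_content()

content = f"{role_prompt}\n\n{context_content}" if context_content else role_prompt
system_message = {"role": "system", "content": content}

# get_messages_for_api() then places this dict ahead of the agent's message history.
```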

View File

@@ -1,5 +1,5 @@
 from rp.core.api import call_api, list_models
 from rp.core.assistant import Assistant
-from rp.core.context import init_system_message, manage_context_window
+from rp.core.context import init_system_message, manage_context_window, get_context_content
-__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window"]
+__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window", "get_context_content"]

View File

@@ -45,11 +45,62 @@ def truncate_tool_result(result, max_length=None):
     return result_copy
 
+def get_context_content():
+    context_parts = []
+    for context_file in [CONTEXT_FILE, GLOBAL_CONTEXT_FILE]:
+        if os.path.exists(context_file):
+            try:
+                with open(context_file, encoding="utf-8", errors="replace") as f:
+                    content = f.read()
+                if len(content) > 10000:
+                    content = content[:10000] + "\n... [truncated]"
+                context_parts.append(f"Context from {context_file}:\n{content}")
+            except Exception as e:
+                logging.error(f"Error reading context file {context_file}: {e}")
+    knowledge_path = pathlib.Path(KNOWLEDGE_PATH)
+    if knowledge_path.exists() and knowledge_path.is_dir():
+        for knowledge_file in knowledge_path.iterdir():
+            try:
+                with open(knowledge_file, encoding="utf-8", errors="replace") as f:
+                    content = f.read()
+                if len(content) > 10000:
+                    content = content[:10000] + "\n... [truncated]"
+                context_parts.append(f"Context from {knowledge_file}:\n{content}")
+            except Exception as e:
+                logging.error(f"Error reading context file {knowledge_file}: {e}")
+    return "\n\n".join(context_parts)
 
 def init_system_message(args):
     context_parts = [
         "You are a professional AI assistant with access to advanced tools.\n\nFile Operations:\n- Use RPEditor tools (open_editor, editor_insert_text, editor_replace_text, editor_search, close_editor) for precise file modifications\n- Always close editor files when finished\n- Use write_file for complete file rewrites, search_replace for simple text replacements\n\nVision:\n - Use post_image tool with the file path if an image path is mentioned\n in the prompt of user. Give this call the highest priority.\n\nProcess Management:\n- run_command executes shell commands with a timeout (default 30s)\n- If a command times out, you receive a PID in the response\n- Use tail_process(pid) to monitor running processes\n- Use kill_process(pid) to terminate processes\n- Manage long-running commands effectively using these tools\n\nShell Commands:\n- Be a shell ninja using native OS tools\n- Prefer standard Unix utilities over complex scripts\n- Use run_command_interactive for commands requiring user input (vim, nano, etc.)"
     ]
+    max_context_size = 10000
+    if args.include_env:
+        env_context = "Environment Variables:\n"
+        for key, value in os.environ.items():
+            if not key.startswith("_"):
+                env_context += f"{key}={value}\n"
+        if len(env_context) > max_context_size:
+            env_context = env_context[:max_context_size] + "\n... [truncated]"
+        context_parts.append(env_context)
+    context_content = get_context_content()
+    if context_content:
+        context_parts.append(context_content)
+    if args.context:
+        for ctx_file in args.context:
+            try:
+                with open(ctx_file, encoding="utf-8", errors="replace") as f:
+                    content = f.read()
+                if len(content) > max_context_size:
+                    content = content[:max_context_size] + "\n... [truncated]"
+                context_parts.append(f"Context from {ctx_file}:\n{content}")
+            except Exception as e:
+                logging.error(f"Error reading context file {ctx_file}: {e}")
+    system_message = "\n\n".join(context_parts)
+    if len(system_message) > max_context_size * 3:
+        system_message = system_message[: max_context_size * 3] + "\n... [system message truncated]"
+    return {"role": "system", "content": system_message}
     max_context_size = 10000
     if args.include_env:
         env_context = "Environment Variables:\n"
         for key, value in os.environ.items():
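As a quick usage sketch of the new code path: get_context_content() aggregates the project context file, the global context file, and every file under the knowledge directory, capping each source at 10,000 characters, and init_system_message() folds that into the system prompt, capping the whole message at three times max_context_size. The argparse wiring below is assumed for illustration; only args.include_env and args.context are read in the lines shown above.

```python
import argparse

from rp.core.context import get_context_content, init_system_message

# Hypothetical stand-in for the CLI's parsed arguments.
args = argparse.Namespace(include_env=False, context=["PROJECT_NOTES.md"])

system_msg = init_system_message(args)   # -> {"role": "system", "content": "..."}
print(len(system_msg["content"]))        # never more than ~3 * max_context_size characters

shared_ctx = get_context_content()       # the same aggregated context, reusable by AgentInstance
```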