Compare commits

...

3 Commits

Author SHA1 Message Date
686115e0f6 feat: add progress indicators for ai operations
Some checks failed
Tests / test (push) Failing after 0s
feat: improve assistant api with progress updates
feat: include relevant context in agent prompts
refactor: split system message into multiple lines
maintenance: update version to 1.44.0
fix: handle entry conversion to string in process_message
2025-11-08 07:07:35 +01:00
fbe3847a0a feat: add progress indicators for ai operations
feat: improve assistant api and autonomous mode with progress updates
feat: include context content in agent prompts
docs: update changelog with version 1.43.0 details
refactor: move context retrieval to separate function
maintenance: update pyproject.toml version to 1.43.0
2025-11-08 04:06:48 +01:00
1a7d829499 feat: add progress indicators for ai operations
feat: enhance autonomous mode with progress tracking
feat: improve assistant api calls with progress feedback
maintenance: update pyproject.toml version to 1.42.0
2025-11-08 03:55:06 +01:00
10 changed files with 172 additions and 64 deletions

View File

@ -37,6 +37,30 @@
## Version 1.43.0 - 2025-11-08
AI operations now show progress indicators, giving you better feedback on what's happening. The Assistant API and autonomous mode have been improved with progress updates and agent prompts now include relevant context.
**Changes:** 5 files, 73 lines
**Languages:** Markdown (8 lines), Python (63 lines), TOML (2 lines)
## Version 1.42.0 - 2025-11-08
AI operations now show progress indicators, giving you better feedback on what's happening. The Assistant API and autonomous mode have also been improved to provide progress updates.
**Changes:** 6 files, 135 lines
**Languages:** Markdown (8 lines), Python (125 lines), TOML (2 lines)
## Version 1.41.0 - 2025-11-08
Workflow functionality is now more thoroughly tested. This improves reliability and helps ensure future changes don't break existing workflows.
**Changes:** 3 files, 570 lines
**Languages:** Markdown (8 lines), Python (560 lines), TOML (2 lines)
## Version 1.40.0 - 2025-11-08

View File

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "rp"
version = "1.40.0"
version = "1.43.0"
description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md"
requires-python = ">=3.10"

View File

@ -3,6 +3,7 @@ import uuid
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional
from ..core.context import get_context_content
from ..memory.knowledge_store import KnowledgeStore
from .agent_communication import AgentCommunicationBus, AgentMessage, MessageType
from .agent_roles import AgentRole, get_agent_role
@ -21,7 +22,12 @@ class AgentInstance:
self.message_history.append({"role": role, "content": content, "timestamp": time.time()})
def get_system_message(self) -> Dict[str, str]:
return {"role": "system", "content": self.role.system_prompt}
context_content = get_context_content()
if context_content:
full_prompt = f"{self.role.system_prompt}\n\n{context_content}"
else:
full_prompt = self.role.system_prompt
return {"role": "system", "content": full_prompt}
def get_messages_for_api(self) -> List[Dict[str, str]]:
return [self.get_system_message()] + [

View File

@ -7,6 +7,7 @@ from rp.core.api import call_api
from rp.core.context import truncate_tool_result
from rp.tools.base import get_tools_definition
from rp.ui import Colors, display_tool_call
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp")
@ -29,6 +30,7 @@ def run_autonomous_mode(assistant, task):
assistant.messages = manage_context_window(assistant.messages, assistant.verbose)
logger.debug(f"Messages after context management: {len(assistant.messages)}")
with ProgressIndicator("Querying AI..."):
response = call_api(
assistant.messages,
assistant.model,
@ -72,6 +74,7 @@ def process_response_autonomous(assistant, response):
assistant.messages.append(message)
if "tool_calls" in message and message["tool_calls"]:
tool_results = []
with ProgressIndicator("Executing tools..."):
for tool_call in message["tool_calls"]:
func_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
@ -89,6 +92,7 @@ def process_response_autonomous(assistant, response):
)
for result in tool_results:
assistant.messages.append(result)
with ProgressIndicator("Processing tool results..."):
follow_up = call_api(
assistant.messages,
assistant.model,

View File

@ -1,5 +1,5 @@
from rp.core.api import call_api, list_models
from rp.core.assistant import Assistant
from rp.core.context import init_system_message, manage_context_window
from rp.core.context import init_system_message, manage_context_window, get_context_content
__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window"]
__all__ = ["Assistant", "call_api", "list_models", "init_system_message", "manage_context_window", "get_context_content"]

View File

@ -67,6 +67,7 @@ from rp.tools.patch import apply_patch, create_diff, display_file_diff
from rp.tools.python_exec import python_exec
from rp.tools.web import http_fetch, web_search, web_search_news
from rp.ui import Colors, Spinner, render_markdown
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp")
logger.setLevel(logging.DEBUG)
@ -305,10 +306,12 @@ class Assistant:
if "tool_calls" in message and message["tool_calls"]:
tool_count = len(message["tool_calls"])
print(f"{Colors.BLUE}đź”§ Executing {tool_count} tool call(s)...{Colors.RESET}")
with ProgressIndicator("Executing tools..."):
tool_results = self.execute_tool_calls(message["tool_calls"])
print(f"{Colors.GREEN}âś… Tool execution completed.{Colors.RESET}")
for result in tool_results:
self.messages.append(result)
with ProgressIndicator("Processing tool results..."):
follow_up = call_api(
self.messages,
self.model,
@ -505,11 +508,10 @@ def process_message(assistant, message):
updated_at=time.time(),
)
assistant.knowledge_store.add_entry(entry)
assistant.messages.append({"role": "user", "content": message})
assistant.messages.append({"role": "user", "content": str(entry)})
logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}")
spinner = Spinner("Querying AI...")
spinner.start()
with ProgressIndicator("Querying AI..."):
response = call_api(
assistant.messages,
assistant.model,
@ -520,7 +522,6 @@ def process_message(assistant, message):
verbose=assistant.verbose,
db_conn=assistant.db_conn,
)
spinner.stop()
if "usage" in response:
usage = response["usage"]
input_tokens = usage.get("prompt_tokens", 0)

View File

@ -45,10 +45,76 @@ def truncate_tool_result(result, max_length=None):
return result_copy
def _read_truncated(path, limit=10000):
    """Read *path* as UTF-8 text (undecodable bytes replaced), truncated to *limit* chars."""
    with open(path, encoding="utf-8", errors="replace") as f:
        content = f.read()
    if len(content) > limit:
        content = content[:limit] + "\n... [truncated]"
    return content


def get_context_content():
    """Collect context text from the context files and the knowledge directory.

    Reads CONTEXT_FILE and GLOBAL_CONTEXT_FILE (when they exist) plus every
    regular file directly inside KNOWLEDGE_PATH. Each piece is truncated to
    10,000 characters and labelled with its origin; read failures are logged
    and skipped.

    Returns:
        str: all readable pieces joined by blank lines ("" when none).
    """
    context_parts = []
    for context_file in [CONTEXT_FILE, GLOBAL_CONTEXT_FILE]:
        if os.path.exists(context_file):
            try:
                content = _read_truncated(context_file)
                context_parts.append(f"Context from {context_file}:\n{content}")
            except Exception as e:
                logging.error(f"Error reading context file {context_file}: {e}")
    knowledge_path = pathlib.Path(KNOWLEDGE_PATH)
    if knowledge_path.exists() and knowledge_path.is_dir():
        for knowledge_file in knowledge_path.iterdir():
            # iterdir() can yield subdirectories; open() on one would raise
            # and be mis-logged as a read error, so skip non-files up front.
            if not knowledge_file.is_file():
                continue
            try:
                content = _read_truncated(knowledge_file)
                context_parts.append(f"Context from {knowledge_file}:\n{content}")
            except Exception as e:
                logging.error(f"Error reading context file {knowledge_file}: {e}")
    return "\n\n".join(context_parts)
def init_system_message(args):
    """Build the system message for an API conversation.

    Assembles the base instruction prompt, then optionally appends: the
    process environment (when args.include_env is set), content gathered by
    get_context_content(), and each file listed in args.context. Every
    appended section is truncated to 10,000 characters, and the final joined
    message is capped at 30,000 characters.

    Args:
        args: parsed CLI options; reads ``include_env`` (bool) and
            ``context`` (iterable of file paths or falsy).

    Returns:
        dict: ``{"role": "system", "content": <assembled prompt>}``.
    """
    context_parts = [
        "You are a professional AI assistant with access to advanced tools.",
        "Use RPEditor tools (open_editor, editor_insert_text, editor_replace_text, editor_search, close_editor) for precise file modifications.",
        "Always close editor files when finished.",
        "Use write_file for complete file rewrites, search_replace for simple text replacements.",
        "Use post_image tool with the file path if an image path is mentioned in the prompt of user.",
        "Give this call the highest priority.",
        "run_command executes shell commands with a timeout (default 30s).",
        "If a command times out, you receive a PID in the response.",
        "Use tail_process(pid) to monitor running processes.",
        "Use kill_process(pid) to terminate processes.",
        "Manage long-running commands effectively using these tools.",
        "Be a shell ninja using native OS tools.",
        "Prefer standard Unix utilities over complex scripts.",
        "Use run_command_interactive for commands requiring user input (vim, nano, etc.).",
        # Fixed typo ("persononal") and garbled sentence in the original prompt text.
        "Use the knowledge base to answer questions. The knowledge base contains preferences and personal information from the user. Also store such information here. Always synchronize with the knowledge base.",
    ]
    max_context_size = 10000
    if args.include_env:
        env_context = "Environment Variables:\n"
        for key, value in os.environ.items():
            # Skip private/underscore-prefixed variables (e.g. "_", "__CF_*").
            if not key.startswith("_"):
                env_context += f"{key}={value}\n"
        if len(env_context) > max_context_size:
            env_context = env_context[:max_context_size] + "\n... [truncated]"
        context_parts.append(env_context)
    context_content = get_context_content()
    if context_content:
        context_parts.append(context_content)
    if args.context:
        for ctx_file in args.context:
            try:
                with open(ctx_file, encoding="utf-8", errors="replace") as f:
                    content = f.read()
                if len(content) > max_context_size:
                    content = content[:max_context_size] + "\n... [truncated]"
                context_parts.append(f"Context from {ctx_file}:\n{content}")
            except Exception as e:
                logging.error(f"Error reading context file {ctx_file}: {e}")
    system_message = "\n\n".join(context_parts)
    # Hard cap on the assembled prompt: 3x the per-section limit.
    if len(system_message) > max_context_size * 3:
        system_message = system_message[: max_context_size * 3] + "\n... [system message truncated]"
    return {"role": "system", "content": system_message}

View File

@ -20,6 +20,7 @@ from rp.core.advanced_context import AdvancedContextManager
from rp.core.api import call_api
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry
from rp.tools.base import get_tools_definition
from rp.ui.progress import ProgressIndicator
from rp.workflows import WorkflowEngine, WorkflowStorage
logger = logging.getLogger("rp")
@ -173,6 +174,7 @@ class EnhancedAssistant:
working_messages = enhanced_messages
else:
working_messages = self.base.messages
with ProgressIndicator("Querying AI..."):
response = self.enhanced_call_api(working_messages)
result = self.base.process_response(response)
if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD:

View File

@ -19,6 +19,9 @@ class KnowledgeEntry:
access_count: int = 0
importance_score: float = 1.0
def __str__(self):
    """Return the entry serialized as a JSON string of its dict form (via to_dict)."""
    return json.dumps(self.to_dict())
def to_dict(self) -> Dict[str, Any]:
return {
"entry_id": self.entry_id,

View File

@ -1,10 +1,12 @@
from rp.ui.colors import Colors, Spinner
from rp.ui.display import display_tool_call, print_autonomous_header
from rp.ui.progress import ProgressIndicator
from rp.ui.rendering import highlight_code, render_markdown
__all__ = [
"Colors",
"Spinner",
"ProgressIndicator",
"highlight_code",
"render_markdown",
"display_tool_call",