feat: add progress indicators for ai operations

feat: enhance autonomous mode with progress tracking
feat: improve assistant api calls with progress feedback
maintenance: update pyproject.toml version to 1.41.0
This commit is contained in:
retoor 2025-11-08 03:55:06 +01:00
parent 6a80c86d51
commit 1a7d829499
6 changed files with 76 additions and 59 deletions

View File

@ -37,6 +37,14 @@
## Version 1.41.0 - 2025-11-08
Workflow functionality is now more thoroughly tested. This improves reliability and helps ensure future changes don't break existing workflows.
**Changes:** 3 files, 570 lines
**Languages:** Markdown (8 lines), Python (560 lines), TOML (2 lines)
## Version 1.40.0 - 2025-11-08 ## Version 1.40.0 - 2025-11-08

View File

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "rp" name = "rp"
version = "1.40.0" version = "1.41.0"
description = "R python edition. The ultimate autonomous AI CLI." description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"

View File

@ -7,6 +7,7 @@ from rp.core.api import call_api
from rp.core.context import truncate_tool_result from rp.core.context import truncate_tool_result
from rp.tools.base import get_tools_definition from rp.tools.base import get_tools_definition
from rp.ui import Colors, display_tool_call from rp.ui import Colors, display_tool_call
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp") logger = logging.getLogger("rp")
@ -29,6 +30,7 @@ def run_autonomous_mode(assistant, task):
assistant.messages = manage_context_window(assistant.messages, assistant.verbose) assistant.messages = manage_context_window(assistant.messages, assistant.verbose)
logger.debug(f"Messages after context management: {len(assistant.messages)}") logger.debug(f"Messages after context management: {len(assistant.messages)}")
with ProgressIndicator("Querying AI..."):
response = call_api( response = call_api(
assistant.messages, assistant.messages,
assistant.model, assistant.model,
@ -72,6 +74,7 @@ def process_response_autonomous(assistant, response):
assistant.messages.append(message) assistant.messages.append(message)
if "tool_calls" in message and message["tool_calls"]: if "tool_calls" in message and message["tool_calls"]:
tool_results = [] tool_results = []
with ProgressIndicator("Executing tools..."):
for tool_call in message["tool_calls"]: for tool_call in message["tool_calls"]:
func_name = tool_call["function"]["name"] func_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"]) arguments = json.loads(tool_call["function"]["arguments"])
@ -89,6 +92,7 @@ def process_response_autonomous(assistant, response):
) )
for result in tool_results: for result in tool_results:
assistant.messages.append(result) assistant.messages.append(result)
with ProgressIndicator("Processing tool results..."):
follow_up = call_api( follow_up = call_api(
assistant.messages, assistant.messages,
assistant.model, assistant.model,

View File

@ -67,6 +67,7 @@ from rp.tools.patch import apply_patch, create_diff, display_file_diff
from rp.tools.python_exec import python_exec from rp.tools.python_exec import python_exec
from rp.tools.web import http_fetch, web_search, web_search_news from rp.tools.web import http_fetch, web_search, web_search_news
from rp.ui import Colors, Spinner, render_markdown from rp.ui import Colors, Spinner, render_markdown
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp") logger = logging.getLogger("rp")
logger.setLevel(logging.DEBUG) logger.setLevel(logging.DEBUG)
@ -305,10 +306,12 @@ class Assistant:
if "tool_calls" in message and message["tool_calls"]: if "tool_calls" in message and message["tool_calls"]:
tool_count = len(message["tool_calls"]) tool_count = len(message["tool_calls"])
print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}") print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}")
with ProgressIndicator("Executing tools..."):
tool_results = self.execute_tool_calls(message["tool_calls"]) tool_results = self.execute_tool_calls(message["tool_calls"])
print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}") print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}")
for result in tool_results: for result in tool_results:
self.messages.append(result) self.messages.append(result)
with ProgressIndicator("Processing tool results..."):
follow_up = call_api( follow_up = call_api(
self.messages, self.messages,
self.model, self.model,
@ -508,8 +511,7 @@ def process_message(assistant, message):
assistant.messages.append({"role": "user", "content": message}) assistant.messages.append({"role": "user", "content": message})
logger.debug(f"Processing user message: {message[:100]}...") logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}") logger.debug(f"Current message count: {len(assistant.messages)}")
spinner = Spinner("Querying AI...") with ProgressIndicator("Querying AI..."):
spinner.start()
response = call_api( response = call_api(
assistant.messages, assistant.messages,
assistant.model, assistant.model,
@ -520,7 +522,6 @@ def process_message(assistant, message):
verbose=assistant.verbose, verbose=assistant.verbose,
db_conn=assistant.db_conn, db_conn=assistant.db_conn,
) )
spinner.stop()
if "usage" in response: if "usage" in response:
usage = response["usage"] usage = response["usage"]
input_tokens = usage.get("prompt_tokens", 0) input_tokens = usage.get("prompt_tokens", 0)

View File

@ -20,6 +20,7 @@ from rp.core.advanced_context import AdvancedContextManager
from rp.core.api import call_api from rp.core.api import call_api
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry
from rp.tools.base import get_tools_definition from rp.tools.base import get_tools_definition
from rp.ui.progress import ProgressIndicator
from rp.workflows import WorkflowEngine, WorkflowStorage from rp.workflows import WorkflowEngine, WorkflowStorage
logger = logging.getLogger("rp") logger = logging.getLogger("rp")
@ -173,6 +174,7 @@ class EnhancedAssistant:
working_messages = enhanced_messages working_messages = enhanced_messages
else: else:
working_messages = self.base.messages working_messages = self.base.messages
with ProgressIndicator("Querying AI..."):
response = self.enhanced_call_api(working_messages) response = self.enhanced_call_api(working_messages)
result = self.base.process_response(response) result = self.base.process_response(response)
if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD: if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD:

View File

@ -1,10 +1,12 @@
from rp.ui.colors import Colors, Spinner from rp.ui.colors import Colors, Spinner
from rp.ui.display import display_tool_call, print_autonomous_header from rp.ui.display import display_tool_call, print_autonomous_header
from rp.ui.progress import ProgressIndicator
from rp.ui.rendering import highlight_code, render_markdown from rp.ui.rendering import highlight_code, render_markdown
__all__ = [ __all__ = [
"Colors", "Colors",
"Spinner", "Spinner",
"ProgressIndicator",
"highlight_code", "highlight_code",
"render_markdown", "render_markdown",
"display_tool_call", "display_tool_call",