feat: add progress indicators for ai operations

feat: enhance autonomous mode with progress tracking
feat: improve assistant api calls with progress feedback
maintenance: update pyproject.toml version to 1.42.0
This commit is contained in:
retoor 2025-11-08 03:55:06 +01:00
parent 6a80c86d51
commit 1a7d829499
6 changed files with 76 additions and 59 deletions

View File

@@ -37,6 +37,14 @@
## Version 1.41.0 - 2025-11-08
Workflow functionality is now more thoroughly tested. This improves reliability and helps ensure future changes don't break existing workflows.
**Changes:** 3 files, 570 lines
**Languages:** Markdown (8 lines), Python (560 lines), TOML (2 lines)
## Version 1.40.0 - 2025-11-08

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project] [project]
name = "rp" name = "rp"
version = "1.40.0" version = "1.41.0"
description = "R python edition. The ultimate autonomous AI CLI." description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md" readme = "README.md"
requires-python = ">=3.10" requires-python = ">=3.10"

View File

@@ -7,6 +7,7 @@ from rp.core.api import call_api
from rp.core.context import truncate_tool_result from rp.core.context import truncate_tool_result
from rp.tools.base import get_tools_definition from rp.tools.base import get_tools_definition
from rp.ui import Colors, display_tool_call from rp.ui import Colors, display_tool_call
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp") logger = logging.getLogger("rp")
@@ -29,15 +30,16 @@ def run_autonomous_mode(assistant, task):
assistant.messages = manage_context_window(assistant.messages, assistant.verbose) assistant.messages = manage_context_window(assistant.messages, assistant.verbose)
logger.debug(f"Messages after context management: {len(assistant.messages)}") logger.debug(f"Messages after context management: {len(assistant.messages)}")
response = call_api( with ProgressIndicator("Querying AI..."):
assistant.messages, response = call_api(
assistant.model, assistant.messages,
assistant.api_url, assistant.model,
assistant.api_key, assistant.api_url,
assistant.use_tools, assistant.api_key,
get_tools_definition(), assistant.use_tools,
verbose=assistant.verbose, get_tools_definition(),
) verbose=assistant.verbose,
)
if "error" in response: if "error" in response:
logger.error(f"API error in autonomous mode: {response['error']}") logger.error(f"API error in autonomous mode: {response['error']}")
print(f"{Colors.RED}Error: {response['error']}{Colors.RESET}") print(f"{Colors.RED}Error: {response['error']}{Colors.RESET}")
@@ -72,32 +74,34 @@ def process_response_autonomous(assistant, response):
assistant.messages.append(message) assistant.messages.append(message)
if "tool_calls" in message and message["tool_calls"]: if "tool_calls" in message and message["tool_calls"]:
tool_results = [] tool_results = []
for tool_call in message["tool_calls"]: with ProgressIndicator("Executing tools..."):
func_name = tool_call["function"]["name"] for tool_call in message["tool_calls"]:
arguments = json.loads(tool_call["function"]["arguments"]) func_name = tool_call["function"]["name"]
result = execute_single_tool(assistant, func_name, arguments) arguments = json.loads(tool_call["function"]["arguments"])
if isinstance(result, str): result = execute_single_tool(assistant, func_name, arguments)
try: if isinstance(result, str):
result = json.loads(result) try:
except json.JSONDecodeError as ex: result = json.loads(result)
result = {"error": str(ex)} except json.JSONDecodeError as ex:
status = "success" if result.get("status") == "success" else "error" result = {"error": str(ex)}
result = truncate_tool_result(result) status = "success" if result.get("status") == "success" else "error"
display_tool_call(func_name, arguments, status, result) result = truncate_tool_result(result)
tool_results.append( display_tool_call(func_name, arguments, status, result)
{"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(result)} tool_results.append(
) {"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(result)}
)
for result in tool_results: for result in tool_results:
assistant.messages.append(result) assistant.messages.append(result)
follow_up = call_api( with ProgressIndicator("Processing tool results..."):
assistant.messages, follow_up = call_api(
assistant.model, assistant.messages,
assistant.api_url, assistant.model,
assistant.api_key, assistant.api_url,
assistant.use_tools, assistant.api_key,
get_tools_definition(), assistant.use_tools,
verbose=assistant.verbose, get_tools_definition(),
) verbose=assistant.verbose,
)
if "usage" in follow_up: if "usage" in follow_up:
usage = follow_up["usage"] usage = follow_up["usage"]
input_tokens = usage.get("prompt_tokens", 0) input_tokens = usage.get("prompt_tokens", 0)

View File

@@ -67,6 +67,7 @@ from rp.tools.patch import apply_patch, create_diff, display_file_diff
from rp.tools.python_exec import python_exec from rp.tools.python_exec import python_exec
from rp.tools.web import http_fetch, web_search, web_search_news from rp.tools.web import http_fetch, web_search, web_search_news
from rp.ui import Colors, Spinner, render_markdown from rp.ui import Colors, Spinner, render_markdown
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp") logger = logging.getLogger("rp")
logger.setLevel(logging.DEBUG) logger.setLevel(logging.DEBUG)
@@ -305,20 +306,22 @@ class Assistant:
if "tool_calls" in message and message["tool_calls"]: if "tool_calls" in message and message["tool_calls"]:
tool_count = len(message["tool_calls"]) tool_count = len(message["tool_calls"])
print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}") print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}")
tool_results = self.execute_tool_calls(message["tool_calls"]) with ProgressIndicator("Executing tools..."):
tool_results = self.execute_tool_calls(message["tool_calls"])
print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}") print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}")
for result in tool_results: for result in tool_results:
self.messages.append(result) self.messages.append(result)
follow_up = call_api( with ProgressIndicator("Processing tool results..."):
self.messages, follow_up = call_api(
self.model, self.messages,
self.api_url, self.model,
self.api_key, self.api_url,
self.use_tools, self.api_key,
get_tools_definition(), self.use_tools,
verbose=self.verbose, get_tools_definition(),
db_conn=self.db_conn, verbose=self.verbose,
) db_conn=self.db_conn,
)
return self.process_response(follow_up) return self.process_response(follow_up)
content = message.get("content", "") content = message.get("content", "")
return render_markdown(content, self.syntax_highlighting) return render_markdown(content, self.syntax_highlighting)
@@ -508,19 +511,17 @@ def process_message(assistant, message):
assistant.messages.append({"role": "user", "content": message}) assistant.messages.append({"role": "user", "content": message})
logger.debug(f"Processing user message: {message[:100]}...") logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}") logger.debug(f"Current message count: {len(assistant.messages)}")
spinner = Spinner("Querying AI...") with ProgressIndicator("Querying AI..."):
spinner.start() response = call_api(
response = call_api( assistant.messages,
assistant.messages, assistant.model,
assistant.model, assistant.api_url,
assistant.api_url, assistant.api_key,
assistant.api_key, assistant.use_tools,
assistant.use_tools, get_tools_definition(),
get_tools_definition(), verbose=assistant.verbose,
verbose=assistant.verbose, db_conn=assistant.db_conn,
db_conn=assistant.db_conn, )
)
spinner.stop()
if "usage" in response: if "usage" in response:
usage = response["usage"] usage = response["usage"]
input_tokens = usage.get("prompt_tokens", 0) input_tokens = usage.get("prompt_tokens", 0)

View File

@@ -20,6 +20,7 @@ from rp.core.advanced_context import AdvancedContextManager
from rp.core.api import call_api from rp.core.api import call_api
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry
from rp.tools.base import get_tools_definition from rp.tools.base import get_tools_definition
from rp.ui.progress import ProgressIndicator
from rp.workflows import WorkflowEngine, WorkflowStorage from rp.workflows import WorkflowEngine, WorkflowStorage
logger = logging.getLogger("rp") logger = logging.getLogger("rp")
@@ -173,7 +174,8 @@ class EnhancedAssistant:
working_messages = enhanced_messages working_messages = enhanced_messages
else: else:
working_messages = self.base.messages working_messages = self.base.messages
response = self.enhanced_call_api(working_messages) with ProgressIndicator("Querying AI..."):
response = self.enhanced_call_api(working_messages)
result = self.base.process_response(response) result = self.base.process_response(response)
if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD: if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD:
summary = ( summary = (

View File

@@ -1,10 +1,12 @@
from rp.ui.colors import Colors, Spinner from rp.ui.colors import Colors, Spinner
from rp.ui.display import display_tool_call, print_autonomous_header from rp.ui.display import display_tool_call, print_autonomous_header
from rp.ui.progress import ProgressIndicator
from rp.ui.rendering import highlight_code, render_markdown from rp.ui.rendering import highlight_code, render_markdown
__all__ = [ __all__ = [
"Colors", "Colors",
"Spinner", "Spinner",
"ProgressIndicator",
"highlight_code", "highlight_code",
"render_markdown", "render_markdown",
"display_tool_call", "display_tool_call",