feat: add progress indicators for ai operations

feat: enhance autonomous mode with progress tracking
feat: improve assistant api calls with progress feedback
maintenance: update pyproject.toml version to 1.41.0
This commit is contained in:
retoor 2025-11-08 03:55:06 +01:00
parent 6a80c86d51
commit 1a7d829499
6 changed files with 76 additions and 59 deletions

View File

@@ -37,6 +37,14 @@
## Version 1.41.0 - 2025-11-08
Workflow functionality is now more thoroughly tested. This improves reliability and helps ensure future changes don't break existing workflows.
**Changes:** 3 files, 570 lines
**Languages:** Markdown (8 lines), Python (560 lines), TOML (2 lines)
## Version 1.40.0 - 2025-11-08

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "rp"
version = "1.40.0"
version = "1.41.0"
description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md"
requires-python = ">=3.10"

View File

@@ -7,6 +7,7 @@ from rp.core.api import call_api
from rp.core.context import truncate_tool_result
from rp.tools.base import get_tools_definition
from rp.ui import Colors, display_tool_call
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp")
@@ -29,15 +30,16 @@ def run_autonomous_mode(assistant, task):
assistant.messages = manage_context_window(assistant.messages, assistant.verbose)
logger.debug(f"Messages after context management: {len(assistant.messages)}")
response = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
with ProgressIndicator("Querying AI..."):
response = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
if "error" in response:
logger.error(f"API error in autonomous mode: {response['error']}")
print(f"{Colors.RED}Error: {response['error']}{Colors.RESET}")
@@ -72,32 +74,34 @@ def process_response_autonomous(assistant, response):
assistant.messages.append(message)
if "tool_calls" in message and message["tool_calls"]:
tool_results = []
for tool_call in message["tool_calls"]:
func_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
result = execute_single_tool(assistant, func_name, arguments)
if isinstance(result, str):
try:
result = json.loads(result)
except json.JSONDecodeError as ex:
result = {"error": str(ex)}
status = "success" if result.get("status") == "success" else "error"
result = truncate_tool_result(result)
display_tool_call(func_name, arguments, status, result)
tool_results.append(
{"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(result)}
)
with ProgressIndicator("Executing tools..."):
for tool_call in message["tool_calls"]:
func_name = tool_call["function"]["name"]
arguments = json.loads(tool_call["function"]["arguments"])
result = execute_single_tool(assistant, func_name, arguments)
if isinstance(result, str):
try:
result = json.loads(result)
except json.JSONDecodeError as ex:
result = {"error": str(ex)}
status = "success" if result.get("status") == "success" else "error"
result = truncate_tool_result(result)
display_tool_call(func_name, arguments, status, result)
tool_results.append(
{"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(result)}
)
for result in tool_results:
assistant.messages.append(result)
follow_up = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
with ProgressIndicator("Processing tool results..."):
follow_up = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
)
if "usage" in follow_up:
usage = follow_up["usage"]
input_tokens = usage.get("prompt_tokens", 0)

View File

@@ -67,6 +67,7 @@ from rp.tools.patch import apply_patch, create_diff, display_file_diff
from rp.tools.python_exec import python_exec
from rp.tools.web import http_fetch, web_search, web_search_news
from rp.ui import Colors, Spinner, render_markdown
from rp.ui.progress import ProgressIndicator
logger = logging.getLogger("rp")
logger.setLevel(logging.DEBUG)
@@ -305,20 +306,22 @@ class Assistant:
if "tool_calls" in message and message["tool_calls"]:
tool_count = len(message["tool_calls"])
print(f"{Colors.BLUE}🔧 Executing {tool_count} tool call(s)...{Colors.RESET}")
tool_results = self.execute_tool_calls(message["tool_calls"])
with ProgressIndicator("Executing tools..."):
tool_results = self.execute_tool_calls(message["tool_calls"])
print(f"{Colors.GREEN}✅ Tool execution completed.{Colors.RESET}")
for result in tool_results:
self.messages.append(result)
follow_up = call_api(
self.messages,
self.model,
self.api_url,
self.api_key,
self.use_tools,
get_tools_definition(),
verbose=self.verbose,
db_conn=self.db_conn,
)
with ProgressIndicator("Processing tool results..."):
follow_up = call_api(
self.messages,
self.model,
self.api_url,
self.api_key,
self.use_tools,
get_tools_definition(),
verbose=self.verbose,
db_conn=self.db_conn,
)
return self.process_response(follow_up)
content = message.get("content", "")
return render_markdown(content, self.syntax_highlighting)
@@ -508,19 +511,17 @@ def process_message(assistant, message):
assistant.messages.append({"role": "user", "content": message})
logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}")
spinner = Spinner("Querying AI...")
spinner.start()
response = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
db_conn=assistant.db_conn,
)
spinner.stop()
with ProgressIndicator("Querying AI..."):
response = call_api(
assistant.messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose,
db_conn=assistant.db_conn,
)
if "usage" in response:
usage = response["usage"]
input_tokens = usage.get("prompt_tokens", 0)

View File

@@ -20,6 +20,7 @@ from rp.core.advanced_context import AdvancedContextManager
from rp.core.api import call_api
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry
from rp.tools.base import get_tools_definition
from rp.ui.progress import ProgressIndicator
from rp.workflows import WorkflowEngine, WorkflowStorage
logger = logging.getLogger("rp")
@@ -173,7 +174,8 @@ class EnhancedAssistant:
working_messages = enhanced_messages
else:
working_messages = self.base.messages
response = self.enhanced_call_api(working_messages)
with ProgressIndicator("Querying AI..."):
response = self.enhanced_call_api(working_messages)
result = self.base.process_response(response)
if len(self.base.messages) >= CONVERSATION_SUMMARY_THRESHOLD:
summary = (

View File

@@ -1,10 +1,12 @@
from rp.ui.colors import Colors, Spinner
from rp.ui.display import display_tool_call, print_autonomous_header
from rp.ui.progress import ProgressIndicator
from rp.ui.rendering import highlight_code, render_markdown
__all__ = [
"Colors",
"Spinner",
"ProgressIndicator",
"highlight_code",
"render_markdown",
"display_tool_call",