feat: add progress indicators for ai operations
Some checks failed
Tests / test (push) Has been cancelled

feat: update assistant api to provide progress updates
maintenance: bump python slugify version
maintenance: add aiosqlite dependency
refactor: inject knowledge context in autonomous mode
refactor: update assistant class to use graph memory
refactor: process messages with enhanced context and progress indicator
docs: update changelog with version 1.44.0 details
maintenance: update pyproject.toml version to 1.44.0
maintenance: update requirements.txt with aiosqlite
test: mock time and uuid for process_message test
test: mock fact_extractor and knowledge_store for process_message test
This commit is contained in:
retoor 2025-11-08 08:21:40 +01:00
parent 686115e0f6
commit 83d0c59884
8 changed files with 69 additions and 18 deletions

View File

@@ -40,6 +40,14 @@
## Version 1.44.0 - 2025-11-08
AI operations now show progress indicators, giving you better feedback on long tasks. The Assistant API has been updated to provide progress updates during these operations.
**Changes:** 5 files, 32 lines
**Languages:** Markdown (8 lines), Python (22 lines), TOML (2 lines)
## Version 1.43.0 - 2025-11-08

View File

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "rp"
version = "1.43.0"
version = "1.44.0"
description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md"
requires-python = ">=3.10"

View File

@@ -6,4 +6,4 @@ gitpython==3.1.43
websockets==13.0.1
pytest==8.3.2
bcrypt==4.1.3
python-slugify==8.0.4
python-slugify==8.0.4
aiosqlite==0.20.0

View File

@@ -19,8 +19,8 @@ def run_autonomous_mode(assistant, task):
logger.debug(f"Task: {task}")
from rp.core.knowledge_context import inject_knowledge_context
inject_knowledge_context(assistant, task)
assistant.messages.append({"role": "user", "content": f"{task}"})
inject_knowledge_context(assistant, assistant.messages[-1]["content"])
try:
while True:
assistant.autonomous_iterations += 1

View File

@@ -6,7 +6,9 @@ import readline
import signal
import sqlite3
import sys
import time
import traceback
import uuid
from concurrent.futures import ThreadPoolExecutor
from rp.commands import handle_command
@@ -109,9 +111,10 @@ class Assistant:
self.background_tasks = set()
self.last_result = None
self.init_database()
from rp.memory import KnowledgeStore, FactExtractor
from rp.memory import KnowledgeStore, FactExtractor, GraphMemory
self.knowledge_store = KnowledgeStore(DB_PATH, db_conn=self.db_conn)
self.fact_extractor = FactExtractor()
self.graph_memory = GraphMemory(DB_PATH, db_conn=self.db_conn)
self.messages.append(init_system_message(args))
try:
from rp.core.enhanced_assistant import EnhancedAssistant
@@ -324,6 +327,8 @@ class Assistant:
)
return self.process_response(follow_up)
content = message.get("content", "")
with ProgressIndicator("Updating memory..."):
self.graph_memory.populate_from_text(content)
return render_markdown(content, self.syntax_highlighting)
def signal_handler(self, signum, frame):
@@ -412,8 +417,10 @@ class Assistant:
cmd_result = handle_command(self, user_input)
if cmd_result is False:
break
elif cmd_result is True:
continue
# If cmd_result is True, the command was handled (e.g., /auto),
# and the blocking operation will complete before the next prompt.
# If cmd_result is None, it's not a special command, process with LLM.
elif cmd_result is None:
# Use enhanced processing if available, otherwise fall back to basic processing
if hasattr(self, "enhanced") and self.enhanced:
result = self.enhanced.process_with_enhanced_context(user_input)
@@ -487,7 +494,6 @@
def process_message(assistant, message):
from rp.core.knowledge_context import inject_knowledge_context
inject_knowledge_context(assistant, message)
# Save the user message as a fact
import time
import uuid
@@ -509,6 +515,9 @@ def process_message(assistant, message):
)
assistant.knowledge_store.add_entry(entry)
assistant.messages.append({"role": "user", "content": str(entry)})
inject_knowledge_context(assistant, assistant.messages[-1]["content"])
with ProgressIndicator("Updating memory..."):
assistant.graph_memory.populate_from_text(message)
logger.debug(f"Processing user message: {message[:100]}...")
logger.debug(f"Current message count: {len(assistant.messages)}")
with ProgressIndicator("Querying AI..."):

View File

@@ -2,6 +2,7 @@ from .conversation_memory import ConversationMemory
from .fact_extractor import FactExtractor
from .knowledge_store import KnowledgeEntry, KnowledgeStore
from .semantic_index import SemanticIndex
from .graph_memory import GraphMemory
__all__ = [
"KnowledgeStore",
@@ -9,4 +10,5 @@ __all__ = [
"SemanticIndex",
"ConversationMemory",
"FactExtractor",
"GraphMemory",
]

View File

@@ -20,7 +20,7 @@ class KnowledgeEntry:
importance_score: float = 1.0
def __str__(self):
return json.dumps(self.to_dict())
return json.dumps(self.to_dict(), indent=4, sort_keys=True, default=str)
def to_dict(self) -> Dict[str, Any]:
return {

View File

@@ -88,21 +88,53 @@ class TestAssistant(unittest.TestCase):
@patch("rp.core.assistant.call_api")
@patch("rp.core.assistant.get_tools_definition")
def test_process_message(self, mock_tools, mock_call):
@patch("time.time")
@patch("uuid.uuid4")
def test_process_message(self, mock_uuid, mock_time, mock_tools, mock_call):
assistant = MagicMock()
assistant.verbose = False
assistant.use_tools = True
assistant.model = "model"
assistant.api_url = "url"
assistant.api_key = "key"
# Mock fact_extractor and its categorize_content method
assistant.fact_extractor = MagicMock()
assistant.fact_extractor.categorize_content.return_value = ["user_message"]
# Mock knowledge_store and its add_entry method
assistant.knowledge_store = MagicMock()
assistant.knowledge_store.add_entry.return_value = None
mock_tools.return_value = []
mock_call.return_value = {"choices": [{"message": {"content": "response"}}]}
mock_time.return_value = 1234567890.123456
mock_uuid.return_value = MagicMock()
mock_uuid.return_value.__str__ = MagicMock(return_value="mock_uuid_value")
with patch("rp.core.assistant.render_markdown", return_value="rendered"):
with patch("builtins.print"):
process_message(assistant, "test message")
assistant.messages.append.assert_called_with({"role": "user", "content": "test message"})
from rp.memory import KnowledgeEntry
import json
import time
import uuid
from unittest.mock import ANY
# Mock time.time() and uuid.uuid4() to return consistent values
expected_entry = KnowledgeEntry(
entry_id="mock_uuid_value"[:16],
category="user_message",
content="test message",
metadata={
"type": "user_message",
"confidence": 1.0,
"source": "user_input",
},
created_at=1234567890.123456,
updated_at=1234567890.123456,
)
expected_content = str(expected_entry)
assistant.knowledge_store.add_entry.assert_called_once_with(expected_entry)
if __name__ == "__main__":