diff --git a/CHANGELOG.md b/CHANGELOG.md index f11f2e0..e57d916 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,14 @@ + + +## Version 1.44.0 - 2025-11-08 + +AI operations now show progress indicators, giving you better feedback on long tasks. The Assistant API has been updated to provide progress updates during these operations. + +**Changes:** 5 files, 32 lines +**Languages:** Markdown (8 lines), Python (22 lines), TOML (2 lines) ## Version 1.43.0 - 2025-11-08 diff --git a/pyproject.toml b/pyproject.toml index f96ee80..90aca5a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "rp" -version = "1.43.0" +version = "1.44.0" description = "R python edition. The ultimate autonomous AI CLI." readme = "README.md" requires-python = ">=3.10" diff --git a/requirements.txt b/requirements.txt index 5c12dde..f864288 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,4 +6,5 @@ gitpython==3.1.43 websockets==13.0.1 pytest==8.3.2 bcrypt==4.1.3 -python-slugify==8.0.4 \ No newline at end of file +python-slugify==8.0.4 +aiosqlite==0.20.0 diff --git a/rp/autonomous/mode.py b/rp/autonomous/mode.py index e40f6ee..f4d6ab1 100644 --- a/rp/autonomous/mode.py +++ b/rp/autonomous/mode.py @@ -19,8 +19,8 @@ def run_autonomous_mode(assistant, task): logger.debug(f"Task: {task}") from rp.core.knowledge_context import inject_knowledge_context - inject_knowledge_context(assistant, task) assistant.messages.append({"role": "user", "content": f"{task}"}) + inject_knowledge_context(assistant, assistant.messages[-1]["content"]) try: while True: assistant.autonomous_iterations += 1 diff --git a/rp/core/assistant.py b/rp/core/assistant.py index cd021c1..7358da0 100644 --- a/rp/core/assistant.py +++ b/rp/core/assistant.py @@ -6,7 +6,9 @@ import readline import signal import sqlite3 import sys +import time import traceback +import uuid from concurrent.futures import ThreadPoolExecutor from rp.commands import 
handle_command @@ -109,9 +111,10 @@ class Assistant: self.background_tasks = set() self.last_result = None self.init_database() - from rp.memory import KnowledgeStore, FactExtractor + from rp.memory import KnowledgeStore, FactExtractor, GraphMemory self.knowledge_store = KnowledgeStore(DB_PATH, db_conn=self.db_conn) self.fact_extractor = FactExtractor() + self.graph_memory = GraphMemory(DB_PATH, db_conn=self.db_conn) self.messages.append(init_system_message(args)) try: from rp.core.enhanced_assistant import EnhancedAssistant @@ -324,6 +327,8 @@ class Assistant: ) return self.process_response(follow_up) content = message.get("content", "") + with ProgressIndicator("Updating memory..."): + self.graph_memory.populate_from_text(content) return render_markdown(content, self.syntax_highlighting) def signal_handler(self, signum, frame): @@ -412,16 +417,18 @@ class Assistant: cmd_result = handle_command(self, user_input) if cmd_result is False: break - elif cmd_result is True: - continue - # Use enhanced processing if available, otherwise fall back to basic processing - if hasattr(self, "enhanced") and self.enhanced: - result = self.enhanced.process_with_enhanced_context(user_input) - if result != self.last_result: - print(result) - self.last_result = result - else: - process_message(self, user_input) + # If cmd_result is True, the command was handled (e.g., /auto), + # and the blocking operation will complete before the next prompt. + # If cmd_result is None, it's not a special command, process with LLM. 
+ elif cmd_result is None: + # Use enhanced processing if available, otherwise fall back to basic processing + if hasattr(self, "enhanced") and self.enhanced: + result = self.enhanced.process_with_enhanced_context(user_input) + if result != self.last_result: + print(result) + self.last_result = result + else: + process_message(self, user_input) except EOFError: break except KeyboardInterrupt: @@ -487,7 +494,6 @@ class Assistant: def process_message(assistant, message): from rp.core.knowledge_context import inject_knowledge_context - inject_knowledge_context(assistant, message) # Save the user message as a fact import time import uuid @@ -509,6 +515,9 @@ def process_message(assistant, message): ) assistant.knowledge_store.add_entry(entry) assistant.messages.append({"role": "user", "content": str(entry)}) + inject_knowledge_context(assistant, assistant.messages[-1]["content"]) + with ProgressIndicator("Updating memory..."): + assistant.graph_memory.populate_from_text(message) logger.debug(f"Processing user message: {message[:100]}...") logger.debug(f"Current message count: {len(assistant.messages)}") with ProgressIndicator("Querying AI..."): diff --git a/rp/memory/__init__.py b/rp/memory/__init__.py index ee5c13f..a5f4818 100644 --- a/rp/memory/__init__.py +++ b/rp/memory/__init__.py @@ -2,6 +2,7 @@ from .conversation_memory import ConversationMemory from .fact_extractor import FactExtractor from .knowledge_store import KnowledgeEntry, KnowledgeStore from .semantic_index import SemanticIndex +from .graph_memory import GraphMemory __all__ = [ "KnowledgeStore", @@ -9,4 +10,5 @@ __all__ = [ "SemanticIndex", "ConversationMemory", "FactExtractor", + "GraphMemory", ] diff --git a/rp/memory/knowledge_store.py b/rp/memory/knowledge_store.py index 0de9442..a6890f2 100644 --- a/rp/memory/knowledge_store.py +++ b/rp/memory/knowledge_store.py @@ -20,7 +20,7 @@ class KnowledgeEntry: importance_score: float = 1.0 def __str__(self): - return json.dumps(self.to_dict()) + return 
json.dumps(self.to_dict(), indent=4, sort_keys=True,default=str) def to_dict(self) -> Dict[str, Any]: return { diff --git a/tests/test_assistant.py b/tests/test_assistant.py index f65db16..a873a87 100644 --- a/tests/test_assistant.py +++ b/tests/test_assistant.py @@ -88,22 +88,54 @@ class TestAssistant(unittest.TestCase): @patch("rp.core.assistant.call_api") @patch("rp.core.assistant.get_tools_definition") - def test_process_message(self, mock_tools, mock_call): + @patch("time.time") + @patch("uuid.uuid4") + def test_process_message(self, mock_uuid, mock_time, mock_tools, mock_call): assistant = MagicMock() assistant.verbose = False assistant.use_tools = True assistant.model = "model" assistant.api_url = "url" assistant.api_key = "key" + # Mock fact_extractor and its categorize_content method + assistant.fact_extractor = MagicMock() + assistant.fact_extractor.categorize_content.return_value = ["user_message"] + # Mock knowledge_store and its add_entry method + assistant.knowledge_store = MagicMock() + assistant.knowledge_store.add_entry.return_value = None mock_tools.return_value = [] mock_call.return_value = {"choices": [{"message": {"content": "response"}}]} + mock_time.return_value = 1234567890.123456 + mock_uuid.return_value = MagicMock() + mock_uuid.return_value.__str__ = MagicMock(return_value="mock_uuid_value") with patch("rp.core.assistant.render_markdown", return_value="rendered"): with patch("builtins.print"): process_message(assistant, "test message") - assistant.messages.append.assert_called_with({"role": "user", "content": "test message"}) + from rp.memory import KnowledgeEntry + import json + import time + import uuid + from unittest.mock import ANY + # Mock time.time() and uuid.uuid4() to return consistent values + expected_entry = KnowledgeEntry( + entry_id="mock_uuid_value"[:16], + category="user_message", + content="test message", + metadata={ + "type": "user_message", + "confidence": 1.0, + "source": "user_input", + }, + 
created_at=1234567890.123456, + updated_at=1234567890.123456, + ) + expected_content = str(expected_entry) + + assistant.knowledge_store.add_entry.assert_called_once_with(expected_entry) + if __name__ == "__main__": unittest.main()