feat: add progress indicators for ai operations
Some checks failed: Tests / test (push) has been cancelled.
feat: update assistant api to provide progress updates
maintenance: bump python-slugify version
maintenance: add aiosqlite dependency
refactor: inject knowledge context in autonomous mode
refactor: update assistant class to use graph memory
refactor: process messages with enhanced context and progress indicator
docs: update changelog with version 1.44.0 details
maintenance: update pyproject.toml version to 1.44.0
maintenance: update requirements.txt with aiosqlite
test: mock time and uuid for process_message test
test: mock fact_extractor and knowledge_store for process_message test
This commit is contained in:
parent 686115e0f6
commit 83d0c59884
CHANGELOG.md:

```diff
@@ -40,6 +40,14 @@
 
 
 
 
+
+## Version 1.44.0 - 2025-11-08
+
+AI operations now show progress indicators, giving you better feedback on long tasks. The Assistant API has been updated to provide progress updates during these operations.
+
+**Changes:** 5 files, 32 lines
+**Languages:** Markdown (8 lines), Python (22 lines), TOML (2 lines)
+
 ## Version 1.44.0 - 2025-11-08 is followed here by the existing entry:
 ## Version 1.43.0 - 2025-11-08
```
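Note: the hunks below only show `ProgressIndicator` at its call sites (`with ProgressIndicator("Updating memory..."):`); its implementation is not part of this commit view. A minimal sketch of what such a context manager could look like, assuming a spinner animated on stderr by a background thread — every name and detail here is illustrative, not rp's actual code:

```python
# Hypothetical sketch of a ProgressIndicator context manager; the real
# rp implementation is not shown in this commit. A daemon thread animates
# a spinner until the managed block finishes.
import itertools
import sys
import threading
import time


class ProgressIndicator:
    def __init__(self, message: str):
        self.message = message
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._spin, daemon=True)

    def _spin(self):
        for frame in itertools.cycle("|/-\\"):
            if self._stop.is_set():
                break
            sys.stderr.write(f"\r{self.message} {frame}")
            sys.stderr.flush()
            time.sleep(0.1)
        # Clear the spinner line once the operation completes.
        sys.stderr.write("\r" + " " * (len(self.message) + 2) + "\r")

    def __enter__(self):
        self._thread.start()
        return self

    def __exit__(self, exc_type, exc, tb):
        self._stop.set()
        self._thread.join()
        return False  # never suppress exceptions from the managed block
```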
pyproject.toml:

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "rp"
-version = "1.43.0"
+version = "1.44.0"
 description = "R python edition. The ultimate autonomous AI CLI."
 readme = "README.md"
 requires-python = ">=3.10"
```
requirements.txt (the new pin belongs on its own line; the commit view showed it fused onto the python-slugify line):

```diff
@@ -6,4 +6,5 @@ gitpython==3.1.43
 websockets==13.0.1
 pytest==8.3.2
 bcrypt==4.1.3
 python-slugify==8.0.4
+aiosqlite==0.20.0
```
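None of the hunks shown on this page actually use aiosqlite yet; the pin is presumably groundwork for async access to the same SQLite store. A minimal usage sketch, with a hypothetical database path and table name:

```python
# Minimal aiosqlite usage sketch; the path and table name are hypothetical,
# since this commit only adds the dependency pin.
import asyncio

import aiosqlite


async def count_entries(db_path: str = "knowledge.db") -> int:
    async with aiosqlite.connect(db_path) as db:
        async with db.execute("SELECT COUNT(*) FROM knowledge_entries") as cur:
            (count,) = await cur.fetchone()
            return count


if __name__ == "__main__":
    print(asyncio.run(count_entries()))
```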
rp autonomous mode module (run_autonomous_mode; exact file path not shown in this view):

```diff
@@ -19,8 +19,8 @@ def run_autonomous_mode(assistant, task):
     logger.debug(f"Task: {task}")
     from rp.core.knowledge_context import inject_knowledge_context
 
-    inject_knowledge_context(assistant, task)
     assistant.messages.append({"role": "user", "content": f"{task}"})
+    inject_knowledge_context(assistant, assistant.messages[-1]["content"])
     try:
         while True:
             assistant.autonomous_iterations += 1
```
rp/core/assistant.py:

```diff
@@ -6,7 +6,9 @@ import readline
 import signal
 import sqlite3
 import sys
+import time
 import traceback
+import uuid
 from concurrent.futures import ThreadPoolExecutor
 
 from rp.commands import handle_command
@@ -109,9 +111,10 @@ class Assistant:
         self.background_tasks = set()
         self.last_result = None
         self.init_database()
-        from rp.memory import KnowledgeStore, FactExtractor
+        from rp.memory import KnowledgeStore, FactExtractor, GraphMemory
         self.knowledge_store = KnowledgeStore(DB_PATH, db_conn=self.db_conn)
         self.fact_extractor = FactExtractor()
+        self.graph_memory = GraphMemory(DB_PATH, db_conn=self.db_conn)
         self.messages.append(init_system_message(args))
         try:
             from rp.core.enhanced_assistant import EnhancedAssistant
@@ -324,6 +327,8 @@ class Assistant:
             )
             return self.process_response(follow_up)
         content = message.get("content", "")
+        with ProgressIndicator("Updating memory..."):
+            self.graph_memory.populate_from_text(content)
         return render_markdown(content, self.syntax_highlighting)
 
     def signal_handler(self, signum, frame):
@@ -412,16 +417,18 @@ class Assistant:
                 cmd_result = handle_command(self, user_input)
                 if cmd_result is False:
                     break
-                elif cmd_result is True:
-                    continue
-                # Use enhanced processing if available, otherwise fall back to basic processing
-                if hasattr(self, "enhanced") and self.enhanced:
-                    result = self.enhanced.process_with_enhanced_context(user_input)
-                    if result != self.last_result:
-                        print(result)
-                    self.last_result = result
-                else:
-                    process_message(self, user_input)
+                # If cmd_result is True, the command was handled (e.g., /auto),
+                # and the blocking operation will complete before the next prompt.
+                # If cmd_result is None, it's not a special command, process with LLM.
+                elif cmd_result is None:
+                    # Use enhanced processing if available, otherwise fall back to basic processing
+                    if hasattr(self, "enhanced") and self.enhanced:
+                        result = self.enhanced.process_with_enhanced_context(user_input)
+                        if result != self.last_result:
+                            print(result)
+                        self.last_result = result
+                    else:
+                        process_message(self, user_input)
             except EOFError:
                 break
             except KeyboardInterrupt:
@@ -487,7 +494,6 @@ class Assistant:
 def process_message(assistant, message):
     from rp.core.knowledge_context import inject_knowledge_context
 
-    inject_knowledge_context(assistant, message)
     # Save the user message as a fact
     import time
     import uuid
@@ -509,6 +515,9 @@ def process_message(assistant, message):
     )
     assistant.knowledge_store.add_entry(entry)
     assistant.messages.append({"role": "user", "content": str(entry)})
+    inject_knowledge_context(assistant, assistant.messages[-1]["content"])
+    with ProgressIndicator("Updating memory..."):
+        assistant.graph_memory.populate_from_text(message)
     logger.debug(f"Processing user message: {message[:100]}...")
     logger.debug(f"Current message count: {len(assistant.messages)}")
     with ProgressIndicator("Querying AI..."):
```
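The REPL hunk above settles on a three-state contract for handle_command: False exits the loop, True means the command was fully handled, and None falls through to the LLM. A condensed, self-contained sketch of that dispatch (the handle_command body here is a stand-in, not rp's implementation):

```python
# Condensed sketch of the three-state command dispatch used in the REPL
# hunk above; handle_command is a stand-in for illustration only.
def handle_command(user_input):
    if user_input == "/exit":
        return False  # exit the loop
    if user_input.startswith("/"):
        print(f"(handled {user_input})")
        return True  # handled; nothing for the LLM
    return None  # not a special command; send to the LLM


def repl(inputs):
    for user_input in inputs:
        result = handle_command(user_input)
        if result is False:
            break
        elif result is None:
            print(f"LLM <- {user_input}")


repl(["/help", "hello", "/exit"])  # handles /help, routes "hello", then exits
```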
rp/memory/__init__.py:

```diff
@@ -2,6 +2,7 @@ from .conversation_memory import ConversationMemory
 from .fact_extractor import FactExtractor
 from .knowledge_store import KnowledgeEntry, KnowledgeStore
 from .semantic_index import SemanticIndex
+from .graph_memory import GraphMemory
 
 __all__ = [
     "KnowledgeStore",
@@ -9,4 +10,5 @@ __all__ = [
     "SemanticIndex",
     "ConversationMemory",
     "FactExtractor",
+    "GraphMemory",
 ]
```
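GraphMemory itself is not included in this commit view; from its call sites (`GraphMemory(DB_PATH, db_conn=self.db_conn)` and `populate_from_text(...)`) only its surface can be inferred. A hedged sketch of that interface, with placeholder extraction logic standing in for whatever the real module does:

```python
# Interface sketch inferred from the call sites in this commit; the real
# rp.memory.graph_memory implementation is not shown here, and the
# extraction logic below is a placeholder.
import sqlite3
from typing import Optional


class GraphMemory:
    def __init__(self, db_path: str, db_conn: Optional[sqlite3.Connection] = None):
        # Reuse the assistant's connection when given, else open our own.
        self.conn = db_conn or sqlite3.connect(db_path)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS graph_edges (src TEXT, relation TEXT, dst TEXT)"
        )

    def populate_from_text(self, text: str) -> None:
        # Placeholder: the real implementation presumably extracts entities
        # and relations; here we just link adjacent capitalized words.
        words = [w for w in text.split() if w.istitle()]
        for a, b in zip(words, words[1:]):
            self.conn.execute(
                "INSERT INTO graph_edges VALUES (?, 'co_occurs', ?)", (a, b)
            )
        self.conn.commit()
```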
rp/memory/knowledge_store.py:

```diff
@@ -20,7 +20,7 @@ class KnowledgeEntry:
     importance_score: float = 1.0
 
     def __str__(self):
-        return json.dumps(self.to_dict())
+        return json.dumps(self.to_dict(), indent=4, sort_keys=True, default=str)
 
     def to_dict(self) -> Dict[str, Any]:
         return {
```
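With this change, str(entry) is pretty-printed, key-sorted JSON, and default=str keeps any non-JSON-serializable field from raising TypeError. A quick illustration on a minimal stand-in dataclass (not the real KnowledgeEntry):

```python
# Illustrates the new __str__ behavior on a minimal stand-in dataclass;
# KnowledgeEntry's full field list is not reproduced here.
import json
from dataclasses import asdict, dataclass
from datetime import datetime


@dataclass
class Entry:
    entry_id: str
    created_at: datetime  # not JSON-serializable without default=str

    def __str__(self):
        return json.dumps(asdict(self), indent=4, sort_keys=True, default=str)


print(Entry("abc123", datetime(2025, 11, 8)))
# {
#     "created_at": "2025-11-08 00:00:00",
#     "entry_id": "abc123"
# }
```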
test suite (TestAssistant; exact file path not shown in this view):

```diff
@@ -88,22 +88,54 @@ class TestAssistant(unittest.TestCase):
 
     @patch("rp.core.assistant.call_api")
     @patch("rp.core.assistant.get_tools_definition")
-    def test_process_message(self, mock_tools, mock_call):
+    @patch("time.time")
+    @patch("uuid.uuid4")
+    def test_process_message(self, mock_uuid, mock_time, mock_tools, mock_call):
         assistant = MagicMock()
         assistant.verbose = False
         assistant.use_tools = True
         assistant.model = "model"
         assistant.api_url = "url"
         assistant.api_key = "key"
+        # Mock fact_extractor and its categorize_content method
+        assistant.fact_extractor = MagicMock()
+        assistant.fact_extractor.categorize_content.return_value = ["user_message"]
+        # Mock knowledge_store and its add_entry method
+        assistant.knowledge_store = MagicMock()
+        assistant.knowledge_store.add_entry.return_value = None
         mock_tools.return_value = []
         mock_call.return_value = {"choices": [{"message": {"content": "response"}}]}
+        mock_time.return_value = 1234567890.123456
+        mock_uuid.return_value = MagicMock()
+        mock_uuid.return_value.__str__ = MagicMock(return_value="mock_uuid_value")
 
         with patch("rp.core.assistant.render_markdown", return_value="rendered"):
             with patch("builtins.print"):
                 process_message(assistant, "test message")
 
-        assistant.messages.append.assert_called_with({"role": "user", "content": "test message"})
+        from rp.memory import KnowledgeEntry
+        import json
+        import time
+        import uuid
+        from unittest.mock import ANY
+
+        # time.time() and uuid.uuid4() are mocked above to return consistent values
+        expected_entry = KnowledgeEntry(
+            entry_id="mock_uuid_value"[:16],
+            category="user_message",
+            content="test message",
+            metadata={
+                "type": "user_message",
+                "confidence": 1.0,
+                "source": "user_input",
+            },
+            created_at=1234567890.123456,
+            updated_at=1234567890.123456,
+        )
+        expected_content = str(expected_entry)
+
+        assistant.knowledge_store.add_entry.assert_called_once_with(expected_entry)
 
 
 if __name__ == "__main__":
     unittest.main()
```
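One subtlety in the new decorators: process_message does `import time` and `import uuid` inside the function body, so patching the top-level "time.time" and "uuid.uuid4" (rather than module-level attributes of rp.core.assistant) is what takes effect at call time. A self-contained illustration of that pattern:

```python
# Why @patch("time.time") works for a function-local import: the local
# `import time` binds the same module object whose `time` attribute the
# patch replaced, so the lookup resolves to the mock at call time.
import unittest
from unittest.mock import patch


def stamp():
    import time  # local import, resolved when stamp() runs
    return time.time()


class TestStamp(unittest.TestCase):
    @patch("time.time", return_value=1234567890.123456)
    def test_stamp(self, mock_time):
        self.assertEqual(stamp(), 1234567890.123456)


if __name__ == "__main__":
    unittest.main()
```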