feat: enable autonomous mode by default

feat: deprecate -a/--autonomous flag
fix: prevent duplicate process execution in /auto command
fix: disable background monitoring by default
fix: add thread locks to prevent duplicate initialization
fix: remove duplicate detect_process_type function definition
fix: improve thread synchronization for global background services
fix: improve cleanup of background threads on exit
docs: update description in rp/__main__.py
docs: add note about autonomous mode in rp/commands/handlers.py
maintenance: update pyproject.toml version to 1.47.1
maintenance: update rp/__init__.py version to 1.47.1
refactor: sanitize json output in rp/core/assistant.py
refactor: use thread locks in rp/core/autonomous_interactions.py
refactor: use thread locks in rp/core/background_monitor.py
refactor: improve autonomous detection in rp/autonomous/detection.py
refactor: improve context initialization in rp/core/context.py
Author: retoor
Date: 2025-11-09 03:34:01 +01:00
Parent: ec42e579a8
Commit: 5881b66d4a
14 changed files with 125 additions and 108 deletions

CHANGELOG.md

@@ -1,5 +1,23 @@
 # Changelog
+
+## Version 1.47.1 - 2025-11-09
+
+### Fixed
+- **Duplicate Processes**: Fixed duplicate process execution when running the `/auto` command
+  - Disabled background monitoring by default (set `BACKGROUND_MONITOR_ENABLED = False` in config.py)
+  - Added thread locks to prevent duplicate initialization of the global monitor and autonomous threads
+  - Removed duplicate `detect_process_type()` function definition in `rp/tools/process_handlers.py`
+  - Background monitoring can be re-enabled via environment variable: `BACKGROUND_MONITOR=1`
+
+### Changed
+- **Autonomous mode is now the default**: All messages and tasks run in autonomous mode by default
+  - Single message mode: `rp "task"` now runs autonomously until completion
+  - Interactive mode: Messages in the REPL now run autonomously without needing `/auto`
+  - The `/auto` command still works but shows a deprecation notice
+  - The `-a/--autonomous` flag is now deprecated, as it is the default behavior
+- Background monitoring is now opt-in rather than opt-out
+- Added proper thread synchronization for global background services
+- Improved cleanup of background threads on exit
@@ -43,6 +61,12 @@
+## Version 1.47.0 - 2025-11-08
+
+Users can now search for knowledge by category. We've also improved performance and updated the software version to 1.47.0.
+
+**Changes:** 3 files, 40 lines
+**Languages:** Markdown (8 lines), Python (30 lines), TOML (2 lines)
 ## Version 1.46.0 - 2025-11-08
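
The opt-in re-enable path mentioned above reads the `BACKGROUND_MONITOR` environment variable and falls back to the config default. A minimal sketch of that check, mirroring the logic this commit adds to `rp/core/assistant.py` (the standalone helper name `background_monitor_enabled` is illustrative):

```python
import os

# Default shipped in rp/config.py as of this commit.
BACKGROUND_MONITOR_ENABLED = False

def background_monitor_enabled() -> bool:
    # The env var wins over the config default; truthy spellings match the diff.
    raw = os.environ.get("BACKGROUND_MONITOR", str(BACKGROUND_MONITOR_ENABLED))
    return raw.lower() in ("1", "true", "yes")

# Running `BACKGROUND_MONITOR=1 rp -i` makes this return True.
print(background_monitor_enabled())
```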

pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "rp"
-version = "1.46.0"
+version = "1.47.1"
 description = "R python edition. The ultimate autonomous AI CLI."
 readme = "README.md"
 requires-python = ">=3.10"

rp/__init__.py

@@ -1,4 +1,4 @@
-__version__ = "1.0.0"
+__version__ = "1.47.1"
 from rp.core import Assistant

 __all__ = ["Assistant"]

rp/__main__.py

@@ -10,11 +10,11 @@ def main_def():
     tracemalloc.start()
     parser = argparse.ArgumentParser(
-        description="RP Assistant - Professional CLI AI assistant with visual effects, cost tracking, and autonomous execution",
+        description="RP Assistant - Professional CLI AI assistant with autonomous execution by default",
         epilog="""
 Examples:
-  rp "What is Python?" # Single query
-  rp -i # Interactive mode
+  rp "Create a web scraper" # Autonomous task execution
+  rp -i # Interactive autonomous mode
   rp -i --model gpt-4 # Use specific model
   rp --save-session my-task -i # Save session
   rp --load-session my-task # Load session
@@ -22,13 +22,13 @@ Examples:
   rp --usage # Show token usage stats

 Features:
+  Autonomous execution by default - tasks run until completion
   Visual progress indicators during AI calls
   Real-time cost tracking for each query
   Sophisticated CLI with colors and effects
   Tool execution with status updates

 Commands in interactive mode:
-  /auto [task] - Enter autonomous mode
   /reset - Clear message history
   /verbose - Toggle verbose output
   /models - List available models
@@ -45,7 +45,7 @@ Commands in interactive mode:
     parser.add_argument("-u", "--api-url", help="API endpoint URL")
     parser.add_argument("--model-list-url", help="Model list endpoint URL")
     parser.add_argument("-i", "--interactive", action="store_true", help="Interactive mode")
-    parser.add_argument("-a", "--autonomous", action="store_true", help="Autonomous mode")
+    parser.add_argument("-a", "--autonomous", action="store_true", help="Autonomous mode (now default, this flag is deprecated)")
     parser.add_argument("-v", "--verbose", action="store_true", help="Verbose output")
     parser.add_argument(
         "--debug", action="store_true", help="Enable debug mode with detailed logging"

rp/autonomous/detection.py

@@ -28,13 +28,23 @@ def is_task_complete(response, iteration):
         "cannot complete",
         "impossible to",
     ]
+    simple_response_keywords = [
+        "hello",
+        "hi there",
+        "how can i help",
+        "how can i assist",
+        "what can i do for you",
+    ]
     has_tool_calls = "tool_calls" in message and message["tool_calls"]
     mentions_completion = any((keyword in content for keyword in completion_keywords))
     mentions_error = any((keyword in content for keyword in error_keywords))
+    is_simple_response = any((keyword in content for keyword in simple_response_keywords))
    if mentions_error:
         return True
     if mentions_completion and (not has_tool_calls):
         return True
+    if is_simple_response and iteration >= 1:
+        return True
     if iteration > 5 and (not has_tool_calls):
         return True
     if iteration >= MAX_AUTONOMOUS_ITERATIONS:
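
The new `simple_response_keywords` branch ends the autonomous loop early for plain conversational replies instead of iterating until the cap. A reduced, self-contained sketch of just that branch (the helper `looks_like_simple_response` is hypothetical; the real check lives inside `is_task_complete`):

```python
simple_response_keywords = [
    "hello",
    "hi there",
    "how can i help",
    "how can i assist",
    "what can i do for you",
]

def looks_like_simple_response(content: str, iteration: int) -> bool:
    # Mirrors the added branch: a greeting-style reply with at least one
    # completed iteration is treated as "task complete".
    content = content.lower()
    return any(k in content for k in simple_response_keywords) and iteration >= 1

assert looks_like_simple_response("Hello! How can I help you today?", 1)
assert not looks_like_simple_response("Refactoring the module now...", 1)
```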

View File

@@ -1,3 +1,4 @@
+import base64
 import json
 import logging
 import time
@@ -12,6 +13,17 @@ from rp.ui.progress import ProgressIndicator

 logger = logging.getLogger("rp")

+def sanitize_for_json(obj):
+    if isinstance(obj, bytes):
+        return base64.b64encode(obj).decode("utf-8")
+    elif isinstance(obj, dict):
+        return {k: sanitize_for_json(v) for k, v in obj.items()}
+    elif isinstance(obj, (list, tuple)):
+        return [sanitize_for_json(item) for item in obj]
+    else:
+        return obj
+
 def run_autonomous_mode(assistant, task):
     assistant.autonomous_mode = True
     assistant.autonomous_iterations = 0
@@ -87,8 +99,9 @@ def process_response_autonomous(assistant, response):
             status = "success" if result.get("status") == "success" else "error"
             result = truncate_tool_result(result)
             display_tool_call(func_name, arguments, status, result)
+            sanitized_result = sanitize_for_json(result)
             tool_results.append(
-                {"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(result)}
+                {"tool_call_id": tool_call["id"], "role": "tool", "content": json.dumps(sanitized_result)}
             )
     for result in tool_results:
         assistant.messages.append(result)
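
The point of the sanitizer: `json.dumps` raises `TypeError` on raw `bytes`, which tool results (e.g., binary downloads) can now contain. A quick round-trip check, with the function copied from the hunk above:

```python
import base64
import json

def sanitize_for_json(obj):
    # As added above: bytes become base64 strings, containers recurse.
    if isinstance(obj, bytes):
        return base64.b64encode(obj).decode("utf-8")
    elif isinstance(obj, dict):
        return {k: sanitize_for_json(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [sanitize_for_json(item) for item in obj]
    else:
        return obj

result = {"status": "success", "content": b"\x89PNG\r\n"}
sanitized = sanitize_for_json(result)
json.dumps(sanitized)  # would raise TypeError on the unsanitized dict
assert base64.b64decode(sanitized["content"]) == b"\x89PNG\r\n"
```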

rp/commands/handlers.py

@@ -38,14 +38,11 @@ def handle_command(assistant, command):
         process_message(assistant, prompt_text)
     elif cmd == "/auto":
-        if len(command_parts) < 2:
-            print(f"{Colors.RED}Usage: /auto [task description]{Colors.RESET}")
-            print(
-                f"{Colors.GRAY}Example: /auto Create a Python web scraper for news sites{Colors.RESET}"
-            )
-            return True
-        task = command_parts[1]
-        run_autonomous_mode(assistant, task)
+        print(f"{Colors.YELLOW}Note: Autonomous mode is now the default behavior.{Colors.RESET}")
+        print(f"{Colors.GRAY}Just type your message directly without /auto{Colors.RESET}")
+        if len(command_parts) >= 2:
+            task = command_parts[1]
+            run_autonomous_mode(assistant, task)
         return True
     if cmd in ["exit", "quit", "q"]:
         return False

rp/config.py

@@ -115,7 +115,7 @@ ADVANCED_CONTEXT_ENABLED = True
 CONTEXT_RELEVANCE_THRESHOLD = 0.3
 ADAPTIVE_CONTEXT_MIN = 10
 ADAPTIVE_CONTEXT_MAX = 50
-BACKGROUND_MONITOR_ENABLED = True
+BACKGROUND_MONITOR_ENABLED = False
 BACKGROUND_MONITOR_INTERVAL = 5.0
 AUTONOMOUS_INTERACTION_INTERVAL = 10.0
 MULTIPLEXER_BUFFER_SIZE = 1000

rp/core/assistant.py

@@ -125,15 +125,24 @@ class Assistant:
         except Exception as e:
             logger.warning(f"Could not initialize enhanced features: {e}")
             self.enhanced = None
-        try:
-            start_global_monitor()
-            start_global_autonomous(llm_callback=self._handle_background_updates)
-            self.background_monitoring = True
-            if self.debug:
-                logger.debug("Background monitoring initialized")
-        except Exception as e:
-            logger.warning(f"Could not initialize background monitoring: {e}")
-            self.background_monitoring = False
+        from rp.config import BACKGROUND_MONITOR_ENABLED
+
+        bg_enabled = os.environ.get("BACKGROUND_MONITOR", str(BACKGROUND_MONITOR_ENABLED)).lower() in ("1", "true", "yes")
+
+        if bg_enabled:
+            try:
+                start_global_monitor()
+                start_global_autonomous(llm_callback=self._handle_background_updates)
+                self.background_monitoring = True
+                if self.debug:
+                    logger.debug("Background monitoring initialized")
+            except Exception as e:
+                logger.warning(f"Could not initialize background monitoring: {e}")
+                self.background_monitoring = False
+        else:
+            self.background_monitoring = False
+            if self.debug:
+                logger.debug("Background monitoring disabled")

     def init_database(self):
         try:
@@ -419,16 +428,10 @@ class Assistant:
                     break
                 # If cmd_result is True, the command was handled (e.g., /auto),
                 # and the blocking operation will complete before the next prompt.
-                # If cmd_result is None, it's not a special command, process with LLM.
+                # If cmd_result is None, it's not a special command, process with autonomous mode.
                 elif cmd_result is None:
-                    # Use enhanced processing if available, otherwise fall back to basic processing
-                    if hasattr(self, "enhanced") and self.enhanced:
-                        result = self.enhanced.process_with_enhanced_context(user_input)
-                        if result != self.last_result:
-                            print(result)
-                            self.last_result = result
-                    else:
-                        process_message(self, user_input)
+                    from rp.autonomous import run_autonomous_mode
+                    run_autonomous_mode(self, user_input)
             except EOFError:
                 break
             except KeyboardInterrupt:
@@ -442,7 +445,8 @@ class Assistant:
             message = self.args.message
         else:
             message = sys.stdin.read()
-        process_message(self, message)
+        from rp.autonomous import run_autonomous_mode
+        run_autonomous_mode(self, message)

     def run_autonomous(self):

rp/core/autonomous_interactions.py

@@ -133,6 +133,7 @@ class AutonomousInteractions:

 _global_autonomous = None
+_autonomous_lock = threading.Lock()

 def get_global_autonomous():
@@ -144,15 +145,19 @@ def get_global_autonomous():
 def start_global_autonomous(llm_callback=None):
     """Start global autonomous interactions."""
     global _global_autonomous
-    if _global_autonomous is None:
-        _global_autonomous = AutonomousInteractions()
-        _global_autonomous.start(llm_callback)
+    with _autonomous_lock:
+        if _global_autonomous is None:
+            _global_autonomous = AutonomousInteractions()
+            _global_autonomous.start(llm_callback)
+        elif not _global_autonomous.active:
+            _global_autonomous.start(llm_callback)
     return _global_autonomous

 def stop_global_autonomous():
     """Stop global autonomous interactions."""
     global _global_autonomous
-    if _global_autonomous:
-        _global_autonomous.stop()
-        _global_autonomous = None
+    with _autonomous_lock:
+        if _global_autonomous:
+            _global_autonomous.stop()
+            _global_autonomous = None
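
The lock closes a race where two threads could each see `_global_autonomous is None` and start two background services; the same pattern is applied to the background monitor in the next file. A self-contained sketch of the guarded-singleton idiom (names are illustrative):

```python
import threading

class Service:
    def __init__(self):
        self.active = False

    def start(self):
        self.active = True

_instance = None
_lock = threading.Lock()

def start_service():
    # Double-start safe: the lock serializes the None check and creation,
    # and an already-active instance is never replaced or restarted.
    global _instance
    with _lock:
        if _instance is None:
            _instance = Service()
            _instance.start()
        elif not _instance.active:
            _instance.start()
    return _instance

threads = [threading.Thread(target=start_service) for _ in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert start_service() is start_service()  # exactly one shared instance
```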

rp/core/background_monitor.py

@@ -4,6 +4,8 @@ import time

 from rp.multiplexer import get_all_multiplexer_states, get_multiplexer

+_monitor_lock = threading.Lock()
+

 class BackgroundMonitor:
@@ -161,19 +163,25 @@ _global_monitor = None

 def get_global_monitor():
     """Get the global background monitor instance."""
     global _global_monitor
-    if _global_monitor is None:
-        _global_monitor = BackgroundMonitor()
     return _global_monitor

 def start_global_monitor():
     """Start the global background monitor."""
-    monitor = get_global_monitor()
-    monitor.start()
+    global _global_monitor
+    with _monitor_lock:
+        if _global_monitor is None:
+            _global_monitor = BackgroundMonitor()
+            _global_monitor.start()
+        elif not _global_monitor.active:
+            _global_monitor.start()
+    return _global_monitor

 def stop_global_monitor():
     """Stop the global background monitor."""
     global _global_monitor
-    if _global_monitor:
-        _global_monitor.stop()
+    with _monitor_lock:
+        if _global_monitor:
+            _global_monitor.stop()
+            _global_monitor = None

rp/core/context.py

@@ -86,7 +86,7 @@ def init_system_message(args):
         "Be a shell ninja using native OS tools.",
         "Prefer standard Unix utilities over complex scripts.",
         "Use run_command_interactive for commands requiring user input (vim, nano, etc.).",
-        "Use the knowledge base to answer questions. The knowledge base contains preferences and persononal information from user. Also store here that such information. Always synchronize with the knowledge base.",
+        "Use the knowledge base to answer questions and store important user preferences or information when relevant. Avoid storing simple greetings or casual conversation.",
     ]
     max_context_size = 10000
@@ -115,50 +115,6 @@ def init_system_message(args):
     if len(system_message) > max_context_size * 3:
         system_message = system_message[: max_context_size * 3] + "\n... [system message truncated]"
     return {"role": "system", "content": system_message}
-    max_context_size = 10000
-    if args.include_env:
-        env_context = "Environment Variables:\n"
-        for key, value in os.environ.items():
-            if not key.startswith("_"):
-                env_context += f"{key}={value}\n"
-        if len(env_context) > max_context_size:
-            env_context = env_context[:max_context_size] + "\n... [truncated]"
-        context_parts.append(env_context)
-    for context_file in [CONTEXT_FILE, GLOBAL_CONTEXT_FILE]:
-        if os.path.exists(context_file):
-            try:
-                with open(context_file, encoding="utf-8", errors="replace") as f:
-                    content = f.read()
-                if len(content) > max_context_size:
-                    content = content[:max_context_size] + "\n... [truncated]"
-                context_parts.append(f"Context from {context_file}:\n{content}")
-            except Exception as e:
-                logging.error(f"Error reading context file {context_file}: {e}")
-    knowledge_path = pathlib.Path(KNOWLEDGE_PATH)
-    if knowledge_path.exists() and knowledge_path.is_dir():
-        for knowledge_file in knowledge_path.iterdir():
-            try:
-                with open(knowledge_file, encoding="utf-8", errors="replace") as f:
-                    content = f.read()
-                if len(content) > max_context_size:
-                    content = content[:max_context_size] + "\n... [truncated]"
-                context_parts.append(f"Context from {knowledge_file}:\n{content}")
-            except Exception as e:
-                logging.error(f"Error reading context file {knowledge_file}: {e}")
-    if args.context:
-        for ctx_file in args.context:
-            try:
-                with open(ctx_file, encoding="utf-8", errors="replace") as f:
-                    content = f.read()
-                if len(content) > max_context_size:
-                    content = content[:max_context_size] + "\n... [truncated]"
-                context_parts.append(f"Context from {ctx_file}:\n{content}")
-            except Exception as e:
-                logging.error(f"Error reading context file {ctx_file}: {e}")
-    system_message = "\n\n".join(context_parts)
-    if len(system_message) > max_context_size * 3:
-        system_message = system_message[: max_context_size * 3] + "\n... [system message truncated]"
-    return {"role": "system", "content": system_message}

 def should_compress_context(messages):
def should_compress_context(messages): def should_compress_context(messages):

rp/tools/process_handlers.py

@@ -226,21 +226,6 @@ def get_handler_for_process(process_type, multiplexer):
     return handler_class(multiplexer)

-def detect_process_type(command):
-    """Detect process type from command."""
-    command_str = " ".join(command) if isinstance(command, list) else command
-    command_lower = command_str.lower()
-    if "apt" in command_lower or "apt-get" in command_lower:
-        return "apt"
-    elif "vim" in command_lower or "vi " in command_lower:
-        return "vim"
-    elif "ssh" in command_lower:
-        return "ssh"
-    else:
-        return "generic"
-        return "ssh"

 def detect_process_type(command):
     """Detect process type from command."""
     command_str = " ".join(command) if isinstance(command, list) else command

View File

@@ -1,3 +1,4 @@
+import base64
 import imghdr
 import random
 import requests
@@ -68,7 +69,21 @@ def http_fetch(url: str, headers: Optional[Dict[str, str]] = None) -> Dict[str,
             return {"status": "success", "content": content[:10000]}
         else:
             content = response.content
-            return {"status": "success", "content": content}
+            content_length = len(content)
+            if content_length > 10000:
+                return {
+                    "status": "success",
+                    "content_type": content_type,
+                    "size_bytes": content_length,
+                    "message": f"Binary content ({content_length} bytes). Use download_to_file to save it.",
+                }
+            else:
+                return {
+                    "status": "success",
+                    "content_type": content_type,
+                    "size_bytes": content_length,
+                    "content_base64": base64.b64encode(content).decode("utf-8"),
+                }
     except requests.exceptions.RequestException as e:
         return {"status": "error", "error": str(e)}