Compare commits
4 commits: 1f0444d8c1 ... 164510896e

| SHA1 | Author | Date |
|---|---|---|
| 164510896e | | |
| 9438496b72 | | |
| a289a8e402 | | |
| 8ef3742f44 | | |

CHANGELOG.md: 16 lines changed

@@ -23,6 +23,22 @@
## Version 1.28.0 - 2025-11-08

This release introduces new features like advanced input for the assistant and collaboration agents, along with improved logging and error handling. Several internal components have been updated for better performance, stability, and maintainability.

**Changes:** 54 files, 638 lines

**Languages:** Other (10 lines), Python (626 lines), TOML (2 lines)

## Version 1.27.0 - 2025-11-08

The project has been renamed to "Reetor's Guide to Modern Python" and now includes a comprehensive tutorial. The README has been significantly updated with installation instructions, a quick start guide, and information on modern Python features and aiohttp.

**Changes:** 3 files, 2728 lines

**Languages:** Markdown (2726 lines), TOML (2 lines)

## Version 1.26.0 - 2025-11-08
Makefile: 10 lines changed

@@ -27,16 +27,16 @@ test:
	pytest tests/ -v --tb=long --full-trace -l --maxfail=10

test-cov:
	pytest --cov=pr --cov-report=html --cov-report=term-missing
	pytest --cov=rp --cov-report=html --cov-report=term-missing
	@echo "Coverage report generated in htmlcov/index.html"

lint:
	flake8 pr tests --max-line-length=100 --ignore=E203,W503
	mypy pr --ignore-missing-imports
	flake8 rp tests --max-line-length=100 --ignore=E203,W503
	mypy rp --ignore-missing-imports

format:
	black pr tests
	isort pr tests --profile black
	black rp tests
	isort rp tests --profile black

clean:
	rm -rf build/
@@ -27,6 +27,7 @@ Version Requirements:
11. [Git Protocol Integration](#git-protocol-integration)
12. [Repository Manager Implementation](#repository-manager-implementation)
13. [Best Practices and Patterns](#best-practices-and-patterns)
14. [Automatic Memory and Context Search](#automatic-memory-and-context-search)

---
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "rp"
version = "1.26.0"
version = "1.28.0"
description = "R python edition. The ultimate autonomous AI CLI."
readme = "README.md"
requires-python = ">=3.10"
@@ -7,4 +7,3 @@ websockets==13.0.1
pytest==8.3.2
bcrypt==4.1.3
python-slugify==8.0.4
requests>=2.31.0
@@ -1,5 +1,6 @@
import argparse
import sys

from rp import __version__
from rp.core import Assistant

@@ -2,6 +2,7 @@ import time
import uuid
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional

from ..memory.knowledge_store import KnowledgeStore
from .agent_communication import AgentCommunicationBus, AgentMessage, MessageType
from .agent_roles import AgentRole, get_agent_role
@@ -1,6 +1,7 @@
import json
import logging
import time

from rp.autonomous.detection import is_task_complete
from rp.core.api import call_api
from rp.core.context import truncate_tool_result

@@ -13,7 +14,7 @@ logger = logging.getLogger("rp")
def run_autonomous_mode(assistant, task):
    assistant.autonomous_mode = True
    assistant.autonomous_iterations = 0
    logger.debug(f"=== AUTONOMOUS MODE START ===")
    logger.debug("=== AUTONOMOUS MODE START ===")
    logger.debug(f"Task: {task}")
    from rp.core.knowledge_context import inject_knowledge_context

@@ -102,7 +103,9 @@ def process_response_autonomous(assistant, response):
        input_tokens = usage.get("prompt_tokens", 0)
        output_tokens = usage.get("completion_tokens", 0)
        assistant.usage_tracker.track_request(assistant.model, input_tokens, output_tokens)
        cost = assistant.usage_tracker._calculate_cost(assistant.model, input_tokens, output_tokens)
        cost = assistant.usage_tracker._calculate_cost(
            assistant.model, input_tokens, output_tokens
        )
        total_cost = assistant.usage_tracker.session_usage["estimated_cost"]
        print(f"{Colors.YELLOW}đź’° Cost: ${cost:.4f} | Total: ${total_cost:.4f}{Colors.RESET}")
        return process_response_autonomous(assistant, follow_up)

@@ -123,6 +126,9 @@ def execute_single_tool(assistant, func_name, arguments):
        db_get,
        db_query,
        db_set,
        editor_insert_text,
        editor_replace_text,
        editor_search,
        getpwd,
        http_fetch,
        index_source_directory,
@@ -1,12 +1,13 @@
import json
import time
from rp.commands.multiplexer_commands import MULTIPLEXER_COMMANDS

from rp.autonomous import run_autonomous_mode
from rp.commands.multiplexer_commands import MULTIPLEXER_COMMANDS
from rp.core.api import list_models
from rp.editor import RPEditor
from rp.tools import read_file
from rp.tools.base import get_tools_definition
from rp.ui import Colors
from rp.editor import RPEditor


def handle_command(assistant, command):

@@ -263,7 +264,7 @@ def collaborate_agents_command(assistant, task):
    roles = ["coding", "research", "planning"]
    result = assistant.enhanced.collaborate_agents(task, roles)
    print(f"\n{Colors.GREEN}Collaboration completed{Colors.RESET}")
    print(f"\nOrchestrator response:")
    print("\nOrchestrator response:")
    if "orchestrator" in result and "response" in result["orchestrator"]:
        print(result["orchestrator"]["response"])
    if result.get("agents"):

@@ -295,6 +296,7 @@ def store_knowledge(assistant, content):
        return
    import time
    import uuid

    from rp.memory import KnowledgeEntry

    categories = assistant.enhanced.fact_extractor.categorize_content(content)
@@ -7,7 +7,9 @@ from rp.core.http_client import http_client
logger = logging.getLogger("rp")


def call_api(messages, model, api_url, api_key, use_tools, tools_definition, verbose=False, db_conn=None):
def call_api(
    messages, model, api_url, api_key, use_tools, tools_definition, verbose=False, db_conn=None
):
    try:
        messages = auto_slim_messages(messages, verbose=verbose)
        logger.debug(f"=== API CALL START ===")

@@ -38,11 +40,14 @@ def call_api(messages, model, api_url, api_key, use_tools, tools_definition, ver
        if db_conn:
            from rp.tools.database import log_api_request

            log_result = log_api_request(model, api_url, request_json, db_conn)
            if log_result.get("status") != "success":
                logger.warning(f"Failed to log API request: {log_result.get('error')}")
        logger.debug("Sending HTTP request...")
        response = http_client.post(api_url, headers=headers, json_data=request_json, db_conn=db_conn)
        response = http_client.post(
            api_url, headers=headers, json_data=request_json, db_conn=db_conn
        )
        if response.get("error"):
            if "status" in response:
                logger.error(f"API HTTP Error: {response['status']} - {response.get('text', '')}")

@@ -92,7 +97,8 @@ def list_models(model_list_url, api_key):
        response = http_client.get(model_list_url, headers=headers, db_conn=None)
        if response.get("error"):
            return {"error": response.get("text", "HTTP error")}
        data = json.loads(response["text"])
        response_data = response["text"]
        data = json.loads(response_data)
        return data.get("data", [])
    except Exception as e:
        return {"error": str(e)}
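For orientation, a minimal usage sketch of the reformatted call_api signature; the model name, endpoint, and key below are illustrative placeholders, not values taken from this diff:

```python
# Hypothetical call of rp.core.api.call_api; endpoint, model, and key are
# placeholders. Passing a sqlite3 connection as db_conn enables the request
# logging shown in the hunk above.
from rp.core.api import call_api
from rp.tools.base import get_tools_definition

messages = [{"role": "user", "content": "Summarize this repository"}]
response = call_api(
    messages,
    "example-model",                                # placeholder model name
    "https://api.example.com/v1/chat/completions",  # placeholder API URL
    "sk-placeholder",                               # placeholder API key
    True,                                           # use_tools
    get_tools_definition(),
    verbose=False,
    db_conn=None,
)
if response.get("error"):
    print("API error:", response)
```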
@@ -8,8 +8,8 @@ import sqlite3
import sys
import traceback
from concurrent.futures import ThreadPoolExecutor

from rp.commands import handle_command
from rp.input_handler import get_advanced_input
from rp.config import (
    DB_PATH,
    DEFAULT_API_URL,

@@ -23,6 +23,7 @@ from rp.core.autonomous_interactions import start_global_autonomous, stop_global
from rp.core.background_monitor import get_global_monitor, start_global_monitor, stop_global_monitor
from rp.core.context import init_system_message, truncate_tool_result
from rp.core.usage_tracker import UsageTracker
from rp.input_handler import get_advanced_input
from rp.tools import get_tools_definition
from rp.tools.agents import (
    collaborate_agents,

@@ -32,7 +33,7 @@ from rp.tools.agents import (
    remove_agent,
)
from rp.tools.command import kill_process, run_command, tail_process
from rp.tools.database import db_get, db_query, db_set, log_api_request
from rp.tools.database import db_get, db_query, db_set
from rp.tools.filesystem import (
    chdir,
    clear_edit_tracker,

@@ -107,6 +108,9 @@ class Assistant:
        self.background_tasks = set()
        self.last_result = None
        self.init_database()
        from rp.memory import KnowledgeStore, FactExtractor

        self.knowledge_store = KnowledgeStore(DB_PATH)
        self.fact_extractor = FactExtractor()
        self.messages.append(init_system_message(args))
        try:
            from rp.core.enhanced_assistant import EnhancedAssistant

@@ -398,7 +402,7 @@ class Assistant:
                except:
                    pass
                prompt += f">{Colors.RESET} "
                user_input = get_advanced_input(prompt)
                user_input = get_advanced_input(prompt) or ""
                user_input = user_input.strip()
                if not user_input:
                    continue

@@ -441,6 +445,7 @@ class Assistant:
            print("No task provided. Exiting.")
            return
        from rp.autonomous import run_autonomous_mode

        run_autonomous_mode(self, task)

    def cleanup(self):

@@ -480,6 +485,26 @@ def process_message(assistant, message):
    from rp.core.knowledge_context import inject_knowledge_context

    inject_knowledge_context(assistant, message)
    # Save the user message as a fact
    import time
    import uuid
    from rp.memory import KnowledgeEntry

    categories = assistant.fact_extractor.categorize_content(message)
    entry_id = str(uuid.uuid4())[:16]
    entry = KnowledgeEntry(
        entry_id=entry_id,
        category=categories[0] if categories else "user_message",
        content=message,
        metadata={
            "type": "user_message",
            "confidence": 1.0,
            "source": "user_input",
        },
        created_at=time.time(),
        updated_at=time.time(),
    )
    assistant.knowledge_store.add_entry(entry)
    assistant.messages.append({"role": "user", "content": message})
    logger.debug(f"Processing user message: {message[:100]}...")
    logger.debug(f"Current message count: {len(assistant.messages)}")
@@ -1,5 +1,6 @@
import threading
import time

from rp.tools.interactive_control import (
    get_session_status,
    list_active_sessions,

@@ -1,6 +1,7 @@
import queue
import threading
import time

from rp.multiplexer import get_all_multiplexer_states, get_multiplexer


@@ -1,6 +1,7 @@
import configparser
import os
from typing import Any, Dict

from rp.core.logging import get_logger

logger = get_logger("config")
@@ -2,6 +2,7 @@ import json
import logging
import os
import pathlib

from rp.config import (
    CHARS_PER_TOKEN,
    CONTENT_TRIM_LENGTH,

@@ -9,10 +10,10 @@ from rp.config import (
    CONTEXT_FILE,
    EMERGENCY_MESSAGES_TO_KEEP,
    GLOBAL_CONTEXT_FILE,
    KNOWLEDGE_PATH,
    MAX_TOKENS_LIMIT,
    MAX_TOOL_RESULT_LENGTH,
    RECENT_MESSAGES_TO_KEEP,
    KNOWLEDGE_PATH,
)
from rp.ui import Colors

@@ -60,7 +61,7 @@ def init_system_message(args):
    for context_file in [CONTEXT_FILE, GLOBAL_CONTEXT_FILE]:
        if os.path.exists(context_file):
            try:
                with open(context_file) as f:
                with open(context_file, encoding="utf-8", errors="replace") as f:
                    content = f.read()
                    if len(content) > max_context_size:
                        content = content[:max_context_size] + "\n... [truncated]"

@@ -71,7 +72,7 @@ def init_system_message(args):
    if knowledge_path.exists() and knowledge_path.is_dir():
        for knowledge_file in knowledge_path.iterdir():
            try:
                with open(knowledge_file) as f:
                with open(knowledge_file, encoding="utf-8", errors="replace") as f:
                    content = f.read()
                    if len(content) > max_context_size:
                        content = content[:max_context_size] + "\n... [truncated]"

@@ -81,7 +82,7 @@ def init_system_message(args):
    if args.context:
        for ctx_file in args.context:
            try:
                with open(ctx_file) as f:
                with open(ctx_file, encoding="utf-8", errors="replace") as f:
                    content = f.read()
                    if len(content) > max_context_size:
                        content = content[:max_context_size] + "\n... [truncated]"
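A short demonstration of the open(..., encoding="utf-8", errors="replace") pattern introduced above: undecodable bytes are substituted instead of raising, so context files with mixed encodings no longer abort loading. The temp file below is illustrative only.

```python
# errors="replace" maps invalid UTF-8 bytes to U+FFFD instead of raising
# UnicodeDecodeError; /tmp/sample.txt is a throwaway file for illustration.
data = b"caf\xe9"  # latin-1 encoded "cafe" with accent, invalid as UTF-8
with open("/tmp/sample.txt", "wb") as f:
    f.write(data)

with open("/tmp/sample.txt", encoding="utf-8", errors="replace") as f:
    print(f.read())  # prints "caf\ufffd" rather than crashing
```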
@@ -1,7 +1,9 @@
import json
import logging
import time
import uuid
from typing import Any, Dict, List, Optional

from rp.agents import AgentManager
from rp.cache import APICache, ToolCache
from rp.config import (

@@ -16,7 +18,7 @@ from rp.config import (
)
from rp.core.advanced_context import AdvancedContextManager
from rp.core.api import call_api
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore
from rp.memory import ConversationMemory, FactExtractor, KnowledgeStore, KnowledgeEntry
from rp.tools.base import get_tools_definition
from rp.workflows import WorkflowEngine, WorkflowStorage

@@ -131,9 +133,6 @@ class EnhancedAssistant:
        facts = self.fact_extractor.extract_facts(user_message)
        for fact in facts[:5]:
            entry_id = str(uuid.uuid4())[:16]
            import time
            from rp.memory import KnowledgeEntry

            categories = self.fact_extractor.categorize_content(fact["text"])
            entry = KnowledgeEntry(
                entry_id=entry_id,

@@ -148,6 +147,23 @@ class EnhancedAssistant:
                updated_at=time.time(),
            )
            self.knowledge_store.add_entry(entry)

        # Save the entire user message as a fact
        entry_id = str(uuid.uuid4())[:16]
        categories = self.fact_extractor.categorize_content(user_message)
        entry = KnowledgeEntry(
            entry_id=entry_id,
            category=categories[0] if categories else "user_message",
            content=user_message,
            metadata={
                "type": "user_message",
                "confidence": 1.0,
                "source": "user_input",
            },
            created_at=time.time(),
            updated_at=time.time(),
        )
        self.knowledge_store.add_entry(entry)
        if self.context_manager and ADVANCED_CONTEXT_ENABLED:
            enhanced_messages, context_info = self.context_manager.create_enhanced_context(
                self.base.messages, user_message, include_knowledge=True
@@ -1,16 +1,59 @@
import json
import logging
import time
import requests
import random
from typing import Dict, Any, Optional

import requests

logger = logging.getLogger("rp")

# Realistic User-Agents and headers
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (iPad; CPU OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Android 11; Mobile; rv:68.0) Gecko/68.0 Firefox/88.0",
]


def get_realistic_headers(additional_headers=None):
    """Generate realistic HTTP headers with random User-Agent and variations."""
    accept_languages = [
        "en-US,en;q=0.5",
        "en-US,en;q=0.9",
        "en-GB,en;q=0.5",
        "en-US,en;q=0.5;fr;q=0.3",
    ]
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": random.choice(accept_languages),
        "Accept-Encoding": "gzip, deflate, br",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
    }
    # Sometimes add Cache-Control
    if random.random() < 0.3:
        headers["Cache-Control"] = "no-cache"
    # Sometimes add Referer
    if random.random() < 0.2:
        headers["Referer"] = "https://www.google.com/"
    if additional_headers:
        headers.update(additional_headers)
    return headers


class SyncHTTPClient:

    def __init__(self):
        self.session = requests.Session()
        self.default_headers = {}

    def request(
        self,
@@ -22,58 +65,87 @@ class SyncHTTPClient:
        timeout: float = 30.0,
        db_conn=None,
    ) -> Dict[str, Any]:
        """Make a sync HTTP request using requests with retry logic."""
        attempt = 0
        start_time = time.time()
        while True:
            attempt += 1
            if headers is None:
                headers = get_realistic_headers()
            else:
                headers = get_realistic_headers(headers)

            request_body_for_log = ""
            if json_data is not None:
                request_body_for_log = json.dumps(json_data)
            elif data is not None:
                request_body_for_log = data.decode("utf-8") if isinstance(data, bytes) else str(data)

            try:
                response = self.session.request(
                response = requests.request(
                    method,
                    url,
                    headers=headers,
                    data=data,
                    json=json_data,
                    timeout=timeout,
                    allow_redirects=True,
                )
                response.raise_for_status()  # Raise an exception for bad status codes
                # Prepare request body for logging
                if json_data is not None:
                    request_body = json.dumps(json_data)
                elif data is not None:
                    request_body = data.decode('utf-8') if isinstance(data, bytes) else str(data)
                else:
                    request_body = ""
                # Log the request
                response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

                response_data = response.text
                response_headers = dict(response.headers)

                if db_conn:
                    from rp.tools.database import log_http_request

                    log_result = log_http_request(method, url, request_body, response.text, response.status_code, db_conn)
                    log_result = log_http_request(
                        method,
                        url,
                        request_body_for_log,
                        response_data,
                        response.status_code,
                        db_conn,
                    )
                    if log_result.get("status") != "success":
                        logger.warning(f"Failed to log HTTP request: {log_result.get('error')}")

                return {
                    "status": response.status_code,
                    "headers": dict(response.headers),
                    "text": response.text,
                    "headers": response_headers,
                    "text": response_data,
                    "json": response.json,
                }
            except requests.exceptions.Timeout:
                elapsed = time.time() - start_time
                elapsed_minutes = int(elapsed // 60)
                elapsed_seconds = elapsed % 60
                duration_str = (
                    f"{elapsed_minutes}m {elapsed_seconds:.1f}s"
                    if elapsed_minutes > 0
                    else f"{elapsed_seconds:.1f}s"
            except requests.exceptions.HTTPError as e:
                response_data = e.response.text if e.response else ""
                response_headers = dict(e.response.headers) if e.response else {}
                status_code = e.response.status_code if e.response else 0

                if db_conn:
                    from rp.tools.database import log_http_request

                    log_result = log_http_request(
                        method,
                        url,
                        request_body_for_log,
                        response_data,
                        status_code,
                        db_conn,
                    )
                logger.warning(
                    f"Request timed out (attempt {attempt}, duration: {duration_str}). Retrying in {attempt} second(s)..."
                )
                time.sleep(attempt)
                    if log_result.get("status") != "success":
                        logger.warning(f"Failed to log HTTP request: {log_result.get('error')}")

                return {
                    "status": status_code,
                    "headers": response_headers,
                    "text": response_data,
                    "json": lambda: e.response.json() if e.response and response_data else None,
                }
            except requests.exceptions.RequestException as e:
                return {"error": True, "exception": str(e)}
                logger.error(f"Request failed: {e}")
                return {"error": True, "exception": str(e), "status": 0, "text": ""}

    def get(
        self, url: str, headers: Optional[Dict[str, str]] = None, timeout: float = 30.0, db_conn=None
        self,
        url: str,
        headers: Optional[Dict[str, str]] = None,
        timeout: float = 30.0,
        db_conn=None,
    ) -> Dict[str, Any]:
        return self.request("GET", url, headers=headers, timeout=timeout, db_conn=db_conn)

@@ -87,11 +159,17 @@ class SyncHTTPClient:
        db_conn=None,
    ) -> Dict[str, Any]:
        return self.request(
            "POST", url, headers=headers, data=data, json_data=json_data, timeout=timeout, db_conn=db_conn
            "POST",
            url,
            headers=headers,
            data=data,
            json_data=json_data,
            timeout=timeout,
            db_conn=db_conn,
        )

    def set_default_headers(self, headers: Dict[str, str]):
        self.session.headers.update(headers)
        self.default_headers.update(headers)


http_client = SyncHTTPClient()
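A hedged usage sketch of the module-level client defined above; the endpoints and token are placeholders, not part of this diff:

```python
# Usage sketch for SyncHTTPClient; httpbin.org stands in for a real endpoint.
from rp.core.http_client import http_client, get_realistic_headers

resp = http_client.get("https://httpbin.org/get", timeout=10.0)
if not resp.get("error"):
    print(resp["status"], resp["text"][:80])

# Caller-supplied headers are merged over the randomized defaults.
headers = get_realistic_headers({"Authorization": "Bearer sk-placeholder"})
resp = http_client.post(
    "https://httpbin.org/post",
    headers=headers,
    json_data={"hello": "world"},
)
```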
@@ -1,6 +1,7 @@
import logging
import os
from logging.handlers import RotatingFileHandler

from rp.config import LOG_FILE


@@ -2,6 +2,7 @@ import json
import os
from datetime import datetime
from typing import Dict, List, Optional

from rp.core.logging import get_logger

logger = get_logger("session")

@@ -38,7 +39,7 @@ class SessionManager:
        if not os.path.exists(session_file):
            logger.warning(f"Session not found: {name}")
            return None
        with open(session_file) as f:
        with open(session_file, encoding="utf-8") as f:
            session_data = json.load(f)
        logger.info(f"Session loaded: {name}")
        return session_data

@@ -53,7 +54,7 @@ class SessionManager:
            if filename.endswith(".json"):
                filepath = os.path.join(SESSIONS_DIR, filename)
                try:
                    with open(filepath) as f:
                    with open(filepath, encoding="utf-8") as f:
                        data = json.load(f)
                    sessions.append(
                        {

@@ -2,6 +2,7 @@ import json
import os
from datetime import datetime
from typing import Dict, Optional

from rp.core.logging import get_logger

logger = get_logger("usage")

@@ -68,7 +69,7 @@ class UsageTracker:
        try:
            history = []
            if os.path.exists(USAGE_DB_FILE):
                with open(USAGE_DB_FILE) as f:
                with open(USAGE_DB_FILE, encoding="utf-8") as f:
                    history = json.load(f)
            history.append(
                {

@@ -113,7 +114,7 @@ class UsageTracker:
        if not os.path.exists(USAGE_DB_FILE):
            return {"total_requests": 0, "total_tokens": 0, "total_cost": 0.0}
        try:
            with open(USAGE_DB_FILE) as f:
            with open(USAGE_DB_FILE, encoding="utf-8") as f:
                history = json.load(f)
            total_tokens = sum((entry["total_tokens"] for entry in history))
            total_cost = sum((entry["cost"] for entry in history))

@@ -1,4 +1,5 @@
import os

from rp.core.exceptions import ValidationError
@@ -118,6 +118,9 @@ class RPEditor:
                self.lines = content.splitlines() if content else [""]
            else:
                self.lines = [""]
        except UnicodeDecodeError:
            # If it's a binary file or truly unreadable as text, treat as empty
            self.lines = [""]
        except Exception:
            self.lines = [""]

@@ -350,7 +353,9 @@ class RPEditor:
                    height, _ = self.stdscr.getmaxyx()
                    page_size = height - 2
                    self.cursor_y = min(len(self.lines) - 1, self.cursor_y + page_size)
                    self.scroll_y = min(max(0, len(self.lines) - height + 1), self.scroll_y + page_size)
                    self.scroll_y = min(
                        max(0, len(self.lines) - height + 1), self.scroll_y + page_size
                    )
            self.prev_key = key
        except Exception:
            pass

@@ -35,10 +35,16 @@ class RPEditor:

    def load_file(self):
        try:
            with open(self.filename) as f:
            if self.filename:
                with open(self.filename, encoding="utf-8", errors="replace") as f:
                    self.lines = f.read().splitlines()
                if not self.lines:
                    self.lines = [""]
            else:
                self.lines = [""]
        except UnicodeDecodeError:
            # If it's a binary file or truly unreadable as text, treat as empty
            self.lines = [""]
        except:
            self.lines = [""]

@@ -6,13 +6,13 @@ It intelligently resolves local imports, hoists external dependencies to the top
and preserves the core logic, using AST for safe transformations.
"""

import os
import sys
import ast
import argparse
import ast
import logging
import os
import py_compile
from typing import Set, Dict, Optional, TextIO
import sys
from typing import Dict, Optional, Set, TextIO

logger = logging.getLogger("impLODE")

@@ -250,10 +250,10 @@ class Imploder:
        self.processed_files.clear()
        try:
            with open(output_file_path, "w", encoding="utf-8") as f_out:
                f_out.write(f"#!/usr/bin/env python3\n")
                f_out.write(f"# -*- coding: utf-8 -*-\n")
                f_out.write(f"import logging\n")
                f_out.write(f"\n# --- IMPLODED FILE: Generated by impLODE --- #\n")
                f_out.write("#!/usr/bin/env python3\n")
                f_out.write("# -*- coding: utf-8 -*-\n")
                f_out.write("import logging\n")
                f_out.write("\n# --- IMPLODED FILE: Generated by impLODE --- #\n")
                f_out.write(
                    f"# --- Original main file: {os.path.relpath(main_file_abs_path, self.root_dir)} --- #\n"
                )
@@ -99,9 +99,22 @@ class AdvancedInputHandler:
        try:
            path = Path(filename).expanduser().resolve()
            if path.exists() and path.is_file():
                mime_type, _ = mimetypes.guess_type(str(path))
                if mime_type and (
                    mime_type.startswith("text/")
                    or mime_type in ["application/json", "application/xml"]
                ):
                    with open(path, encoding="utf-8", errors="replace") as f:
                        content = f.read()
                    return f"\n--- File: {filename} ---\n{content}\n--- End of {filename} ---\n"
                elif mime_type and not mime_type.startswith(
                    "image/"
                ):  # Handle other binary files
                    with open(path, "rb") as f:
                        binary_data = base64.b64encode(f.read()).decode("utf-8")
                    return f"\n--- Binary File: {filename} ({mime_type}) ---\ndata:{mime_type};base64,{binary_data}\n--- End of {filename} ---\n"
                else:
                    return f"[File not included (unsupported type or already handled as image): {filename}]"
            else:
                return f"[File not found: {filename}]"
        except Exception as e:
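The mimetypes gate above decides between the inline-text and base64 paths; here is a minimal sketch of the same check in isolation (file names are illustrative):

```python
# Standalone version of the text-vs-binary gate used by the input handler.
import mimetypes

def is_text_like(path: str) -> bool:
    mime_type, _ = mimetypes.guess_type(path)
    return bool(
        mime_type
        and (
            mime_type.startswith("text/")
            or mime_type in ["application/json", "application/xml"]
        )
    )

print(is_text_like("notes.txt"))   # True: text/plain is inlined
print(is_text_like("photo.png"))   # False: image/png goes down the base64 path
```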
@@ -4,6 +4,7 @@ import threading
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple

from .semantic_index import SemanticIndex


@@ -3,6 +3,7 @@ import subprocess
import sys
import threading
import time

from rp.tools.process_handlers import detect_process_type, get_handler_for_process
from rp.tools.prompt_detection import get_global_detector
from rp.ui import Colors

@@ -2,6 +2,7 @@ import importlib.util
import os
import sys
from typing import Callable, Dict, List

from rp.core.logging import get_logger

logger = get_logger("plugins")
rp/rp.py: 2 lines changed

@@ -1,8 +1,8 @@
#!/usr/bin/env python3

# Trigger build
import sys
import os
import sys

# Add current directory to path to ensure imports work
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
@@ -6,7 +6,6 @@ from rp.tools.agents import (
    remove_agent,
)
from rp.tools.base import get_tools_definition
from rp.tools.vision import post_image
from rp.tools.command import kill_process, run_command, run_command_interactive, tail_process
from rp.tools.database import db_get, db_query, db_set
from rp.tools.editor import (

@@ -17,7 +16,27 @@ from rp.tools.editor import (
    open_editor,
)
from rp.tools.filesystem import (
    get_uid, read_specific_lines, replace_specific_line, insert_line_at_position, delete_specific_line, read_file, write_file, list_directory, mkdir, chdir, getpwd, index_source_directory, search_replace, get_editor, close_editor, open_editor, editor_insert_text, editor_replace_text, display_edit_summary, display_edit_timeline, clear_edit_tracker
    chdir,
    clear_edit_tracker,
    close_editor,
    delete_specific_line,
    display_edit_summary,
    display_edit_timeline,
    editor_insert_text,
    editor_replace_text,
    get_editor,
    get_uid,
    getpwd,
    index_source_directory,
    insert_line_at_position,
    list_directory,
    mkdir,
    open_editor,
    read_file,
    read_specific_lines,
    replace_specific_line,
    search_replace,
    write_file,
)
from rp.tools.lsp import get_diagnostics
from rp.tools.memory import (

@@ -32,7 +51,8 @@ from rp.tools.memory import (
)
from rp.tools.patch import apply_patch, create_diff
from rp.tools.python_exec import python_exec
from rp.tools.search import glob_files, grep
from rp.tools.web import http_fetch, web_search, web_search_news
from rp.tools.vision import post_image
from rp.tools.web import download_to_file, http_fetch, web_search, web_search_news

# Aliases for user-requested tool names
view = read_file

@@ -61,6 +81,7 @@ __all__ = [
    "db_set",
    "delete_knowledge_entry",
    "delete_specific_line",
    "download_to_file",
    "diagnostics",
    "display_edit_summary",
    "display_edit_timeline",

@@ -107,4 +128,3 @@ __all__ = [
    "write",
    "write_file",
]
@@ -1,8 +1,9 @@
import os
from typing import Any, Dict, List

from rp.agents.agent_manager import AgentManager
from rp.config import DEFAULT_API_URL, DEFAULT_MODEL
from rp.core.api import call_api
from rp.config import DEFAULT_MODEL, DEFAULT_API_URL
from rp.tools.base import get_tools_definition


@@ -1,6 +1,7 @@
import inspect
from typing import get_args, get_origin, get_type_hints

import rp.tools
from typing import get_type_hints, get_origin, get_args


def _type_to_json_schema(py_type):

@@ -76,6 +76,7 @@ def db_query(query, db_conn):
    except Exception as e:
        return {"status": "error", "error": str(e)}


def log_api_request(model, api_url, request_payload, db_conn):
    """Log an API request to the database.

@@ -101,6 +102,7 @@ def log_api_request(model, api_url, request_payload, db_conn):
    except Exception as e:
        return {"status": "error", "error": str(e)}


def log_http_request(method, url, request_body, response_body, status_code, db_conn):
    """Log an HTTP request to the database.
@@ -1,15 +1,15 @@
import sys
import os
import ast
import inspect
import time
import threading
import gc
import weakref
import linecache
import re
import inspect
import json
import linecache
import os
import re
import subprocess
import sys
import threading
import time
import weakref
from collections import defaultdict
from datetime import datetime
@@ -1,6 +1,8 @@
import os
import os.path

from rp.editor import RPEditor

from ..tools.patch import display_content_diff
from ..ui.edit_feedback import track_edit, tracker


@@ -1,8 +1,12 @@
import base64
import hashlib
import mimetypes
import os
import time
from typing import Optional, Any
from typing import Any, Optional

from rp.editor import RPEditor

from ..tools.patch import display_content_diff
from ..ui.diff_display import get_diff_stats
from ..ui.edit_feedback import track_edit, tracker
@@ -16,7 +20,9 @@ def get_uid():
    return _id


def read_specific_lines(filepath: str, start_line: int, end_line: Optional[int] = None, db_conn: Optional[Any] = None) -> dict:
def read_specific_lines(
    filepath: str, start_line: int, end_line: Optional[int] = None, db_conn: Optional[Any] = None
) -> dict:
    """
    Read specific lines or a range of lines from a file.

@@ -46,26 +52,39 @@ def read_specific_lines(filepath: str, start_line: int, end_line: Optional[int]
    """
    try:
        path = os.path.expanduser(filepath)
        with open(path, 'r') as file:
        with open(path, "r") as file:
            lines = file.readlines()
        total_lines = len(lines)
        if start_line < 1 or start_line > total_lines:
            return {"status": "error", "error": f"Start line {start_line} is out of range. File has {total_lines} lines."}
            return {
                "status": "error",
                "error": f"Start line {start_line} is out of range. File has {total_lines} lines.",
            }
        if end_line is None:
            end_line = start_line
        if end_line < start_line or end_line > total_lines:
            return {"status": "error", "error": f"End line {end_line} is out of range. File has {total_lines} lines."}
        selected_lines = lines[start_line - 1:end_line]
        content = ''.join(selected_lines)
            return {
                "status": "error",
                "error": f"End line {end_line} is out of range. File has {total_lines} lines.",
            }
        selected_lines = lines[start_line - 1 : end_line]
        content = "".join(selected_lines)
        if db_conn:
            from rp.tools.database import db_set

            db_set("read:" + path, "true", db_conn)
        return {"status": "success", "content": content}
    except Exception as e:
        return {"status": "error", "error": str(e)}
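A hypothetical call of read_specific_lines as reformatted above; the path is a placeholder. When a sqlite3 connection is passed, the tool records a "read:<path>" marker that the write-side tools later require:

```python
from rp.tools.filesystem import read_specific_lines

result = read_specific_lines("~/project/app.py", start_line=10, end_line=20, db_conn=None)
if result["status"] == "success":
    print(result["content"])
else:
    print(result["error"])  # e.g. an out-of-range start or end line
```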
def replace_specific_line(filepath: str, line_number: int, new_content: str, db_conn: Optional[Any] = None, show_diff: bool = True) -> dict:
def replace_specific_line(
    filepath: str,
    line_number: int,
    new_content: str,
    db_conn: Optional[Any] = None,
    show_diff: bool = True,
) -> dict:
    """
    Replace the content of a specific line in a file.

@@ -97,18 +116,27 @@ def replace_specific_line(filepath: str, line_number: int, new_content: str, db_
            return {"status": "error", "error": "File does not exist"}
        if db_conn:
            from rp.tools.database import db_get

            read_status = db_get("read:" + path, db_conn)
            if read_status.get("status") != "success" or read_status.get("value") != "true":
                return {"status": "error", "error": "File must be read before writing. Please read the file first."}
        with open(path, 'r') as file:
                return {
                    "status": "error",
                    "error": "File must be read before writing. Please read the file first.",
                }
        with open(path, "r") as file:
            lines = file.readlines()
        total_lines = len(lines)
        if line_number < 1 or line_number > total_lines:
            return {"status": "error", "error": f"Line number {line_number} is out of range. File has {total_lines} lines."}
        old_content = ''.join(lines)
        lines[line_number - 1] = new_content + '\n' if not new_content.endswith('\n') else new_content
        new_full_content = ''.join(lines)
        with open(path, 'w') as file:
            return {
                "status": "error",
                "error": f"Line number {line_number} is out of range. File has {total_lines} lines.",
            }
        old_content = "".join(lines)
        lines[line_number - 1] = (
            new_content + "\n" if not new_content.endswith("\n") else new_content
        )
        new_full_content = "".join(lines)
        with open(path, "w") as file:
            file.writelines(lines)
        if show_diff:
            diff_result = display_content_diff(old_content, new_full_content, filepath)

@@ -119,7 +147,13 @@ def replace_specific_line(filepath: str, line_number: int, new_content: str, db_
        return {"status": "error", "error": str(e)}


def insert_line_at_position(filepath: str, line_number: int, new_content: str, db_conn: Optional[Any] = None, show_diff: bool = True) -> dict:
def insert_line_at_position(
    filepath: str,
    line_number: int,
    new_content: str,
    db_conn: Optional[Any] = None,
    show_diff: bool = True,
) -> dict:
    """
    Insert a new line at a specific position in a file.

@@ -148,27 +182,38 @@ def insert_line_at_position(filepath: str, line_number: int, new_content: str, d
            return {"status": "error", "error": "File does not exist"}
        if db_conn:
            from rp.tools.database import db_get

            read_status = db_get("read:" + path, db_conn)
            if read_status.get("status") != "success" or read_status.get("value") != "true":
                return {"status": "error", "error": "File must be read before writing. Please read the file first."}
        with open(path, 'r') as file:
                return {
                    "status": "error",
                    "error": "File must be read before writing. Please read the file first.",
                }
        with open(path, "r") as file:
            lines = file.readlines()
        old_content = ''.join(lines)
        old_content = "".join(lines)
        insert_index = min(line_number - 1, len(lines))
        lines.insert(insert_index, new_content + '\n' if not new_content.endswith('\n') else new_content)
        new_full_content = ''.join(lines)
        with open(path, 'w') as file:
        lines.insert(
            insert_index, new_content + "\n" if not new_content.endswith("\n") else new_content
        )
        new_full_content = "".join(lines)
        with open(path, "w") as file:
            file.writelines(lines)
        if show_diff:
            diff_result = display_content_diff(old_content, new_full_content, filepath)
            if diff_result["status"] == "success":
                print(diff_result["visual_diff"])
        return {"status": "success", "message": f"Inserted line at position {line_number} in {path}"}
        return {
            "status": "success",
            "message": f"Inserted line at position {line_number} in {path}",
        }
    except Exception as e:
        return {"status": "error", "error": str(e)}
def delete_specific_line(filepath: str, line_number: int, db_conn: Optional[Any] = None, show_diff: bool = True) -> dict:
def delete_specific_line(
    filepath: str, line_number: int, db_conn: Optional[Any] = None, show_diff: bool = True
) -> dict:
    """
    Delete a specific line from a file.

@@ -195,18 +240,25 @@ def delete_specific_line(filepath: str, line_number: int, db_conn: Optional[Any]
            return {"status": "error", "error": "File does not exist"}
        if db_conn:
            from rp.tools.database import db_get

            read_status = db_get("read:" + path, db_conn)
            if read_status.get("status") != "success" or read_status.get("value") != "true":
                return {"status": "error", "error": "File must be read before writing. Please read the file first."}
        with open(path, 'r') as file:
                return {
                    "status": "error",
                    "error": "File must be read before writing. Please read the file first.",
                }
        with open(path, "r") as file:
            lines = file.readlines()
        total_lines = len(lines)
        if line_number < 1 or line_number > total_lines:
            return {"status": "error", "error": f"Line number {line_number} is out of range. File has {total_lines} lines."}
        old_content = ''.join(lines)
            return {
                "status": "error",
                "error": f"Line number {line_number} is out of range. File has {total_lines} lines.",
            }
        old_content = "".join(lines)
        del lines[line_number - 1]
        new_full_content = ''.join(lines)
        with open(path, 'w') as file:
        new_full_content = "".join(lines)
        with open(path, "w") as file:
            file.writelines(lines)
        if show_diff:
            diff_result = display_content_diff(old_content, new_full_content, filepath)
@@ -230,8 +282,16 @@ def read_file(filepath: str, db_conn: Optional[Any] = None) -> dict:
    """
    try:
        path = os.path.expanduser(filepath)
        with open(path) as f:
        mime_type, _ = mimetypes.guess_type(str(path))
        if mime_type and (
            mime_type.startswith("text/") or mime_type in ["application/json", "application/xml"]
        ):
            with open(path, encoding="utf-8", errors="replace") as f:
                content = f.read()
        else:
            with open(path, "rb") as f:
                binary_content = f.read()
            content = f"data:{mime_type if mime_type else 'application/octet-stream'};base64,{base64.b64encode(binary_content).decode('utf-8')}"
        if db_conn:
            from rp.tools.database import db_set
@@ -270,22 +330,53 @@ def write_file(
                "status": "error",
                "error": "File must be read before writing. Please read the file first.",
            }

        write_mode = "w"
        write_encoding = "utf-8"
        decoded_content = content

        if content.startswith("data:"):
            parts = content.split(",", 1)
            if len(parts) == 2:
                header = parts[0]
                encoded_data = parts[1]
                if ";base64" in header:
                    try:
                        decoded_content = base64.b64decode(encoded_data)
                        write_mode = "wb"
                        write_encoding = None
                    except Exception:
                        pass  # Not a valid base64, treat as plain text

        if not is_new_file:
            with open(path) as f:
            if write_mode == "wb":
                with open(path, "rb") as f:
                    old_content = f.read()
            else:
                with open(path, encoding="utf-8", errors="replace") as f:
                    old_content = f.read()

        operation = track_edit("WRITE", filepath, content=content, old_content=old_content)
        tracker.mark_in_progress(operation)
        if show_diff and (not is_new_file):

        if show_diff and (not is_new_file) and write_mode == "w":  # Only show diff for text files
            diff_result = display_content_diff(old_content, content, filepath)
            if diff_result["status"] == "success":
                print(diff_result["visual_diff"])
        editor = RPEditor(path)
        editor.set_text(content)
        editor.save_file()

        if write_mode == "wb":
            with open(path, write_mode) as f:
                f.write(decoded_content)
        else:
            with open(path, write_mode, encoding=write_encoding) as f:
                f.write(decoded_content)

        if os.path.exists(path) and db_conn:
            try:
                cursor = db_conn.cursor()
                file_hash = hashlib.md5(old_content.encode()).hexdigest()
                file_hash = hashlib.md5(
                    old_content.encode() if isinstance(old_content, str) else old_content
                ).hexdigest()
                cursor.execute(
                    "SELECT MAX(version) FROM file_versions WHERE filepath = ?", (filepath,)
                )

@@ -293,14 +384,24 @@ def write_file(
                version = result[0] + 1 if result[0] else 1
                cursor.execute(
                    "INSERT INTO file_versions (filepath, content, hash, timestamp, version)\n VALUES (?, ?, ?, ?, ?)",
                    (filepath, old_content, file_hash, time.time(), version),
                    (
                        filepath,
                        (
                            old_content
                            if isinstance(old_content, str)
                            else old_content.decode("utf-8", errors="replace")
                        ),
                        file_hash,
                        time.time(),
                        version,
                    ),
                )
                db_conn.commit()
            except Exception:
                pass
        tracker.mark_completed(operation)
        message = f"File written to {path}"
        if show_diff and (not is_new_file):
        if show_diff and (not is_new_file) and write_mode == "w":
            stats = get_diff_stats(old_content, content)
            message += f" ({stats['insertions']}+ {stats['deletions']}-)"
        return {"status": "success", "message": message}
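A minimal sketch of the new data: URI branch in write_file, reduced to its decode-and-write core; the 1x1 GIF payload and output path are illustrative only:

```python
# Replicates the branch above: a base64 data: URI is decoded and written in
# binary mode, while ordinary strings keep the UTF-8 text path.
import base64

payload = base64.b64encode(b"GIF89a\x01\x00\x01\x00").decode()
content = f"data:image/gif;base64,{payload}"

write_mode, write_encoding, decoded_content = "w", "utf-8", content
if content.startswith("data:"):
    header, _, encoded_data = content.partition(",")
    if ";base64" in header and encoded_data:
        decoded_content = base64.b64decode(encoded_data)  # bytes from here on
        write_mode, write_encoding = "wb", None

with open("/tmp/pixel.gif", write_mode, encoding=write_encoding) as f:
    f.write(decoded_content)
```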
@@ -428,6 +529,18 @@ def search_replace(
        path = os.path.expanduser(filepath)
        if not os.path.exists(path):
            return {"status": "error", "error": "File does not exist"}
        mime_type, _ = mimetypes.guess_type(str(path))
        if not (
            mime_type
            and (
                mime_type.startswith("text/")
                or mime_type in ["application/json", "application/xml"]
            )
        ):
            return {
                "status": "error",
                "error": f"Cannot perform search and replace on binary file: {filepath}",
            }
        if db_conn:
            from rp.tools.database import db_get

@@ -437,7 +550,7 @@ def search_replace(
                "status": "error",
                "error": "File must be read before writing. Please read the file first.",
            }
        with open(path) as f:
        with open(path, encoding="utf-8", errors="replace") as f:
            content = f.read()
        content = content.replace(old_string, new_string)
        with open(path, "w") as f:
@@ -483,6 +596,15 @@ def editor_insert_text(filepath, text, line=None, col=None, show_diff=True, db_c
    operation = None
    try:
        path = os.path.expanduser(filepath)
        mime_type, _ = mimetypes.guess_type(str(path))
        if not (
            mime_type
            and (
                mime_type.startswith("text/")
                or mime_type in ["application/json", "application/xml"]
            )
        ):
            return {"status": "error", "error": f"Cannot insert text into binary file: {filepath}"}
        if db_conn:
            from rp.tools.database import db_get

@@ -494,7 +616,7 @@ def editor_insert_text(filepath, text, line=None, col=None, show_diff=True, db_c
            }
        old_content = ""
        if os.path.exists(path):
            with open(path) as f:
            with open(path, encoding="utf-8", errors="replace") as f:
                old_content = f.read()
        position = (line if line is not None else 0) * 1000 + (col if col is not None else 0)
        operation = track_edit("INSERT", filepath, start_pos=position, content=text)

@@ -524,6 +646,15 @@ def editor_replace_text(
    try:
        operation = None
        path = os.path.expanduser(filepath)
        mime_type, _ = mimetypes.guess_type(str(path))
        if not (
            mime_type
            and (
                mime_type.startswith("text/")
                or mime_type in ["application/json", "application/xml"]
            )
        ):
            return {"status": "error", "error": f"Cannot replace text in binary file: {filepath}"}
        if db_conn:
            from rp.tools.database import db_get

@@ -535,7 +666,7 @@ def editor_replace_text(
            }
        old_content = ""
        if os.path.exists(path):
            with open(path) as f:
            with open(path, encoding="utf-8", errors="replace") as f:
                old_content = f.read()
        start_pos = start_line * 1000 + start_col
        end_pos = end_line * 1000 + end_col

@@ -582,4 +713,3 @@ def clear_edit_tracker():

    clear_tracker()
    return {"status": "success", "message": "Edit tracker cleared"}
@@ -1,6 +1,6 @@
import importlib
import subprocess
import threading
import importlib


def _get_multiplexer_functions():

@@ -1,4 +1,4 @@
from typing import Dict, Any
from typing import Any, Dict


def get_diagnostics(filepath: str) -> Dict[str, Any]:

@@ -2,6 +2,7 @@ import os
import time
import uuid
from typing import Any, Dict

from rp.memory.knowledge_store import KnowledgeEntry, KnowledgeStore
@@ -1,7 +1,9 @@
import difflib
import mimetypes
import os
import subprocess
import tempfile

from ..ui.diff_display import display_diff, get_diff_stats


@@ -61,7 +63,27 @@ def create_diff(
    try:
        path1 = os.path.expanduser(file1)
        path2 = os.path.expanduser(file2)
        with open(path1) as f1, open(path2) as f2:
        mime_type1, _ = mimetypes.guess_type(str(path1))
        mime_type2, _ = mimetypes.guess_type(str(path2))
        if not (
            mime_type1
            and (
                mime_type1.startswith("text/")
                or mime_type1 in ["application/json", "application/xml"]
            )
        ):
            return {"status": "error", "error": f"Cannot create diff for binary file: {file1}"}
        if not (
            mime_type2
            and (
                mime_type2.startswith("text/")
                or mime_type2 in ["application/json", "application/xml"]
            )
        ):
            return {"status": "error", "error": f"Cannot create diff for binary file: {file2}"}
        with open(path1, encoding="utf-8", errors="replace") as f1, open(
            path2, encoding="utf-8", errors="replace"
        ) as f2:
            content1 = f1.read()
            content2 = f2.read()
        if visual:

@@ -91,9 +113,27 @@ def display_file_diff(filepath1, filepath2, format_type="unified", context_lines
    try:
        path1 = os.path.expanduser(filepath1)
        path2 = os.path.expanduser(filepath2)
        with open(path1) as f1:
        mime_type1, _ = mimetypes.guess_type(str(path1))
        mime_type2, _ = mimetypes.guess_type(str(path2))
        if not (
            mime_type1
            and (
                mime_type1.startswith("text/")
                or mime_type1 in ["application/json", "application/xml"]
            )
        ):
            return {"status": "error", "error": f"Cannot display diff for binary file: {filepath1}"}
        if not (
            mime_type2
            and (
                mime_type2.startswith("text/")
                or mime_type2 in ["application/json", "application/xml"]
            )
        ):
            return {"status": "error", "error": f"Cannot display diff for binary file: {filepath2}"}
        with open(path1, encoding="utf-8", errors="replace") as f1:
            old_content = f1.read()
        with open(path2) as f2:
        with open(path2, encoding="utf-8", errors="replace") as f2:
            new_content = f2.read()
        visual_diff = display_diff(old_content, new_content, filepath1, format_type)
        stats = get_diff_stats(old_content, new_content)
@@ -1,7 +1,7 @@
import glob
import os
from typing import List
import re
from typing import List


def glob_files(pattern: str, path: str = ".") -> dict:

@@ -1,6 +1,7 @@
from rp.vision import post_image as vision_post_image
import functools

from rp.vision import post_image as vision_post_image


@functools.lru_cache()
def post_image(path: str, prompt: str = None):
rp/tools/web.py: 154 lines changed

@@ -1,15 +1,50 @@
import json
import urllib.error
import urllib.parse
import urllib.request
import imghdr
import random
import requests
from typing import Optional, Dict, Any

# Realistic User-Agents
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/91.0.864.59",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (iPad; CPU OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1",
    "Mozilla/5.0 (Android 11; Mobile; rv:68.0) Gecko/68.0 Firefox/88.0",
]
import json
import urllib.parse
import urllib.request


def get_default_headers():
    """Get default realistic headers with variations."""
    accept_languages = [
        "en-US,en;q=0.5",
        "en-US,en;q=0.9",
        "en-GB,en;q=0.5",
        "en-US,en;q=0.5;fr;q=0.3",
    ]
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": random.choice(accept_languages),
        "Accept-Encoding": "gzip, deflate, br",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1",
    }
    # Sometimes add Cache-Control
    if random.random() < 0.3:
        headers["Cache-Control"] = "no-cache"
    # Sometimes add Referer
    if random.random() < 0.2:
        headers["Referer"] = "https://www.google.com/"
    return headers
def http_fetch(url, headers=None):
def http_fetch(url: str, headers: Optional[Dict[str, str]] = None) -> Dict[str, Any]:
    """Fetch content from an HTTP URL.

    Args:
@ -20,29 +55,97 @@ def http_fetch(url, headers=None):
        Dict with status and content.
    """
    try:
        request = urllib.request.Request(url)
        default_headers = get_default_headers()
        if headers:
            for header_key, header_value in headers.items():
                request.add_header(header_key, header_value)
        with urllib.request.urlopen(request) as response:
            content = response.read().decode("utf-8")
            default_headers.update(headers)

        response = requests.get(url, headers=default_headers, timeout=30)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

        content_type = response.headers.get("Content-Type", "").lower()
        if "text" in content_type or "json" in content_type or "xml" in content_type:
            content = response.text
            return {"status": "success", "content": content[:10000]}
    except Exception as exception:
        return {"status": "error", "error": str(exception)}
        else:
            content = response.content
            return {"status": "success", "content": content}
    except requests.exceptions.RequestException as e:
        return {"status": "error", "error": str(e)}

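The rewritten helper now returns {"status": "success", "content": ...} with text bodies capped at 10,000 characters and binary bodies returned as raw bytes, or {"status": "error", "error": ...} on any RequestException. A usage sketch (the import path is an assumption, not shown in this diff):

# from rp.tools.web import http_fetch  # hypothetical module path

result = http_fetch("https://example.com", headers={"Accept": "text/html"})
if result["status"] == "success":
    print(result["content"][:200])
else:
    print("fetch failed:", result["error"])
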
def _perform_search(base_url, query, params=None):
def download_to_file(
    source_url: str, destination_path: str, headers: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
    """Download content from an HTTP URL to a file.

    Args:
        source_url: The URL to download from.
        destination_path: The path to save the downloaded content.
        headers: Optional HTTP headers.

    Returns:
        Dict with status, downloaded_from, and downloaded_to on success, or status and error on failure.

    This function can be used for binary files like images as well.
    """
    try:
        encoded_query = urllib.parse.quote(query)
        full_url = f"{base_url}?query={encoded_query}"
        with urllib.request.urlopen(full_url) as response:
            content = response.read().decode("utf-8")
            return {"status": "success", "content": json.loads(content)}
    except Exception as exception:
        return {"status": "error", "error": str(exception)}
        default_headers = get_default_headers()
        if headers:
            default_headers.update(headers)

        response = requests.get(source_url, headers=default_headers, stream=True, timeout=60)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

        with open(destination_path, "wb") as file:
            for chunk in response.iter_content(chunk_size=8192):
                file.write(chunk)

        content_type = response.headers.get("Content-Type", "").lower()
        if content_type.startswith("image/"):
            img_type = imghdr.what(destination_path)
            if img_type is None:
                return {
                    "status": "success",
                    "downloaded_from": source_url,
                    "downloaded_to": destination_path,
                    "is_valid_image": False,
                    "warning": "Downloaded content is not a valid image, consider finding a different source.",
                }
            else:
                return {
                    "status": "success",
                    "downloaded_from": source_url,
                    "downloaded_to": destination_path,
                    "is_valid_image": True,
                }
        else:
            return {
                "status": "success",
                "downloaded_from": source_url,
                "downloaded_to": destination_path,
            }
    except requests.exceptions.RequestException as e:
        return {"status": "error", "error": str(e)}

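download_to_file streams the body to disk in 8 KiB chunks and, for image/* responses, verifies the saved file with imghdr before reporting is_valid_image. Two caveats worth flagging: the imghdr import is not visible in this hunk, and the stdlib imghdr module was removed in Python 3.13 (the project requires >=3.10, so this works on current targets but will need a replacement eventually). A usage sketch with example URL and path:

# Illustrative values only.
result = download_to_file("https://example.com/logo.png", "/tmp/logo.png")
if result["status"] == "success" and not result.get("is_valid_image", True):
    print(result["warning"])  # saved to disk, but not a decodable image
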
def web_search(query):
def _perform_search(
    base_url: str, query: str, params: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
    try:
        default_headers = get_default_headers()
        search_params = {"query": query}
        if params:
            search_params.update(params)

        response = requests.get(base_url, headers=default_headers, params=search_params, timeout=30)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)

        return {"status": "success", "content": response.json()}
    except requests.exceptions.RequestException as e:
        return {"status": "error", "error": str(e)}

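Dropping the hand-built f"{base_url}?query={encoded_query}" URL in favor of requests' params argument also delegates URL encoding to the library. For example (illustrative query):

# requests.get("https://search.molodetz.nl/search", params={"query": "modern python 3.12"})
# sends: GET /search?query=modern+python+3.12
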
def web_search(query: str) -> Dict[str, Any]:
    """Perform a web search.

    Args:
@ -55,7 +158,7 @@ def web_search(query):
    return _perform_search(base_url, query)


def web_search_news(query):
def web_search_news(query: str) -> Dict[str, Any]:
    """Perform a web search for news.

    Args:
@ -66,4 +169,3 @@ def web_search_news(query):
    """
    base_url = "https://search.molodetz.nl/search"
    return _perform_search(base_url, query)

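Both wrappers now funnel through _perform_search and surface the endpoint's parsed JSON under "content". A usage sketch (the search response schema is not shown in this diff, so it is treated as opaque):

result = web_search("aiohttp tutorial")
if result["status"] == "success":
    data = result["content"]  # parsed JSON from the search endpoint
else:
    print("search failed:", result["error"])
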
@ -1,5 +1,5 @@
import time
import threading
import time


class Colors:

@ -1,5 +1,6 @@
import difflib
from typing import Dict, List, Optional, Tuple

from .colors import Colors


@ -1,5 +1,6 @@
from datetime import datetime
from typing import Dict, List, Optional

from .colors import Colors
from .progress import ProgressBar


@ -1,4 +1,5 @@
import re

from rp.config import LANGUAGE_KEYWORDS
from rp.ui.colors import Colors

29
rp/vision.py
@ -1,9 +1,7 @@
import http.client
import argparse
import base64
import json
import http.client
import pathlib
import requests

DEFAULT_URL = "https://static.molodetz.nl/rp.vision.cgi"

@ -12,25 +10,24 @@ def post_image(image_path: str, prompt: str = "", url: str = DEFAULT_URL):
    image_path = str(pathlib.Path(image_path).resolve().absolute())
    if not url:
        url = DEFAULT_URL
    url_parts = url.split("/")
    host = url_parts[2]
    path = "/" + "/".join(url_parts[3:])

    with open(image_path, "rb") as file:
        image_data = file.read()
    base64_data = base64.b64encode(image_data).decode("utf-8")

    payload = {"data": base64_data, "path": image_path, "prompt": prompt}
    body = json.dumps(payload).encode("utf-8")
    headers = {
        "Content-Type": "application/json",
        "Content-Length": str(len(body)),
        "User-Agent": "Python http.client",
        "User-Agent": "Python requests",
    }
    conn = http.client.HTTPSConnection(host)
    conn.request("POST", path, body, headers)
    resp = conn.getresponse()
    data = resp.read()
    print("Status:", resp.status, resp.reason)
    print(data.decode())

    try:
        response = requests.post(url, json=payload, headers=headers, timeout=60)
        response.raise_for_status()  # Raise HTTPError for bad responses (4xx or 5xx)
        print("Status:", response.status_code, response.reason)
        print(response.text)
    except requests.exceptions.RequestException as e:
        print(f"Error posting image: {e}")


if __name__ == "__main__":
@ -39,4 +36,4 @@ if __name__ == "__main__":
    parser.add_argument("--prompt", default="")
    parser.add_argument("--url", default=DEFAULT_URL)
    args = parser.parse_args()
    post_image(args.url, args.image_path, args.prompt)
    post_image(args.image_path, args.prompt, args.url)

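Beyond the http.client-to-requests migration, the final hunk fixes a genuine bug: the CLI previously passed args.url where image_path was expected. Invocation now matches the signature (the file path below is an example; running via `python -m rp.vision` is an assumption based on the file living at rp/vision.py):

# python -m rp.vision photo.jpg --prompt "describe this image"
# equivalent direct call:
# post_image("photo.jpg", prompt="describe this image", url=DEFAULT_URL)
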
@ -2,6 +2,7 @@ import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, Dict, List, Optional

from .workflow_definition import ExecutionMode, Workflow, WorkflowStep


@ -2,6 +2,7 @@ import json
import sqlite3
import time
from typing import List, Optional

from .workflow_definition import Workflow


@ -1,5 +1,5 @@
import unittest
import json
import unittest
from unittest.mock import patch

from rp.core.api import call_api, list_models

@ -1,28 +1,29 @@
from unittest.mock import Mock, patch

from rp.commands.handlers import (
    handle_command,
    review_file,
    refactor_file,
    obfuscate_file,
    show_workflows,
    execute_workflow_command,
    execute_agent_task,
    show_agents,
    collaborate_agents_command,
    search_knowledge,
    store_knowledge,
    show_conversation_history,
    show_cache_stats,
    clear_caches,
    show_system_stats,
    collaborate_agents_command,
    execute_agent_task,
    execute_workflow_command,
    handle_background_command,
    start_background_session,
    list_background_sessions,
    show_session_status,
    show_session_output,
    send_session_input,
    handle_command,
    kill_background_session,
    list_background_sessions,
    obfuscate_file,
    refactor_file,
    review_file,
    search_knowledge,
    send_session_input,
    show_agents,
    show_background_events,
    show_cache_stats,
    show_conversation_history,
    show_session_output,
    show_session_status,
    show_system_stats,
    show_workflows,
    start_background_session,
    store_knowledge,
)


@ -1,6 +1,6 @@
import os
import sqlite3
import tempfile
import os
import time

from rp.memory.conversation_memory import ConversationMemory
@ -337,8 +337,8 @@ class TestConversationMemory:

    def test_thread_safety(self):
        """Test that the memory can handle concurrent access."""
        import threading
        import queue
        import threading

        results = queue.Queue()


@ -1,15 +1,16 @@
import pytest

from rp.core.exceptions import (
    PRException,
    APIException,
    APIConnectionError,
    APITimeoutError,
    APIException,
    APIResponseError,
    APITimeoutError,
    ConfigurationError,
    ToolExecutionError,
    FileSystemError,
    SessionError,
    ContextError,
    FileSystemError,
    PRException,
    SessionError,
    ToolExecutionError,
    ValidationError,
)


@ -1,10 +1,10 @@
from rp.commands.help_docs import (
    get_workflow_help,
    get_agent_help,
    get_knowledge_help,
    get_cache_help,
    get_background_help,
    get_cache_help,
    get_full_help,
    get_knowledge_help,
    get_workflow_help,
)


@ -1,9 +1,9 @@
import os
import sqlite3
import tempfile
import os
import time

from rp.memory.knowledge_store import KnowledgeStore, KnowledgeEntry
from rp.memory.knowledge_store import KnowledgeEntry, KnowledgeStore


class TestKnowledgeStore:
@ -284,8 +284,8 @@ class TestKnowledgeStore:

    def test_thread_safety(self):
        """Test that the store can handle concurrent access."""
        import threading
        import queue
        import threading

        results = queue.Queue()


@ -1,5 +1,6 @@
from unittest.mock import patch, MagicMock
from rp.core.logging import setup_logging, get_logger
from unittest.mock import MagicMock, patch

from rp.core.logging import get_logger, setup_logging


class TestLogging:

@ -1,13 +1,14 @@
from unittest.mock import Mock, patch

from rp.commands.multiplexer_commands import (
    show_sessions,
    attach_session,
    detach_session,
    kill_session,
    list_waiting_sessions,
    send_command,
    show_session_log,
    show_session_status,
    list_waiting_sessions,
    show_sessions,
)


@ -1,5 +1,7 @@
import math

import pytest

from rp.memory.semantic_index import SemanticIndex


@ -1,5 +1,6 @@
import json
from unittest.mock import patch

from rp.ui.output import OutputFormatter