import json
import logging
import time
from typing import Any, Callable, Dict, List, Optional
from .artifacts import ArtifactGenerator
from .model_selector import ModelSelector
from .models import (
Artifact,
ArtifactType,
ExecutionContext,
ExecutionStats,
ExecutionStatus,
Phase,
PhaseType,
ProjectPlan,
TaskIntent,
)
from .monitor import ExecutionMonitor, ProgressTracker
from .orchestrator import ToolOrchestrator
from .planner import ProjectPlanner
logger = logging.getLogger("rp")
class LabsExecutor:
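    """End-to-end task executor.

    Turns a natural-language task into a project plan, runs the plan's phases
    through the tool orchestrator, generates requested artifacts, and records
    progress, cost, and statistics via the execution monitor.
    """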
def __init__(
self,
tool_executor: Callable[[str, Dict[str, Any]], Any],
api_caller: Optional[Callable] = None,
db_path: Optional[str] = None,
output_dir: str = "/tmp/artifacts",
verbose: bool = False,
):
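        """Wire together the planning, orchestration, artifact, and monitoring components.

        Args:
            tool_executor: Callable invoked as ``tool_executor(tool_name, arguments)``
                to run a single tool.
            api_caller: Optional callable used for model API requests.
            db_path: Optional database path for the execution monitor.
            output_dir: Directory where generated artifacts are written.
            verbose: When True, events are logged and printed as they occur.
        """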
self.tool_executor = tool_executor
self.api_caller = api_caller
self.verbose = verbose
self.planner = ProjectPlanner()
self.orchestrator = ToolOrchestrator(
tool_executor=tool_executor,
max_workers=5,
max_retries=3
)
self.model_selector = ModelSelector()
self.artifact_generator = ArtifactGenerator(output_dir=output_dir)
self.monitor = ExecutionMonitor(db_path=db_path)
self.callbacks: List[Callable] = []
self._setup_internal_callbacks()
def _setup_internal_callbacks(self):
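        """Attach a callback to the orchestrator and monitor that logs their
        events (when verbose) and forwards them to user-registered callbacks."""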
def log_callback(event_type: str, data: Dict[str, Any]):
if self.verbose:
logger.info(f"[{event_type}] {json.dumps(data, default=str)[:200]}")
for callback in self.callbacks:
try:
callback(event_type, data)
except Exception as e:
logger.warning(f"Callback error: {e}")
self.orchestrator.add_callback(log_callback)
self.monitor.add_callback(log_callback)
def add_callback(self, callback: Callable):
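        """Register an observer for execution events.

        The callable is invoked as ``callback(event_type, data)``; exceptions it
        raises are logged or swallowed rather than propagated. For example::

            executor.add_callback(lambda event_type, data: print(event_type))
        """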
self.callbacks.append(callback)
def execute(
self,
task: str,
initial_context: Optional[Dict[str, Any]] = None,
max_duration: int = 600,
max_cost: float = 1.0,
) -> Dict[str, Any]:
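        """Plan and execute a task end to end.

        The task is parsed into an intent, expanded into a project plan, and the
        plan's phases are run in dependency order while the ``max_duration`` and
        ``max_cost`` budgets are enforced. Returns the compiled result dictionary
        produced by ``_compile_result``.
        """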
start_time = time.time()
self._notify("task_received", {"task": task[:200]})
intent = self.planner.parse_request(task)
self._notify("intent_parsed", {
"task_type": intent.task_type,
"complexity": intent.complexity,
"tools": list(intent.required_tools)[:10],
"confidence": intent.confidence
})
plan = self.planner.create_plan(intent)
plan.constraints["max_duration"] = max_duration
plan.constraints["max_cost"] = max_cost
self._notify("plan_created", {
"plan_id": plan.plan_id,
"phases": len(plan.phases),
"estimated_cost": plan.estimated_cost,
"estimated_duration": plan.estimated_duration
})
context = ExecutionContext(
plan=plan,
global_context=initial_context or {"original_task": task}
)
self.monitor.start_execution(context)
progress = ProgressTracker(
total_phases=len(plan.phases),
callback=lambda evt, data: self._notify(f"progress_{evt}", data)
)
try:
for phase in self._get_execution_order(plan):
if time.time() - start_time > max_duration:
self._notify("timeout_warning", {"elapsed": time.time() - start_time})
break
if context.total_cost > max_cost:
self._notify("cost_limit_warning", {"current_cost": context.total_cost})
break
progress.start_phase(phase.name)
self._notify("phase_starting", {
"phase_id": phase.phase_id,
"name": phase.name,
"type": phase.phase_type.value
})
                # Select a model for this phase; the selection is not consumed
                # directly in this loop, and model usage is reported via get_statistics().
                model_choice = self.model_selector.select_model_for_phase(phase, context.global_context)
if phase.phase_type == PhaseType.ARTIFACT and intent.artifact_type:
result = self._execute_artifact_phase(phase, context, intent)
else:
result = self.orchestrator._execute_phase(phase, context)
context.phase_results[phase.phase_id] = result
context.total_cost += result.cost
if result.outputs:
context.global_context.update(result.outputs)
progress.complete_phase(phase.name)
self._notify("phase_completed", {
"phase_id": phase.phase_id,
"status": result.status.value,
"duration": result.duration,
"cost": result.cost
})
if result.status == ExecutionStatus.FAILED:
for error in result.errors:
self._notify("phase_error", {"phase": phase.name, "error": error})
context.completed_at = time.time()
plan.status = ExecutionStatus.COMPLETED
except Exception as e:
logger.error(f"Execution error: {e}")
plan.status = ExecutionStatus.FAILED
context.completed_at = time.time()
self._notify("execution_error", {"error": str(e)})
stats = self.monitor.complete_execution(context)
result = self._compile_result(context, stats, intent)
self._notify("execution_complete", {
"plan_id": plan.plan_id,
"status": plan.status.value,
"total_cost": stats.total_cost,
"total_duration": stats.total_duration,
"effectiveness": stats.effectiveness_score
})
return result
def _get_execution_order(self, plan: ProjectPlan) -> List[Phase]:
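        """Return the plan's phases topologically sorted by their dependencies."""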
from .orchestrator import TopologicalSorter
return TopologicalSorter.sort(plan.phases, plan.dependencies)
def _execute_artifact_phase(
self,
phase: Phase,
context: ExecutionContext,
intent: TaskIntent
) -> Any:
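        """Generate the requested artifact from accumulated phase outputs and
        return a PhaseResult describing the outcome."""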
from .models import PhaseResult
phase.status = ExecutionStatus.RUNNING
phase.started_at = time.time()
result = PhaseResult(phase_id=phase.phase_id, status=ExecutionStatus.RUNNING)
try:
artifact_data = self._gather_artifact_data(context)
artifact = self.artifact_generator.generate(
artifact_type=intent.artifact_type,
data=artifact_data,
title=self._generate_artifact_title(intent),
context=context.global_context
)
result.outputs["artifact"] = {
"artifact_id": artifact.artifact_id,
"type": artifact.artifact_type.value,
"title": artifact.title,
"file_path": artifact.file_path,
"content_preview": artifact.content[:500] if artifact.content else ""
}
result.status = ExecutionStatus.COMPLETED
context.global_context["generated_artifact"] = artifact
except Exception as e:
result.status = ExecutionStatus.FAILED
result.errors.append(str(e))
logger.error(f"Artifact generation error: {e}")
phase.completed_at = time.time()
result.duration = phase.completed_at - phase.started_at
result.cost = 0.02
return result
def _gather_artifact_data(self, context: ExecutionContext) -> Dict[str, Any]:
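        """Collect per-phase outputs plus any raw data and insights from the
        shared context as input for artifact generation."""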
data = {}
for phase_id, result in context.phase_results.items():
if result.outputs:
data[phase_id] = result.outputs
if "raw_data" in context.global_context:
data["data"] = context.global_context["raw_data"]
if "insights" in context.global_context:
data["findings"] = context.global_context["insights"]
return data
def _generate_artifact_title(self, intent: TaskIntent) -> str:
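        """Build a short title from the intent's objective and artifact type."""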
words = intent.objective.split()[:5]
title = " ".join(words)
if intent.artifact_type:
title = f"{intent.artifact_type.value.title()}: {title}"
return title
def _compile_result(
self,
context: ExecutionContext,
stats: ExecutionStats,
intent: TaskIntent
) -> Dict[str, Any]:
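        """Flatten the execution into a plain dictionary of status, statistics,
        per-phase summaries, artifacts, errors, and shared outputs."""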
result = {
"status": context.plan.status.value,
"plan_id": context.plan.plan_id,
"objective": context.plan.objective,
"execution_stats": {
"total_cost": stats.total_cost,
"total_duration": stats.total_duration,
"phases_completed": stats.phases_completed,
"phases_failed": stats.phases_failed,
"tools_called": stats.tools_called,
"effectiveness_score": stats.effectiveness_score
},
"phase_results": {},
"outputs": {},
"artifacts": [],
"errors": []
}
for phase_id, phase_result in context.phase_results.items():
phase = context.plan.get_phase(phase_id)
result["phase_results"][phase_id] = {
"name": phase.name if phase else phase_id,
"status": phase_result.status.value,
"duration": phase_result.duration,
"cost": phase_result.cost,
"outputs": list(phase_result.outputs.keys())
}
if phase_result.errors:
result["errors"].extend(phase_result.errors)
if "artifact" in phase_result.outputs:
result["artifacts"].append(phase_result.outputs["artifact"])
if "generated_artifact" in context.global_context:
artifact = context.global_context["generated_artifact"]
result["primary_artifact"] = {
"type": artifact.artifact_type.value,
"title": artifact.title,
"file_path": artifact.file_path
}
result["outputs"] = {
k: v for k, v in context.global_context.items()
if k not in ["original_task", "generated_artifact"]
}
return result
def _notify(self, event_type: str, data: Dict[str, Any]):
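        """Send an event to stdout (when verbose) and to every registered
        callback; callback exceptions are silently ignored."""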
if self.verbose:
print(f"[{event_type}] {json.dumps(data, default=str)[:100]}")
for callback in self.callbacks:
try:
callback(event_type, data)
except Exception:
pass
def execute_simple(self, task: str) -> str:
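        """Execute a task and return a one-line, human-readable summary."""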
result = self.execute(task)
if result["status"] == "completed":
            summary_parts = ["Task completed successfully."]
summary_parts.append(f"Cost: ${result['execution_stats']['total_cost']:.4f}")
summary_parts.append(f"Duration: {result['execution_stats']['total_duration']:.1f}s")
if result.get("primary_artifact"):
artifact = result["primary_artifact"]
summary_parts.append(f"Generated {artifact['type']}: {artifact['file_path']}")
if result.get("errors"):
summary_parts.append(f"Warnings: {len(result['errors'])}")
return " | ".join(summary_parts)
else:
errors = result.get("errors", ["Unknown error"])
return f"Task failed: {'; '.join(errors[:3])}"
def get_statistics(self) -> Dict[str, Any]:
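        """Return aggregated monitor, model-usage, and cost-breakdown statistics."""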
return {
"monitor": self.monitor.get_statistics(),
"model_usage": self.model_selector.get_usage_statistics(),
"cost_breakdown": self.monitor.get_cost_breakdown()
}
def generate_artifact(
self,
artifact_type: ArtifactType,
data: Dict[str, Any],
title: str = "Generated Artifact"
) -> Artifact:
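        """Generate a single artifact directly, without planning or phase execution."""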
return self.artifact_generator.generate(artifact_type, data, title)
def create_labs_executor(
assistant,
output_dir: str = "/tmp/artifacts",
verbose: bool = False
) -> LabsExecutor:
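    """Build a LabsExecutor bound to an assistant instance.

    Tool calls are dispatched through ``rp.autonomous.mode.execute_single_tool``
    and API calls go through ``rp.core.api.call_api`` using the assistant's
    model, URL, key, and tool settings.
    """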
from rp.config import DB_PATH
def tool_executor(tool_name: str, arguments: Dict[str, Any]) -> Dict[str, Any]:
from rp.autonomous.mode import execute_single_tool
return execute_single_tool(assistant, tool_name, arguments)
    def api_caller(messages, **kwargs):
        # Extra keyword arguments are accepted for interface compatibility but
        # are not forwarded to call_api.
from rp.core.api import call_api
from rp.tools import get_tools_definition
return call_api(
messages,
assistant.model,
assistant.api_url,
assistant.api_key,
assistant.use_tools,
get_tools_definition(),
verbose=assistant.verbose
)
return LabsExecutor(
tool_executor=tool_executor,
api_caller=api_caller,
db_path=DB_PATH,
output_dir=output_dir,
verbose=verbose
)
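# Example usage (a minimal sketch, assuming an `assistant` object that exposes
# the attributes referenced above: model, api_url, api_key, use_tools, verbose):
#
#     executor = create_labs_executor(assistant, output_dir="/tmp/artifacts", verbose=True)
#     executor.add_callback(lambda event_type, data: print(f"[{event_type}]"))
#     summary = executor.execute_simple("Analyze the sales data and produce a report")
#     print(summary)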